diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 54a9d69bdc..655ffe289e 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -33,117 +33,18 @@ body: id: _version attributes: label: Version - description: What version are you running? Look to OpenPype Tray + description: What version are you running? Look to AYON Tray options: - - 3.18.7-nightly.1 - - 3.18.6 - - 3.18.6-nightly.2 - - 3.18.6-nightly.1 - - 3.18.5 - - 3.18.5-nightly.3 - - 3.18.5-nightly.2 - - 3.18.5-nightly.1 - - 3.18.4 - - 3.18.4-nightly.1 - - 3.18.3 - - 3.18.3-nightly.2 - - 3.18.3-nightly.1 - - 3.18.2 - - 3.18.2-nightly.6 - - 3.18.2-nightly.5 - - 3.18.2-nightly.4 - - 3.18.2-nightly.3 - - 3.18.2-nightly.2 - - 3.18.2-nightly.1 - - 3.18.1 - - 3.18.1-nightly.1 - - 3.18.0 - - 3.17.7 - - 3.17.7-nightly.7 - - 3.17.7-nightly.6 - - 3.17.7-nightly.5 - - 3.17.7-nightly.4 - - 3.17.7-nightly.3 - - 3.17.7-nightly.2 - - 3.17.7-nightly.1 - - 3.17.6 - - 3.17.6-nightly.3 - - 3.17.6-nightly.2 - - 3.17.6-nightly.1 - - 3.17.5 - - 3.17.5-nightly.3 - - 3.17.5-nightly.2 - - 3.17.5-nightly.1 - - 3.17.4 - - 3.17.4-nightly.2 - - 3.17.4-nightly.1 - - 3.17.3 - - 3.17.3-nightly.2 - - 3.17.3-nightly.1 - - 3.17.2 - - 3.17.2-nightly.4 - - 3.17.2-nightly.3 - - 3.17.2-nightly.2 - - 3.17.2-nightly.1 - - 3.17.1 - - 3.17.1-nightly.3 - - 3.17.1-nightly.2 - - 3.17.1-nightly.1 - - 3.17.0 - - 3.16.7 - - 3.16.7-nightly.2 - - 3.16.7-nightly.1 - - 3.16.6 - - 3.16.6-nightly.1 - - 3.16.5 - - 3.16.5-nightly.5 - - 3.16.5-nightly.4 - - 3.16.5-nightly.3 - - 3.16.5-nightly.2 - - 3.16.5-nightly.1 - - 3.16.4 - - 3.16.4-nightly.3 - - 3.16.4-nightly.2 - - 3.16.4-nightly.1 - - 3.16.3 - - 3.16.3-nightly.5 - - 3.16.3-nightly.4 - - 3.16.3-nightly.3 - - 3.16.3-nightly.2 - - 3.16.3-nightly.1 - - 3.16.2 - - 3.16.2-nightly.2 - - 3.16.2-nightly.1 - - 3.16.1 - - 3.16.0 - - 3.16.0-nightly.2 - - 3.16.0-nightly.1 - - 3.15.12 - - 3.15.12-nightly.4 - - 3.15.12-nightly.3 - - 3.15.12-nightly.2 - - 3.15.12-nightly.1 - - 3.15.11 - - 3.15.11-nightly.5 - - 3.15.11-nightly.4 - - 3.15.11-nightly.3 - - 3.15.11-nightly.2 - - 3.15.11-nightly.1 - - 3.15.10 - - 3.15.10-nightly.2 - - 3.15.10-nightly.1 - - 3.15.9 - - 3.15.9-nightly.2 - - 3.15.9-nightly.1 + - 1.0.0 validations: required: true - type: dropdown validations: required: true attributes: - label: What platform you are running OpenPype on? + label: What platform you are running on? description: | - Please specify the operating systems you are running OpenPype with. + Please specify the operating systems you are using. 
multiple: true options: - Windows diff --git a/.github/pr-glob-labeler.yml b/.github/pr-glob-labeler.yml deleted file mode 100644 index 286e7768b5..0000000000 --- a/.github/pr-glob-labeler.yml +++ /dev/null @@ -1,102 +0,0 @@ -# Add type: unittest label if any changes in tests folders -'type: unittest': -- '*/*tests*/**/*' - -# any changes in documentation structure -'type: documentation': -- '*/**/*website*/**/*' -- '*/**/*docs*/**/*' - -# hosts triage -'host: Nuke': -- '*/**/*nuke*' -- '*/**/*nuke*/**/*' - -'host: Photoshop': -- '*/**/*photoshop*' -- '*/**/*photoshop*/**/*' - -'host: Harmony': -- '*/**/*harmony*' -- '*/**/*harmony*/**/*' - -'host: UE': -- '*/**/*unreal*' -- '*/**/*unreal*/**/*' - -'host: Houdini': -- '*/**/*houdini*' -- '*/**/*houdini*/**/*' - -'host: Maya': -- '*/**/*maya*' -- '*/**/*maya*/**/*' - -'host: Resolve': -- '*/**/*resolve*' -- '*/**/*resolve*/**/*' - -'host: Blender': -- '*/**/*blender*' -- '*/**/*blender*/**/*' - -'host: Hiero': -- '*/**/*hiero*' -- '*/**/*hiero*/**/*' - -'host: Fusion': -- '*/**/*fusion*' -- '*/**/*fusion*/**/*' - -'host: Flame': -- '*/**/*flame*' -- '*/**/*flame*/**/*' - -'host: TrayPublisher': -- '*/**/*traypublisher*' -- '*/**/*traypublisher*/**/*' - -'host: 3dsmax': -- '*/**/*max*' -- '*/**/*max*/**/*' - -'host: TV Paint': -- '*/**/*tvpaint*' -- '*/**/*tvpaint*/**/*' - -'host: CelAction': -- '*/**/*celaction*' -- '*/**/*celaction*/**/*' - -'host: After Effects': -- '*/**/*aftereffects*' -- '*/**/*aftereffects*/**/*' - -'host: Substance Painter': -- '*/**/*substancepainter*' -- '*/**/*substancepainter*/**/*' - -# modules triage -'module: Deadline': -- '*/**/*deadline*' -- '*/**/*deadline*/**/*' - -'module: RoyalRender': -- '*/**/*royalrender*' -- '*/**/*royalrender*/**/*' - -'module: Sitesync': -- '*/**/*sync_server*' -- '*/**/*sync_server*/**/*' - -'module: Ftrack': -- '*/**/*ftrack*' -- '*/**/*ftrack*/**/*' - -'module: Shotgrid': -- '*/**/*shotgrid*' -- '*/**/*shotgrid*/**/*' - -'module: Kitsu': -- '*/**/*kitsu*' -- '*/**/*kitsu*/**/*' diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml deleted file mode 100644 index f2e7d1058f..0000000000 --- a/.github/workflows/documentation.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: ๐Ÿ“œ Documentation - -on: - pull_request: - branches: [develop] - types: [review_requested, ready_for_review] - paths: - - 'website/**' - push: - branches: [main] - paths: - - 'website/**' - workflow_dispatch: - -jobs: - check-build: - if: github.event_name != 'push' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-node@v1 - with: - node-version: 14.x - cache: yarn - - name: Test Build - run: | - cd website - if [ -e yarn.lock ]; then - yarn install --frozen-lockfile - elif [ -e package-lock.json ]; then - npm ci - else - npm i - fi - npm run build - deploy-website: - if: github.event_name != 'pull_request' - runs-on: ubuntu-latest - steps: - - name: ๐Ÿšš Get latest code - uses: actions/checkout@v2 - - - uses: actions/setup-node@v1 - with: - node-version: 14.x - cache: yarn - - name: ๐Ÿ”จ Build - run: | - cd website - if [ -e yarn.lock ]; then - yarn install --frozen-lockfile - elif [ -e package-lock.json ]; then - npm ci - else - npm i - fi - npm run build - - - name: ๐Ÿ“‚ Sync files - uses: SamKirkland/FTP-Deploy-Action@4.0.0 - with: - server: ftp.openpype.io - username: ${{ secrets.ftp_user }} - password: ${{ secrets.ftp_password }} - local-dir: ./website/build/ \ No newline at end of file diff --git a/.github/workflows/milestone_assign.yml 
b/.github/workflows/milestone_assign.yml deleted file mode 100644 index df4625c225..0000000000 --- a/.github/workflows/milestone_assign.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: ๐Ÿ‘‰๐Ÿป Milestone - assign to PRs - -on: - pull_request_target: - types: [closed] - -jobs: - run_if_release: - if: startsWith(github.base_ref, 'release/') - runs-on: ubuntu-latest - steps: - - name: 'Assign Milestone [next-minor]' - if: github.event.pull_request.milestone == null - uses: zoispag/action-assign-milestone@v1 - with: - repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}" - milestone: 'next-minor' - - run_if_develop: - if: ${{ github.base_ref == 'develop' }} - runs-on: ubuntu-latest - steps: - - name: 'Assign Milestone [next-patch]' - if: github.event.pull_request.milestone == null - uses: zoispag/action-assign-milestone@v1 - with: - repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}" - milestone: 'next-patch' diff --git a/.github/workflows/milestone_create.yml b/.github/workflows/milestone_create.yml deleted file mode 100644 index 437c9e31b4..0000000000 --- a/.github/workflows/milestone_create.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: โž• Milestone - create default - -on: - milestone: - types: [closed, edited] - -jobs: - generate-next-patch: - runs-on: ubuntu-latest - steps: - - name: 'Get Milestones' - uses: "WyriHaximus/github-action-get-milestones@master" - id: milestones - env: - GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" - - - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number') - id: querymilestone - env: - MILESTONES: ${{ steps.milestones.outputs.milestones }} - MILESTONE: "next-patch" - - - name: Read output - run: | - echo "${{ steps.querymilestone.outputs.number }}" - - - name: 'Create `next-patch` milestone' - if: steps.querymilestone.outputs.number == '' - id: createmilestone - uses: "WyriHaximus/github-action-create-milestone@v1" - with: - title: 'next-patch' - env: - GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" - - generate-next-minor: - runs-on: ubuntu-latest - steps: - - name: 'Get Milestones' - uses: "WyriHaximus/github-action-get-milestones@master" - id: milestones - env: - GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" - - - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number') - id: querymilestone - env: - MILESTONES: ${{ steps.milestones.outputs.milestones }} - MILESTONE: "next-minor" - - - name: Read output - run: | - echo "${{ steps.querymilestone.outputs.number }}" - - - name: 'Create `next-minor` milestone' - if: steps.querymilestone.outputs.number == '' - id: createmilestone - uses: "WyriHaximus/github-action-create-milestone@v1" - with: - title: 'next-minor' - env: - GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" diff --git a/.github/workflows/miletone_release_trigger.yml b/.github/workflows/miletone_release_trigger.yml deleted file mode 100644 index d755f7eb9f..0000000000 --- a/.github/workflows/miletone_release_trigger.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: ๐Ÿšฉ Milestone Release [trigger] - -on: - workflow_dispatch: - inputs: - milestone: - required: true - milestone: - types: closed - - -jobs: - milestone-title: - runs-on: ubuntu-latest - outputs: - milestone: ${{ steps.milestoneTitle.outputs.value }} - steps: - - name: Switch input milestone - uses: haya14busa/action-cond@v1 - id: milestoneTitle - with: - cond: ${{ inputs.milestone == '' }} - if_true: ${{ github.event.milestone.title }} - if_false: 
${{ inputs.milestone }} - - name: Print resulted milestone - run: | - echo "${{ steps.milestoneTitle.outputs.value }}" - - call-ci-tools-milestone-release: - needs: milestone-title - uses: ynput/ci-tools/.github/workflows/milestone_release_ref.yml@main - with: - milestone: ${{ needs.milestone-title.outputs.milestone }} - repo-owner: ${{ github.event.repository.owner.login }} - repo-name: ${{ github.event.repository.name }} - version-py-path: "./openpype/version.py" - pyproject-path: "./pyproject.toml" - secrets: - token: ${{ secrets.YNPUT_BOT_TOKEN }} - user_email: ${{ secrets.CI_EMAIL }} - user_name: ${{ secrets.CI_USER }} - cu_api_key: ${{ secrets.CLICKUP_API_KEY }} - cu_team_id: ${{ secrets.CLICKUP_TEAM_ID }} - cu_field_id: ${{ secrets.CLICKUP_RELEASE_FIELD_ID }} diff --git a/.github/workflows/nightly_merge.yml b/.github/workflows/nightly_merge.yml deleted file mode 100644 index 3f8c75dce3..0000000000 --- a/.github/workflows/nightly_merge.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: ๐Ÿ”€ Dev -> Main - -on: - schedule: - - cron: '21 3 * * 3,6' - workflow_dispatch: - -jobs: - develop-to-main: - - runs-on: ubuntu-latest - - steps: - - name: ๐Ÿš› Checkout Code - uses: actions/checkout@v2 - - - name: ๐Ÿ”จ Merge develop to main - uses: everlytic/branch-merge@1.1.0 - with: - github_token: ${{ secrets.YNPUT_BOT_TOKEN }} - source_ref: 'develop' - target_branch: 'main' - commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' - - - name: Invoke pre-release workflow - uses: benc-uk/workflow-dispatch@v1 - with: - workflow: prerelease.yml - token: ${{ secrets.YNPUT_BOT_TOKEN }} diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml deleted file mode 100644 index 8c5c733c08..0000000000 --- a/.github/workflows/prerelease.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: โณ Nightly Prerelease - -on: - workflow_dispatch: - - -jobs: - create_nightly: - runs-on: ubuntu-latest - - steps: - - name: ๐Ÿš› Checkout Code - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: 3.9 - - - name: Install Python requirements - run: pip install gitpython semver PyGithub - - - name: ๐Ÿ”Ž Determine next version type - id: version_type - run: | - TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }}) - echo "type=${TYPE}" >> $GITHUB_OUTPUT - - - name: ๐Ÿ’‰ Inject new version into files - id: version - if: steps.version_type.outputs.type != 'skip' - run: | - NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }}) - echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT - - - name: ๐Ÿ’พ Commit and Tag - id: git_commit - if: steps.version_type.outputs.type != 'skip' - run: | - git config user.email ${{ secrets.CI_EMAIL }} - git config user.name ${{ secrets.CI_USER }} - git checkout main - git pull - git add . 
- git commit -m "[Automated] Bump version" - tag_name="CI/${{ steps.version.outputs.next_tag }}" - echo $tag_name - git tag -a $tag_name -m "nightly build" - - - name: Push to protected main branch - uses: CasperWA/push-protected@v2.10.0 - with: - token: ${{ secrets.YNPUT_BOT_TOKEN }} - branch: main - tags: true - unprotect_reviews: true - - - name: ๐Ÿ”จ Merge main back to develop - uses: everlytic/branch-merge@1.1.0 - if: steps.version_type.outputs.type != 'skip' - with: - github_token: ${{ secrets.YNPUT_BOT_TOKEN }} - source_ref: 'main' - target_branch: 'develop' - commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' - - - name: Invoke Update bug report workflow - uses: benc-uk/workflow-dispatch@v1 - with: - workflow: update_bug_report.yml - token: ${{ secrets.YNPUT_BOT_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/test_build.yml b/.github/workflows/test_build.yml deleted file mode 100644 index fd8e0e642d..0000000000 --- a/.github/workflows/test_build.yml +++ /dev/null @@ -1,66 +0,0 @@ -# This workflow will upload a Python Package using Twine when a release is created -# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries - -name: ๐Ÿ—๏ธ Test Build - -on: - pull_request: - branches: [develop] - types: [review_requested, ready_for_review] - paths-ignore: - - 'docs/**' - - 'website/**' - - 'vendor/**' - -jobs: - Windows-latest: - - runs-on: windows-latest - strategy: - matrix: - python-version: [3.9] - - steps: - - name: ๐Ÿš› Checkout Code - uses: actions/checkout@v2 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: ๐Ÿงต Install Requirements - shell: pwsh - run: | - ./tools/create_env.ps1 - - - name: ๐Ÿ”จ Build - shell: pwsh - run: | - $env:SKIP_THIRD_PARTY_VALIDATION="1" - ./tools/build.ps1 - - Ubuntu-latest: - - runs-on: ubuntu-latest - strategy: - matrix: - python-version: [3.9] - - steps: - - name: ๐Ÿš› Checkout Code - uses: actions/checkout@v2 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: ๐Ÿงต Install Requirements - run: | - ./tools/create_env.sh - - - name: ๐Ÿ”จ Build - run: | - export SKIP_THIRD_PARTY_VALIDATION="1" - ./tools/build.sh diff --git a/.gitignore b/.gitignore index 622d55fb88..502cf85b9f 100644 --- a/.gitignore +++ b/.gitignore @@ -32,22 +32,14 @@ Network Trash Folder Temporary Items .apdisk - -# CX_Freeze +# Package dirs ########### -/build -/dist/ /server_addon/packages/* +/package/* -/vendor/bin/* -/vendor/python/* /.venv /venv/ -# Documentation -############### -/docs/build - # Editor backup files # ####################### *~ @@ -74,9 +66,6 @@ package-lock.json package.json yarn.lock -openpype/premiere/ppro/js/debug.log - - # IDEA ###### .idea/ @@ -85,37 +74,12 @@ openpype/premiere/ppro/js/debug.log .vscode/ .env dump.sql -test_localsystem.txt - -# website -########## -website/translated_docs -website/build/ -website/node_modules -website/i18n/* - -website/debug.log - -website/.docusaurus # Poetry ######## - -.poetry/ .python-version .editorconfig .pre-commit-config.yaml mypy.ini -tools/run_eventserver.* - -# Developer tools -tools/dev_* - .github_changelog_generator - - -# Addons -######## -/openpype/addons/* -!/openpype/addons/README.md diff --git a/.gitmodules b/.gitmodules index 4de92471f7..95c8647d45 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,10 +1,3 @@ 
-[submodule "tools/modules/powershell/BurntToast"] - path = tools/modules/powershell/BurntToast - url = https://github.com/Windos/BurntToast.git - -[submodule "tools/modules/powershell/PSWriteColor"] - path = tools/modules/powershell/PSWriteColor - url = https://github.com/EvotecIT/PSWriteColor.git -[submodule "openpype/hosts/unreal/integration"] - path = openpype/hosts/unreal/integration +[submodule "client/ayon_core/hosts/unreal/integration"] + path = client/ayon_core/hosts/unreal/integration url = https://github.com/ynput/ayon-unreal-plugin.git diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md deleted file mode 100644 index 912780d803..0000000000 --- a/ARCHITECTURE.md +++ /dev/null @@ -1,77 +0,0 @@ -# Architecture - -OpenPype is a monolithic Python project that bundles several parts, this document will try to give a birds eye overview of the project and, to a certain degree, each of the sub-projects. -The current file structure looks like this: - -``` -. -โ”œโ”€โ”€ common - Code in this folder is backend portion of Addon distribution logic for v4 server. -โ”œโ”€โ”€ docs - Documentation of the source code. -โ”œโ”€โ”€ igniter - The OpenPype bootstrapper, deals with running version resolution and setting up the connection to the mongodb. -โ”œโ”€โ”€ openpype - The actual OpenPype core package. -โ”œโ”€โ”€ schema - Collection of JSON files describing schematics of objects. This follows Avalon's convention. -โ”œโ”€โ”€ tests - Integration and unit tests. -โ”œโ”€โ”€ tools - Conveninece scripts to perform common actions (in both bash and ps1). -โ”œโ”€โ”€ vendor - When using the igniter, it deploys third party tools in here, such as ffmpeg. -โ””โ”€โ”€ website - Source files for https://openpype.io/ which is Docusaursus (https://docusaurus.io/). -``` - -The core functionality of the pipeline can be found in `igniter` and `openpype`, which in turn rely on the `schema` files, whenever you build (or download a pre-built) version of OpenPype, these two are bundled in there, and `Igniter` is the entry point. - - -## Igniter - -It's the setup and update tool for OpenPype, unless you want to package `openpype` separately and deal with all the config manually, this will most likely be your entry point. - -``` -igniter/ -โ”œโ”€โ”€ bootstrap_repos.py - Module that will find or install OpenPype versions in the system. -โ”œโ”€โ”€ __init__.py - Igniter entry point. -โ”œโ”€โ”€ install_dialog.py- Show dialog for choosing central pype repository. -โ”œโ”€โ”€ install_thread.py - Threading helpers for the install process. -โ”œโ”€โ”€ __main__.py - Like `__init__.py` ? -โ”œโ”€โ”€ message_dialog.py - Qt Dialog with a message and "Ok" button. -โ”œโ”€โ”€ nice_progress_bar.py - Fancy Qt progress bar. -โ”œโ”€โ”€ splash.txt - ASCII art for the terminal installer. -โ”œโ”€โ”€ stylesheet.css - Installer Qt styles. -โ”œโ”€โ”€ terminal_splash.py - Terminal installer animation, relies in `splash.txt`. -โ”œโ”€โ”€ tools.py - Collection of methods that don't fit in other modules. -โ”œโ”€โ”€ update_thread.py - Threading helper to update existing OpenPype installs. -โ”œโ”€โ”€ update_window.py - Qt UI to update OpenPype installs. -โ”œโ”€โ”€ user_settings.py - Interface for the OpenPype user settings. -โ””โ”€โ”€ version.py - Igniter's version number. 
-``` - -## OpenPype - -This is the main package of the OpenPype logic; it could be loosely described as a combination of [Avalon](https://getavalon.github.io), [Pyblish](https://pyblish.com/) and glue around those with custom OpenPype-only elements. Things are in the progress of being moved around to better prepare for V4, which will be released under a new name, AYON. - -``` -openpype/ -├── client - Interface for the MongoDB. -├── hooks - Hooks to be executed on certain OpenPype Applications defined in `openpype.lib.applications`. -├── host - Base class for the different hosts. -├── hosts - Integration with the different DCCs (hosts) using the `host` base class. -├── lib - Libraries that stitch together the package, some have been moved into other parts. -├── modules - OpenPype modules should contain separated logic of a specific kind of implementation, such as the Ftrack connection and its python API. -├── pipeline - Core of the OpenPype pipeline, handles creation of data, publishing, etc. -├── plugins - Global/core plugins for loader and publisher tool. -├── resources - Icons, fonts, etc. -├── scripts - Loose scripts that get run by tools/publishers. -├── settings - OpenPype settings interface. -├── style - Qt styling. -├── tests - Unit tests. -├── tools - Core tools, check out https://openpype.io/docs/artist_tools. -├── vendor - Vendoring of required Python packages. -├── widgets - Common re-usable Qt Widgets. -├── action.py - LEGACY: Lives now in `openpype.pipeline.publish.action` Pyblish actions. -├── cli.py - Command line interface, leverages `click`. -├── __init__.py - Sets two constants. -├── __main__.py - Entry point, calls the `cli.py` -├── plugin.py - Pyblish plugins. -├── pype_commands.py - Implementation of OpenPype commands. -└── version.py - Current version number. -``` - - - diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 009150ae7d..0000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,18614 +0,0 @@ -# Changelog - - -## [3.18.6](https://github.com/ynput/OpenPype/tree/3.18.6) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.5...3.18.6) - -### **🚀 Enhancements** - - -
-AYON: Use `SettingsField` from ayon server #6173 - -This is preparation for a new version of pydantic, which will require customizing the field class for AYON purposes, as the raw pydantic Field could not be used. - - -___ - -
- - -
-Nuke: Expose write knobs - OP-7592 #6137 - -This PR adds `exposed_knobs` to the creator plugins settings at `ayon+settings://nuke/create/CreateWriteRender/exposed_knobs`. Exposed knobs are linked from the write node to the outside publish group, so users can adjust them. - - -___ - -
- - -
-AYON: Remove kitsu addon #6172 - -Removed the kitsu addon from server addons because it already has its own repository. - - -___ - -
- -### **🐛 Bug fixes** - -
-Fusion: provide better logging for validate saver crash due type error #6082 - -Handles the reported `NoneType` error thrown in the conversion `int(tool["Comments"][frame])`. It most likely happens when the saver node has no input connections. There is a validator for that, but it might not be obvious that this error is caused by missing input connections; it is already reported by `"Validate Saver Has Input"`. - - -___ - -
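A minimal sketch of the kind of defensive check involved, assuming the fix boils down to guarding the conversion before calling `int()`; `get_frame_comment` is an illustrative name, not the actual plugin code:

```python
# Hypothetical helper; `tool` stands in for the Fusion saver tool and
# `frame` for the frame number inspected by the real validator.
def get_frame_comment(tool, frame):
    value = tool["Comments"][frame]
    if value is None:
        # Usually means the saver has no input connections, which the
        # "Validate Saver Has Input" validator reports explicitly.
        raise ValueError(
            "Saver has no comment value on frame {}; check that the saver "
            "node has input connections.".format(frame)
        )
    return int(value)
```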
- - -
-Workfile Template Builder: Use correct variable in create placeholder #6141 - -Use correct variable where failed instances are stored for validation. - - -___ - -
- - -
-ExtractOIIOTranscode: Missing product_names to subsets conversion #6159 - -The `Product Names` filtering should be fixed with this. - - -___ - -
- - -
-Blender: Fix missing animation data when updating blend assets #6165 - -Fix missing animation data when updating blend assets. - - -___ - -
- - -
-TrayPublisher: Pre-fill of version works in AYON #6180 - -Use `folderPath` instead of `asset` in AYON mode to calculate next available version. - - -___ - -
- -### **🔀 Refactored code** - -
-Chore: remove Muster #6085 - -Muster hasn't been maintained for a long time and it wasn't working anyway. This removes the related code from the code base. If there is renewed interest in Muster, it needs to be re-implemented in a modern, AYON-compatible way. - - -___ - -
- -### **Merged pull requests** - - -
-Maya: change label in the render settings to be more readable #6134 - -AYON replacement for #5713. - - -___ - -
- - - - -## [3.18.5](https://github.com/ynput/OpenPype/tree/3.18.5) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.4...3.18.5) - -### **๐Ÿš€ Enhancements** - - -
-Chore: Add addons dir only if exists #6140 - -Do not add the addons directory path for addon discovery if it does not exist. - - -___ - -
- - -
-Hiero: Effect Categories - OP-7397 #6143 - -This PR introduces `Effect Categories` for the Hiero settings. This allows studios to split effect stacks into meaningful subsets. - - -___ - -
- - -
-Nuke: Render Workfile Attributes #6146 - -The `Workfile Dependency` default value can now be controlled from project settings. `Use Published Workfile` makes using published workfiles for rendering optional. - - -___ - -
- -### **🐛 Bug fixes** - -
-Maya: Attributes are locked after publishing if they are locked in Camera Family #6073 - -This PR makes sure attributes are unlocked only during the bake context and relocked afterwards, preserving the lock state of the original node being baked. - - -___ - -
- - -
-Missing nuke family Windows arguments #6131 - -Default Windows arguments for launching the Nuke family were missing. - - -___ - -
- - -
-AYON: Fix the bug on the limit group not being set correctly in Maya Deadline Setting #6139 - -This PR fixes the limit groups in the Maya Deadline settings erroring out when the user tries to edit the setting. - - -___ - -
- - -
-Chore: Transcoding extensions add missing '.tif' extension #6142 - -The image extensions list in the transcoding helper was missing the `.tif` extension and had `.tiff` twice. - - -___ - -
- - -
-Blender: Use the new API for override context #6145 - -Blender 4.0 disabled the old API for overriding context. This PR updates the code to use the new API. - - -___ - -
- - -
-BugFix: Include Model in FBX Loader in Houdini #6150 - -A quick bugfix: FBX files exported from Blender could not be loaded. The bug was reported here. - - -___ - -
- - -
-Blender: Restore actions to objects after update #6153 - -Restore the actions assigned to objects after updating assets from blend files. - - -___ - -
- - -
-Chore: Collect template data with hierarchy context #6154 - -Fixed a queue loop where the wrong variable was used to pop items from the queue. - - -___ - -
- - -
-OP-6382 - Thumbnail Integration Problem #6156 - -This ticket alerted to 3 different cases of integration issues; -- [x] Using the Tray Publisher with the same image format (extension) for representation and review representation. -- [x] Clash on publish file path from output definitions in `ExtractOIIOTranscode`. -- [x] Clash on publish file from thumbnail in `ExtractThumbnail`. There might be an issue with this fix if a studio does not use the `{output}` token in their `render` anatomy template, but if they have customized it, they are responsible for maintaining these edge cases. - - -___ - -
- - -
-Max: Bugfix saving camera scene errored out when creating render instance with multi-camera option turned off #6163 - -This PR makes sure the integrator that saves the camera scene is turned off and the render is submitted successfully when the multi-camera option is turned off in 3dsMax. - - -___ - -
- - -
-Chore: Fix duplicated project name on create project structure #6166 - -Small fix in project folder creation: the same variable name was reused to change values, which broke the values on every following loop iteration. - - -___ - -
- -### **Merged pull requests** - - -
-Maya: Remove duplicate plugin #6157 - -The two plugins below are doing the same work, so we can remove the one focused solely on lookdev. -https://github.com/ynput/OpenPype/blob/develop/openpype/hosts/maya/plugins/publish/validate_look_members_unique.py -https://github.com/ynput/OpenPype/blob/develop/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py - - -___ - -
- - -
-Publish report viewer: Report items sorting #6092 - -Proposal of items sorting in Publish report viewer tool. Items are sorted by report creation time. Creation time is also added to publish report data when saved from publisher tool. - - -___ - -
- - -
-Maya: Extended error message #6161 - -Added more details to message - - -___ - -
- - -
-Fusion: Added settings for Fusion creators to legacy OP #6162 - -Added missing OP variant of setting for new Fusion creator. - - -___ - -
- - - - -## [3.18.4](https://github.com/ynput/OpenPype/tree/3.18.4) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.3...3.18.4) - -### **๐Ÿš€ Enhancements** - - -
-multiple render camera supports for 3dsmax #5124 - -Support for rendering with multiple cameras in 3dsMax: -- [x] Add Batch Render Layers functions -- [x] Rewrite lib.rendersetting and lib.renderproduct -- [x] Add multi-camera options in creator. -- [x] Collector with batch render-layer when multi-camera enabled. -- [x] Add instance plugin for saving scene files with different cameras respectively by using subprocess -- [x] Refactor submit_max_deadline -- [x] Check with metadata.json in submit publish job - - -___ - -
- - -
-Fusion: new creator for image product type #6057 - -In many DCCs the `render` product type is expected to be a sequence of files. This PR adds a new explicit creator for the `image` product type, which is focused on single-frame images. Workflows for the two product types might differ a bit; this gives artists more granularity to choose the better workflow. - - -___ - -
- -### **🐛 Bug fixes** - -
-Maya: Account and ignore free image planes. #5993 - -Free image planes do not have the `->` path separator, so we need to account for that. - - -___ - -
- - -
-Blender: Fix long names for instances #6070 - -Changed naming for instances to use only final part of the `folderPath`. - - -___ - -
- - -
-Traypublisher & Chore: Instance version on follow workfile version #6117 - -If `follow_workfile_version` is enabled but context does not have filled workfile version, a version on instance is used instead. - - -___ - -
- - -
-Substance Painter: Thumbnail errors with PBR Texture Set #6127 - -When publishing with PBR Metallic Roughness as the Output Template, the Emissive Map errors out because of the missing channel in the material, and the map can't be generated in Substance Painter. This PR makes sure the image instance's `data["publish"]` is set to `False` so that the related "empty" texture instance is skipped when generating the output. - - -___ - -
- - -
-Transcoding: Fix reading image sequences through oiiotool #6129 - -When transcoding image sequences, the second image onwards includes the invalid XML line `Reading path/to/file.exr` from the oiiotool output. This is most likely not the best solution, but it fixes the issue and illustrates the problem. Error:
```
ERROR:pyblish.plugin:Traceback (most recent call last):
  File "C:\Users\tokejepsen\AppData\Local\Ynput\AYON\dependency_packages\ayon_2310271602_windows.zip\dependencies\pyblish\plugin.py", line 527, in __explicit_process
    runner(*args)
  File "C:\Users\tokejepsen\OpenPype\openpype\plugins\publish\extract_color_transcode.py", line 152, in process
  File "C:\Users\tokejepsen\OpenPype\openpype\lib\transcoding.py", line 1136, in convert_colorspace
    input_info = get_oiio_info_for_input(input_path, logger=logger)
  File "C:\Users\tokejepsen\OpenPype\openpype\lib\transcoding.py", line 124, in get_oiio_info_for_input
    output.append(parse_oiio_xml_output(xml_text, logger=logger))
  File "C:\Users\tokejepsen\OpenPype\openpype\lib\transcoding.py", line 276, in parse_oiio_xml_output
    tree = xml.etree.ElementTree.fromstring(xml_string)
  File "xml\etree\ElementTree.py", line 1347, in XML
xml.etree.ElementTree.ParseError: syntax error: line 1, column 0
Traceback (most recent call last):
  File "C:\Users\tokejepsen\AppData\Local\Ynput\AYON\dependency_packages\ayon_2310271602_windows.zip\dependencies\pyblish\plugin.py", line 527, in __explicit_process
    runner(*args)
  File "<string>", line 152, in process
  File "C:\Users\tokejepsen\OpenPype\openpype\lib\transcoding.py", line 1136, in convert_colorspace
    input_info = get_oiio_info_for_input(input_path, logger=logger)
  File "C:\Users\tokejepsen\OpenPype\openpype\lib\transcoding.py", line 124, in get_oiio_info_for_input
    output.append(parse_oiio_xml_output(xml_text, logger=logger))
  File "C:\Users\tokejepsen\OpenPype\openpype\lib\transcoding.py", line 276, in parse_oiio_xml_output
    tree = xml.etree.ElementTree.fromstring(xml_string)
  File "xml\etree\ElementTree.py", line 1347, in XML
xml.etree.ElementTree.ParseError: syntax error: line 1, column 0
```
 - - -___ - -
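A hedged sketch of the kind of workaround described, assuming the fix simply strips the non-XML chatter before parsing; `parse_oiio_xml` is an illustrative name, not the actual `parse_oiio_xml_output` implementation:

```python
import xml.etree.ElementTree


def parse_oiio_xml(xml_text):
    # oiiotool may prefix its XML output with chatter such as
    # "Reading path/to/file.exr"; keep only the first opening tag onward.
    start = xml_text.find("<")
    if start == -1:
        raise ValueError("No XML found in oiiotool output")
    return xml.etree.ElementTree.fromstring(xml_text[start:])
```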
- - -
-AYON: Remove 'IntegrateHeroVersion' conversion #6130 - -Remove settings conversion for `IntegrateHeroVersion`. - - -___ - -
- - -
-Chore tools: Make sure style object is not garbage collected #6136 - -Minor fix in tool utils to make sure style C++ object is not garbage collected when not stored into variable. - - -___ - -
- - - - -## [3.18.3](https://github.com/ynput/OpenPype/tree/3.18.3) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.2...3.18.3) - -### **๐Ÿš€ Enhancements** - - -
-Maya: Apply initial viewport shader for Redshift Proxy after loading #6102 - -When the published redshift proxy is being loaded, the shader of the proxy is missing. This is different from the manual load through creating redshift proxy for files. This PR is to assign the default lambert to the redshift proxy, which replicates the same approach when the user manually loads the proxy with filepath. - - -___ - -
- - -
-General: We should keep current subset version when we switch only the representation type #4629 - -When we switch only the representation type of subsets, we should not get the representation from the last version of the subset. - - -___ - -
- - -
-Houdini: Add loader for redshift proxy family #5948 - -Loader for Redshift Proxy in Houdini (Thanks for @BigRoy contribution) - - -___ - -
- - -
-AfterEffects: exposing Deadline pools fields in Publisher UI #6079 - -Deadline pools might be set ad hoc by an artist during publishing; the AfterEffects implementation wasn't providing this. - - -___ - -
- - -
-Chore: Event callbacks can have order #6080 - -Event callbacks can now define the order in which they are called; also fixed an issue with getting the function name and file when using a `partial` function as a callback. - - -___ - -
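A minimal sketch of order-aware callbacks, assuming a simple registry rather than the actual openpype events implementation:

```python
callbacks = []  # (order, callback) pairs


def add_callback(callback, order=0):
    callbacks.append((order, callback))


def emit(*args, **kwargs):
    # Lower order values run first; sorted() is stable, so callbacks with
    # equal orders keep their registration order.
    for _, callback in sorted(callbacks, key=lambda item: item[0]):
        callback(*args, **kwargs)
```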
- - -
-AYON: OpenPype addon defines runtime dependencies #6095 - -Moved runtime dependencies from ayon-launcher to openpype addon. - - -___ - -
- - -
-Max: User's setting for scene unit scale #6097 - -Options for users to set the default scene unit scale for their scenes (settings are available for both AYON and legacy OP). - - -___ - -
- - -
-Chore: Remove deprecated templates profiles #6103 - -Remove deprecated usage of template profiles from settings. - - -___ - -
- - -
-Publisher: Window is not always on top #6107 - -The goal of this PR is to avoid using `WindowStaysOnTopHint`, which causes issues, especially when the DCC shows a popup dialog that ends up behind the window; in that case both the Publisher and the DCC are frozen and there is nothing the user can do. - - -___ - -
- - -
-Houdini: add split job export support for Redshift ROP #6108 - -This is adding support for splitting of export and render jobs for Redshift as is already implemented for Vray, Mantra and Arnold. - - -___ - -
- - -
-Fusion: automatic installation of PySide2 #6111 - -This PR adds a hook which checks whether PySide2 is installed in the Python used by Fusion and, if not, tries to install it automatically. - - -___ - -
- - -
-AYON: OpenPype addon dependencies #6113 - -Added `click` and `six` to requirements of openpype addon, and removed `Qt.py` requirement, which is not used anywhere. - - -___ - -
- - -
-Chore: Thumbnail representation has 'outputName' #6114 - -Add thumbnail output name to thumbnail representation to prevent same output filename during integration. - - -___ - -
- - -
-Kitsu: Clear credentials is safe #6116 - -Do not remove not existing keyring items. - - -___ - -
- -### **🐛 Bug fixes** - -
-Maya: bug fix the playblast without textures #5942 - -Fixes the textures not being displayed when users enable texture placement in the OP/AYON settings. - - -___ - -
- - -
-Blender: Workfile instance update fix #6048 - -Make sure the workfile instance always has 'instance_node' available in its transient data. - - -___ - -
- - -
-Publisher: Fix issue with parenting of widgets #6106 - -Don't use publisher window parent (usually main DCC window) as parent for report widget. - - -___ - -
- - -
-:wrench: fix and update pydocstyle configuration #6109 - -Fix pydocstyle configuration and move it to `pyproject.toml` - - -___ - -
- - -
-Nuke: Create camera node with the latest camera node class in Nuke 14 #6118 - -Creating an instance fails for certain cameras, and the issue seems to exist only in Nuke 14. The cause is the new camera node class `Camera4`, while the camera creator was working with the `Camera2` class. - - -___ - -
- - -
-Site Sync: small fixes in Loader #6119 - -Resolves issue: -- local and studio icons were same, they should be different -- `TypeError: string indices must be integers` error when downloading/uploading workfiles - - -___ - -
- - -
-Chore: Template data for editorial publishing #6120 - -Template data for editorial publishing are filled during `CollectInstanceAnatomyData`. The structure for editorial is determined, as it's required for ExtractHierarchy AYON/OpenPype plugins. - - -___ - -
- - -
-SceneInventory: Fix site sync icon conversion #6123 - -Use 'get_qt_icon' to convert icon definitions from site sync. - - -___ - -
- - - - -## [3.18.2](https://github.com/ynput/OpenPype/tree/3.18.2) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.1...3.18.2) - -### **๐Ÿš€ Enhancements** - - -
-Testing: Release Maya/Deadline job from pending when testing. #5988 - -When testing we won't put the Deadline jobs into pending with dependencies, so the worker can start as soon as possible. - - -___ - -
- - -
-Max: Tweaks on Extractions for the exporters #5814 - -With this PR: -- Suspend Refresh is introduced in the abc & obj extractors for optimization. -- Users can choose the custom attributes to be included in abc exports. - - -___ - -
- - -
-Maya: Optional preserve references. #5994 - -Optional preserve references when publishing Maya scenes. - - -___ - -
- - -
-AYON ftrack: Expect 'ayon' group in custom attributes #6066 - -Expect `ayon` group as one of options to get custom attributes. - - -___ - -
- - -
-AYON Chore: Remove dependencies related to separated addons #6074 - -Removed dependencies from openpype client pyproject.toml that are already defined by addons which require them. - - -___ - -
- - -
-Editorial & chore: Stop using pathlib2 #6075 - -Do not use `pathlib2` which is Python 2 backport for `pathlib` module in python 3. - - -___ - -
- - -
-Traypublisher: Correct validator label #6084 - -Use correct label for Validate filepaths. - - -___ - -
- - -
-Nuke: Extract Review Intermediate disabled when both Extract Review Mov and Extract Review Intermediate disabled in setting #6089 - -Report in Discord https://discord.com/channels/517362899170230292/563751989075378201/1187874498234556477 - - -___ - -
- -### **🐛 Bug fixes** - -
-Maya: Bug fix the file from texture node not being collected correctly in Yeti Rig #5990 - -Fixes the bug of Collect Yeti Rig not being able to get the file parameter(s) from the texture node(s), resulting in the failure to publish the textures to the resource directory. - - -___ - -
- - -
-Bug: fix AYON settings for Maya workspace #6069 - -This fixes a bug in the default AYON settings for the Maya workspace, where a missing semicolon caused the workspace not to be set. This also syncs the default workspace settings to OpenPype. - - -___ - -
- - -
-Refactor colorspace handling in CollectColorspace plugin #6033 - -Traypublisher is now capable of setting available colorspaces or roles when publishing image sequences or videos. This fixes the new implementation where we allowed roles to be used in the enumerator selector. - - -___ - -
- - -
-Bugfix: Houdini render split bugs #6037 - -This PR is a follow-up to https://github.com/ynput/OpenPype/pull/5420 and does the following: -- refactor `get_output_parameter` to what it used to be. -- fix a bug with split render -- rename `exportJob` flag to `split_render` - - -___ - -
- - -
-Fusion: fix for single frame rendering #6056 - -Fixes publishes of single frame of `render` product type. - - -___ - -
- - -
-Photoshop: fix layer publish thumbnail missing in loader #6061 - -Thumbnails from any products (either `review` or separate layer instances) weren't stored in Ayon. This resulted in them not showing in the Loader and Server UI. After this PR thumbnails should be shown in the Loader and on the Server (`http://YOUR_AYON_HOSTNAME:5000/projects/YOUR_PROJECT/browser`). - - -___ - -
- - -
-AYON Chore: Do not use thumbnailSource for thumbnail integration #6063 - -Do not use `thumbnailSource` for thumbnail integration. - - -___ - -
- - -
-Photoshop: fix creation of .mov #6064 - -Generation of .mov file with 1 frame per published layer was failing. - - -___ - -
- - -
-Photoshop: fix Collect Color Coded settings #6065 - -Fix for wrong default value for `Collect Color Coded Instances` Settings - - -___ - -
- - -
-Bug: Fix Publisher parent window in Nuke #6067 - -Fixes an issue where the publisher parent window wasn't set because of wrong use of a version constant. - - -___ - -
- - -
-Python console widget: Save registry fix #6076 - -Do not save registry until there is something to save. - - -___ - -
- - -
-Ftrack: update asset names for multiple reviewable items #6077 - -Multiple reviewable assetVersion components with better grouping to asset version name. - - -___ - -
- - -
-Ftrack: DJV action fixes #6098 - -Fix bugs in DJV ftrack action. - - -___ - -
- - -
-AYON Workfiles tool: Fix arrow to timezone typo #6099 - -Fix parenthesis typo with arrow local timezone function. - - -___ - -
- -### **🔀 Refactored code** - -
-Chore: Update folder-favorite icon to ayon icon #5718 - -Updates the old "Pype-2.0-era" icon (from ancient Greece times) to its AYON logo equivalent. I believe it's only used in Nuke. - - -___ - -
- -### **Merged pull requests** - - -
-Chore: Maya / Nuke remove publish gui filters from settings #5570 - -- Remove Publish GUI Filters from Nuke settings -- Remove Publish GUI Filters from Maya settings - - -___ - -
- - -
-Fusion: Project/User option for output format (create_saver) #6045 - -Adds an "Output Image Format" option which can be set via project settings and overridden by users in the "Create" menu. This replaces the current behaviour of being hardcoded to "exr", removing the need for people to manually edit the saver path if they require a different extension. - - -___ - -
- - -
-Fusion: Output Image Format Updating Instances (create_saver) #6060 - -Adds the ability to update Saver image output format if changed in the Publish UI.~~Adds an optional validator that compares "Output Image Format" in the Publish menu against the one currently found on the saver. It then offers a repair action to update the output extension on the saver.~~ - - -___ - -
- - -
-Tests: Fix representation count for AE legacy test #6072 - - -___ - -
- - - - -## [3.18.1](https://github.com/ynput/OpenPype/tree/3.18.1) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.0...3.18.1) - -### **๐Ÿš€ Enhancements** - - -
-AYON: Update ayon api to 1.0.0-rc.3 #6052 - -Updated ayon python api to 1.0.0-rc.3. - - -___ - -
- - - - -## [3.18.0](https://github.com/ynput/OpenPype/tree/3.18.0) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/...3.18.0) - -### **๐Ÿ› Bug fixes** - - -
-Chore: Fix subst paths handling #5702 - -Make sure that source disk ends with `\` instead of destination disk. - - -___ - -
- - - - -## [3.17.7](https://github.com/ynput/OpenPype/tree/3.17.7) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.6...3.17.7) - -### **๐Ÿ†• New features** - - -
-AYON: Use folder path as unique identifier #5817 - -Use folder path instead of asset name as unique identifier, with OpenPype compatibility. - - -___ - -
- - -
-Houdini: Farm caching submission to Deadline #4903 - -Implements functionality to offload instances of specific families to be processed on Deadline instead of locally. This increases productivity, as the artist's local machine can be used for other tasks. Implemented for families: -- [x] ass -- [x] redshift proxy -- [x] ifd -- [x] abc -- [x] bgeo -- [x] vdb - - -___ - -
- - -
-Houdini: Add support to split Deadline render tasks in export + render #5420 - -This adds initial support in Houdini so that when submitting render jobs to Deadline, they don't run as a single Houdini task but rather get split into two different tasks: Export + Render. This way it's more efficient, as we only need a Houdini license during the export step and the render tasks can run exclusively with a render license. Moreover, we aren't wasting all the overhead time of opening the render scene in Houdini for every frame. I have also added the corresponding settings json files so we can set some of the default values for the Houdini Deadline submitter. - - -___ - -
- - -
-Wrap: new integration #5823 - -These modifications are necessary for adding the Wrap integration (a DCC handling scans and textures). - - -___ - -
- - -
-AYON: Prepare for 'data' via graphql #5923 - -The AYON server supports querying the 'data' field for hierarchy entities (project > ... > representation) using GraphQL since version 0.5.5. Because of this PR in ayon-python-api, the custom graphql function in `openpype.client` must be modified to support that option. - - -___ - -
- - -
-Chore AYON: AYON addon class #5937 - -Introduced base class for AYON addon in openpype modules discovery logic. - - -___ - -
- - -
-Asset Usage Reporter Tool #5946 - -This adds a simple tool for OpenPype mode that will go over all published workfiles and print linked assets and their versions. This is created per project and can be exported to a csv file or copied to the clipboard in _"ASCII human-readable form"_. - - -___ - -
- - -
-Testing: dump_databases flag #5955 - -This introduces a `dump_databases` flag which makes it convenient to output the resulting database of a successful test run. The flag supports two formats: `bson` and `json`. Because the dump is written to the test data folder, the test data folder will persist when dumping the databases. Split from https://github.com/ynput/OpenPype/pull/5644 - - -___ - -
- - -
-SiteSync: implemented in Ayon Loader #5962 - -Implemented `Availability` column in Ayon loader and redo of loaders to `ActionItems` in representation window there. - - -___ - -
- - -
-AYON: Workfile template build works #5975 - -Modified workfile template builder to work, to some degree, in AYON mode. - - -___ - -
- -### **🚀 Enhancements** - -
-Maya: Small Tweaks on Validator for Look Default Shader Connection for Maya 2024 #5957 - -Resolve https://github.com/ynput/OpenPype/issues/5269 - - -___ - -
- - -
-Settings: Changes in default settings #5983 - -We've made some changes in the default settings, as several application versions were obsolete (Maya 18, Nuke 11, PS 2020, etc). Also added tools and changed settings for Blender and Maya. - -All should work as usual. -___ - -
- - -
-Testing: Do not persist data by default in Maya/Deadline. #5987 - -This is similar to the Maya publishing test. - - -___ - -
- - -
-Max: Validate loaded plugins tweaks #5820 - -In the current development of 3dsMax, users need separate validators to check whether certain plugins are loaded before extraction, for example the usd extractor in the model family or the prt/tycache extractor in the pointcloud/tycache family. With this PR, which implements an optional loaded-plugins validator, users just list the plugins they want to validate in the settings. They no longer need to go through all the separate plugin validators when publishing; one validator does all the checks on the loaded plugins before extraction. - - -___ - -
- - -
-Nuke: Change context label enhancement #5887 - -Use a QAction to change the context label in the Nuke pipeline menu. - - -___ - -
- - -
-Chore: Do not use template data as source for context #5918 - -Use available information on context to receive context data instead of using `"anatomyData"` during publishing. - - -___ - -
- - -
-Houdini: Add python3.10 libs for Houdini 20 startup #5932 - -Add python3.10 libs for Houdini 20 startup - - -___ - -
- - -
-General: Use colorspace data when creating thumbnail #5938 - -Thumbnails with applied colormanagement. - - -___ - -
- - -
-Ftrack: rewriting component creation to support multiple thumbnails #5939 - -The creation of Ftrack components needs to allow for multiple thumbnails. This is important in situations where there could be several reviewable streams, like in the case of a Nuke intermediate files preset. Customers have asked for unique thumbnails for each data stream. For instance, one stream might contain a baked LUT file along with Display and View. Another stream might only include the baked Display and View. These variations can change the overall look. Thus, we found it necessary to depict these differences via thumbnails. - - -___ - -
- - -
-Chore: PySide6 tree view style #5940 - -Define solid color for background of branch in QTreeView. - - -___ - -
- - -
-Nuke: Explicit Thumbnail workflow #5941 - -Nuke made a shift from using its own plugin to a global one for thumbnail creation. This was because it had to handle several thumbnail workflows for baking intermediate data streams. To manage this, the global plugin had to be upgraded. Now, each baking stream can set a unique tag 'need_thumbnail'. This tag is used to mark representations that need a thumbnail. - - -___ - -
- - -
-Global: extract thumbnail with new settings #5944 - -Settings are now configurable for the following: -- target size of the thumbnail: source, or constrained to a specific size -- where the frame should be taken from in a sequence or video file -- whether the thumbnail should be integrated or not -- background color for letterboxes -- added AYON settings - - -___ - -
- - -
-RoyalRender: inject submitter environment to the royal render job #5958 - -This is an attempt to solve runtime environment injection for render jobs in RoyalRender, as there is no easy way to implement something like Deadline's `GlobalJobPreLoad` logic. The idea is to inject the OpenPype environment directly into the job itself. - - -___ - -
- - -
-General: Use manual thumbnail if present when publishing #5969 - -Use manual thumbnail added to the publisher instead of using it from published representation. - - -___ - -
- - -
-AYON: Change of server url should work as expected #5971 - -Using login action in tray menu to change server url should correctly start new process without issues of missing bundle or previous url. - - -___ - -
- - -
-AYON: make sure the AYON menu bar in 3dsMax is named AYON when AYON launches #5972 - -Renaming the menu bar in 3dsMax for AYON and some cosmetic fix in the docstring - - -___ - -
- - -
-Resolve: renaming menu to AYON #5974 - -Resolve's menu name in AYON is now aligned. - - -___ - -
- - -
-Hiero: custom tools menu rename #5976 -- OpenPype Tools is now the Custom Tools menu -- fixed the order of tools; Create should be first. - - -___ - -
- - -
-nuke: updating name for custom tools menu item #5977 -- The AYON variant of the settings renames the menu item to `Custom Tools` - - -___ - -
- - -
-fusion: AYON renaming menu #5978 - -Fusion now has an AYON menu. - - -___ - -
- - -
-Blender: Changed the labels for Layout JSON Extractor #5981 - -Changed the labels for Blender's Layout JSON Extractor. - - -___ - -
- - -
-Testing: Skip Arnold license for test rendering. #5984 - -Skip license check when rendering for testing. - - -___ - -
- - -
-Testing: Validate errors and failed status from Deadline jobs. #5986 - -While waiting for the Deadline jobs to finish, we query the errors on the job and its dependent jobs to fail as early as possible. Plus the failed status. - - -___ - -
- - -
-AYON: rename Openpype Tools as Custom Tools in Maya Host #5991 - -Renames OpenPype Tools to Custom Tools in the Maya host. - - -___ - -
- - -
-AYON: Use AYON label in ayon mode #5995 - -Replaced OpenPype with AYON in AYON mode and added the bundle name to the information window. - - -___ - -
- - -
-AYON: Update ayon python api #6002 - -Updated ayon-python-api to '1.0.0-rc.1'. - - -___ - -
- - -
-Max: Add missing repair action in validate resolution setting #6014 - -Add missing repair action for validate resolution setting - - -___ - -
- - -
-Add the AYON/OP settings to enable extractor for model family in 3dsmax #6027 - -Add the AYON/OP settings to enable extractor for model family in 3dsmax - - -___ - -
- - -
-Bugfix: Fix error message formatting if ayon executable can't be found by deadline #6028 - -Without this fix the error message would report the executables string with `;` between EACH character, similar to this PR: https://github.com/ynput/OpenPype/pull/5815. However, that PR apparently missed also fixing it in `GlobalJobPreLoad` and only fixed it in the `Ayon.py` plugin. - - -___ - -
- - -
-Show slightly different info in AYON mode #6031 - -This PR changes what is shown in the Tray menu in AYON mode. Previously, it showed the version of OpenPype, which is very confusing in AYON mode, so it now shows the AYON version instead. When clicked, it will open the AYON info window, where the OpenPype version is now added for debugging purposes. - - -___ - -
- - -
-AYON Editorial: Hierarchy context have names as keys #6041 - -Use folder names as the keys in `hierarchyContext` and modify hierarchy extraction accordingly. - - -___ - -
- - -
-AYON: Convert the createAt value to local timezone #6043 - -Show correct create time in UIs. - - -___ - -
- -### **🐛 Bug fixes** - -
-Maya: Render creation - fix broken imports #5893 - -Maya specific imports were moved to specific methods but not in all cases by #5775. This is just quickly restoring functionality without questioning that decision. - - -___ - -
- - -
-Maya: fix crashing model renderset collector #5929 - -This fix handles the case where a model is in some type of render set but no other connections are made there. Publishing such a model would fail with `RuntimeError: Found no items to list the history for.` - - -___ - -
- - -
-Maya: Remove duplicated attributes of MTOA verbosity level #5945 - -Remove duplicated attributes implementation mentioned in https://github.com/ynput/OpenPype/pull/5931#discussion_r1402175289 - - -___ - -
- - -
-Maya: Bug fix Redshift Proxy not being successfully published #5956 - -Bug fix redshift proxy family not being successfully published due to the error found in integrate.py - - -___ - -
- - -
-Maya: Bug fix load image for texturesetMain #6011 - -Bug fix load image with file node for texturesetMain - - -___ - -
- - -
-Maya: bug fix the repair function in validate_rendersettings #6021 - -The following error has been encountered below: -``` -// pyblish.pyblish.plugin.Action : Finding failed instances.. -// pyblish.pyblish.plugin.Action : Attempting repair for instance: renderLookdevMain ... -// Error: pyblish.plugin : Traceback (most recent call last): -// File "C:\Users\lbate\AppData\Local\Ynput\AYON\dependency_packages\ayon_2310271602_windows.zip\dependencies\pyblish\plugin.py", line 527, in __explicit_process -// runner(*args) -// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\pipeline\publish\publish_plugins.py", line 241, in process -// plugin.repair(instance) -// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\hosts\maya\plugins\publish\validate_rendersettings.py", line 395, in repair -// cmds.setAttr("{}.{}".format(node, prefix_attr), -// UnboundLocalError: local variable 'node' referenced before assignment -// Traceback (most recent call last): -// File "C:\Users\lbate\AppData\Local\Ynput\AYON\dependency_packages\ayon_2310271602_windows.zip\dependencies\pyblish\plugin.py", line 527, in __explicit_process -// runner(*args) -// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\pipeline\publish\publish_plugins.py", line 241, in process -// plugin.repair(instance) -// File "C:\Users\lbate\AppData\Local\Ynput\AYON\addons\openpype_3.17.7-nightly.6\openpype\hosts\maya\plugins\publish\validate_rendersettings.py", line 395, in repair -// cmds.setAttr("{}.{}".format(node, prefix_attr), -// UnboundLocalError: local variable 'node' referenced before assignment -``` -This PR is a fix for that - - -___ - -
- - -
-Fusion: Render avoid unhashable type `BlackmagicFusion.PyRemoteObject` error #5672 - -Fix Fusion 18.6+ support: Avoid issues with Fusion's `BlackmagicFusion.PyRemoteObject` instances being unhashable. -```python -Traceback (most recent call last): - File "E:\openpype\OpenPype\.venv\lib\site-packages\pyblish\plugin.py", line 527, in __explicit_process - runner(*args) - File "E:\openpype\OpenPype\openpype\hosts\fusion\plugins\publish\extract_render_local.py", line 61, in process - result = self.render(instance) - File "E:\openpype\OpenPype\openpype\hosts\fusion\plugins\publish\extract_render_local.py", line 118, in render - with enabled_savers(current_comp, savers_to_render): - File "C:\Users\User\AppData\Local\Programs\Python\Python39\lib\contextlib.py", line 119, in __enter__ - return next(self.gen) - File "E:\openpype\OpenPype\openpype\hosts\fusion\plugins\publish\extract_render_local.py", line 33, in enabled_savers - original_states[saver] = original_state -TypeError: unhashable type: 'BlackmagicFusion.PyRemoteObject' -``` - - - -___ - -
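A hedged sketch of the fix, assuming the approach is to key the stored state by the saver's name (a plain, hashable string) instead of by the `PyRemoteObject` itself; the attribute names follow Fusion's scripting API, but treat the helper as illustrative, not the actual plugin code:

```python
from contextlib import contextmanager


@contextmanager
def enabled_savers(savers):
    # PyRemoteObject instances are unhashable, so use tool names as keys.
    original_states = {}
    try:
        for saver in savers:
            name = saver.GetAttrs("TOOLS_Name")
            original_states[name] = saver.GetAttrs("TOOLB_PassThrough")
            saver.SetAttrs({"TOOLB_PassThrough": False})
        yield
    finally:
        for saver in savers:
            name = saver.GetAttrs("TOOLS_Name")
            if name in original_states:
                saver.SetAttrs({"TOOLB_PassThrough": original_states[name]})
```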
- - -
-Nuke: Validate Nuke Write Nodes refactor to use variable `node_value` instead of `value` #5764
-
-Refactors Validate Nuke Write Nodes to use the variable `node_value` instead of `value`. The variable `value` only exists as the last value of the `for value in values` loop and might never be declared if `values` is an empty iterable.
-
-
-___
-
-
- - -
-resolve: fixing loader handles calculation #5863
-
-Resolve was not correctly calculating the clip duration from the handle values stored in the database.
-
-
-___
-
-
- - -
-Chore: Staging mode determination #5895
-
-Resources use the `is_staging_enabled` function instead of `is_running_staging` to determine whether the staging icon should be used. Also fixed a comparison bug in `is_running_staging`.
-
-
-___
-
-
- - -
-AYON: Handle staging templates category #5905 - -Staging anatomy templates category is handled during project templates conversion. The keys are stored into `others` with `"staging_"` prefix. - - -___ - -
- - -
-Max: fix the subset name not changing accordingly after the variant name changes #5911 - -Resolve #5902 - - -___ - -
- - -
-AYON: Loader tool bugs hunt #5915 - -Fix issues with invalid representation ids in loaded containers and handle missing product type in server database. - - -___ - -
- - -
-Publisher: Bugfixes and enhancements #5924 - -Small fixes/enhancements in publisher UI. - - -___ - -
- - -
-Maya: Supports for additional Job Info and Plugin Info in deadline submission #5931
-
-This PR resolves attributes such as MTOA's `ArnoldVerbose` not being preserved on the farm; users can now use the project settings to add the attributes back to either the Job Info or the Plugin Info.
-
-
-___
-
-
- - -
-Bugfix: Houdini license validator missing families #5934 - -Adding missing families to Houdini license validator. - - -___ - -
- - -
-TrayPublisher: adding back `asset_doc` variable #5943
-
-Restores a variable which had been accidentally removed in a previous PR.
-
-
-___
-
-
- - -
-Settings: Fix ModulesManager init args #5947 - -Remove usage of kwargs to create ModulesManager. - - -___ - -
- - -
-Blender: Fix Deadline Frames per task #5949 - -Fixed a problem with Frames per task setting not being applied when publishing a render. - - -___ - -
- - -
-Testing: Fix is_test_failed #5951
-
-`is_test_failed` is used (exclusively) in module fixtures to determine whether the tests have failed, which in turn determines whether to run teardown code such as cleaning up the database and temporary files. But in the module scope `request.node.rep_call` is not available, which resulted in `is_test_failed` always returning `True`, so no teardown code got executed. The solution was taken from https://github.com/pytest-dev/pytest/issues/5090 and is sketched below.
-
-
-___
-
-
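-
-The workaround from that pytest issue, sketched with assumed wiring (adapt to the actual conftest): store each phase's report on the session so module-scoped fixtures can reach it.
-
-```python
-import pytest
-
-
-@pytest.hookimpl(tryfirst=True, hookwrapper=True)
-def pytest_runtest_makereport(item, call):
-    outcome = yield
-    rep = outcome.get_result()
-    # Keep the report per phase ("setup"/"call"/"teardown") on the session,
-    # which module-scoped fixtures can access via 'request.session'.
-    setattr(item.session, "rep_" + rep.when, rep)
-
-
-def is_test_failed(request):
-    rep = getattr(request.session, "rep_call", None)
-    return rep is not None and rep.failed
-```
-
-
-___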
- - -
-Harmony: Fix local rendering #5953
-
-Local rendering was throwing a warning about the license, but didn't fail per se. It just didn't produce anything.
-
-
-___
-
-
- - -
-Testing: hou module should be within class code. #5954
-
-The `hou` module import should be inside the class code, otherwise we get pyblish errors from needing to skip the plugin.
-
-
-___
-
-
- - -
-Maya: Add Label to MayaUSDReferenceLoader #5964
-
-As the create placeholder dialog displays the two distinct loaders with the same name, this PR distinguishes the Maya USD Reference Loader from the loaders it inherits from.
-
-
-___
-
-
- - -
-Max: Bug fix the resolution not being shown correctly in review burnin #5965
-
-Fixes the resolution not being shown correctly in the review burn-in.
-
-
-___
-
-
- - -
-AYON: Fix thumbnail integration #5970
-
-Thumbnail integration could crash the server if the thumbnail id was changed for the same entity id multiple times. Modified the code to avoid that issue.
-
-
-___
-
-
- - -
-Photoshop: Updated label in Settings #5980
-
-Replaced a wrong label copied from a different plugin.
-
-
-___
-
-
- - -
-Photoshop: Fix removed unsupported Path #5996
-
-`Path` is not JSON serializable by default and was not necessary here, so a better-suited model is reused.
-
-
-___
-
-
- - -
-AYON: Prepare functions for newer ayon-python-api #5997
-
-A newer ayon-python-api will add new filtering options or change the order of existing ones. Keyword arguments are used in client code to prevent issues on update.
-
-
-___
-
-
- - -
-AYON: Conversion of the new playblast settings in Maya #6000 - -Conversion of the new playblast settings in Maya - - -___ - -
- - -
-AYON: Bug fix for loading Mesh in Substance Painter as new project not working #6004
-
-Substance Painter in AYON could not load a mesh when creating a new project.
-
-
-___
-
-
- - -
-Deadline: correct webservice couldn't be selected in Ayon #6007
-
-Changed the settings model to more closely mimic the OpenPype approach, as the two need to coexist for the time being.
-
-
-___
-
-
- - -
-AYON tools: Fix refresh thread #6008 - -Trigger 'refresh_finished' signal out of 'run' method. - - -___ - -
- - -
-Ftrack: multiple reviewable components missing variable #6013 - -Missing variable in code for editorial publishing in traypublisher. - - -___ - -
- - -
-TVPaint: Expect legacy instances in metadata #6015
-
-Do not expect `"workfileInstances"` to contain only new-style instance data with `creator_identifier`.
-
-
-___
-
-
- - -
-Bugfix: handle missing key in Deadline #6019
-
-This quickly fixes a bug introduced by #5420.
-
-
-___
-
-
- - -
-Revert `extractenvironments` behaviour #6020
-
-Restores the original behaviour of the `extractenvironments` command from before #5958 so that functionality is recovered.
-
-
-___
-
-
- - -
-OP-7535 - Fix renaming composition in AE #6025
-
-Removal of a `render` instance caused the composition to be renamed to `dummyComp`, which broke publishing on the next attempt. This PR stores the original composition name (cleaned up for product name creation) and uses it if the instance needs to be removed.
-
-
-___
-
-
- - -
-Refactor code to skip instance creation for new assets #6029 - -Publishing effects from hiero during editorial publish is working as expected again. - - -___ - -
- - -
-Refactor code to handle missing "representations" key in instance data #6032 - -Minor code change for optimisation of thumbnail workflow. - - -___ - -
- - -
-Traypublisher: editorial preserve clip case sensitivity #6036 - -Keep EDL clip name inheritance with case sensitivity. - - -___ - -
- - -
-Bugfix/add missing houdini settings #6039
-
-Adds the missing settings so that the AYON settings now match the OpenPype ones.
-
-
-___
-
-
- -### **๐Ÿ”€ Refactored code** - - -
-Maya: Remove RenderSetup layer observers #5836
-
-Remove RenderSetup layer observers that are no longer needed since the new publisher, as the Renderlayer Creators manage these themselves on Collect and on Save/Update of instances.
-
-
-___
-
-
- -### **Merged pull requests** - - -
-Tests: Removed render instance #6026
-
-This test was created as a simple model and workfile publish, without Deadline rendering. Cleaned up the render elements.
-
-
-___
-
-
- - -
-Tests: update after thumbnail default change #6040 - -https://github.com/ynput/OpenPype/pull/5944 changed default state of integration of Thumbnails to NOT integrate. This PR updates automatic tests to follow that. - - -___ - -
- - -
-Houdini: Remove legacy LOPs USD output processors #5861 - -Remove unused/broken legacy code for Houdini Solaris USD LOPs output processors. The code was originally written in Avalon, against early Houdini 18 betas which had a different API for output processors and thus the current state doesn't even work in recent versions of Houdini. - - -___ - -
- - -
-Chore: Substance Painter Addons for Ayon #5914 - -Substance Painter Addons for Ayon - - -___ - -
- - -
-Ayon: Updated name of Adobe extension to Ayon #5992 - -This changes name in menu in Adobe extensions to Ayon. - - -___ - -
- - -
-Chore/houdini update startup log #6003
-
-Print `Installing AYON ...` on startup when launching Houdini from the launcher in AYON mode. Also update the submenu to `ayon_menu` instead of `openpype_menu`.
-
-
-___
-
-
- - -
-Revert "Ayon: Updated name of Adobe extension to Ayon" #6010 - -Reverts ynput/OpenPype#5992 - -That PR is only applicable to Ayon. -___ - -
- - -
-Standalone/Tray Publisher: Remove simple Unreal texture publishing #6012
-
-We are removing _simple Unreal Texture publishing_ that was just renaming texture files to fit Unreal naming conventions, without any additional functionality. We might bring this functionality back with a better texture publishing system. Related to #5983.
-
-
-___
-
-
- - -
-Deadline: Bump version because of Settings changes for Deadline #6023 - - -___ - -
- - -
-Change ASCII art in the Console based on the server mode #6030 - -This changes ASCII art in the console based on the AYON/OpenPype mode - - -___ - -
- - - - -## [3.17.6](https://github.com/ynput/OpenPype/tree/3.17.6) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.5...3.17.6) - -### **๐Ÿš€ Enhancements** - - -
-Testing: Validate Maya Logs #5775
-
-This PR adds testing of the logs within Maya, such as Python and Pyblish errors. The reason we need to touch so many files outside of Maya is the pyblish errors below:
-```
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "collect_otio_frame_ranges" (No module named 'opentimelineio')
-# Error: pyblish.plugin : Skipped: "collect_otio_frame_ranges" (No module named 'opentimelineio') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "collect_otio_review" (No module named 'opentimelineio')
-# Error: pyblish.plugin : Skipped: "collect_otio_review" (No module named 'opentimelineio') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "collect_otio_subset_resources" (No module named 'opentimelineio')
-# Error: pyblish.plugin : Skipped: "collect_otio_subset_resources" (No module named 'opentimelineio') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "extract_otio_audio_tracks" (No module named 'opentimelineio')
-# Error: pyblish.plugin : Skipped: "extract_otio_audio_tracks" (No module named 'opentimelineio') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "extract_otio_file" (No module named 'opentimelineio')
-# Error: pyblish.plugin : Skipped: "extract_otio_file" (No module named 'opentimelineio') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "extract_otio_review" (No module named 'opentimelineio')
-# Error: pyblish.plugin : Skipped: "extract_otio_review" (No module named 'opentimelineio') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "extract_otio_trimming_video" (No module named 'opentimelineio')
-# Error: pyblish.plugin : Skipped: "extract_otio_trimming_video" (No module named 'opentimelineio') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "submit_blender_deadline" (No module named 'bpy')
-# Error: pyblish.plugin : Skipped: "submit_blender_deadline" (No module named 'bpy') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "submit_houdini_remote_publish" (No module named 'hou')
-# Error: pyblish.plugin : Skipped: "submit_houdini_remote_publish" (No module named 'hou') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "submit_houdini_render_deadline" (No module named 'hou')
-# Error: pyblish.plugin : Skipped: "submit_houdini_render_deadline" (No module named 'hou') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "submit_max_deadline" (No module named 'pymxs')
-# Error: pyblish.plugin : Skipped: "submit_max_deadline" (No module named 'pymxs') #
-pyblish (ERROR) (line: 1371) pyblish.plugin:
-Skipped: "submit_nuke_deadline" (No module named 'nuke')
-# Error: pyblish.plugin : Skipped: "submit_nuke_deadline" (No module named 'nuke') #
-```
-We also needed to capture `stdout` and `stderr` from the launched application. Split from #5644. Dependent on #5734.
-
-
-___
-
-
- - -
-Maya: Render Settings cleanup remove global `RENDER_ATTRS` #5801 - -Remove global `lib.RENDER_ATTRS` and implement a `RenderSettings.get_padding_attr(renderer)` method instead. - - -___ - -
- - -
-Testing: Ingest expected files and input workfile #5840
-
-This ingests the Maya workfile from the Drive storage. The format was changed to MayaAscii so it's easier to see what changes are happening in a PR. This meant changing the expected files and database entries as well.
-
-
-___
-
-
- - -
-Chore: Create plugin auto-apply settings #5908 - -Create plugins can auto-apply settings. - - -___ - -
- - -
-Resolve: Add save current file button + "Save" shortcut when menu is active #5691
-
-Adds a "Save current file" button to the OpenPype menu. Also adds a "Save" shortcut key sequence (CTRL+S on Windows) to the button, so that pressing CTRL+S while the menu is active saves the current workfile. However, this does not work if the menu does not receive the key press event (e.g. when the Resolve UI is active instead). Resolves #5684.
-
-
-___
-
-
- - -
-Reference USD file as maya native geometry #5781 - -Add MayaUsdReferenceLoader to reference USD as Maya native geometry using `mayaUSDImport` file translator. - - -___ - -
- - -
-Max: Bug fix on wrong aspect ratio and viewport not being maximized during context in review family #5839
-
-This PR fixes the bug of a wrong aspect ratio and the viewport not being maximized when creating a preview animation. Besides that, support for the TGA image format and options for AA quality are implemented in this PR.
-
-
-___
-
-
- - -
-Blender: Incorporate blender "Collections" into Publish/Load #5841 - -Allow `blendScene` family to include collections. - - -___ - -
- - -
-Max: Allows user preset the setting of preview animation in OP/AYON Setting #5859
-
-Allows users to preset the preview animation settings in the OP/AYON settings for the review family.
-- [x] Openpype
-- [x] AYON
-
-
-___
-
-
- - -
-Publisher: Center publisher window on first show #5877 - -Move publisher window to center of a screen on first show. - - -___ - -
- - -
-Publisher: Instance context changes confirm works #5881 - -Confirmation of context changes in publisher on existing instances does not cause glitches. - - -___ - -
- - -
-AYON workfiles tools: Revisit workfiles tool #5897 - -Revisited workfiles tool for AYON mode to reuse common models and widgets. - - -___ - -
- - -
-Nuke: updated colorspace settings #5906
-
-Updates Nuke colorspace settings in a more convenient way, using OCIO config roles rather than particular colorspace names. This way we should have no trouble switching between linear Rec709 or ACES configs without any additional settings changes.
-
-
-___
-
-
- - -
-Blender: Refactor to new publisher #5910 - -Refactor Blender integration to use the new publisher - - -___ - -
- - -
-Enhancement: Some publish logs cosmetics #5917 - -General logging message tweaks: -- Sort some lists of folder/filenames so they appear sorted in the logs -- Fix some grammar / typos -- In some cases provide slightly more information in a log - - -___ - -
- - -
-Blender: Better name of 'asset_name' function #5927 - -Renamed function `asset_name` to `prepare_scene_name`. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Bug fix the fbx animation export errored out when the skeletonAnim set is empty #5875
-
-Resolves a bug reported on Discord: if the `skeletonAnim` set is empty during FBX animation collection, the FBX animation extractor now skips the FBX extraction.
-
-
-___
-
-
- - -
-Bugfix: fix few typos in houdini's and Maya's Ayon settings #5882
-
-Fixing a few typos:
-- [x] Maya unreal static mesh
-- [x] Houdini static mesh
-- [x] Houdini collect asset handles
-
-
-___
-
-
- - -
-Bugfix: Ayon Deadline env vars + error message on no executable found #5815
-
-Fix some AYON x Deadline issues that came up in this topic:
-- missing environment variables for `deadlinePlugin.RunProcess` for the AYON _extract environments_ call.
-- wrong error formatting, with a `;` between each character, like this: `Ayon executable was not found in the semicolon separated list "C;:;/;P;r;o;g;r;a;m; ;F;i;l;e;s;/;Y;n;p;u;t;/;A;Y;O;N; ;1;.;0;.;0;-;b;e;t;a;.;5;/;a;y;o;n;_;c;o;n;s;o;l;e;.;e;x;e". The path to the render executable can be configured from the Plugin Configuration in the Deadline Monitor.` See the reproduction sketch below.
-
-
-___
-
-
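-
-The garbled message is the classic symptom of `str.join` applied to a single string instead of a list of paths; a short, self-contained reproduction:
-
-```python
-exe = "C:/Program Files/Ynput/ayon_console.exe"
-
-print(";".join(exe))    # C;:;/;P;r;o;g;... - join iterates the characters
-print(";".join([exe]))  # intended: wrap a single path in a list first
-```
-
-
-___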
- - -
-AYON: Fix bundles access in settings #5856 - -Fixed access to bundles data in settings to define correct develop variant. - - -___ - -
- - -
-AYON 3dsMax settings: 'ValidateAttributes' settings converted only if available #5878
-
-Convert `ValidateAttributes` settings only if they are available in AYON settings.
-
-
-___
-
-
- - -
-AYON: Fix TrayPublisher editorial settings #5880 - -Fixing Traypublisher settings for adding task in simple editorial. - - -___ - -
- - -
-TrayPublisher: editorial frame range check not needed #5884 - -Validator for frame ranges is not needed during editorial publishing since entity data are not yet in database. - - -___ - -
- - -
-Update houdini license validator #5886
-
-As reported in a community comment, Houdini USD publishing is only restricted in Houdini Apprentice.
-
-
-___
-
-
- - -
-Blender: Fix blend extraction and packed images #5888
-
-Fixed a problem with the blend extractor and packed images.
-
-
-___
-
-
- - -
-AYON: Initialize connection with all information #5890
-
-Create the global AYON API connection with all information every time.
-
-
-___
-
-
- - -
-AYON: Scene inventory tool without site sync #5896 - -Skip 'get_site_icons' if site sync addon is disabled. - - -___ - -
- - -
-Publish report tool: Fix PySide6 #5898 - -Use constants from classes instead of objects. - - -___ - -
- - -
-fusion: removing hardcoded template name for saver #5907
-
-Fusion is no longer hardcoded to the `render` anatomy template only. This was blocking AYON deployment.
-
-
-___
-
-
- - - - -## [3.17.5](https://github.com/ynput/OpenPype/tree/3.17.5) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.4...3.17.5) - -### **๐Ÿ†• New features** - - -
-Fusion: Add USD loader #4896 - -Add an OpenPype managed USD loader (`uLoader`) for Fusion. - - -___ - -
- - -
-Fusion: Resolution validator #5325
-
-Added a resolution validator. The code is from my old PR (https://github.com/ynput/OpenPype/pull/4921) that I closed because that PR also contained a frame range validator which is no longer needed.
-
-
-___
-
-
- - -
-Context Selection tool: Refactor Context tool (for AYON) #5766 - -Context selection tool has AYON variant. - - -___ - -
- - -
-AYON: Use AYON username for user in template data #5842 - -Use ayon username for template data in AYON mode. - - -___ - -
- - -
-Testing: app_group flag #5869
-
-Adds an `app_group` command flag for changing which flavour of the host to launch. In the case of Maya, you can launch Maya and MayaPy, but it can be used for the Nuke family as well. Split from #5644.
-
-
-___
-
-
- -### **๐Ÿš€ Enhancements** - - -
-Enhancement: Fusion fix saver creation + minor Blender/Fusion logging tweaks #5558 - -- Blender change logs to `debug` level in preparation for new publisher artist facing reports (note that it currently still uses the old publisher) -- Fusion: Create Saver fix redeclaration of default_variants -- Fusion: Fix saver being created in incorrect state without saving directly after create -- Fusion: Allow reset frame range on render family -- Fusion: Tweak logging level for artist-facing report - - -___ - -
- - -
-Resolve: load clip to timeline at set time #5665
-
-It is now possible to load a clip to the correct place on the timeline.
-
-
-___
-
-
- - -
-Nuke: Optional Deadline workfile dependency. #5732
-
-Adds an option to add the workfile as a dependency for the Deadline job. We think there used to be something like this, but it disappeared. The use case is a remote workflow where the Nuke script needs to be synced before the job can start.
-
-
-___
-
-
- - -
-Enhancement/houdini rearrange ayon houdini settings files #5748
-
-Rearranging Houdini settings to be more readable and easier to edit and update (including all families/product types). This PR is mainly for AYON settings, to have more organized files. For OpenPype, I'll make sure that each Houdini setting in AYON has an equivalent in OpenPype.
-- [x] update Ayon settings, fix typos and remove deprecated settings.
-- [x] Sync with Openpype
-- [x] Test in Openpype
-- [x] Test in Ayon
-
-
-___
-
-
- - -
-Chore: updating create ayon addon script #5822 - -Adding developers environment options. - - -___ - -
- - -
-Max: Implement Validator for Properties/Attributes Value Check #5824 - -Add optional validator which can check if the property attributes are valid in Max - - -___ - -
- - -
-Nuke: Remove unused 'get_render_path' function #5826 - -Remove unused function `get_render_path` from nuke integration. - - -___ - -
- - -
-Chore: Limit current context template data function #5845 - -Current implementation of `get_current_context_template_data` does return the same values as base template data function `get_template_data`. - - -___ - -
- - -
-Max: Make sure Collect Render not ignoring instance asset #5847
-
-- Make sure Collect Render is not always using the asset from context.
-- Make sure the scene version is being collected.
-- Clean up unnecessary code in the collector.
-
-
-___
-
-
- - -
-Ftrack: Events are not processed if project is not available in OpenPype #5853
-
-Events that happened on a project which is not in OpenPype are not processed.
-
-
-___
-
-
- - -
-Nuke: Add Nuke 11.0 as default setting #5855 - -Found I needed Nuke 11.0 in the default settings to help with unit testing. - - -___ - -
- - -
-TVPaint: Code cleanup #5857 - -Removed unused import. Use `AYON` label in ayon mode. Removed unused data in publish context `"previous_context"`. - - -___ - -
- - -
-AYON settings: Use correct label for follow workfile version #5874
-
-The Follow Workfile Version label was wrongly set to the Collect Anatomy Instance Data label.
-
-
-___
-
-
- -### **๐Ÿ› Bug fixes** - - -
-Nuke: Fix workfile template builder so representations get loaded next to each other #5061 - -Refactor when the cleanup of the placeholder happens for the cases where multiple representations are loaded by a single placeholder.The existing code didn't take into account the case where a template placeholder can load multiple representations so it was trying to do the cleanup of the placeholder node and the re-arrangement of the imported nodes too early. I assume this was designed only for the cases where a single representation can load multiple nodes. - - -___ - -
- - -
-Nuke: Dont update node name on update #5704
-
-When updating `Image` containers, the code was trying to set the name of the node, which resulted in a warning message from Nuke. This PR avoids changing the node name when updating.
-
-
-___
-
-
- - -
-UIDefLabel can be unique #5827
-
-`UILabelDef` now implements comparison and uniqueness.
-
-
-___
-
-
- - -
-AYON: Skip kitsu module when creating ayon addons #5828
-
-Creating AYON packages now skips the kitsu module when creating modules/addons, and the kitsu module is not loaded from modules on start. The addon already has its own repository: https://github.com/ynput/ayon-kitsu.
-
-
-___
-
-
- - -
-Bugfix: Collect Rendered Files only collecting first instance #5832 - -Collect all instances from the metadata file - don't return on first instance iteration. - - -___ - -
- - -
-Houdini: set frame range for the created composite ROP #5833
-
-Quick bug fix for the created composite ROP: set its frame range to the frame range of the playbar.
-
-
-___
-
-
- - -
-Fix registering launcher actions from OpenPypeModules #5843
-
-Fix typo `actions_dir` -> `path` to fix registering launcher actions from OpenPypeModule.
-
-
-___
-
-
- - -
-Bugfix in houdini shelves manager and beautify settings #5844
-
-This PR fixes the problem in https://github.com/ynput/OpenPype/issues/5457 by using the right function to load a pre-made Houdini `.shelf` file. It also beautifies the Houdini shelves settings to provide better guidance for users, which helps with https://github.com/ynput/OpenPype/issues/5458. Rather than adding default shelf and set names, I'll educate users on how to use the tool correctly. Users are now able to select between the two options.
-
-
-___
-
-
- - -
-Blender: Fix missing Grease Pencils in review #5848 - -Fix Grease Pencil missing in review when isolating objects. - - -___ - -
- - -
-Blender: Fix Render Settings in Ayon #5849 - -Fix Render Settings in Ayon for Blender. - - -___ - -
- - -
-Bugfix: houdini tab menu working as expected #5850
-
-This PR: the tab menu name changes to AYON when using AYON, and `get_network_categories` is checked in all creator plugins.
-
-| Product | Network Category |
-| -- | -- |
-| Alembic camera | rop, obj |
-| Arnold Ass | rop |
-| Arnold ROP | rop |
-| Bgeo | rop, sop |
-| composite sequence | cop2, rop |
-| hda | obj |
-| Karma ROP | rop |
-| Mantra ROP | rop |
-| ABC | rop, sop |
-| RS proxy | rop, sop |
-| RS ROP | rop |
-| Review | rop |
-| Static mesh | rop, obj, sop |
-| USD | lop, rop |
-| USD Render | rop |
-| VDB | rop, obj, sop |
-| V Ray | rop |
-
-
-___
-
-
- - -
-Bugfix: Houdini skip frame_range_validator if node has no 'trange' parameter #5851
-
-I faced a bug when publishing an HDA instance, as it has no `trange` parameter. As the PR title says: skip the frame range validator if the node has no 'trange' parameter.
-
-
-___
-
-
- - -
-Bugfix: houdini image sequence loading and missing frames #5852
-
-I made this PR to fix the issues mentioned in https://github.com/ynput/OpenPype/pull/5833#issuecomment-1789207727. In short:
-- image load doesn't work
-- publisher only publishes one frame
-
-
-___
-
-
- - -
-Nuke: loaders' containers updating as nodes #5854
-
-Nuke loaded containers now update correctly even when they are duplicates of originally loaded nodes. Previously, duplicated nodes were removed.
-
-
-___
-
-
- - -
-deadline: settings are not blocking extension input #5864 - -Settings are not blocking user input. - - -___ - -
- - -
-Blender: Fix loading of blend layouts #5866 - -Fix a problem with loading blend layouts. - - -___ - -
- - -
-AYON: Launcher refresh issues #5867
-
-Fixed the refresh-of-projects issue in the launcher tool, and renamed Qt models to contain `Qt` in their name (it was really hard to find out where they were used). It is not possible to click on a disabled item in the launcher's projects view.
-
-
-___
-
-
- - -
-Fix the Wrong key words for tycache workfile template settings in AYON #5870
-
-Fix the wrong keywords for the tycache workfile template settings in AYON (i.e. `product_types` should be used instead of `families`).
-
-
-___
-
-
- - -
-AYON tools: Handle empty icon definition #5876 - -Ignore if passed icon definition is `None`. - - -___ - -
- -### **๐Ÿ”€ Refactored code** - - -
-Houdini: Remove on instance toggled callback #5860 - -Remove on instance toggled callback which isn't relevant to the new publisher - - -___ - -
- - -
-Chore: Remove unused `instanceToggled` callbacks #5862 - -The `instanceToggled` callbacks should be irrelevant for new publisher. - - -___ - -
- - - - -## [3.17.4](https://github.com/ynput/OpenPype/tree/3.17.4) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.3...3.17.4) - -### **๐Ÿ†• New features** - - -
-Add Support for Husk-AYON Integration #5816 - -This draft pull request introduces support for integrating Husk with AYON within the OpenPype repository. - - -___ - -
- - -
-Push to project tool: Prepare push to project tool for AYON #5770 - -Cloned Push to project tool for AYON and modified it. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Max: tycache family support #5624
-
-TyCache family support for the tyFlow plugin in Max.
-
-
-___
-
-
- - -
-Unreal: Changed behaviour for updating assets #5670 - -Changed how assets are updated in Unreal. - - -___ - -
- - -
-Unreal: Improved error reporting for Sequence Frame Validator #5730 - -Improved error reporting for Sequence Frame Validator. - - -___ - -
- - -
-Max: Setting tweaks on Review Family #5744 - -- Bug fix of not being able to publish the preferred visual style when creating preview animation -- Exposes the parameters after creating instance -- Add the Quality settings and viewport texture settings for preview animation -- add use selection for create review - - -___ - -
- - -
-Max: Add families with frame range extractions back to the frame range validator #5757
-
-In 3dsMax, some instances export files over a frame range but were not added to the optional frame range validator. In this PR, these instances get the optional frame range validator so users can check whether the frame range aligns with the context data from the DB. The following families now have the optional frame range validator:
-- maxrender
-- review
-- camera
-- redshift proxy
-- pointcache
-- point cloud (tyFlow PRT)
-
-
-___
-
-
- - -
-TimersManager: Use available data to get context info #5804 - -Get context information from pyblish context data instead of using `legacy_io`. - - -___ - -
- - -
-Chore: Removed unused variable from `AbstractCollectRender` #5805 - -Removed unused `_asset` variable from `RenderInstance`. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Bugfix/houdini: wrong frame calculation with handles #5698
-
-This PR makes collect plugins consider `handleStart` and `handleEnd` when collecting the frame range (see the sketch below). It affects three parts:
-- get frame range in collect plugins
-- expected files in render plugins
-- submit houdini job deadline plugin
-
-
-___
-
-
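-
-A minimal sketch of the intended calculation, assuming the usual `frameStart`/`frameEnd`/`handleStart`/`handleEnd` instance keys:
-
-```python
-def frame_range_with_handles(data):
-    # Handles extend the frame range on both ends.
-    start = data["frameStart"] - data.get("handleStart", 0)
-    end = data["frameEnd"] + data.get("handleEnd", 0)
-    return start, end
-
-
-print(frame_range_with_handles(
-    {"frameStart": 1001, "frameEnd": 1100, "handleStart": 8, "handleEnd": 8}
-))  # (993, 1108)
-```
-
-
-___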
- - -
-Nuke: ayon server settings improvements #5746 - -Nuke settings were not aligned with OpenPype settings. Also labels needed to be improved. - - -___ - -
- - -
-Blender: Fix pointcache family and fix alembic extractor #5747 - -Fixed `pointcache` family and fixed behaviour of the alembic extractor. - - -___ - -
- - -
-AYON: Remove 'shotgun_api3' from dependencies #5803 - -Removed `shotgun_api3` dependency from openpype dependencies for AYON launcher. The dependency is already defined in shotgrid addon and change of version causes clashes. - - -___ - -
- - -
-Chore: Fix typo in filename #5807 - -Move content of `contants.py` into `constants.py`. - - -___ - -
- - -
-Chore: Create context respects instance changes #5809 - -Fix issue with unrespected change propagation in `CreateContext`. All successfully saved instances are marked as saved so they have no changes. Origin data of an instance are explicitly not handled directly by the object but by the attribute wrappers. - - -___ - -
- - -
-Blender: Fix tools handling in AYON mode #5811
-
-Skip logic in `before_window_show` in Blender when in AYON mode. Most of the stuff called there happens on show automatically.
-
-
-___
-
-
- - -
-Blender: Include Grease Pencil in review and thumbnails #5812 - -Include Grease Pencil in review and thumbnails. - - -___ - -
- - -
-Workfiles tool AYON: Fix double click of workfile #5813 - -Fix double click on workfiles in workfiles tool to open the file. - - -___ - -
- - -
-Webpublisher: removal of usage of no_of_frames in error message #5819
-
-If the code throws an exception, the `no_of_frames` value won't be available, so it doesn't make sense to log it (see the sketch below).
-
-
-___
-
-
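-
-A self-contained sketch of why the value cannot be logged (the helper is a stand-in, not the webpublisher code): if the call that computes it raises, the name is never bound inside the `except` block.
-
-```python
-def count_frames(path):
-    raise RuntimeError("demo: frame counting failed")  # stand-in for the real call
-
-
-def collect(path):
-    try:
-        no_of_frames = count_frames(path)
-    except Exception:
-        # Referencing 'no_of_frames' here would raise UnboundLocalError,
-        # so the error message must not include it.
-        raise ValueError("Unable to count frames for {}".format(path))
-    return no_of_frames
-```
-
-
-___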
- - -
-Attribute Defs: Hide multivalue widget in Number by default #5821 - -Fixed default look of `NumberAttrWidget` by hiding its multiselection widget. - - -___ - -
- -### **Merged pull requests** - - -
-Corrected a typo in Readme.md (Top -> To) #5800 - - -___ - -
- - -
-Photoshop: Removed redundant copy of extension.zxp #5802 - -`extension.zxp` shouldn't be inside of extension folder. - - -___ - -
- - - - -## [3.17.3](https://github.com/ynput/OpenPype/tree/3.17.3) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.2...3.17.3) - -### **๐Ÿ†• New features** - - -
-Maya: Multi-shot Layout Creator #5710 - -New Multi-shot Layout creator is a way of automating creation of the new Layout instances in Maya, associated with correct shots, frame ranges and Camera Sequencer in Maya. - - -___ - -
- - -
-Colorspace: ociolook file product type workflow #5541
-
-Traypublisher support for publishing colorspace look files (ociolook), which are JSON files holding any LUT files. This new product is available for loading in the Nuke host at the moment. Added a colorspace selector to publisher attributes with better labeling. We also support Roles and Aliases (only v2 configs).
-
-
-___
-
-
- - -
-Scene Inventory tool: Refactor Scene Inventory tool (for AYON) #5758 - -Modified scene inventory tool for AYON. The main difference is in how project name is defined and replacement of assets combobox with folders dialog. - - -___ - -
- - -
-AYON: Support dev bundles #5783 - -Modules can be loaded in AYON dev mode from different location. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Testing: Ingest Maya userSetup #5734 - -Suggesting to ingest `userSetup.py` startup script for easier collaboration and transparency of testing. - - -___ - -
- - -
-Fusion: Work with pathmaps #5329 - -Path maps are a big part of our Fusion workflow. We map the project folder to a path map within Fusion so all loaders and savers point to the path map variable. This way any computer on any OS can open any comp no matter where the project folder is located. - - -___ - -
- - -
-Maya: Add Maya 2024 and remove pre 2022. #5674
-
-Adding Maya 2024 as a default application variant. Removing Maya 2020 and older, as these are not supported anymore.
-
-
-___
-
-
- - -
-Enhancement: Houdini: Allow using template keys in Houdini shelves manager #5727 - -Allow using Template keys in Houdini shelves manager. - - -___ - -
- - -
-Houdini: Fix Show in usdview loader action #5737 - -Fix the "Show in USD View" loader to show up in Houdini - - -___ - -
- - -
-Nuke: validator of asset context with repair actions #5749
-
-Instance nodes with a different asset and task context can now be validated and repaired via a repair action.
-
-
-___
-
-
- - -
-AYON: Tools enhancements #5753 - -Few enhancements and tweaks of AYON related tools. - - -___ - -
- - -
-Max: Tweaks on ValidateMaxContents #5759
-
-This PR provides enhancements on ValidateMaxContent as follows:
-- Rename `ValidateMaxContents` to `ValidateContainers`
-- Add related families which are required to pass the validation (all families except `Render`, as the render instance is the only one which allows an empty container)
-
-
-___
-
-
- - -
-Enhancement: Nuke refactor `SelectInvalidAction` #5762
-
-Refactor `SelectInvalidAction` to behave like the equivalent actions for other hosts, and create `SelectInstanceNodeAction` as a dedicated action to select the instance node for a failed plugin.
-- Note: Selecting the instance node will still select it even if the user has currently 'fixed' the problem.
-
-
-___
-
-
- - -
-Enhancement: Tweak logging for Nuke for artist facing reports #5763 - -Tweak logs that are not artist-facing to debug level + in some cases clarify what the logged value is. - - -___ - -
- - -
-AYON Settings: Disk mapping #5786 - -Added disk mapping settings to core addon settings. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: add colorspace argument to redshiftTextureProcessor #5645
-
-In color-managed Maya, texture processing during Look Extraction wasn't passing the texture colorspaces set on textures to the `redshiftTextureProcessor` tool. In effect this caused the tool to produce a non-zero exit code (even though the texture was converted, into a wrong colorspace) and therefore a crash of the extractor. This PR passes the colorspace to that tool if color management is enabled.
-
-
-___
-
-
- - -
-Maya: don't call `cmds.ogs()` in headless mode #5769
-
-`cmds.ogs()` is a call that will crash if Maya is running in headless mode (mayabatch, mayapy). This PR handles that case (see the sketch below).
-
-
-___
-
-
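-
-A hedged sketch of the guard (the exact call being wrapped is illustrative): `cmds.about(batch=True)` reports whether Maya runs headless, so viewport-only calls can be skipped there.
-
-```python
-from maya import cmds
-
-# cmds.ogs() touches the Viewport 2.0 renderer and crashes in
-# mayabatch/mayapy, so only call it in an interactive session.
-if not cmds.about(batch=True):
-    cmds.ogs(pause=True)
-```
-
-
-___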
- - -
-Resolve: inventory management fix #5673
-
-Loaded timeline item containers are now updating correctly and version management works as it is supposed to.
-- [x] updating loaded timeline items
-- [x] Removing of loaded timeline items
-
-
-___
-
-
- - -
-Blender: Remove 'update_hierarchy' #5756 - -Remove `update_hierarchy` function which is causing crashes in scene inventory tool. - - -___ - -
- - -
-Max: bug fix on the settings in pointcloud family #5768
-
-Bug fix for the settings erroring out in validate point cloud (see https://github.com/ynput/OpenPype/pull/5759#pullrequestreview-1676681705) and possibly in the point cloud extractor.
-
-
-___
-
-
- - -
-AYON settings: Fix default factory of tools #5773 - -Fix default factory of application tools. - - -___ - -
- - -
-Fusion: added missing OPENPYPE_VERSION #5776
-
-Fusion submission to Deadline was missing the OPENPYPE_VERSION env var when submitting from a build (not source code directly). This missing env var might break rendering on DL if the path to the OP executable (openpype_console.exe) is not set explicitly, and might cause an issue when different versions of OP are deployed. This PR adds the environment variable.
-
-
-___
-
-
- - -
-Ftrack: Skip tasks when looking for asset equivalent entity #5777 - -Skip tasks when looking for asset equivalent entity. - - -___ - -
- - -
-Nuke: loading gizmos fixes #5779
-
-The Gizmo product is no longer offered in the Loader as a plugin. It also updates as expected.
-
-
-___
-
-
- - -
-General: thumbnail extractor as last extractor #5780 - -Fixing issue with the order of the `ExtractOIIOTranscode` and `ExtractThumbnail` plugins. The problem was that the `ExtractThumbnail` plugin was processed before the `ExtractOIIOTranscode` plugin. As a result, the `ExtractThumbnail` plugin did not inherit the `review` tag into the representation data. This caused the `ExtractThumbnail` plugin to fail in processing and creating thumbnails. - - -___ - -
- - -
-Bug: fix key in application json #5787 - -In PR #5705 `maya` was wrongly used instead of `mayapy`, breaking AYON defaults in AYON Application Addon. - - -___ - -
- - -
-'NumberAttrWidget' shows 'Multiselection' label on multiselection #5792 - -Attribute definition widget 'NumberAttrWidget' shows `< Multiselection >` label on multiselection. - - -___ - -
- - -
-Publisher: Selection change by enabled checkbox on instance update attributes #5793 - -Change of instance by clicking on enabled checkbox will actually update attributes on right side to match the selection. - - -___ - -
- - -
-Houdini: Remove `setParms` call since it's responsibility of `self.imprint` to set the values #5796
-
-Reverts a recent change made in #5621 based on review feedback, as the change turned out to be faulty.
-
-
-___
-
-
- - -
-AYON loader: Fix SubsetLoader functionality #5799 - -Fix SubsetLoader plugin processing in AYON loader tool. - - -___ - -
- -### **Merged pull requests** - - -
-Houdini: Add self publish button #5621
-
-This PR allows single publishing by adding a publish button to created ROP nodes in Houdini. Admins are much welcome to enable it from the Houdini general settings. The publish button also includes all input publish instances; an instance is ignored when its switch is turned off.
-
-
-___
-
-
- - -
-Nuke: fixing UNC support for OCIO path #5771
-
-UNC paths were broken on Windows for a custom OCIO path; this solves the issue where the double slash at the start of the path was removed.
-
-
-___
-
-
- - - - -## [3.17.2](https://github.com/ynput/OpenPype/tree/3.17.2) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.1...3.17.2) - -### **๐Ÿ†• New features** - - -
-Maya: Add MayaPy application. #5705 - -This adds mayapy to the application to be launched from a task. - - -___ - -
- - -
-Feature: Copy resources when downloading last workfile #4944 - -When the last published workfile is downloaded as a prelaunch hook, all resource files referenced in the workfile representation are copied to the `resources` folder, which is inside the local workfile folder. - - -___ - -
- - -
-Blender: Deadline support #5438 - -Add Deadline support for Blender. - - -___ - -
- - -
-Fusion: implement toggle to use Deadline plugin FusionCmd #5678
-
-Fusion 17 doesn't work in DL 10.3, but FusionCmd does, and it is probably the better option as a headless variant. The Fusion plugin seems to close and reopen the application when the worker is running on an artist machine; not so with FusionCmd. Added configuration to Project Settings for admins to select the appropriate Deadline plugin.
-
-
-___
-
-
- - -
-Loader tool: Refactor loader tool (for AYON) #5729 - -Refactored loader tool to new tool. Separated backend and frontend logic. Refactored logic is AYON-centric and is used only in AYON mode, so it does not affect OpenPype. The tool is also replacing library loader. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya: implement matchmove publishing #5445
-
-Adds the possibility to export multiple cameras in a single `matchmove` family instance, both in `abc` and `ma`. Exposes a 'Keep image planes' flag to control the export of image planes.
-
-
-___
-
-
- - -
-Maya: Add optional Fbx extractors in Rig and Animation family #5589
-
-This PR allows users to optionally export control rigs (optionally with mesh) and animated rigs in FBX by attaching the rig objects to the two newly introduced sets.
-
-
-___
-
-
- - -
-Maya: Optional Resolution Validator for Render #5693
-
-Adding an optional resolution validator for Maya in the render family, similar to the one in Max. It checks whether the resolution in the render settings aligns with the setting from the DB.
-
-
-___
-
-
- - -
-Use host's node uniqueness for instance id in new publisher #5490
-
-Instead of writing `instance_id` as a parm or attribute on the publish instances we can, for some hosts, just rely on a unique name or path within the scene to refer to that particular instance. By doing so we fix #4820, because upon duplicating such a publish instance using the host's (DCC) functionality the uniqueness of the duplicate is then already ensured, instead of the attributes keeping the exact same value as the instance they were duplicated from, which made `instance_id` a non-unique value.
-
-
-___
-
-
- - -
-Max: Implementation of OCIO configuration #5499
-
-Resolves #5473. Implementation of OCIO configuration for Max 2024, following the Max 2024 update.
-
-
-___
-
-
- - -
-Nuke: Multiple format supports for ExtractReviewDataMov #5623
-
-This PR fixes the bug of the plugin `ExtractReviewDataMov` not being able to support extensions other than `mov`. The plugin is also renamed to `ExtractReviewDataBakingStreams`, as it provides multiple format support.
-
-
-___
-
-
- - -
-Bugfix: houdini switching context doesnt update variables #5651
-
-Allows admins to have a list of vars (e.g. JOB) with (dynamic) values that will be updated on context changes, e.g. when switching to another asset or task. Using template keys is supported, but formatting key capitalization variants is not, e.g. {Asset} and {ASSET} won't work. Disabling the "Update Houdini vars on context change" feature will leave all Houdini vars unmanaged, and thus no context update changes will occur. Also, this PR adds a new button in the menu to update vars on demand.
-
-
-___
-
-
- - -
-Publisher: Fix report maker memory leak + optimize lookups using set #5667
-
-Fixes a memory leak where resetting the publisher did not clear the stored plugins for the Publish Report Maker. Also changes the stored plugins to a `set` to optimize lookup speed.
-
-
-___
-
-
- - -
-Add openpype_mongo command flag for testing. #5676 - -Instead of changing the environment, this command flag allows for changing the database. - - -___ - -
- - -
-Nuke: minor docstring and code tweaks for ExtractReviewMov #5695 - -Code and docstring tweaks on https://github.com/ynput/OpenPype/pull/5623 - - -___ - -
- - -
-AYON: Small settings fixes #5699 - -Small changes/fixes related to AYON settings. All foundry apps variant `13-0` has label `13.0`. Key `"ExtractReviewIntermediates"` is not mandatory in settings. - - -___ - -
- - -
-Blender: Alembic Animation loader #5711 - -Implemented loading Alembic Animations in Blender. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Missing "data" field and enabling of audio #5618 - -When updating audio containers, the field "data" was missing and the audio node was not enabled on the timeline. - - -___ - -
- - -
-Maya: Bug in validate Plug-in Path Attribute #5687
-
-Overwriting a list with a string caused `TypeError: string indices must be integers` in subsequent iterations, crashing the validator plugin (see the reproduction sketch below).
-
-
-___
-
-
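-
-A self-contained reproduction of the reported pattern (not the validator code): reassigning the collection variable to a string makes the next lookup index into characters.
-
-```python
-nodes = {
-    "mtoa": {"path": "plug-ins/mtoa.mll"},
-    "vray": {"path": "plug-ins/vray.mll"},
-}
-
-for name in list(nodes):
-    node = nodes[name]
-    nodes = node["path"]  # bug: 'nodes' is now a plain string
-    # Second iteration: nodes[name] raises
-    # TypeError: string indices must be integers
-
-# Fix: bind the extracted value to a new name, e.g. path = node["path"]
-```
-
-
-___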
- - -
-General: Avoid fallback if value is 0 for handle start/end #5652
-
-There's a bug in `pyblish_functions.get_time_data_from_instance_or_context` where, if `handleStart` or `handleEnd` on the instance is set to 0, it falls back to grabbing the handles from the instance context. Instead, the logic should only fall back to the `instance.context` if the key doesn't exist. This change only affected me on `handleStart`/`handleEnd`, and it's unlikely it could cause issues on `frameStart`, `frameEnd` or `fps`, but regardless, the `get` logic is wrong (see the sketch below).
-
-
-___
-
-
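-
-The difference in one self-contained sketch (illustrative data, not the actual `pyblish_functions` code):
-
-```python
-instance_data = {"handleStart": 0}
-context_data = {"handleStart": 12}
-
-# Buggy: 0 is falsy, so 'or' silently falls back to the context value.
-handle_start = instance_data.get("handleStart") or context_data["handleStart"]
-assert handle_start == 12  # wrong
-
-# Fixed: fall back only when the key is genuinely missing.
-handle_start = instance_data.get("handleStart")
-if handle_start is None:
-    handle_start = context_data["handleStart"]
-assert handle_start == 0  # correct
-```
-
-
-___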
- - -
-Fusion: added missing env vars to Deadline submission #5659
-
-The environment variables discerning the type of job were missing. Without them, the injection of environment variables won't start.
-
-
-___
-
-
- - -
-Nuke: workfile version synchronization settings fixed #5662 - -Settings for synchronizing workfile version to published products is fixed. - - -___ - -
- - -
-AYON Workfiles Tool: Open workfile changes context #5671 - -Change context when workfile is opened. - - -___ - -
- - -
-Blender: Fix remove/update in new layout instance #5679 - -Fixes an error that occurs when removing or updating an asset in a new layout instance. - - -___ - -
- - -
-AYON Launcher tool: Fix refresh btn #5685 - -Refresh button does propagate refreshed content properly. Folders and tasks are cached for 60 seconds instead of 10 seconds. Auto-refresh in launcher will refresh only actions and related data which is project and project settings. - - -___ - -
- - -
-Deadline: handle all valid paths in RenderExecutable #5694
-
-This commit enhances the path resolution in the RenderExecutable function of the Ayon plugin. Previously, the function only considered paths starting with a tilde (~), ignoring other valid paths listed in exe_list. This limitation led to an empty expanded_paths list when none of the paths in exe_list started with a tilde, causing the function to fail to find the Ayon executable. With this fix, the RenderExecutable function correctly processes and includes all valid paths from exe_list (see the sketch below), improving its reliability and preventing unnecessary errors related to locating the Ayon executable.
-
-
-___
-
-
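-
-A hedged sketch of the fix (names illustrative, not the plugin code): expand tilde paths but keep every other entry instead of discarding it.
-
-```python
-import os
-
-
-def expand_exe_paths(exe_list):
-    expanded = []
-    for path in exe_list:
-        if path.startswith("~"):
-            path = os.path.expanduser(path)
-        expanded.append(path)  # previously, non-tilde paths were dropped
-    return expanded
-```
-
-
-___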
- - -
-AYON Launcher tool: Fix skip last workfile boolean #5700 - -Skip last workfile boolean works as expected. - - -___ - -
- - -
-Chore: Explore here action can work without task #5703
-
-The Explore here action no longer crashes when no task is selected, and the error message was changed a little.
-
-
-___
-
-
- - -
-Testing: Inject mongo_url argument earlier #5706
-
-Fix for https://github.com/ynput/OpenPype/pull/5676. The Mongo URL is used earlier in the execution.
-
-
-___
-
-
- - -
-Blender: Add support to auto-install PySide2 in blender 4 #5723 - -Change version regex to support blender 4 subfolder. - - -___ - -
- - -
-Fix: Hardcoded main site and wrongly copied workfile #5733 - -Fixing these two issues: -- Hardcoded main site -> Replaced by `anatomy.fill_root`. -- Workfiles can sometimes be copied while they shouldn't. - - -___ - -
- - -
-Bugfix: ServerDeleteOperation asset -> folder conversion typo #5735 - -Fix ServerDeleteOperation asset -> folder conversion typo - - -___ - -
- - -
-Nuke: loaders are filtering correctly #5739
-
-The variable name for filtering by extensions was not correct - it is supposed to be plural. It is fixed now and filtering works as expected.
-
-
-___
-
-
- - -
-Nuke: failing multiple thumbnails integration #5741
-
-This handles the situation when `ExtractReviewIntermediates` (previously `ExtractReviewDataMov`) has multiple outputs, including thumbnails that need to be integrated. Previously, integrating the thumbnail representation was causing an issue in the integration process. We have now resolved this by no longer integrating thumbnails as loadable representations. NOW the default is that thumbnail representations are NOT integrated (e.g. they will not show up in the DB, so they couldn't be loaded in the Loader) and no `_thumb.jpg` will be left in the `render` (most likely) publish folder. IF there is a need to override this behavior, please use `project_settings/global/publish/PreIntegrateThumbnails`.
-
-
-___
-
-
- - -
-AYON Settings: Fix global overrides #5745 - -The `output` dictionary that gets passed into `ayon_settings._convert_global_project_settings` gets replaced when converting the settings for `ExtractOIIOTranscode`. This results in `global` not being in the output dictionary and thus the defaults being used and not the project overrides. - - -___ - -
- - -
-Chore: AYON query functions arguments #5752 - -Fixed how `archived` argument is handled in get subsets/assets function. - - -___ - -
- -### **๐Ÿ”€ Refactored code** - - -
-Publisher: Refactor Report Maker plugin data storage to be a dict by plugin.id #5668
-
-Refactor the Report Maker plugin data storage to be a dict keyed by `plugin.id`. Also fixes the `_current_plugin_data` type in `__init__`.
-
-
-___
-
-
- - -
-Chore: Refactor Resolve into new style HostBase, IWorkfileHost, ILoadHost #5701 - -Refactor Resolve into new style HostBase, IWorkfileHost, ILoadHost - - -___ - -
- -### **Merged pull requests** - - -
-Chore: Maya reduce get project settings calls #5669 - -Re-use system settings / project settings where we can instead of requerying. - - -___ - -
- - -
-Extended error message when getting subset name #5649
-
-Each Creator uses the `get_subset_name` function, which collects context data and fills the configured template with placeholders. If any key is missing in the template, a non-descriptive error is thrown. This should provide a more verbose message, sketched below.
-
-
-___
-
-
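-
-A hedged sketch of the kind of message this produces (the exception type and wording are illustrative):
-
-```python
-def fill_template(template, data):
-    try:
-        return template.format(**data)
-    except KeyError as exc:
-        raise KeyError(
-            "Missing key {} in subset name template {!r}. "
-            "Available keys: {}".format(exc, template, ", ".join(sorted(data))))
-
-
-fill_template("{family}{Task}", {"family": "render"})
-# KeyError: Missing key 'Task' in subset name template '{family}{Task}'.
-# Available keys: family
-```
-
-
-___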
- - -
-Tests: Remove checks for env var #5696
-
-The env var will be filled in the `env_var` fixture; here it is too early to check.
-
-
-___
-
-
- - - - -## [3.17.1](https://github.com/ynput/OpenPype/tree/3.17.1) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.17.0...3.17.1) - -### **๐Ÿ†• New features** - - -
-Unreal: Yeti support #5643 - -Implemented Yeti support for Unreal. - - -___ - -
- - -
-Houdini: Add Static Mesh product-type (family) #5481
-
-This PR adds support for publishing Unreal Static Meshes in Houdini as FBX. Quick recap:
-- [x] Add UE Static Mesh Creator
-- [x] Dynamic subset name like in Maya
-- [x] Collect Static Mesh Type
-- [x] Update collect output node
-- [x] Validate FBX output node
-- [x] Validate mesh is static
-- [x] Validate Unreal Static Mesh Name
-- [x] Validate Subset Name
-- [x] FBX Extractor
-- [x] FBX Loader
-- [x] Update OP Settings
-- [x] Update AYON Settings
-
-
-___
-
-
- - -
-Launcher tool: Refactor launcher tool (for AYON) #5612 - -Refactored launcher tool to new tool. Separated backend and frontend logic. Refactored logic is AYON-centric and is used only in AYON mode, so it does not affect OpenPype. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya: Use custom staging dir function for Maya renders - OP-5265 #5186 - -Check for custom staging dir when setting the renders output folder in Maya. - - -___ - -
- - -
-Colorspace: updating file path detection methods #5273 - -Support for OCIO v2 file rules integrated into the available color management API - - -___ - -
- - -
-Chore: add default isort config #5572 - -Add default configuration for isort tool - - -___ - -
- - -
-Deadline: set PATH environment in deadline jobs by GlobalJobPreLoad #5622
-
-This PR makes `GlobalJobPreLoad` set the `PATH` environment variable in Deadline jobs so that we don't have to use the full executable path for Deadline to launch the DCC app. This trick should save us from adding logic to pass the Houdini patch version and from modifying the Houdini Deadline plugin, and it should work with other DCCs (see the sketch below).
-
-
-___
-
-
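-
-A hedged sketch of the idea (the wiring is illustrative, not the actual `GlobalJobPreLoad` code; `SetProcessEnvironmentVariable` is part of Deadline's plugin scripting API):
-
-```python
-import os
-
-
-def inject_dcc_path(deadlinePlugin, dcc_executable):
-    # Prepend the DCC executable directory to PATH so Deadline can launch
-    # the app by name without a full, version-specific path.
-    dcc_dir = os.path.dirname(dcc_executable)
-    path = os.environ.get("PATH", "")
-    deadlinePlugin.SetProcessEnvironmentVariable(
-        "PATH", dcc_dir + os.pathsep + path)
-```
-
-
-___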
- - -
-nuke: extract review data mov read node with expression #5635
-
-Some productions might have set default values for Read nodes; those settings no longer collide.
-
-
-___
-
-
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Support new publisher for colorsets validation. #5630
-
-Fix `validate_color_sets` for the new publisher. In current `develop` the repair option does not appear due to wrong error raising.
-
-
-___
-
-
- - -
-Houdini: Camera Loader fix mismatch for Maya cameras #5584 - -This PR adds -- A workaround to match Maya render mask in Houdini -- `SetCameraResolution` inventory action -- set camera resolution when loading or updating camera - - -___ - -
- - -
-Nuke: fix set colorspace on writes #5634 - -Colorspace is set correctly to any write node created from publisher. - - -___ - -
- - -
-TVPaint: Fix review family extraction #5637 - -Extractor marks representation of review instance with review tag. - - -___ - -
- - -
-AYON settings: Extract OIIO transcode settings #5639
-
-Output definitions of Extract OIIO transcode now have a name to match the OpenPype settings, and the settings are converted to a dictionary during settings conversion.
-
-
-___
-
-
- - -
-AYON: Fix task type short name conversion #5641 - -Convert AYON task type short name for OpenPype correctly. - - -___ - -
- - -
-colorspace: missing `allowed_exts` fix #5646
-
-The colorspace module no longer fails due to a missing `allowed_exts` attribute.
-
-
-___
-
-
- - -
-Photoshop: remove trailing underscore in subset name #5647
-
-If the {layer} placeholder is at the end of the subset name template and not used (for example in `auto_image`, where separating by layer doesn't make any sense), a trailing '_' was kept. This updates the cleaning logic (see the sketch below) and extracts it, as it might be similar in the regular `image` instance.
-
-
-___
-
-
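-
-A minimal sketch of the cleanup (the template and data are assumed examples):
-
-```python
-template = "{family}{Task}_{layer}"
-data = {"family": "image", "Task": "Compositing", "layer": ""}  # layer unused
-
-subset_name = template.format(**data)  # "imageCompositing_" - trailing "_"
-subset_name = subset_name.rstrip("_")  # "imageCompositing"
-print(subset_name)
-```
-
-
-___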
- - -
-traypublisher: missing `assetEntity` in context data #5648
-
-The issue with the missing `assetEntity` key in context data is no longer a problem.
-
-
-___
-
-
- - -
-AYON: Workfiles tool save button works #5653
-
-Fixes the Save As button in the workfiles tool. (It is a mystery why it stopped working.)
-
-
-___
-
-
- - -
-Max: bug fix delete items from container #5658
-
-Fixes the bug shown when clicking "Delete Items from Container", selecting nothing and pressing OK.
-
-
-___
-
-
- -### **๐Ÿ”€ Refactored code** - - -
-Chore: Remove unused functions from Fusion integration #5617 - -Cleanup unused code from Fusion integration - - -___ - -
- -### **Merged pull requests** - - -
-Increase timeout for deadline test #5654
-
-DL picks up jobs quite slowly, so bump up the delay.
-
-
-___
-
-
- - - - -## [3.17.0](https://github.com/ynput/OpenPype/tree/3.17.0) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.7...3.17.0) - -### **๐Ÿš€ Enhancements** - - -
-Chore: Remove schema from OpenPype root #5355 - -Remove unused schema directory in root of repository which was moved inside openpype/pipeline/schema. - - -___ - -
- - -
-Igniter: Allow custom Qt scale factor rounding policy #5554 - -Do not force `PassThrough` rounding policy if different policy is defined via env variable. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Chore: Lower urllib3 to support older OpenSSL #5538 - -Lowered `urllib3` to `1.26.16` to support older OpenSSL. - - -___ - -
- - -
-Chore: Do not try to add schema to zip files #5557
-
-Do not add the `schema` folder to the zip file. This fixes an issue caused by https://github.com/ynput/OpenPype/pull/5355.
-
-
-___
-
-
- - -
-Chore: Lower click dependency version #5629 - -Lower click version to support older versions of python. - - -___ - -
- -### **Merged pull requests** - - -
-Bump certifi from 2023.5.7 to 2023.7.22 #5351 - -Bumps [certifi](https://github.com/certifi/python-certifi) from 2023.5.7 to 2023.7.22. -
-
-
-___
-
-
- - - - -## [3.16.7](https://github.com/ynput/OpenPype/tree/3.16.7) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.6...3.16.7) - -### **๐Ÿ†• New features** - - -
-Maya: Extract active view as thumbnail when no thumbnail set #5426 - -This sets the Maya instance's thumbnail to the current active view if no thumbnail was set yet. - - -___ - -
- - -
-Maya: Implement USD publish and load using native `mayaUsdPlugin` #5573 - -Implement Creator and Loaders for extraction and loading of USD files using Maya's own `mayaUsdPlugin`. Also adds support for loading a `usd` file into an Arnold Standin (`aiStandin`) and assigning looks to it. - - -___ - -
- - -
-AYON: Ignore separated modules #5619 - -Do not load already separated modules from default directory. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya: Reduce amount of code for Collect Looks #5253 - -- Refactor `get_file_node_files` because popping from `paths` by index should have been done in reversed order anyway. It's now changed to not need popping at all. -- Removed the unused `RENDERER_NODE_TYPES` and the if-branch which collected an unused `node_attrs` list and collected members, which was also done outside of the if-branch and thus generated no extra data. -- Collected all materials from look set attributes at once instead of with multiple queries -- Collected all file nodes in history from a single query instead of per type -- Restructured assignment of `instance.data["resources"]` to be more readable -- Cached `PXR_NODES` only once (Note: plugin load is checked on discovery of the collect look plugin) instead of querying plugin load and its nodes per file node per attribute -- Removed some debug logs and combined some messages - - -___ - -
- - -
-AYON: Mark deprecated settings in Maya #5627 - -Added deprecation info to the docstrings of Maya color management settings. Resolves: https://github.com/ynput/OpenPype/issues/5556 - - -___ - -
- - -
-Max: switching versions of maxScene maintain parentage/links with the loaders #5424 - -When using the scene inventory to manage or update the version of loaded objects, the linked modifiers and parentage of the objects are kept. Meanwhile, loaded objects from all loaders are no longer parented to the container with OP data. - - -___ - -
- - -
-3ds max: small tweaks to obj extractor and model publishing flow #5605 - -There might be situations where the OBJ Extractor passes without failure but no obj file is produced. This adds a simple check directly into the extractor to catch it earlier than in the integration phase. Also switched `Validate USD Plugin` to optional, because it always ran no matter whether Extract USD was enabled or not, hindering testing (and publishing). - - -___ - -
- - -
-TVPaint: Plugin can be reopened #5610 - -TVPaint plugin can be reopened. - - -___ - -
- - -
-Maya: Remove context prompt #5632 - -More of a plea than a PR, but could we please remove the context prompt in Maya when switching tasks? - - -___ - -
- - -
-General: Create a desktop icon is checked #5636 - -In OP Installer `Create a desktop icon` is checked by default. -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Extract look is not AYON compatible - OP-5375 #5341 - -The textures that would use hardlinking go through texture processors. Currently all texture processors are hardcoded to copy textures instead of respecting the setting that forces copying. The texture processors were last modified 4 months ago, so effectively all clients on any pipeline updated in the last 4 months won't be utilizing hardlinking at all, since the hardcoded texture processors copy textures no matter the OS. This opts for completely disabling the hardlinking feature while we figure out what to do about it. - - -___ - -
- - -
-Maya: Multiverse USD Override inherit from correct new style creator #5566 - -Fix Creator for Multiverse USD Override by inheriting from correct new style creator class type - - -___ - -
- - -
-Max: Bug Fix Alembic Loaders with Ornatrix #5434 - -Fix the Alembic loader to support both Ornatrix Alembic and Max Alembic. Adds Ornatrix Alembic loaders for loading Alembics with Ornatrix-related modifiers. - - -___ - -
- - -
-AYON: Avoid creation of duplicated links #5593 - -Handle cases when an existing link should be recreated, and do not create the same link multiple times during a single publish. - - -___ - -
- - -
-Extract Review: Multilayer specification for ffmpeg #5613 - -Extract Review now specifies the layer name when the EXR is multilayer. - - -___ - -
- - -
-Fusion: added support for Fusion 17 #5614 - -Fusion 17 still uses Python 3.6, which causes issues with some of our delivered libraries. Vendored the necessary set of libraries for Python 3.6. - - -___ - -
- - -
-Publisher: Fix screenshot widget #5615 - -Use the correct super method name. EDITED: Removed the fade animation, which is not triggered in some cases; e.g. in Nuke the animation does not start. I expect that is caused by `exec_` on the dialog, which blocks event processing for the animation; even when I added the window as parent it still didn't trigger the registered callback. Modified how the "empty" space is left unfilled by using paths instead of clear mode on the painter. Added render hints for antialiasing. - - -___ - -
- - -
-Photoshop: auto_images without alpha will not fail #5620 - -ExtractReview caused an issue on `auto_image` instances without an alpha channel; this fixes it. - - -___ - -
- - -
-Fix - _id key used instead of id in get_last_version_by_subset_name #5626 - -Plain 'id' was not returned because of the values in `fields`, which caused a KeyError. - - -___ - -
- - -
-Bugfix: create symlinks for ssl libs on Centos 7 #5633 - -Docker build was missing `libssl.1.1.so` and `libcrypto.1.1.so` symlinks needed by the executable itself, because Python is now explicitly built with OpenSSL 1.1.1 - - -___ - -
- -### **๐Ÿ“ƒ Documentation** - - -
-Documentation/local settings #5102 - -I completed the "Working with local settings" page. I updated the screenshot, wrote an explanation for each empty category and, where available, linked to the more detailed pages that already exist. I also added the "Environments" category. - - -___ - -
- - - - -## [3.16.6](https://github.com/ynput/OpenPype/tree/3.16.6) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.5...3.16.6) - -### **๐Ÿ†• New features** - - -
-Workfiles tool: Refactor workfiles tool (for AYON) #5550 - -Refactored workfiles tool to new tool. Separated backend and frontend logic. Refactored logic is AYON-centric and is used only in AYON mode, so it does not affect OpenPype. - - -___ - -
- - -
-AfterEffects: added validator for missing files in FootageItems #5590 - -A published composition in AE could contain multiple FootageItems as layers. If a FootageItem references an imported file that doesn't exist, the render triggered by the publish process will silently fail and no output is generated. This could cause a failure later in the process (in `ExtractReview`) with an unclear reason. This PR adds validation to protect against this. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya: Yeti Cache Include viewport preview settings from source #5561 - -When publishing and loading Yeti caches, persist the display output and preview colors and settings to ensure consistency in the view. - - -___ - -
- - -
-Houdini: validate colorspace in review rop #5322 - -Adds a validator that checks whether the 'OCIO Colorspace' parameter on the review ROP is set to a valid value. It is a step towards managing colorspace in the review ROP. Valid values are the ones in the dropdown menu. This validator also provides some helper actions. This PR is related to #4836 and #4833. - - -___ - -
- - -
-Colorspace: adding abstraction of publishing related functions #5497 - -The functionality of Colorspace has been abstracted for greater usability. - - -___ - -
- - -
-Nuke: removing redundant workfile colorspace attributes #5580 - -Nuke root workfile colorspace knobs have long been configured automatically via config roles, and the default values also work well. Therefore there is no need for pipeline-managed knobs. - - -___ - -
- - -
-Ftrack: Less verbose logs for Ftrack integration in artist facing logs #5596 - -- Reduce artist-facing logs for component integration for Ftrack -- Avoid the "Comment is not set" log in the artist-facing report for Kitsu and Ftrack -- Remove the info log about `ffprobe` inspecting a file (changed to a debug log) -- Interesting to see, however, that it ffprobes the same jpeg twice - but maybe once for the thumbnail? - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Fix rig validators for new out_SET and controls_SET names #5595 - -Fix usage of `out_SET` and `controls_SET` since #5310 because they can now be prefixed by the subset name. - - -___ - -
- - -
-TrayPublisher: set default frame values to sequential data #5530 - -We inherit default frame handles and fps data either from the project or set them to 0. This is just for the case where a production decides not to ingest the sequential representations with asset-based metadata. - - -___ - -
- - -
-Publisher: Screenshot opacity value fix #5576 - -Fix opacity value. - - -___ - -
- - -
-AfterEffects: fix imports of image sequences #5581 - -#4602 broke imports of image sequences. - - -___ - -
- - -
-AYON: Fix representation context conversion #5591 - -Do not fix `"folder"` key in representation context until it is needed. - - -___ - -
- - -
-ayon-nuke: default factory to lists #5594 - -Default factories were missing in settings schemas for complicated objects like lists, which caused saving of settings to fail. - - -___ - -
- - -
-Maya: Fix look assigner showing no asset if 'not found' representations are present #5597 - -Fix the Maya Look assigner failing to show any content if it finds an invalid container for which it can't find the asset in the current project (this can happen when e.g. loading something from a library project). There was already logic to avoid this, but there was a bug where it used a variable `_id` which did not exist and likely had to be `asset_id`. I've fixed that and improved the logged message a bit, e.g.: -``` -// Warning: openpype.hosts.maya.tools.mayalookassigner.commands : Id found on 22 nodes for which no asset is found database, skipping '641d78ec85c3c5b102e836b0' -``` -The issue isn't necessarily related to NOT FOUND representations but in essence boils down to finding nodes with asset ids that do not exist in the current project, which could very well just be local meshes in your scene. **Note:** I've excluded logging the nodes themselves because that tends to be a very long list of nodes. The only downside to removing that is that it's unclear which nodes are related to that `id`. If there are any ideas on how to still provide a concise informational message about that, that'd be great so I could add it. Things I had considered: -- Report the containers; the issue here is that it's about asset ids on nodes which don't HAVE to be in containers - it could be local geometry -- Report the namespaces; the issue here is that it could be nodes without namespaces (plus potentially not about ALL nodes in a namespace) -- Report the short names of the nodes; it's shorter and readable but still likely a lot of nodes. @tokejepsen @LiborBatek any other ideas? - - -___ - -
- - -
-Photoshop: fixed blank Flatten image #5600 - -"Flatten image" is a simplified publishing approach where all visible layers are flattened and published together. This image could be used as a reference etc. It is implemented by an auto creator which wasn't updated after the first publish, which would result in newly created layers missing after the `auto_image` instance was created. - - -___ - -
- - -
-Blender: Remove Hardcoded Subset Name for Reviews #5603 - -Fixes hardcoded subset name for Reviews in Blender. - - -___ - -
- - -
-TVPaint: Fix tool callbacks #5608 - -Do not wait for callback to finish. - - -___ - -
- -### **๐Ÿ”€ Refactored code** - - -
-Chore: Remove unused variables and cleanup #5588 - -Removing some unused variables. In some cases the unused variables _seemed like they should've been used - maybe?_ so please **double-check whether the code doesn't hint at an already existing bug**. Also tweaked some other small bugs in code and tweaked logging levels. - - -___ - -
- -### **Merged pull requests** - - -
-Chore: Loader log deprecation warning for 'fname' attribute #5587 - -Since https://github.com/ynput/OpenPype/pull/4602 the `fname` attribute on the `LoaderPlugin` should've been deprecated and set for removal over time. However, no deprecation warning was logged whatsoever and thus one usage appears to have sneaked in (fixed with this PR) and a new one tried to sneak in with a recent PR - - -___ - -
- - - - -## [3.16.5](https://github.com/ynput/OpenPype/tree/3.16.5) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.4...3.16.5) - -### **๐Ÿ†• New features** - - -
-Attribute Definitions: Multiselection enum def #5547 - -Added `multiselection` option to `EnumDef`. - - -___ - -
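A minimal sketch of how the new option might look when declaring attribute definitions. The `multiselection` keyword comes from this entry; the attribute key and the rest of the signature are illustrative assumptions:

```python
# Hedged sketch: EnumDef is part of OpenPype's attribute definitions.
from openpype.lib import EnumDef


def get_attribute_defs():
    return [
        EnumDef(
            "review_outputs",             # hypothetical attribute key
            items=["mp4", "mov", "png"],
            multiselection=True,          # the new option from this PR
            label="Review outputs",
        ),
    ]
```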
- -### **๐Ÿš€ Enhancements** - - -
-Farm: adding target collector #5494 - -Enhancing farm publishing workflow. - - -___ - -
- - -
-Maya: Optimize validate plug-in path attributes #5522 - -- Optimize query (use `cmds.ls` once) -- Add Select Invalid action -- Improve validation report -- Avoid "Unknown object type" errors - - -___ - -
- - -
-Maya: Remove Validate Instance Attributes plug-in #5525 - -Remove Validate Instance Attributes plug-in. - - -___ - -
- - -
-Enhancement: Tweak logging for artist facing reports #5537 - -Tweak the logging of publishing for global, Deadline, Maya and a Fusion plugin to have a cleaner artist-facing report. -- Fix context being reported correctly from CollectContext -- Fix ValidateMeshArnoldAttributes: fix when Arnold is not loaded, fix applying settings, fix for when ai attributes do not exist - - -___ - -
- - -
-AYON: Update settings #5544 - -Updated settings in AYON addons and conversion of AYON settings in OpenPype. - - -___ - -
- - -
-Chore: Removed Ass export script #5560 - -Removed Arnold render script, which was obsolete and unused. - - -___ - -
- - -
-Nuke: Allow for knob values to be validated against multiple values. #5042 - -Knob values can now be validated against multiple values, so you can allow write nodes to be `exr` or `png`, or `16-bit` or `32-bit`. - - -___ - -
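For illustration, a hedged sketch of what checking a knob against several allowed values looks like with the Nuke Python API (the node and knob names are hypothetical, not the plugin's actual code):

```python
import nuke  # only available inside a Nuke session

write_node = nuke.toNode("Write1")   # hypothetical node name
allowed_values = ["exr", "png"]      # several values may now pass validation
current = write_node["file_type"].value()
if current not in allowed_values:
    raise ValueError(
        "Knob 'file_type' is '{}', expected one of: {}".format(
            current, ", ".join(allowed_values)))
```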
- - -
-Enhancement: Cosmetics for Higher version of publish already exists validation error #5190 - -Fix double spaces in the message. - - -___ - -
- - -
-Nuke: publish existing frames on farm #5409 - -This PR proposes adding a fourth option to Nuke render publishing called "Use Existing Frames - Farm". This would be useful when the farm is busy or when the artist lacks enough farm licenses. Additionally, some artists prefer rendering on the farm but still want to check frames before publishing. By adding the "Use Existing Frames - Farm" option, artists will have more flexibility and control over their render publishing process. This enhancement will streamline the workflow and improve efficiency for Nuke users. - - -___ - -
- - -
-Unreal: Create project in temp location and move to final when done #5476 - -Create Unreal project in local temporary folder and when done, move it to final destination. - - -___ - -
- - -
-TrayPublisher: adding audio product type into default presets #5489 - -Adding Audio product type into default presets so anybody can publish audio to their shots. - - -___ - -
- - -
-Global: avoiding cleanup of flagged representation #5502 - -Publishing folder can be flagged as persistent at representation level. - - -___ - -
- - -
-General: missing tag could raise error #5511 - -- Avoiding a potential situation where a missing Tag key could raise an error - - -___ - -
- - -
-Chore: Queued event system #5514 - -Implemented an event system with more predictable behavior. If an event is triggered during another event's callback, it is not processed immediately but waits until all callbacks of previous events are done. The event system also allows events not to be triggered directly when `emit_event` is called, which gives the option to process events in custom loops. See the sketch below. - - -___ - -
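A hedged sketch of the queued behavior (the class name is assumed from this PR's title; verify against `openpype.lib.events`):

```python
from openpype.lib.events import QueuedEventSystem  # class name assumed

event_system = QueuedEventSystem()


def on_first(event):
    # Emitted during another callback, so this is queued and processed
    # only after all callbacks of "first.topic" have finished.
    event_system.emit("second.topic", {}, "example-source")
    print("first callback done")


event_system.add_callback("first.topic", on_first)
event_system.add_callback("second.topic", lambda event: print("second callback"))
event_system.emit("first.topic", {}, "example-source")
# Expected order: "first callback done", then "second callback".
```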
- - -
-Publisher: Tweak log message to provide plugin name after "Plugin" #5521 - -Fix logged message for settings automatically applied to plugin attributes - - -___ - -
- - -
-Houdini: Improve VDB Selection #5523 - -Improves VDB selection. If the selection is a `SopNode`: return the selected SOP node. If the selection is an `ObjNode`: get the output node with the minimum 'outputidx', or the node with the display flag. - - -___ - -
- - -
-Maya: Refactor/tweak Validate Instance In same Context plug-in #5526 - -- Chore/Refactor: Re-use existing select invalid and repair actions -- Enhancement: provide more elaborate PublishValidationError report -- Bugfix: fix "optional" support by using `OptionalPyblishPluginMixin` base class. - - -___ - -
- - -
-Enhancement: Update houdini main menu #5527 - -This PR adds two updates: -- dynamic main menu -- dynamic asset name and task - - -___ - -
- - -
-Houdini: Reset FPS when clicking Set Frame Range #5528 - -_Similar to Maya,_ make `Set Frame Range` reset the FPS as well. Issue: https://github.com/ynput/OpenPype/issues/5516 - - -___ - -
- - -
-Enhancement: Deadline plugins optimize, cleanup and fix optional support for validate deadline pools #5531 - -- Fix optional support of validate deadline pools -- Query deadline webservice only once per URL for verification, and once for available deadline pools instead of for every instance -- Use `deadlineUrl` in `instance.data` when validating pools if it is set. -- Code cleanup: Re-use existing `requests_get` implementation - - -___ - -
- - -
-Chore: PowerShell script for docker build #5535 - -Added PowerShell script to run docker build. - - -___ - -
- - -
-AYON: Deadline expand userpaths in executables list #5540 - -Expand `~` paths in the executables list. - - -___ - -
- - -
-Chore: Use correct git url #5542 - -Fixed github url in README.md. - - -___ - -
- - -
-Chore: Create plugin does not expect system settings #5553 - -System settings are no longer passed to create plugin initialization (or `apply_settings`). - - -___ - -
- - -
-Chore: Allow custom Qt scale factor rounding policy #5555 - -Do not force `PassThrough` rounding policy if different policy is defined via env variable. - - -___ - -
- - -
-Houdini: Fix outdated containers pop-up on opening last workfile on launch #5567 - -Fix Houdini not showing outdated containers pop-up on scene open when launching with last workfile argument - - -___ - -
- - -
-Houdini: Improve errors e.g. raise PublishValidationError or cosmetics #5568 - -Improve errors, e.g. raise PublishValidationError, plus cosmetics. This also fixes the Increment Current File plug-in, which was previously broken due to an invalid import. - - -___ - -
- - -
-Fusion: Code updates #5569 - -Update Fusion code that contained obsolete parts. Removed the `switch_ui.py` script from Fusion along with the related script in scripts. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Validate Shape Zero fix repair action + provide informational artist-facing report #5524 - -Refactor to PublishValidationError to allow the RepairAction to work + provide informational report message - - -___ - -
- - -
-Maya: Fix attribute definitions for `CreateYetiCache` #5574 - -Fix attribute definitions for `CreateYetiCache` - - -___ - -
- - -
-Max: Optional Renderable Camera Validator for Render Instance #5286 - -Optional validation to check that the renderable camera is set up correctly for Deadline submission. If it is not set up correctly, it won't pass validation and the user can perform repair actions. - - -___ - -
- - -
-Max: Adding custom modifiers back to the loaded objects #5378 - -The custom parameters of OpenpypeData don't show in the loaded container when it is loaded through the loader. - - -___ - -
- - -
-Houdini: Use default_variant to Houdini Node TAB Creator #5421 - -Use the default variant of the creator plugins on the interactive creator from the TAB node search instead of hard-coding it to `Main`. - - -___ - -
- - -
-Nuke: adding inherited colorspace from instance #5454 - -Thumbnails are extracted with inherited colorspace collected from rendering write node. - - -___ - -
- - -
-Add kitsu credentials to deadline publish job #5455 - -This PR hopefully fixes this issue #5440 - - -___ - -
- - -
-AYON: Fill entities during editorial #5475 - -Fill entities and update template data on instances during extract AYON hierarchy. - - -___ - -
- - -
-Ftrack: Fix version 0 when integrating to Ftrack - OP-6595 #5477 - -Fix publishing version 0 to Ftrack. - - -___ - -
- - -
-OCIO: windows unc path support in Nuke and Hiero #5479 - -Hiero and Nuke do not support Windows UNC path formatting in the OCIO environment variable. - - -___ - -
- - -
-Deadline: Added super call to init #5480 - -Deadline 10.3 requires plugins inheriting from DeadlinePlugin to call the super's `__init__` explicitly. - - -___ - -
- - -
-Nuke: fixing thumbnail and monitor out root attributes #5483 - -The Nuke root colorspace settings schema for Thumbnail and Monitor Out gradually changed between versions 12, 13 and 14, and we needed to address those changes individually per version. - - -___ - -
- - -
-Nuke: fixing missing `instance_id` error #5484 - -Workfiles with instances created in the old publisher workflow were raising an error during the conversion method since they were missing the `instance_id` key introduced in the new publisher workflow. - - -___ - -
- - -
-Nuke: existing frames validator is repairing render target #5486 - -Nuke is now correctly repairing render target after the existing frames validator finds missing frames and repair action is used. - - -___ - -
- - -
-added UE to extract burnins families #5487 - -This PR fixes missing burnins in reviewables when rendering from UE. -___ - -
- - -
-Harmony: refresh code for current Deadline #5493 - -- Added support in Deadline Plug-in for new versions of Harmony, in particular version 21 and 22. -- Remove review=False flag on render instance -- Add farm=True flag on render instance -- Fix is_in_tests function call in Harmony Deadline submission plugin -- Force HarmonyOpenPype.py Deadline Python plug-in to py3 -- Fix cosmetics/hound in HarmonyOpenPype.py Deadline Python plug-in - - -___ - -
- - -
-Publisher: Fix multiselection value #5505 - -Selecting multiple instances in the Publisher no longer causes all instances to change all publish attributes to the same value. - - -___ - -
- - -
-Publisher: Avoid warnings on thumbnails if source image also has alpha channel #5510 - -Avoids the following warning from `ExtractThumbnailFromSource`: -``` -// pyblish.ExtractThumbnailFromSource : oiiotool WARNING: -o : Can't save 4 channels to jpeg... saving only R,G,B -``` - - - -___ - -
- - -
-Update ayon-python-api #5512 - -Update ayon python api and related callbacks. - - -___ - -
- - -
-Max: Fixing the bug of falling back to use workfile for Arnold or any renderers except Redshift #5520 - -Fix the bug of falling back to using the workfile for Arnold. - - -___ - -
- - -
-General: Fix Validate Publish Dir Validator #5534 - -A nonsensical "family" key was used instead of the real value ('render' etc.), which would result in a wrong translation of intermediate family names. Updated the docstring. - - -___ - -
- - -
-have the addons loading respect a custom AYON_ADDONS_DIR #5539 - -When using a custom AYON_ADDONS_DIR environment variable, the launcher uses it correctly and downloads and extracts addons there; however, when running, AYON did not respect this environment variable. - - -___ - -
- - -
-Deadline: files on representation cannot be single item list #5545 - -Further logic expects that single-item files will be only a string, not a list (e.g. `repre["files"] = "abc.exr"`, not `repre["files"] = ["abc.exr"]`); see the sketch below. This would cause an issue in ExtractReview later. This could happen if Deadline rendered a single-frame file with a different frame value. - - -___ - -
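A pure-Python sketch of the normalization this fix implies (illustrative, not the plugin code itself):

```python
# A single rendered frame may come back from Deadline as a one-item list.
repre = {"files": ["abc.exr"]}

files = repre["files"]
if isinstance(files, (list, tuple)) and len(files) == 1:
    # Downstream logic (e.g. ExtractReview) expects a plain string here.
    repre["files"] = files[0]

print(repre["files"])  # abc.exr
```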
- - -
-Webpublisher: better encode list values for click #5546 - -Targets could be a list; the original implementation pushed them as separate items, but they must be added as `--targets webpublish --targets filepublish` (see the sketch below). `webpublish_routes` handles triggering from the UI; changes in `publish_functions` handle triggering from the command line (for tests and API access). - - -___ - -
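For reference, this is how click gathers a repeated option into a list-like value (a self-contained sketch, not the webpublisher code):

```python
import click


@click.command()
@click.option("--targets", multiple=True)
def publish(targets):
    # Invoked as: publish --targets webpublish --targets filepublish
    # click collects the repeated flag into a tuple of strings.
    click.echo(list(targets))


if __name__ == "__main__":
    publish()
```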
- - -
-Houdini: Introduce imprint function for correct version in hda loader #5548 - -Resolve #5478 - - -___ - -
- - -
-AYON: Fill entities during editorial (2) #5549 - -Fix changes made in https://github.com/ynput/OpenPype/pull/5475. - - -___ - -
- - -
-Max: OP Data updates in Loaders #5563 - -Fix the bug of loaders not being able to load objects when iterating keys and values of the dict. Max prefers a plain list over a list inside a dict. - - -___ - -
- - -
-Create Plugins: Better check of overriden '__init__' method #5571 - -Create plugins no longer log warning messages about each create plugin because of a wrong `__init__` method check. - - -___ - -
- -### **Merged pull requests** - - -
-Tests: fix unit tests #5533 - -Fixed failing tests. Updated Unreal's validator to match the removed general one, which had a couple of issues fixed. - - -___ - -
- - - - -## [3.16.4](https://github.com/ynput/OpenPype/tree/3.16.4) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.3...3.16.4) - -### **๐Ÿ†• New features** - - -
-Feature: Download last published workfile specify version #4998 - -Setting the `workfile_version` key in the hook's `self.launch_context.data` allows you to specify the workfile version you want the sync service to download if none is matched locally. This is helpful if the last version hasn't been correctly published/synchronized and you want to recover the previous one (or whichever you'd like). The version can be set in two ways (see the sketch below): -- OP's absolute version, matching the `version` index in the DB. -- A relative version in reverse order from the last one: `-2`, `-3`... I don't know where I should write documentation about that. - - -___ - -
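A minimal prelaunch hook sketch of the mechanism described above. The data key and base class follow this entry and OpenPype's launch hook API; the hook class itself is hypothetical:

```python
from openpype.lib.applications import PreLaunchHook


class PinWorkfileVersion(PreLaunchHook):
    """Hypothetical hook pinning which workfile version is downloaded."""

    def execute(self):
        # -2 = the version before the last one (relative, reverse order).
        # A positive number would be an absolute version index from the DB.
        self.launch_context.data["workfile_version"] = -2
```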
- -### **๐Ÿš€ Enhancements** - - -
-Maya: allow not creation of group for Import loaders #5427 - -This PR enhances a previous one. All ReferenceLoaders can now skip wrapping imported products in an explicit group. `Import` loaders have the same options. Control for this is separate in Settings, e.g. Reference might wrap loaded items in a group while `Import` might not. - - -___ - -
- - -
-3dsMax: Settings for Ayon #5388 - -Max Addon Setting for Ayon - - -___ - -
- - -
-General: Navigation to Folder from Launcher #5404 - -Adds an action in launcher to open the directory of the asset. - - -___ - -
- - -
-Chore: Default variant in create plugin #5429 - -The `default_variant` attribute on create plugins always returns a string, and if the default variant is not filled, other ways to get one are implemented. - - -___ - -
- - -
-Publisher: Thumbnail widget enhancements #5439 - -The thumbnails widget in the Publisher has 3 new options to choose from: Paste (from clipboard), Take screenshot and Browse. The Clear button and the new options are not visible by default; the user must expand the options button to show them. - - -___ - -
- - -
-AYON: Update ayon api to '0.3.5' #5460 - -Updated ayon-python-api to 0.3.5. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-AYON: Apply unknown ayon settings first #5435 - -Settings of custom addons are available in converted settings. - - -___ - -
- - -
-Maya: Fix wrong subset name of render family in deadline #5442 - -The new publisher was creating different subset names than before, which resulted in duplication of the `render` string in the final subset name of the `render` family published on Deadline. This PR solves that; it also fixes issues with legacy instances from the old publisher, matching the subset name as it was before. This solves the same issue in the Max implementation. - - -___ - -
- - -
-Maya: Fix setting of version to workfile instance #5452 - -If multiple renderlayer instances are published, the previous logic resulted in an unpredictable rewrite of the instance family to 'workfile' if `Sync render version with workfile` was on. - - -___ - -
- - -
-Maya: Context plugin shouldn't be tied to family #5464 - -The `Maya Current File` collector was tied to `workfile` unnecessarily. It should run even if a `workfile` instance is not being published. - - -___ - -
- - -
-Unreal: Fix loading hero version for static and skeletal meshes #5393 - -Fixed a problem with loading hero versions for static and skeletal meshes. - - -___ - -
- - -
-TVPaint: Fix 'repeat' behavior #5412 - -Calculation of frames for repeat behavior is working correctly. - - -___ - -
- - -
-AYON: Thumbnails cache and api prep #5437 - -Moved thumbnails cache from ayon python api to OpenPype and prepare AYON thumbnail resolver for new api functions. Current implementation should work with old and new ayon-python-api. - - -___ - -
- - -
-Nuke: Name of the Read Node should be updated correctly when switching versions or assets. #5444 - -Bug fixing of the read node's name not being updated correctly when setting version or switching asset. - - -___ - -
- - -
-Farm publishing: asymmetric handles fixed #5446 - -Handles are now set correctly on the farm-published product version if asymmetric handles were set in the shot attributes. - - -___ - -
- - -
-Scene Inventory: Provider icons fix #5450 - -Fix how provider icons are accessed in scene inventory. - - -___ - -
- - -
-Fix typo on Deadline OP plugin name #5453 - -Surprised that no one has hit this bug yet... but it seems like there was a typo on the name of the OP Deadline plugin when submitting jobs to it. - - -___ - -
- - -
-AYON: Fix version attributes update #5472 - -Fixed updates of attribs in AYON mode. - - -___ - -
- -### **Merged pull requests** - - -
-Added missing defaults for import_loader #5447 - - -___ - -
- - -
-Bug: Local settings don't open on 3.14.7 #5220 - -### Before posting a new ticket, have you looked through the documentation to find an answer? - -Yes I have - -### Have you looked through the existing tickets to find any related issues ? - -Not yet - -### Author of the bug - -@FadyFS - -### Version - -3.15.11-nightly.3 - -### What platform you are running OpenPype on? - -Linux / Centos - -### Current Behavior: - -the previous behavior (bug) : -![image](https://github.com/quadproduction/OpenPype/assets/135602303/09bff9d5-3f8b-4339-a1e5-30c04ade828c) - - -### Expected Behavior: - -![image](https://github.com/quadproduction/OpenPype/assets/135602303/c505a103-7965-4796-bcdf-73bcc48a469b) - - -### What type of bug is it ? - -Happened only once in a particular configuration - -### Which project / workfile / asset / ... - -open settings with 3.14.7 - -### Steps To Reproduce: - -1. Run openpype on the 3.15.11-nightly.3 version -2. Open settings in 3.14.7 version - -### Relevant log output: - -_No response_ - -### Additional context: - -_No response_ - -___ - -
- - -
-Tests: Add automated targets for tests #5443 - -Without it plugins with 'automated' targets won't be triggered (eg `CloseAE` etc.) - - -___ - -
- - - - -## [3.16.3](https://github.com/ynput/OpenPype/tree/3.16.3) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.2...3.16.3) - -### **๐Ÿ†• New features** - - -
-AYON: 3rd party addon usage #5300 - -Prepare OpenPype code to be able to use the `ayon-third-party` addon, which supplies ffmpeg and OpenImageIO executables. Because both can define custom arguments (more than one), new functions were needed. The new functions are `get_ffmpeg_tool_args` and `get_oiio_tool_args`. They work similarly to the previous ones but return a list of strings instead of a single string. All places using the previous functions `get_ffmpeg_tool_path` and `get_oiio_tool_path` now use the new ones. They should be backwards compatible, even with the addon, if it returns a single argument. A usage sketch follows. - - -___ - -
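A hedged usage sketch of the new getters (argument and return shapes follow this entry's description; verify the exact signatures in `openpype.lib`):

```python
import subprocess

from openpype.lib import get_ffmpeg_tool_args, get_oiio_tool_args

# A list of arguments is returned instead of a single executable path,
# so wrappers like the third-party addon can prepend extra arguments.
ffmpeg_args = get_ffmpeg_tool_args("ffmpeg")
subprocess.run(ffmpeg_args + ["-i", "input.mov", "output.mp4"])

oiio_args = get_oiio_tool_args("oiiotool")
subprocess.run(oiio_args + ["input.exr", "-o", "output.jpg"])
```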
- - -
-AYON: Addon settings in OpenPype #5347 - -Moved settings addons to OpenPype server addon. Modified create package to create zip files for server for each settings addon and for openpype addon. - - -___ - -
- - -
-AYON: Add folder to template data #5417 - -Added `folder` to template data, so `{folder[name]}` can be used in templates. - - -___ - -
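A tiny illustration of what the new key enables, using plain `str.format` with made-up values:

```python
template = "{project[name]}/{folder[name]}/v{version:0>3}"
data = {
    "project": {"name": "demo"},
    "folder": {"name": "sh010"},  # the newly available key
    "version": 5,
}
print(template.format(**data))  # demo/sh010/v005
```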
- - -
-Option to start versioning from 0 #5262 - -This PR adds a settings option to start all versioning from 0. This PR replaces #4455. - - -___ - -
- - -
-Ayon: deadline implementation #5321 - -Quick implementation of Deadline in AYON. A new AYON plugin was added to the Deadline repository. - - -___ - -
- - -
-AYON: Remove AYON launch logic from OpenPype #5348 - -Removed AYON launch logic from OpenPype. The logic is outdated at this moment and is replaced by `ayon-launcher`. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Bug: Error on multiple instance rig with maya #5310 - -Changed the endswith method to startswith because the sets are automatically named out_SET, out_SET1, out_SET2, ... - - -___ - -
- - -
-Applications: Use prelaunch hooks to extract environments #5387 - -Environment variable preparation is based on prelaunch hooks. This should allow passing OCIO environment variables to farm jobs. - - -___ - -
- - -
-Applications: Launch hooks cleanup #5395 - -Use `set` instead of `list` for filtering attributes in launch hooks. Celaction hooks dir does not contain `__init__.py`. Celaction prelaunch hook is reusing `CELACTION_ROOT_DIR`. Launch hooks are using full import from `openpype.lib.applications`. - - -___ - -
- - -
-Applications: Environment variables order #5245 - -Changed the order in which environment variables are set: context environment variables are set first, then project environment overrides. Also, asset and task environment variables are optional. - - -___ - -
- - -
-Autosave preferences can be read after Nuke opens the script #5295 - -Looks like I need to open the script in Nuke to be able to correctly load the autosave preferences. This PR reads the Nuke script in context and offers overwriting the current script with the autosaved one if an autosave exists. - - -___ - -
- - -
-Resolve: Update with compatible resolve version and latest docs #5317 - -Missing information about compatible Resolve version and latest docs from https://github.com/ynput/OpenPype/tree/develop/openpype/hosts/resolve - - -___ - -
- - -
-Chore: Remove deprecated functions #5323 - -Removed functions/classes that are deprecated and marked to be removed. - - -___ - -
- - -
-Nuke Render and Prerender nodes Process Order - OP-3555 #5332 - -This PR exposes control over the order of processing of the instances by sorting the created instances. The sorting happens on the `render_order` knob and subset name. If the knob `render_order` is found on the instance, we'll sort by that first before sorting by subset name. `render_order` instances are processed before nodes without `render_order` (see the sketch below). This could be extended in the future by querying other knobs, but I don't know of a use case for this. Hardcoded the creator `order` attribute of the `prerender` class to be before the `render`. Could be exposed to the user/studio, but I don't know of a use case for this. - - -___ - -
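A minimal sketch of the ordering rule described above (illustrative only; the `instance.data` keys follow this entry):

```python
class FakeInstance:
    """Stand-in for a pyblish instance, for demonstration only."""
    def __init__(self, data):
        self.data = data


instances = [
    FakeInstance({"subset": "renderB"}),
    FakeInstance({"subset": "renderA", "render_order": 10}),
    FakeInstance({"subset": "prerenderMain", "render_order": 5}),
]


def render_sort_key(instance):
    render_order = instance.data.get("render_order")
    # Instances with a render_order come first, sorted by its value;
    # the rest fall back to sorting by subset name.
    return (render_order is None, render_order or 0, instance.data["subset"])


instances.sort(key=render_sort_key)
print([inst.data["subset"] for inst in instances])
# ['prerenderMain', 'renderA', 'renderB']
```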
- - -
-Unreal: Python Environment Improvements #5344 - -Automatically set `UE_PYTHONPATH` as `PYTHONPATH` when launching Unreal. - - -___ - -
- - -
-Unreal: Custom location for Unreal Ayon Plugin #5346 - -Added a new environment variable `AYON_BUILT_UNREAL_PLUGIN` to set an already existing and built Ayon Plugin for Unreal. - - -___ - -
- - -
-Unreal: Better handling of Exceptions in UE Worker threads #5349 - -Implemented a new `UEWorker` base class to handle exceptions during the execution of UE workers. - - -___ - -
- - -
-Houdini: Add farm toggle on creation menu #5350 - -Deadline farm publishing and rendering for Houdini was made possible by PR #4825; farm publishing is enabled by default on some ROP nodes, which may surprise new users (like me). I think adding a toggle (on by default) in the creation UI is better so that users will be aware that there's a farm option for this publish instance. ROPs modified: -- [x] Mantra ROP -- [x] Karma ROP -- [x] Arnold ROP -- [x] Redshift ROP -- [x] Vray ROP - - -___ - -
- - -
-Ftrack: Sync to avalon settings #5353 - -Added roles settings for sync to avalon action. - - -___ - -
- - -
-Chore: Schemas inside OpenPype #5354 - -Moved/copied schemas from repository root inside openpype/pipeline. - - -___ - -
- - -
-AYON: Addons creation enhancements #5356 - -Enhanced AYON addons creation. Fix issue with `Pattern` typehint. Zip filenames contain version. OpenPype package is skipping modules that are already separated in AYON. Updated settings of addons. - - -___ - -
- - -
-AYON: Update staging icons #5372 - -Updated staging icons for staging mode. - - -___ - -
- - -
-Enhancement: Houdini Update pointcache labels #5373 - -To me it's logical to find pointcache types listed one after another, but they were named differently. So, I made this PR to update their labels. - - -___ - -
- - -
-nuke: split write node product instance features #5389 - -Improving Write node product instances by allowing precise activation of specific features. - - -___ - -
- - -
-Max: Use the empty modifiers in container to store AYON Parameter #5396 - -Instead of adding AYON/OP Parameter along with other attributes inside the container, empty modifiers would be created to store AYON/OP custom attributes - - -___ - -
- - -
-AfterEffects: Removed unused imports #5397 - -Removed unused import from extract local render plugin file. - - -___ - -
- - -
-Nuke: adding BBox knob type to settings #5405 - -Nuke knob types in settings have a new `Box` type for reposition nodes like Crop or Reformat. - - -___ - -
- - -
-SyncServer: Existence of module is optional #5413 - -The existence of the SyncServer module is optional. Added the `sync_server` module back to ignored modules when the openpype addon is created for AYON. The `syncserver` command is marked as deprecated and redirected to the sync server CLI. - - -___ - -
- - -
-Webpublisher: Self contain test publish logic #5414 - -Moved test logic of publishing to webpublisher. Simplified `remote_publish` to remove webpublisher specific logic. - - -___ - -
- - -
-Webpublisher: Cleanup targets #5418 - -Removed `remote` target from webpublisher and replaced it with 2 targets `webpublisher` and `automated`. - - -___ - -
- - -
-nuke: update server addon settings with box #5419 - -Updating Nuke AYON server settings for the Box option in knob types. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: fix validate frame range on review attached to other instances #5296 - -Fixes situation where frame range validator can't be turned off on models if they are attached to reviewable camera in Maya. - - -___ - -
- - -
-Maya: Apply project settings to creators #5303 - -Project settings were not applied to the creators. - - -___ - -
- - -
-Maya: Validate Model Content #5336 - -`assemblies` in `cmds.ls` does not seem to work; -```python - -from maya import cmds - - -content_instance = ['|group2|pSphere1_GEO', '|group2|pSphere1_GEO|pSphere1_GEOShape', '|group1|pSphere1_GEO', '|group1|pSphere1_GEO|pSphere1_GEOShape'] -assemblies = cmds.ls(content_instance, assemblies=True, long=True) -print(assemblies) -``` - -Fixing with string splitting instead. - - -___ - -
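The string-splitting workaround amounts to taking the first component of each full DAG path; a pure-Python sketch:

```python
content_instance = [
    "|group2|pSphere1_GEO",
    "|group2|pSphere1_GEO|pSphere1_GEOShape",
    "|group1|pSphere1_GEO",
    "|group1|pSphere1_GEO|pSphere1_GEOShape",
]
# The assembly (top-level) node is the first component of the long name.
assemblies = {"|" + path.split("|", 2)[1] for path in content_instance}
print(sorted(assemblies))  # ['|group1', '|group2']
```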
- - -
-Bugfix: Maya update defaults variable #5368 - -So, something was forgotten while moving from `LegacyCreator` to `NewCreator`. `LegacyCreator` used `defaults` to list suggested subset names, which was changed to `default_variants` in the `NewCreator`, and setting `defaults` to any value has no effect! This update affects: -- [x] Model -- [x] Set Dress - - -___ - -
- - -
-Chore: Python 2 support fix #5375 - -Fix Python 2 support by adding `click` into python 2 dependencies and removing f-string from maya. - - -___ - -
- - -
-Maya: do not create top level group on reference #5402 - -This PR allows not wrapping loaded referenced assets in a top-level group, either explicitly by the artist or by configuration in Settings. Artists can control group creation in the ReferenceLoader options. Default no-group creation can be set by emptying `Group Name` in `project_settings/maya/load/reference_loader`. - - -___ - -
- - -
-Settings: Houdini & Maya create plugin settings #5436 - -Fixes related to Maya and Houdini settings. Renamed `defaults` to `default_variants` in plugin settings to match the attribute name on create plugins in both OpenPype and AYON settings. Fixed Houdini AYON settings where settings for default variants were missing, and fixed Maya AYON settings where the default factory had a wrong assignment. - - -___ - -
- - -
-Maya: Hide CreateAnimation #5297 - -When converting the `animation` family or loading a `rig` family, we need to include the `animation` creator but hide it in the creator context. - - -___ - -
- - -
-Nuke Anamorphic slate - Read pixel aspect from input #5304 - -When the asset pixel aspect differs from the rendered pixel aspect, the Nuke slate pixel aspect is no longer taken from the asset but is read via ffprobe. - - -___ - -
- - -
-Nuke - Allow ExtractReviewDataMov with no timecode knob #5305 - -ExtractReviewDataMov allows specifying the file type. Trying to write an extension other than mov failed in generate_mov, which assumed that the mov64_write_timecode knob exists. - - -___ - -
- - -
-Nuke: removing settings schema with defaults for OpenPype #5306 - -continuation of https://github.com/ynput/OpenPype/pull/5275 - - -___ - -
- - -
-Bugfix: Dependency without 'inputLinks' not downloaded #5337 - -Remove condition that avoids downloading dependency without `inputLinks`. - - -___ - -
- - -
-Bugfix: Houdini Creator use selection even if it was toggled off #5359 - -When creating many product types (families) one after another without refreshing the creator window manually, if you toggled `Use selection` once, all later product types would use the selection even if it was toggled off. Before (it keeps using the selection even if toggled off, unless you refresh the window manually): https://github.com/ynput/OpenPype/assets/20871534/8b890122-5b53-4c6b-897d-6a2f3aa3388a After (it works as expected): https://github.com/ynput/OpenPype/assets/20871534/6b1db990-de1b-428e-8828-04ab59a44e28 - - -___ - -
- - -
-Houdini: Correct camera selection for karma renderer when using selected node #5360 - -When the user creates the Karma ROP with a camera selected via "use selection", it gives the error message "no render camera found in selection". This PR fixes the bug of creating a Karma ROP when using a selected camera node in Houdini. - - -___ - -
- - -
-AYON: Environment variables and functions #5361 - -Prepare code for ayon-launcher compatibility. Fix ayon launcher subprocess calls, added more checks for `AYON_SERVER_ENABLED`, use ayon launcher suitable environment variables in AYON mode and changed outputs of some functions. Replaced usages of `OPENPYPE_REPOS_ROOT` environment variable with `PACKAGE_DIR` variable -> correct paths are used. - - -___ - -
- - -
-Nuke: farm rendering of prerender ignore roots in nuke #5366 - -The `prerender` family was using a wrong subset, the same as `render`, which should be different. - - -___ - -
- - -
-Bugfix: Houdini update defaults variable #5367 - -So, something was forgotten while moving from `LegacyCreator` to `NewCreator`. `LegacyCreator` used `defaults` to list suggested subset names, which was changed to `default_variants` in the `NewCreator`, and setting `defaults` to any value has no effect! This update affects: -- [x] Arnold ASS -- [x] Arnold ROP -- [x] Karma ROP -- [x] Mantra ROP -- [x] Redshift ROP -- [x] VRay ROP - - -___ - -
- - -
-Publisher: Fix create/publish animation #5369 - -Use geometry movement instead of changing min/max width. - - -___ - -
- - -
-Unreal: Move unreal splash screen to unreal #5370 - -Moved splash screen code to unreal integration and removed import from Igniter. - - -___ - -
- - -
-Nuke: returned not cleaning of renders folder on the farm #5374 - -A previous PR enabled explicit cleanup of the `renders` folder after farm publishing. This does not match customers' workflows. Customers want access to the files in the `renders` folder and potentially to redo some frames for long frame sequences. This PR extends the logic to mark rendered files for deletion only if the instance doesn't have `stagingDir_persistent`. For backwards compatibility, all Nuke instances have `stagingDir_persistent` set to True, i.e. the `renders` folder won't be cleaned after a farm publish. - - -___ - -
- - -
-Nuke: loading sequences is working #5376 - -Loading image sequences was broken after the latest release, version 3.16. However, I am pleased to inform you that it is now functioning as expected. - - -___ - -
- - -
-AYON: Fix settings conversion for ayon addons #5377 - -AYON addon settings are available in system settings, and the same values are no longer duplicated in the `"modules"` subkey. - - -___ - -
- - -
-Nuke: OCIO env var workflow #5379 - -The OCIO environment variable needs to be consistently handled across all platforms. Nuke resolves the custom OCIO config path differently depending on the platform, so we included the ocio config path in the workfile with a partial replacement using an environment variable. Additionally, for Windows sessions, we replaced backward slashes with a TCL expression. - - -___ - -
- - -
-Unreal: Fix Unreal build script #5381 - -Define 'AYON_UNREAL_ROOT' environment variable in unreal addon. - - -___ - -
- - -
-3dsMax: Use relative path to MAX_HOST_DIR #5382 - -Use `MAX_HOST_DIR` to calculate startup script path instead of use relative path to `OPENPYPE_ROOT` environment variable. - - -___ - -
- - -
-Bugfix: Houdini abc validator error message #5386 - -When the ABC path validator fails, it prints node objects, not node paths or names. This bug happened because the `get_invalid` method was updated to return nodes instead of node paths. - - -___ - -
- - -
-Nuke: node name influence product (subset) name #5392 - -Nuke now allows users to duplicate publishing instances, making the workflow easier. By duplicating a node and changing its name, users can set the product (subset) name in the publishing context. Users can now also change the variant name in the Publisher, which will automatically rename the associated instance node. - - -___ - -
- - -
-Houdini: delete redundant bgeo sop validator #5394 - -I found out that the `Validate BGEO SOP Path` validator is redundant; it catches two cases that are already implemented in "Validate Output Node". "Validate Output Node" works with `bgeo` as well as `abc` because `"pointcache"` is listed in its families. - - -___ - -
- - -
-Nuke: workfile is not reopening after change of context #5399 - -Nuke no longer reopens the latest workfile when the context is changed to a different task using the Workfile tool. The issue also affected the Script Clean (from Nuke File menu) and Close feature, but it has now been fixed. - - -___ - -
- - -
-Bugfix: houdini hard coded project settings #5400 - -I made this PR to solve the issue with hard-coded settings in houdini - - -___ - -
- - -
-AYON: 3dsMax settings #5401 - -Keep `adsk_3dsmax` group in applications settings. - - -___ - -
- - -
-Bugfix: update defaults to default_variants in maya and houdini OP DCC settings #5407 - -When moving to the new creator in Maya and Houdini, updating the settings was missed. - - -___ - -
- - -
-Applications: Attributes creation #5408 - -Applications addon does not cause infinite server restart loop. - - -___ - -
- - -
-Max: fix the bug of handling Object deletion in OP Parameter #5410 - -If an object is added to the OP parameter and the user deletes it from the scene thereafter, the container with OP attributes errors out. This PR resolves that bug. It also fixes the bug of not adding the attribute to the OP parameter correctly when the user enables "use selection" to link the object into the OP parameter. - - -___ - -
- - -
-Colorspace: including environments from launcher process #5411 - -Fixed bug in GitHub PR where the OCIO config template was not properly formatting environment variables from System Settings `general/environment`. - - -___ - -
- - -
-Nuke: workfile template fixes #5428 - -A bunch of small bugs needed to be fixed. - - -___ - -
- - -
-Houdini, Max: Fix missed function interface change #5430 - -This PR https://github.com/ynput/OpenPype/pull/5321/files from @kalisp missed updating the `add_render_job_env_var` in Houdini and Max as they are passing an extra arg: -``` -TypeError: add_render_job_env_var() takes 1 positional argument but 2 were given -``` - - -___ - -
- - -
-Scene Inventory: Fix issue with 'sync_server' #5431 - -Fix access to the `sync_server` attribute in the scene inventory. - - -___ - -
- - -
-Unpack project: Fix import issue #5433 - -Added `load_json_file`, `replace_project_documents` and `store_project_documents` to mongo init. - - -___ - -
- - -
-Chore: Versions post fixes #5441 - -Fixed issues caused by my mistake. Filled the right version value into the anatomy data. - - -___ - -
- -### **๐Ÿ“ƒ Testing** - - -
-Tests: Copy file_handler as it will be removed by purging ayon code #5357 - -Ayon code will get purged in the future from this repo/addon, therefore all `ayon_common` will be gone. `file_handler` gets internalized to tests as it is not used anywhere else. - - -___ - -
- - - - -## [3.16.2](https://github.com/ynput/OpenPype/tree/3.16.2) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.1...3.16.2) - -### **๐Ÿ†• New features** - - -
-Fusion - Set selected tool to active #5327 - -When you run the action to select a node, this PR makes the node-flow show the selected node, and you'll see the node's controls in the inspector. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya: All base create plugins #5326 - -Prepared base classes for each creator type in Maya. Extended `MayaCreatorBase` to have default implementations of common logic with instances which is used in each type of plugin. - - -___ - -
- - -
-Windows: Support long paths on zip updates. #5265 - -Support long paths for version extraction on Windows. The use case is having long paths in, for example, an addon. You can install to the C drive, but because the zip files are extracted in the local user's folder, additional subdirectories are added to the paths, which quickly become too long for Windows to handle during zip updates. - - -___ - -
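One common Windows technique for this (an assumption here, not necessarily the exact fix in this PR) is the extended-length path prefix:

```python
import os


def to_extended_length_path(path):
    # On Windows, prefixing an absolute path with \\?\ lifts the legacy
    # MAX_PATH (260 character) limit for most file APIs.
    path = os.path.abspath(path)
    if os.name == "nt" and not path.startswith("\\\\?\\"):
        return "\\\\?\\" + path
    return path
```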
- - -
-Blender: Added setting to set resolution and start/end frames at startup #5338 - -This PR adds the `set_resolution_startup` and `set_frames_startup` settings. They automatically set the resolution and the start/end frames and FPS in Blender when opening a file or creating a new one. - - -___ - -
- - -
-Blender: Support for ExtractBurnin #5339 - -This PR adds support for ExtractBurnin for Blender, when publishing a Review. - - -___ - -
- - -
-Blender: Extract Camera as Alembic #5343 - -Added support to extract Alembic Cameras in Blender. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Validate Instance In Context #5335 - -Missing new publisher error so the repair action shows up. - - -___ - -
- - -
-Settings: Fix default settings #5311 - -Fixed default settings for Shotgrid. Renamed `FarmRootEnumEntity` to `DynamicEnumEntity` and removed a doubled ABC metaclass definition (all settings entities have an abstract metaclass). - - -___ - -
- - -
-Deadline: missing context argument #5312 - -Updated function arguments - - -___ - -
- - -
-Qt UI: Multiselection combobox PySide6 compatibility #5314 - -- The check states are replaced with the values for PySide6 -- `QtCore.Qt.ItemIsUserTristate` is used instead of `QtCore.Qt.ItemIsTristate` to avoid crashes on PySide6 - - -___ - -
- - -
-Docker: handle openssl 1.1.1 for centos 7 docker build #5319 - -The move to Python 3.9 added the need to use OpenSSL 1.1.x, but it is not available by default on the CentOS 7 image. This fixes it. - - -___ - -
- - -
-houdini: fix typo in redshift proxy #5320 - -I believe there's a typo in `create_redshift_proxy.py` (an extra backtick) in the filename, and I made this PR to suggest a fix. - - -___ - -
- - -
-Houdini: fix wrong creator identifier in pointCache workflow #5324 - -Fixing a bug in publishing Alembics, where an invalid creator identifier caused a missing family association. - - -___ - -
- - -
-Fix colorspace compatibility check #5334 - -For some reason a user may have `PyOpenColorIO` installed on their machine; _in my case it came with RenderMan._ It can trick the compatibility check, as `import PyOpenColorIO` won't raise an error, however it may be an old version, _as in my case._ Before the fix, the compatibility check was true and it used the wrapper directly. After the fix, it uses the wrapper via a subprocess instead. - - -___ - -
- -### **Merged pull requests** - - -
-Remove forgotten dev logging #5315 - - -___ - -
- - - - -## [3.16.1](https://github.com/ynput/OpenPype/tree/3.16.1) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.16.0...3.16.1) - -### **๐Ÿ†• New features** - - -
-Royal Render: Maya and Nuke support #5191 - -Basic working implementation of Royal Render support in Maya. It expects the new publisher to be implemented in Maya. - - -___ - -
- - -
-Blender: Blend File Family #4321 - -Implementation of the Blend File family, analogous to the Maya Scene one. - - -___ - -
- - -
-Houdini: simple bgeo publishing #4588 - -Support for simple publishing of bgeo files. - -This adds basic support for bgeo publishing in Houdini. It allows publishing bgeo in all supported formats (selectable in the creator options). If the selected node has an `output` node at the SOP level, it will be used automatically as the path in the file node. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-General: delivery action add renamed frame number in Loader #5024 - -Frame Offset options for delivery in Openpype loader - - -___ - -
- - -
-Enhancement/houdini add path action for abc validator #5237 - -Add a default path attribute action. It's a helper action more than a repair action, used to add a default single value. - - -___ - -
- - -
-Nuke: auto apply all settings after template build #5277 - -Adds an automatic run of Apply All Settings after the template builder finishes its process. This will apply the frame range, image size and colorspace found in the context of the task's shot. - - -___ - -
- - -
-Harmony: Removed loader settings for Harmony #5289 - -It shouldn't be configurable; it is internal logic. Adding an additional extension wouldn't magically make it work. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-AYON: Make appdirs case sensitive #5298 - -Appdirs for AYON are case sensitive for linux and mac so we needed to change them to match ayon launcher. Changed 'ayon' to 'AYON' and 'ynput' to 'Ynput'. - - -___ - -
- - -
-Traypublisher: Fix plugin order #5299 - -Frame range collector for traypublisher was moved to traypublisher plugins and changed order to make sure `assetEntity` is filled in `instance.data`. - - -___ - -
- - -
-Deadline: removing OPENPYPE_VERSION from some host submitters #5302 - -Removing the deprecated method of adding OPENPYPE_VERSION to the job environment. It was a leftover; other hosts have already been cleaned up. - - -___ - -
- - -
-AYON: Fix args for workfile conversion util #5308 - -The workfile conversion util function now has the right expected arguments. - - -___ - -
- -### **๐Ÿ”€ Refactored code** - - -
-Maya: Refactor imports to `lib.get_reference_node` since the other functionโ€ฆ #5258 - -Refactor imports to `lib.get_reference_node` since the other function is deprecated. - - -___ - -
- - - - -## [3.16.0](https://github.com/ynput/OpenPype/tree/3.16.0) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/...3.16.0) - -### **๐Ÿ†• New features** - - -
-General: Reduce usage of legacy io #4723 - -Replace usages of `legacy_io` with getter methods or reuse already available information. Create plugins using CreateContext use the context from the CreateContext object. Loaders use the getter function from context tools. Publish plugins use information from instance.data or context.data. In some cases pieces of code were refactored a little, e.g. the fps getter in Maya. - - -___ - -
- - -
-Documentation: API docs reborn - yet again #4419 - -## Feature - -Add a functional base for API documentation using Sphinx and AutoAPI. - -After the unsuccessful #2512, #834 and #210 this is yet another try, but this time without the ambition to solve the whole issue. This makes the Sphinx script work and nothing else. Any changes and improvements in the API docs should be made in subsequent PRs. - -## How to use it - -You can run: - -```sh -cd .\docs -make.bat html -``` - -or - -```sh -cd ./docs -make html -``` - -This will go over our code and generate **.rst** files in `/docs/source/autoapi` and from those it will generate the full html documentation in `/docs/build/html`. - -During the build you'll see tons of red errors that point to our issues: - -1) **Wrong imports** - Invalid imports are usually wrong relative imports (too deep) or circular imports. - -2) **Invalid doc-strings** - Doc-strings to be processed into documentation need to follow some syntax - this can be checked by running - `pydocstyle`, which is already included with OpenPype -3) **Invalid markdown/rst files** - md/rst files can be included inside rst files using the `.. include::` directive. But they have to be properly formatted. - - -## Editing rst templates - -Everything starts with `/docs/source/index.rst` - this file should be properly edited. Right now it just includes `readme.rst`, which in turn includes and parses the main `README.md`. This is the entry point to the API documentation. All templates generated by AutoAPI are in `/docs/source/autoapi`. They should eventually be committed to the repository and edited too. - -## Steps for enhancing API documentation - -1) Run `/docs/make.bat html` -2) Read the red errors/warnings - fix them in the code -3) Run `/docs/make.bat html` again until there are no red lines -4) Edit rst files and add some meaningful content there - -> **Note** -> This can (should) be merged as is, without doc-string fixes in the code or changes in templates. All additional improvements to the API documentation should be made in new PRs. - -> **Warning** -> You need to add new dependencies to use it. Run `create_venv`. - -Connected to #2490 -___ - -
- - -
-Global: custom location for OP local versions #4673 - -This provides a configurable location to unzip OpenPype version zips. By default it was hardcoded to the artist's app data folder, which might be problematic/slow with roaming profiles. The location must be accessible with write permissions by the user running OP Tray (so `Program Files` might be problematic). - - -___ - -</details>
- - -
-AYON: Update settings conversion #4837 - -Updated conversion script of AYON settings to v3 settings. PR is related to changes in the addons repository https://github.com/ynput/ayon-addons/pull/6 . Changed how the conversion happens -> the conversion output does not start from OpenPype defaults but from an empty dictionary. - - -___ - -</details>
- - -
-AYON: Implement integrate links publish plugin #4842 - -Implemented entity links get/create functions. Added new integrator which replaces v3 integrator for links. - - -___ - -
- - -
-General: Version attributes integration #4991 - -Implemented unified integrate plugin to update version attributes after all integrations for AYON. The goal is to be able to update attribute values on a version in a unified way once all addon integrators are done, so e.g. ftrack can add the ftrack id to the matching version in the AYON server etc. The values can be stored under the `"versionAttributes"` key. - - -___ - -</details>
- - -
-AYON: Staging versions can be used #4992 - -Added ability to use staging versions in AYON mode. - - -___ - -
- - -
-AYON: Preparation for products #5038 - -Prepare ayon settings conversion script for `product` settings conversion. - - -___ - -
- - -
-Loader: Hide inactive versions in UI #5101 - -Added support for `active` argument to hide versions with active set to False in Loader UI when in AYON mode. - - -___ - -
- - -
-General: CLI addon command #5109 - -Added `addon` alias for `module` in OpenPype cli commands. - - -___ - -
- - -
-AYON: OpenPype as server addon #5199 - -The OpenPype repository can be converted to an AYON addon for distribution. The addon defines dependencies that are required to use it and are not part of the base ayon-launcher (desktop application). - - -___ - -</details>
- - -
-General: Runtime dependencies #5206 - -Defined runtime dependencies in pyproject.toml. Moved the Python OCIO and OTIO modules there. - - -___ - -</details>
- - -
-AYON: Bundle distribution #5209 - -Since AYON server 0.3.0, addon versions are defined by bundles, which affects how addons, dependency packages and installers are handled. The only source of truth about which version of anything should be used is the server bundle. - - -___ - -</details>
- - -
-Feature/blender handle q application #5264 - -This changes the way the QApplication is run for Blender. The QApplication singleton is created during register. This is done so that other Qt applications and addons are able to run in Blender. In the previous implementation, if a QApplication was already running, all functionality of OpenPype became unavailable. - - -___ - -</details>
- -### **🚀 Enhancements** - - -</details>
-General: Connect to AYON server (base) #3924 - -Initial implementation of being able to use an AYON server in the current OpenPype client. Added the ability to connect to an AYON server and use base queries. - -AYON mode has its own executable (and start script). To start in AYON mode just replace `start.py` with `ayon_start.py` (added tray start script to tools). Added the constant `AYON_SERVER_ENABLED` to `openpype/__init__.py` to know if ayon mode is enabled. In that case Mongo is not used at all and any attempts will cause crashes. I had to modify the `~/openpype/client` content to be able to do this switch. The Mongo implementation was moved to a `mongo` subfolder and uses "star imports" in the files from where current imports are used. The logic of any tool or query in code was not changed at all. Since functions were based on mongo queries, they don't use the full potential of the AYON server's abilities. At the moment the implementation has a login UI, distribution of files from the server and replacement of mongo queries. Queries use the `ayon_api` module, which is in live development, so the versions may change from day to day. - - -___ - -</details>
- - -
-Enhancement kitsu note with exceptions #4537 - -Adds a setting to define exceptions to IntegrateKitsuNote task status changes. - - -___ - -</details>
- - -
-General: Environment variable for default OCIO configs #4670 - -Defines an environment variable which leads to the root of the built-in OCIO configs, making it possible to change the root without changing settings. The path in settings used `"{OPENPYPE_ROOT}/vendor/bin/ocioconfig/OpenColorIOConfig"`, which made it impossible to move the root somewhere else. That will be needed in AYON, where configs won't be part of the desktop application but downloaded from the server. - - -___ - -</details>
- - -
-AYON: Editorial hierarchy creation #4699 - -Implemented an extract hierarchy to AYON plugin which creates entities in AYON using the ayon api. - - -___ - -</details>
- - -
-AYON: Vendorize ayon api #4753 - -Vendorize the ayon api into the openpype vendor directory. The reason is that `ayon-python-api` is in live development and will fix/add features often in the next few weeks/months, and because updating a dependency requires a new release -> new build, we want to avoid that need as it would affect OpenPype development. - - -___ - -</details>
- - -
-General: Update PySide 6 for MacOs #4764 - -The new version of PySide6 does not have issues with the settings UI. It still breaks UI stylesheets, so it is not changed for other platforms, but it is an enhancement from the previous state. - - -___ - -</details>
- - -
-General: Removed unused cli commands #4902 - -Removed the `texturecopy` and `launch` commands from the CLI. - - -___ - -</details>
- - -
-AYON: Linux & MacOS launch script #4970 - -Added shell script to launch tray in AYON mode. - - -___ - -
- - -
-General: Qt scale enhancement #5059 - -Set ~~'QT_SCALE_FACTOR_ROUNDING_POLICY'~~ the scale factor rounding policy of QApplication to `PassThrough` so the scaling can be a 'float' number and not just an 'int' (150% -> 1.5 scale). - - -___ - -</details>
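-For context, a minimal sketch of how such a rounding policy can be applied with `qtpy` (illustrative, not the exact code from the PR; the policy must be set before the `QApplication` instance exists):
-
-```python
-from qtpy import QtCore, QtWidgets
-
-# Must be set before the QApplication instance is created.
-QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy(
-    QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough
-)
-app = QtWidgets.QApplication([])
-```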
- - -
-CI: WPS linting instead of Hound (rebase) 2 #5115 - -Because Hound, currently used to lint the code on GH, ships with really old flake8 support, it fails miserably on any newer Python syntax. This PR adds the WPS linter to GitHub workflows to step in. - - -___ - -</details>
- - -
-Max: OP parameters only displays what is attached to the container #5229 - -The OP parameters in 3dsmax now display only what is currently attached to the container; previously, items that had not been added could still show up when adding to or deleting from the container. - - -___ - -</details>
- - -
-Testing: improving logging during testing #5271 - -Unit testing logging was crashing on more than one nested layer of inherited loggers. - - -___ - -</details>
- - -
-Nuke: removing deprecated settings in baking #5275 - -Removing deprecated settings for baking with reformat. This option was only for a single reformat node and has been replaced by multiple reposition nodes. - - -___ - -</details>
- -### **๐Ÿ› Bug fixes** - - -
-AYON: General fixes and updates #4975 - -A few smaller fixes related to the AYON connection. Some of the fixes were taken from this PR. - - -___ - -</details>
- - -
-Start script: Change returncode on validate or list versions #4515 - -Change the exit code from `1` to `0` when versions are printed or when a version is validated. - -Return code `1` indicates an error, but no error occurred. - - -___ - -</details>
- - -
-AYON: Change login UI works #4754 - -Fixed the change-login UI. The change-login UI did show up and a new login was successful, but after restart the previous login was used. This change fixes the issue. - - -___ - -</details>
- - -
-AYON: General issues #4763 - -Vendorized `ayon_api` from the PR broke OpenPype launch, because `ayon_api` is not available. Moved `ayon_api` from the ayon-specific subfolder to the `common` python vendor in OpenPype, and removed the login in the ayon start script (which was invalid anyway). Also fixed compatibility with PySide6 by using `qtpy` instead of `Qt` and changing code which is not PySide6 compatible. - - -___ - -</details>
- - -
-AYON: Small fixes #4841 - -Bugfixes and enhancements related to AYON logic. Define the `BUILTIN_OCIO_ROOT` environment variable so OCIO configs are working. Use constants from the ayon api instead of hardcoding them in the codebase. Change the process name from "openpype" to "ayon". Don't execute the login dialog when the application is not yet running but use the `open` method instead. Fixed missing modules settings which were not taken from openpype defaults. Updated ayon api to `0.1.17`. - - -___ - -</details>
- - -
-Bugfix - Update gazu to 0.9.3 #4845 - -This updates Gazu to 0.9.3 to make sure Gazu works with Kitsu and Zou 0.16.x+ - - -___ - -
- - -
-Igniter: fix error reports in silent mode #4909 - -Some errors in silent mode commands in Igniter were suppressed and not visible for example in Deadline log. - - -___ - -
- - -
-General: Remove ayon api from poetry lock #4964 - -Remove AYON python api from pyproject.toml and poetry.lock again. - - -___ - -
- - -
-Ftrack: Fix AYON settings conversion #4967 - -Fix conversion of ftrack settings in AYON mode. - - -___ - -
- - -
-AYON: ISO date format conversion issues #4981 - -The function `datetime.fromisoformat` was replaced with `arrow.get`. - - -___ - -</details>
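-An illustrative sketch of why the swap helps (not the PR's code): Python's `datetime.fromisoformat` rejects the trailing `Z` that servers commonly emit until Python 3.11, while `arrow.get` parses it fine:
-
-```python
-import arrow
-
-# datetime.datetime.fromisoformat("2023-05-30T16:16:51.000Z") raises
-# ValueError on Python < 3.11; arrow handles the same string.
-created_at = arrow.get("2023-05-30T16:16:51.000Z").datetime
-print(created_at.isoformat())
-```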
- - -
-AYON: Missing files on representations #4989 - -Fix integration of files into representation in server database. - - -___ - -
- - -
-General: Fix Python 2 vendor for arrow #4993 - -Moved remaining dependencies for arrow from ftrack to python 2 vendor. - - -___ - -
- - -
-General: Fix new load plugins for next minor release #5000 - -Fix access to the `fname` attribute which is not available on load plugins anymore. - - -___ - -</details>
- - -
-General: Fix mongo secure connection #5031 - -Fix `ssl` and `tls` keys checks in mongo uri query string. - - -___ - -
- - -
-AYON: Fix site sync settings #5069 - -Fixed settings for AYON variant of sync server. - - -___ - -
- - -
-General: Replace deprecated keyword argument in PyMongo #5080 - -Use argument `tlsCAFile` instead of `ssl_ca_certs` to avoid deprecation warnings. - - -___ - -
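-A minimal sketch of the change (URI and certificate path are illustrative):
-
-```python
-from pymongo import MongoClient
-
-# Deprecated spelling that triggered the warning:
-#   MongoClient(uri, ssl_ca_certs="/path/to/ca.pem")
-# Current spelling:
-client = MongoClient(
-    "mongodb://localhost:27017",  # illustrative URI
-    tlsCAFile="/path/to/ca.pem",  # illustrative certificate path
-)
-```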
- - -
-Igniter: QApplication is created #5081 - -The function `_get_qt_app` now actually creates a new `QApplication` if one was not created yet. - - -___ - -</details>
- - -
-General: Lower unidecode version #5090 - -Use older version of Unidecode module to support Python 2. - - -___ - -
- - -
-General: Lower cryptography to 39.0.0 #5099 - -Lower cryptography to 39.0.0 to avoid breaking of DCCs like Maya and Nuke. - - -___ - -
- - -
-AYON: Global environments key fix #5118 - -It seems that when converting AYON settings to OP settings, the `environments` setting is put under the `environments` key in `general`; however, when populating the environment, the `environment` key gets picked up, which does not contain the environment variables from the `core/environments` setting. - - -___ - -</details>
- - -
-Add collector to tray publisher for getting frame range data #5152 - -Add a collector to tray publisher to get frame range data. Users can choose to enable this collector if they need it in the publisher. Resolves #5136. - - -___ - -</details>
- - -
-Unreal: get current project settings not using unreal project name #5170 - -There was a bug where the Unreal project name was used to query project settings. But the Unreal project name can differ from the "real" one because of naming convention rules set by Unreal. This fixes it by asking for the current project settings. - - -___ - -</details>
- - -
-Substance Painter: Fix Collect Texture Set Images unable to copy.deepcopy due to QMenu #5238 - -Fix `copy.deepcopy` of `instance.data`. - - -___ - -
- - -
-Ayon: server returns different key #5251 - -Package returned from server has `filename` instead of `name`. - - -___ - -
- - -
-Substance Painter: Fix default color management settings #5259 - -The default color management settings for Substance Painter were invalid; they were set to override the global config by default but specified no valid config paths of their own - and thus errored that the paths were not correct. This sets the defaults correctly to match other hosts. _I quickly checked - this seems to be the only host with the wrong default settings._ - - -___ - -</details>
- - -
-Nuke: fixing container data if windows path in value #5267 - -Windows paths in container data are reformatted. Previously it was reported that Nuke raised a `utf8 0xc0` error if backward slashes were in data values. - - -___ - -</details>
- - -
-Houdini: fix typo error in collect arnold rop #5281 - -Fixing a typo in `collect_arnold_rop.py`. Reference: #5280 - - -___ - -</details>
- - -
-Slack - enhanced logging and protection against failure #5287 - -Covers issues found in production at a customer site. A SlackAPI exception doesn't need to contain 'error'; a previously uncaught exception is now covered. - - -___ - -</details>
- - -
-Maya: Removed unnecessary import of pyblish.cli #5292 - -This import resulted in adding an additional logging handler, which led to duplication of logs in hosts with plugins containing the `is_in_tests` method. The import is unnecessary for testing functionality. - - -___ - -</details>
- -### **🔀 Refactored code** - - -</details>
-Loader: Remove `context` argument from Loader.__init__() #4602 - -Remove the previously required `context` argument. - - -___ - -
- - -
-Global: Remove legacy integrator #4786 - -Remove the legacy integrator. - - -___ - -
- -### **📃 Documentation** - - -</details>
-Next Minor Release #5291 - - -___ - -
- -### **Merged pull requests** - - -
-Maya: Refactor to new publisher #4388 - -**Refactor Maya to use the new publisher with new creators.** - - -- [x] Legacy instance can be converted in UI using `SubsetConvertorPlugin` -- [x] Fix support for old style "render" and "vrayscene" instance to the new per layer format. -- [x] Context data is stored with scene -- [x] Workfile instance converted to AutoCreator -- [x] Converted Creator classes -- [x] Create animation -- [x] Create ass -- [x] Create assembly -- [x] Create camera -- [x] Create layout -- [x] Create look -- [x] Create mayascene -- [x] Create model -- [x] Create multiverse look -- [x] Create multiverse usd -- [x] Create multiverse usd comp -- [x] Create multiverse usd over -- [x] Create pointcache -- [x] Create proxy abc -- [x] Create redshift proxy -- [x] Create render -- [x] Create rendersetup -- [x] Create review -- [x] Create rig -- [x] Create setdress -- [x] Create unreal skeletalmesh -- [x] Create unreal staticmesh -- [x] Create vrayproxy -- [x] Create vrayscene -- [x] Create xgen -- [x] Create yeti cache -- [x] Create yeti rig -- [ ] Tested new Creator publishes -- [x] Publish animation -- [x] Publish ass -- [x] Publish assembly -- [x] Publish camera -- [x] Publish layout -- [x] Publish look -- [x] Publish mayascene -- [x] Publish model -- [ ] Publish multiverse look -- [ ] Publish multiverse usd -- [ ] Publish multiverse usd comp -- [ ] Publish multiverse usd over -- [x] Publish pointcache -- [x] Publish proxy abc -- [x] Publish redshift proxy -- [x] Publish render -- [x] Publish rendersetup -- [x] Publish review -- [x] Publish rig -- [x] Publish setdress -- [x] Publish unreal skeletalmesh -- [x] Publish unreal staticmesh -- [x] Publish vrayproxy -- [x] Publish vrayscene -- [x] Publish xgen -- [x] Publish yeti cache -- [x] Publish yeti rig -- [x] Publish workfile -- [x] Rig loader correctly generates a new style animation creator instance -- [ ] Validations / Error messages for common validation failures look nice and usable as a report. -- [ ] Make Create Animation hidden to the user (should not create manually?) -- [x] Correctly detect difference between **'creator_attributes'** and **'instance_data'** since both are "flattened" to the top node. - - -___ - -
- - -
-Start script: Fix possible issues with destination drive path #4478 - -Drive paths for Windows now fix a possibly missing slash at the end of the destination path. - -The Windows `subst` command requires the destination path to end with a slash if it is a drive (it should be `G:\` not `G:`). - - -___ - -</details>
- - -
-Global: Move PyOpenColorIO to vendor/python #4946 - -So that DCCs don't conflict with their own. - -See https://github.com/ynput/OpenPype/pull/4267#issuecomment-1537153263 for the issue with Gaffer. - -I'm not sure if this is the correct approach, but I assume PySide/Shiboken is under `vendor/python` for this reason as well... -___ - -
- - -
-RuntimeError with Click on deadline publish #5065 - -I changed Click to version 8.0 instead of 7.1.2 to solve this error: -``` -2023-05-30 16:16:51: 0: STDOUT: Traceback (most recent call last): -2023-05-30 16:16:51: 0: STDOUT: File "start.py", line 1126, in boot -2023-05-30 16:16:51: 0: STDOUT: File "/prod/softprod/apps/openpype/LINUX/3.15/dependencies/click/core.py", line 829, in __call__ -2023-05-30 16:16:51: 0: STDOUT: return self.main(*args, **kwargs) -2023-05-30 16:16:51: 0: STDOUT: File "/prod/softprod/apps/openpype/LINUX/3.15/dependencies/click/core.py", line 760, in main -2023-05-30 16:16:51: 0: STDOUT: _verify_python3_env() -2023-05-30 16:16:51: 0: STDOUT: File "/prod/softprod/apps/openpype/LINUX/3.15/dependencies/click/_unicodefun.py", line 126, in _verify_python3_env -2023-05-30 16:16:51: 0: STDOUT: raise RuntimeError( -2023-05-30 16:16:51: 0: STDOUT: RuntimeError: Click will abort further execution because Python 3 was configured to use ASCII as encoding for the environment. Consult https://click.palletsprojects.com/python3/ for mitigation steps. -``` - - -___ - -
- - - - -## [3.15.12](https://github.com/ynput/OpenPype/tree/3.15.12) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.11...3.15.12) - -### **🆕 New features** - - -<details>
-Tray Publisher: User can set colorspace per instance explicitly #4901 - -With this feature a user can set/override the colorspace for the representations of an instance explicitly instead of relying on the File Rules from project settings or alike. This way you can ingest any file and explicitly say "this file is colorspace X". - - -___ - -
- - -
-Review Family in Max #5001 - -Review feature by creating a preview animation in 3dsmax. (The code is still being cleaned up, so there will be some updates until it is ready for review.) - - -___ - -</details>
- - -
-AfterEffects: support for workfile template builder #5163 - -This PR adds the functionality of the templated workfile builder. It allows preparing an AE workfile with placeholders for automatically loading a particular representation of a particular subset of a particular asset from the context where the workfile is opened. Selection from multiple prepared workfiles is provided with the usage of templates; a specific type of task could use a particular workfile template etc. Artists can then build a workfile from a template when opening a new workfile. - - -___ - -</details>
- - -
-CreatePlugin: Get next version helper #5242 - -Implemented helper functions to get next available versions for create instances. - - -___ - -
- -### **🚀 Enhancements** - - -</details>
-Maya: Improve Templates #4854 - -Use library method for fetching reference node and support parent in hierarchy. - - -___ - -
- - -
-Bug: Maya - xgen sidecar files aren't moved when saving workfile as a new asset workfile changing context - OP-6222 #5215 - -This PR manages the Xgen files when switching context in the Workfiles app. - - -___ - -</details>
- - -
-node references to check for duplicates in Max #5192 - -No duplicates for node references in Max when users try to select nodes before publishing. - - -___ - -</details>
- - -
-Tweak profiles logging to debug level #5194 - -Tweak profiles logging to debug level since they aren't artist facing logs. - - -___ - -
- - -
-Enhancement: Reduce more visual clutter for artists in new publisher reports #5208 - -Got this from one of our artists' reports - figured some of these logs were definitely not for the artist, reduced those logs to debug level. - - -___ - -
- - -
-Cosmetics: Tweak pyblish repair actions (icon, logs, docstring) #5213 - -- Add icon to RepairContextAction -- logs to debug level -- also add attempt repair for RepairAction for consistency -- fix RepairContextAction docstring to mention correct argument name - -#### Additional info - -We should not forget to remove this ["deprecated" actions.py file](https://github.com/ynput/OpenPype/blob/3501d0d23a78fbaef106da2fffe946cb49bef855/openpype/action.py) in 3.16 (next-minor) - -## Testing notes: - -1. Run some fabulous repairs! - -___ - -
- - -
-Maya: fix save file prompt on launch last workfile with color management enabled + restructure `set_colorspace` #5225 - -- Only set `configFilePath` when OCIO env var is not set since it doesn't do anything if OCIO var is set anyway. -- Set the Maya 2022+ default OCIO path using the resources path instead of "" to avoid Maya Save File on new file after launch -- **Bugfix: This is what fixes the Save prompt on open last workfile feature with Global color management enabled** -- Move all code related to applying the maya settings together after querying the settings -- Swap around the `if use_workfile_settings` since the check was reversed -- Use `get_current_project_name()` instead of environment vars - - -___ - -
- - -
-Enhancement: More descriptive error messages for Loaders #5227 - -Tweak raised errors and error messages for loader errors. - - -___ - -
- - -
-Houdini: add select invalid action for ValidateSopOutputNode #5231 - -This PR adds the `SelectROPAction` action to `houdini\api\action.py` and it's used in `Validate Output Node`. `SelectROPAction` is used to select the ROPs associated with the errored instances. - - -___ - -</details>
- - -
-Remove new lines from the delivery template string #5235 - -If the delivery template has a new line symbol at the end, say it was copied from the text editor, the delivery process will fail with `OSError` due to incorrect destination path. To avoid that I added `rstrip()` to the `delivery_path` processing. - - -___ - -
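-A tiny sketch of the idea (the template string is illustrative):
-
-```python
-# A template pasted from a text editor may carry a trailing newline,
-# which would corrupt the destination path and raise OSError later.
-template = "{root}/delivery/{asset}/{subset}\n"
-delivery_path = template.rstrip()
-print(repr(delivery_path))  # '{root}/delivery/{asset}/{subset}'
-```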
- - -
-Houdini: better selection on pointcache creation #5250 - -Houdini allows an `ObjNode` path as the `sop_path` in the ROP, unlike OP/AYON which require `sop_path` to be set to a SOP node path explicitly. In this code, better selection logic filters out selections that are invalid from the OP/AYON point of view. Valid selections are: - -- a `SopNode` that has a parent of type `geo` or `subnet` -- an `ObjNode` of type `geo` that has -- a `SopNode` of type `output` -- a `SopNode` with the render flag on (if no `SopNode` of type `output`) - -This effectively filters out: - -- empty `ObjNode`s -- `ObjNode`s of other types like `cam` and `dopnet` -- `SopNode`s whose parents are of other types like `cam` and `sop solver` - - -___ - -</details>
- - -
-Update scene inventory even if any errors occurred during update #5252 - -When selecting many items in the scene inventory to update versions, the updating stopped if one of the items errored out. However, before this PR the scene inventory would also NOT refresh, making you think it did nothing. Also implemented as a method to allow some code deduplication. - - -___ - -</details>
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Convert frame values to integers #5188 - -Convert frame values to integers. - - -___ - -
- - -
-Maya: fix the register_event_callback correctly collecting workfile save after #5214 - -Fixes the bug of `register_event_callback` not being able to collect the "workfile_save_after" action for the lock file action. - - -___ - -</details>
- - -
-Maya: aligning default settings to distributed aces 1.2 config #5233 - -Maya colorspace settings defaults are set so they align with our distributed ACES 1.2 config file set in the global colorspace configs. - - -___ - -</details>
- - -
-RepairAction and SelectInvalidAction filter instances failed on the exact plugin #5240 - -RepairAction and SelectInvalidAction actually filter to instances that failed on the exact plugin - not on "any failure" - - -___ - -
- - -
-Maya: Bugfix look update nodes by id with non-unique shape names (query with `fullPath`) #5257 - -Fixes a bug where updating attributes on nodes with an assigned shader failed if a shape name existed more than once in the scene, due to the `cmds.listRelatives` call not being done with the `fullPath=True` flag. Original error: -```python -# Traceback (most recent call last): -# File "E:\openpype\OpenPype\openpype\tools\sceneinventory\view.py", line 264, in -# lambda: self._show_version_dialog(items)) -# File "E:\openpype\OpenPype\openpype\tools\sceneinventory\view.py", line 722, in _show_version_dialog -# self._update_containers(items, version) -# File "E:\openpype\OpenPype\openpype\tools\sceneinventory\view.py", line 849, in _update_containers -# update_container(item, item_version) -# File "E:\openpype\OpenPype\openpype\pipeline\load\utils.py", line 502, in update_container -# return loader.update(container, new_representation) -# File "E:\openpype\OpenPype\openpype\hosts\maya\plugins\load\load_look.py", line 119, in update -# nodes_by_id[lib.get_id(n)].append(n) -# File "E:\openpype\OpenPype\openpype\hosts\maya\api\lib.py", line 1420, in get_id -# sel.add(node) -``` - - -___ - -</details>
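-The gist of the fix, sketched (not the exact PR diff): query relatives with `fullPath=True` so that non-unique shape names stay unambiguous:
-
-```python
-from maya import cmds
-
-# Short names clash when the same shape name exists under several
-# parents; full DAG paths are always unique.
-shapes = cmds.listRelatives("pSphere1", shapes=True, fullPath=True) or []
-for shape in shapes:
-    print(shape)  # e.g. "|group1|pSphere1|pSphere1Shape"
-```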
- - -
-Nuke: Create nodes with inpanel=False #5051 - -This PR removes the annoyance of the UI changing focus to the properties window just for the property window of the newly created node to disappear. Instead of using node.hideControlPanel, the concealment is implemented during the creation of the node, which does not change the focus of the current window. -___ - -</details>
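-A sketch of the approach (the node class is illustrative):
-
-```python
-import nuke
-
-# Creating the node with inpanel=False means the properties panel
-# never opens, so window focus is not stolen and then dropped.
-node = nuke.createNode("Write", inpanel=False)
-```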
- - -
-Fix the reset frame range not setting up the right timeline in Max #5187 - -Resolve #5181 - - -___ - -
- - -
-Resolve: after launch automatization fixes #5193 - -The workfile is now correctly created and aligned with the actual project. The launching mechanism is also fixed, so even if no workfile has been saved yet, it will open the OpenPype menu automatically. - - -___ - -</details>
- - -
-General: Revert backward incompatible change of path to template to multiplatform #5197 - -Platform independence is still handled by the usage of `work[root]` (or any other root that is accessible across platforms). - - -___ - -</details>
- - -
-Nuke: root set format updating in node graph #5198 - -The Nuke root node needs to be reset on some values so knobs can be updated in the node graph. This works the same way as if a user changed the frame number, so expressions update their values in knobs. - - -___ - -</details>
- - -
-Hiero: fixing otio current project and cosmetics #5200 - -Otio was not returning the correct current project once an additional Untitled project was open in the project manager stack. - - -___ - -</details>
- - -
-Max: Publisher instances don't hold their enabled/disabled states when Publisher is reopened #5202 - -Resolves #5183, a general maxscript-to-python conversion issue (e.g. bool conversion: true in maxscript, True in Python). (Also resolves the ValueError when you change the subset to publish in the list view menu.) - - -___ - -</details>
- - -
-Burnins: Filter script is defined only for video streams #5205 - -Burnins are working for inputs with audio. - - -___ - -
- - -
-Colorspace lib fix compatible python version comparison #5212 - -Fix python version comparison. - - -___ - -
- - -
-Houdini: Fix `get_color_management_preferences` #5217 - -Fix the issue described here where the logic for retrieving the current OCIO display and view was incorrectly trying to apply a regex to it. - - -___ - -
- - -
-Houdini: Redshift ROP image format bug #5218 - -Problem: -the "RS_outputFileFormat" parm value was missing, -and there were more "image_format" entries than the Redshift ROP supports. - -Fix: -1) removed unnecessary formats from `image_format_enum` -2) added the selected format value to `RS_outputFileFormat` -___ - -</details>
- - -
-Colorspace: check PyOpenColorIO rather than python version #5223 - -Fixes the previously merged PR (https://github.com/ynput/OpenPype/pull/5212) and applies a better way to check compatibility with the PyOpenColorIO python api. - - -___ - -</details>
- - -
-Validate delivery action representations status #5228 - -- disable delivery button if no representations checked -- fix macos combobox layout -- add error message if no delivery templates found - - -___ - -
- - -
- Houdini: Add geometry check for pointcache family #5230 - -When `sop_path` on the ABC ROP node points to a non-SopNode, the validators `validate_abc_primitive_to_detail.py` and `validate_primitive_hierarchy_paths.py` will error and crash when this line is executed: `geo = output_node.geometryAtFrame(frame)` - - -___ - -</details>
- - -
-Houdini: Add geometry check for VDB family #5232 - -When `sop_path` on the Geometry ROP node points to a non-SopNode, the validator `validate_vdb_output_node.py` will error and crash when this line is executed: `sop_node.geometryAtFrame(frame)` - - -___ - -</details>
- - -
-Substance Painter: Include the setting only in publish tab #5234 - -Instead of having two settings in both the create and publish tabs, there is solely one setting in the publish tab for users to set up the parameters. Resolves #5172. - - -___ - -</details>
- - -
-Maya: Fix collecting arnold prefix when none #5243 - -When no prefix is specified in render settings, the renderlayer collector would error. - - -___ - -
- - -
-Deadline: OPENPYPE_VERSION should only be added when running from build #5244 - -When running from source the environment variable `OPENPYPE_VERSION` should not be added. This is a bugfix for the feature #4489 - - -___ - -
- - -
-Fix no prompt for "unsaved changes" showing when opening workfile in Houdini #5246 - -Fix no prompt for "unsaved changes" showing when opening workfile in Houdini. - - -___ - -
- - -
-Fix no prompt for "unsaved changes" showing when opening workfile in Substance Painter #5248 - -Fix no prompt for "unsaved changes" showing when opening workfile in Substance Painter. - - -___ - -
- - -
-General: add the os library before os.environ.get #5249 - -Adds the `os` import to `creator_plugins.py`, which is needed for the `os.environ.get` call on line 667. - - -___ - -</details>
- - -
-Maya: Fix set_attribute for enum attributes #5261 - -Fix for #5260 - - -___ - -
- - -
-Unreal: Move Qt imports away from module init #5268 - -Importing `Window` creates errors in headless mode. -``` -*** WRN: >>> { ModulesLoader }: [ FAILED to import host folder unreal ] -============================= -No Qt bindings could be found -============================= -Traceback (most recent call last): - File "C:\Users\tokejepsen\OpenPype\.venv\lib\site-packages\qtpy\__init__.py", line 252, in <module> - from PySide6 import __version__ as PYSIDE_VERSION # analysis:ignore -ModuleNotFoundError: No module named 'PySide6' - -During handling of the above exception, another exception occurred: - -Traceback (most recent call last): - File "C:\Users\tokejepsen\OpenPype\openpype\modules\base.py", line 385, in _load_modules - default_module = __import__( - File "C:\Users\tokejepsen\OpenPype\openpype\hosts\unreal\__init__.py", line 1, in <module> - from .addon import UnrealAddon - File "C:\Users\tokejepsen\OpenPype\openpype\hosts\unreal\addon.py", line 4, in <module> - from openpype.widgets.message_window import Window - File "C:\Users\tokejepsen\OpenPype\openpype\widgets\__init__.py", line 1, in <module> - from .password_dialog import PasswordDialog - File "C:\Users\tokejepsen\OpenPype\openpype\widgets\password_dialog.py", line 1, in <module> - from qtpy import QtWidgets, QtCore, QtGui - File "C:\Users\tokejepsen\OpenPype\.venv\lib\site-packages\qtpy\__init__.py", line 259, in <module> - raise QtBindingsNotFoundError() -qtpy.QtBindingsNotFoundError: No Qt bindings could be found -``` - - -___ - -</details>
- -### **🔀 Refactored code** - - -</details>
-Maya: Minor refactoring and code cleanup #5226 - -Some small cleanup and refactoring of logic: removing old comments, unused imports and some minor optimization. Also removed the prints of the loader names of each container in the scene in `fix_incompatible_containers`, optimizing by using a `set` and defining it only once. Moved some UI-related code/tweaks to run `on_init` only if not in headless mode. Removed an empty `obj.py` file. Each commit message roughly describes why the change was made. - - -___ - -</details>
- -### **Merged pull requests** - - -
-Bug: Template builder fails when loading data without outliner representation #5222 - -Adds assertion handling for the case where the container does not have a representation in the outliner. - - -___ - -</details>
- - -
-AfterEffects - add container check validator to AE settings #5203 - -Adds a check that the scene contains only the latest versions of loaded containers. - - -___ - -</details>
- - - - -## [3.15.11](https://github.com/ynput/OpenPype/tree/3.15.11) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.10...3.15.11) - -### **🆕 New features** - - -<details>
-Ftrack: Task status during publishing #5123 - -Added option to change task status during publishing for 3 possible cases: "sending to farm", "local integration" and "on farm integration". - - -___ - -
- - -
-Nuke: Allow for more complex temp rendering paths #5132 - -When changing the temporary rendering template (e.g., adding `{asset}` to the path) to something a bit more complex, the formatting was erroring due to missing keys. - - -___ - -</details>
- - -
-Blender: Add support for custom path for app templates #5137 - -This PR adds support for a custom App Templates path in Blender by setting the `BLENDER_USER_SCRIPTS` environment variable to the path specified in `OPENPYPE_APP_TEMPLATES_PATH`. This allows users to use their own custom app templates in Blender. - - -___ - -
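-Roughly how such a prelaunch hook can forward the path - a sketch assuming OpenPype's `PreLaunchHook` API (`launch_context.env`); the class name is illustrative, not the PR's exact code:
-
-```python
-import os
-
-from openpype.lib import PreLaunchHook
-
-
-class AddBlenderAppTemplates(PreLaunchHook):
-    """Point Blender to custom app templates when a path is configured."""
-
-    app_groups = ["blender"]
-
-    def execute(self):
-        templates_path = os.environ.get("OPENPYPE_APP_TEMPLATES_PATH")
-        if templates_path:
-            self.launch_context.env["BLENDER_USER_SCRIPTS"] = templates_path
-```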
- - -
-TrayPublisher & StandalonePublisher: Specify version #5142 - -Simple creators in TrayPublisher can affect which version will be integrated. Standalone publisher respects the version change from UI. - - -___ - -
- -### **🚀 Enhancements** - - -</details>
-Workfile Builder UI: Workfile builder window is not modal #5131 - -Workfile Templates Builder: -- Create dialog is not a modal dialog -- Create dialog remains open after create, so you can directly create a new placeholder with similar settings -- In Maya allow to create root level placeholders (no selection during create) - **this felt more like a bugfix than anything else.** - - -___ - -
- - -
-3dsmax: Use custom modifiers to hold instance members #4931 - -Moving logic to handle members of publishing instance from children/parent relationship on Container to tracking via custom attribute on modifier. This eliminates limitations where you couldn't have one node multiple times under one Container and because it stores those relationships as weak references, they are easily transferable even when original nodes are renamed. - - -___ - -
- - -
-Add height, width and fps setup to project manager #5075 - -Add Width, Height, FPS, Pixel Aspect and Frame Start/End to the Project creation dialogue in the Project Manager. I understand that the Project Manager will be replaced in the upcoming AYON, but for the time being I believe setting up a new project with these options available would be more fun. - - -___ - -</details>
- - -
-Nuke: connect custom write node script to the OP setting #5113 - -Allows users to customize the values of knob attributes in the OP settings and use them in the custom write node. - - -___ - -</details>
- - -
-Keep `publisher.create_widget` variant when creating subsets #5119 - -Whenever a person is creating a subset to publish, the "creator" widget resets (where you choose the variant, product, etc.), so if the person is publishing several images of a variant which is not the default one, they have to keep selecting the correct one after every "create". - -This commit restores the original variant upon successful creation of a subset for publishing. - -Demo: -[Screencast from 2023-06-08 10-46-40.webm](https://github.com/ynput/OpenPype/assets/1800151/ca1c91d4-b8f3-43d2-a7b7-35987f5b6a3f) - -## Testing notes: -1. Launch AYON/OP -2. Launch the publisher (select a project, shot, etc.) -3. Create a publish type (any works) -4. Choose a variant for the publish that is not the default -5. "Create >>" - -The Variant field should still have the variant you chose. - - - -___ - -</details>
- - -
-Color Management - added color management support for simple expected files on Deadline #5122 - -Running `ExtractOIIOTranscode` during a Deadline publish was previously implemented only for DCCs with AOVs (Maya, Max). This PR extends it to other DCCs with a flat structure of expected files. - - -___ - -</details>
- - -
-hide macos dock icon on build #5133 - -Set `LSUIElement` to `1` in the `Info.plist` to hide the OP icon from the macOS dock by default. - - -___ - -</details>
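-A sketch of how the key can be set with the standard library (the app path is illustrative):
-
-```python
-import plistlib
-from pathlib import Path
-
-plist_path = Path("dist/OpenPype.app/Contents/Info.plist")  # illustrative
-with plist_path.open("rb") as f:
-    info = plistlib.load(f)
-
-info["LSUIElement"] = "1"  # hide the app from the macOS Dock
-
-with plist_path.open("wb") as f:
-    plistlib.dump(info, f)
-```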
- - -
-Pack project: Raise exception with reasonable message #5145 - -Pack project crashes with relevant message when destination directory is not set. - - -___ - -
- - -
-Allow "inventory" actions to be supplied by a Module/Addon. #5146 - -Adds "inventory" as a possible key to the plugin paths to be returned from a module. - - -___ - -
- - -
-3dsmax: make code compatible with 3dsmax 2022 #5164 - -Python 3.7 in 3dsmax 2022 does not support the walrus operator. It is removed from the code for the sake of compatibility. - - -___ - -</details>
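-The kind of rewrite involved, for illustration:
-
-```python
-nodes = ["a", "b"]
-
-# Python 3.8+ only (3dsmax 2022 ships Python 3.7):
-#   if (count := len(nodes)) > 1:
-#       print(f"{count} nodes selected")
-
-# Python 3.7 compatible equivalent:
-count = len(nodes)
-if count > 1:
-    print(f"{count} nodes selected")
-```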
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Support same attribute names on different node types. #5054 - -When validating render settings attributes, support same attribute names on different node types. - - -___ - -
- - -
-Maya: bug fix the standin being not loaded when they are first loaded #5143 - -Fixes the bug of an error being raised when the first two standins are loaded through the loader. The bug is mentioned in the related issue: https://github.com/ynput/OpenPype/issues/5129 For some reason, `defaultArnoldRenderOptions.operator` is not listed in the connection node attribute even if `cmds.loadPlugin("mtoa", quiet=True)` is executed before loading the object as a standin for the first time. But if you manually turn on mtoa through the plugin preferences and load the standins for the first time, it won't raise the related `defaultArnoldRenderOptions.operator` error. - - -___ - -</details>
- - -
-Maya: bug fix arnoldExportAss unable to export selected set members #5150 - -See #5108; fixes the bug of arnoldExportAss not being able to export and erroring out during extraction. - - -___ - -</details>
- - -
-Maya: Xgen multiple descriptions on single shape - OP-6039 #5160 - -When having multiple descriptions on the same geometry, the extraction would produce redundant duplicate geometries. - - -___ - -
- - -
-Maya: Xgen export of Abc's during Render Publishing - OP-6206 #5167 - -Shading assignments were missing when duplicating the setup for Xgen publishing, and the exporting of patches was getting the end frame incorrectly. - - -___ - -</details>
- - -
-Maya: Include handles - OP-6236 #5175 - -Render range was missing the handles. - - -___ - -
- - -
-OCIO: Support working with single frame renders #5053 - -When there is only 1 file, the data member `files` on the representation should be a string. - - -___ - -</details>
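-The convention, sketched with illustrative data:
-
-```python
-# Representations store a list of file names for sequences, but a
-# plain string when there is exactly one file.
-files = ["render.0001.exr"]
-representation_files = files[0] if len(files) == 1 else files
-print(representation_files)  # "render.0001.exr"
-```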
- - -
-Burnins: Refactored burnins script #5094 - -Refactored the list value for burnins and fixed the command length limit by using a temp file for the filters string. - - -___ - -</details>
- - -
-Nuke: open_file function can open autosave script #5107 - -Fixes the bug of the workfile dialog being unable to open an autosaved Nuke script. - - -___ - -</details>
- - -
-ImageIO: Minor fixes #5147 - -Resolves a few minor issues related to the latest image io changes from the PR. - - -___ - -</details>
- - -
-Publisher: Fix save shortcut #5148 - -Save shortcut should work for both PySide2 and PySide6. - - -___ - -
- - -
-Pack Project: Fix files packing #5154 - -Packing of project with files does work again. - - -___ - -
- - -
-Maya: Xgen version mismatch after publish - OP-6204 #5161 - -Xgen was not updating correctly when, for example, adding or removing descriptions. This resolves the issue by overwriting the workspace xgen file. - - -___ - -</details>
- - -
-Publisher: Edge case fixes #5165 - -Fixes a few edge cases that may cause issues in the Publisher UI. - - -___ - -</details>
- - -
-Colorspace: host config path backward compatibility #5166 - -Old project settings overrides are now fully backward compatible. The issue with host config path overrides was solved: if a project used to have ocio_config **enabled** with found filepaths, this is now considered as activated host ocio_config path overrides. Nuke shows a popup dialogue which lets a user know that the settings for the config path were changed. - - -___ - -</details>
- - -
-Maya: import workfile missing - OP-6233 #5174 - -Added the missing `workfile` family to import. - - -___ - -</details>
- - -
-Ftrack: Fix ignore sync filter #5176 - -The Ftrack ignore filter no longer crashes because of dictionary modifications during its iteration. - - -___ - -</details>
- - -
-Webpublisher - headless publish shouldn't be blocking operation #5177 - -`subprocess.call` was blocking, which resulted in UI unresponsiveness as it was waiting for the publish to finish. - - -___ - -</details>
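-The essence of the fix, sketched (arguments are illustrative): `subprocess.call` waits for the child process to exit, while `subprocess.Popen` returns immediately:
-
-```python
-import subprocess
-import sys
-
-args = [sys.executable, "-c", "print('publishing...')"]  # illustrative
-
-# Blocking: the caller (and its UI event loop) stalls until exit:
-#   subprocess.call(args)
-
-# Non-blocking: the publish runs in the background.
-process = subprocess.Popen(args)
-print("UI stays responsive; pid:", process.pid)
-```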
- - -
-Publisher: Fix disappearing actions #5184 - -Pyblish plugin actions are visible as expected. - - -___ - -
- -### **Merged pull requests** - - -
-Enhancement: animation family loaded as standin (abc) uses "use file sequence" #5110 - -The changes are the following. We started by updating the `is_sequence(files)` function, allowing it to return True for a list of files which has only one file, since our animation in this case provides just one alembic file. For the correct FPS number, we got the fps of the published ass/abc from the version data. - - -___ - -</details>
- - -
-add label to matching family #5128 - -I added the possibility to filter the `family smart select` with the label in addition to the family. - - -___ - -
- - - - -## [3.15.10](https://github.com/ynput/OpenPype/tree/3.15.10) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.9...3.15.10) - -### **🆕 New features** - - -<details>
-ImageIO: Adding ImageIO activation toggle to all hosts #4700 - -Colorspace management can now be enabled at the project level, although it is disabled by default. Once enabled, all hosts will use the OCIO config file defined in the settings. If the setting is disabled, the system switches to the DCC's native color space management, and we do not store colorspace information at the representation level. - - -___ - -</details>
- - -
-Redshift Proxy Support in 3dsMax #4625 - -Redshift Proxy Support for 3dsMax. -- [x] Creator -- [x] Loader -- [x] Extractor -- [x] Validator -- [x] Add documentation - - -___ - -
- - -
-Houdini farm publishing and rendering #4825 - -Deadline Farm publishing and Rendering for Houdini -- [x] Mantra -- [x] Karma(including usd renders) -- [x] Arnold -- [x] Elaborate Redshift ROP for deadline submission -- [x] fix the existing bug in Redshift ROP -- [x] Vray -- [x] add docs - - -___ - -
- - -
-Feature: Blender hook to execute python scripts at launch #4905 - -Hook to allow hooks to add path to a python script that will be executed when Blender starts. - - -___ - -
- - -
-Feature: Resolve: Open last workfile on launch through .scriptlib #5047 - -Added implementation to Resolve integration to open last workfile on launch. - - -___ - -
- - -
-General: Remove default windowFlags from publisher #5089 - -The default windowFlags made the publisher window (in Linux at least) only show the close button, which is frustrating as many times you just want to minimize the window and get back to the validation later. Removing that line gives the expected behavior. - - -___ - -</details>
- - -
-General: Show user who created the workfile on the details pane of workfile manager #5093 - -New PR for https://github.com/ynput/OpenPype/pull/5087, which was closed after merging the `next-minor` branch and then realizing we don't need to target it, as it was decided it's not required to support Windows. More info in that PR's discussion. A small addition to show the name of the `user` who created the workfile on the details pane of the workfile manager. - - -___ - -</details>
- - -
-Loader: Hide inactive versions in UI #5100 - -Hide versions with `active` set to `False` in Loader UI. - - -___ - -
- -### **🚀 Enhancements** - - -</details>
-Maya: Repair RenderPass token when merging AOVs. #5055 - -Validator was flagging that `<RenderPass>` was in the image prefix, but did not repair the issue. - - -___ - -</details>
- - -
-Maya: Improve error feedback when no renderable cameras exist for ASS family. #5092 - -When collecting cameras for `ass` family, this improves the error message when no cameras are renderable. - - -___ - -
- - -
-Nuke: Custom script to set frame range of read nodes #5039 - -Adds an option to set the frame range specifically for read nodes in the OpenPype panel. Users can set their preferred frame range in the frame range dialog, which is shown after clicking `Set Frame Range (Read Node)` in the OpenPype tools. - - -___ - -</details>
- - -
-Update extract review letterbox docs #5074 - -Update Extract Review - Letter Box section in Docs. Letterbox type description is removed. - - -___ - -
- - -
-Project pack: Documents only skips roots validation #5082 - -Single roots validation is skipped if only documents are extracted. - - -___ - -
- - -
-Nuke: custom settings for write node without publish #5084 - -Set Render Output and other settings to write nodes for non-publish purposes. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Deadline servers #5052 - -Fix working with multiple Deadline servers in Maya. -- Pool (primary and secondary) attributes were not recreated correctly. -- The order of collector plugins was wrong, so collected data was not injected into render instances. -- The server attribute was not converted to a string, so comparing with settings was incorrect. -- Improve debug logging for where the webservice url is getting fetched from. - - -___ - -</details>
- - -
-Maya: Fix Load Reference. #5091 - -Fix bug introduced with https://github.com/ynput/OpenPype/pull/4751 where `cmds.ls` returns a list. - - -___ - -
- - -
-3dsmax: Publishing Deadline jobs from RedShift #4960 - -Fixes the bug of being unable to publish deadline jobs from Redshift. Uses Current File instead of Published Scene for just Redshift. -- add save scene before rendering to ensure the scene is saved after modification -- add a separated aov files option to allow users to choose to have aovs in the render output -- add a validator for render publishing to avoid overriding previous renders - - -___ - -</details>
- - -
-Houdini: Fix missing frame range for pointcache and camera exports #5026 - -Fix missing frame range for pointcache and camera exports on published version. - - -___ - -
- - -
-Global: collect_frame_fix plugin fix and cleanup #5064 - -The previous implementation https://github.com/ynput/OpenPype/pull/5036 was broken; this fixes the issue where the attribute was found in instance data although the settings were disabled for the plugin. - - -___ - -</details>
- - -
-Hiero: Fix apply settings Clip Load #5073 - -Changed `apply_settings` to classmethod which fixes the issue with settings. - - -___ - -
- - -
-Resolve: Make sure scripts dir exists #5078 - -Make sure the scripts directory exists before looping over its content. - - -___ - -</details>
- - -
-removing info knob from nuke creators #5083 - -- removing instance node if removed via publisher -- removing info knob since it is not needed any more (was there only for the transition phase) - - -___ - -
- - -
-Tray: Fix restart arguments on update #5085 - -Fix arguments on restart. - - -___ - -
- - -
-Maya: bug fix on repair action in Arnold Scene Source CBID Validator #5096 - -Fix the bug of not being able to use repair action in Arnold Scene Source CBID Validator - - -___ - -
- - -
-Nuke: batch of small fixes #5103 - -- default settings for `imageio.requiredNodes` **CreateWriteImage** -- default settings for **LoadImage** representations -- **Create** and **Publish** menu items with `parent=main_window` (version > 14) - - -___ - -
- - -
-Deadline: make prerender check safer #5104 - -Prerender wasn't correctly recognized and was replaced with just the 'render' family. In Nuke it is correctly `prerender.farm` in families, which wasn't handled here. This resulted in using `render` in templates even if the `render` and `prerender` templates were split. - - -___ - -</details>
- - -
-General: Sort launcher actions alphabetically #5106 - -The launcher actions weren't being sorted by their label but by their name (which in the case of the apps is the version number), and thus the order wasn't consistent and we kept getting a different order on every launch. From my debugging session, this is what the `actions` variable held after the `filter_compatible_actions` function before these changes: -``` -(Pdb) for p in actions: print(p.order, p.name) -0 14-02 -0 14-02 -0 14-02 -0 14-02 -0 14-02 -0 19-5-493 -0 2023 -0 3-41 -0 6-01 -``` -This already caused a couple of bugs, with our artists thinking they had launched Nuke X when they had instead launched Nuke, and telling us their Nuke was missing nodes. - - -___ - -</details>
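-The gist of the fix, sketched (assuming actions carry `order`, `name` and `label` attributes, as the debug output above suggests):
-
-```python
-from collections import namedtuple
-
-Action = namedtuple("Action", ["order", "name", "label"])
-actions = [
-    Action(0, "14-02", "Nuke X 14.0v2"),
-    Action(0, "2023", "Maya 2023"),
-]
-
-# Sort by label (falling back to name) instead of the bare name,
-# which for applications is just a version number.
-actions.sort(key=lambda a: (a.order, (a.label or a.name).lower()))
-print([a.label for a in actions])
-```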
- - -
-TrayPublisher: Editorial video stream discovery #5120 - -The editorial create plugin in traypublisher no longer expects that the first stream in the input is video. - - -___ - -</details>
- -### **🔀 Refactored code** - - -</details>
-3dsmax: Move from deprecated interface #5117 - -`INewPublisher` interface is deprecated, this PR is changing the use to `IPublishHost` instead. - - -___ - -
- -### **Merged pull requests** - - -
-add movalex as a contributor for code #5076 - -Adds @movalex as a contributor for code. - -This was requested by mkolar [in this comment](https://github.com/ynput/OpenPype/pull/4916#issuecomment-1571498425) - -[skip ci] -___ - -
- - -
-3dsmax: refactor load plugins #5079 - - -___ - -
- - - - -## [3.15.9](https://github.com/ynput/OpenPype/tree/3.15.9) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.8...3.15.9) - -### **🆕 New features** - - -<details>
-Blender: Implemented Loading of Alembic Camera #4990 - -Implemented loading of Alembic cameras in Blender. - - -___ - -
- - -
-Unreal: Implemented Creator, Loader and Extractor for Levels #5008 - -Creator, Loader and Extractor for Unreal Levels have been implemented. - - -___ - -
- -### **🚀 Enhancements** - - -</details>
-Blender: Added setting for base unit scale #4987 - -A setting for the base unit scale has been added for Blender. The unit scale is automatically applied when opening a file or creating a new one. - - -___ - -</details>
- - -
-Unreal: Changed naming and path of Camera Levels #5010 - -The levels created for the camera in Unreal now include `_camera` in the name, to be better identifiable, and are placed in the camera folder. - - -___ - -
- - -
-Settings: Added option to nest settings templates #5022 - -It is now possible to nest settings templates inside other templates. - - -___ - -</details>
- - -
-Enhancement/publisher: Remove "hit play to continue" label on continue #5029 - -Remove "hit play to continue" message on continue so that it doesn't show anymore when play was clicked. - - -___ - -
- - -
-Ftrack: Limit number of ftrack events to query at once #5033 - -Limit the number of ftrack events received from mongo at once to 100. - - -___ - -</details>
- - -
-General: Small code cleanups #5034 - -Small code cleanup and updates. - - -___ - -
- - -
-Global: collect frames to fix with settings #5036 - -Settings for `Collect Frames to Fix` allow disabling the plugin per project. The `Rewriting latest version` attribute can also be hidden from settings. - - -___ - -</details>
- - -
-General: Publish plugin apply settings can expect only project settings #5037 - -Only project settings are passed to optional `apply_settings` method, if the method expects only one argument. - - -___ - -
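-Roughly how such arity-based dispatch can work (a sketch, not OpenPype's exact helper):
-
-```python
-import inspect
-
-
-def call_apply_settings(plugin, project_settings, system_settings):
-    """Pass only project settings when the plugin expects one argument."""
-    params = inspect.signature(plugin.apply_settings).parameters
-    if len(params) == 1:
-        plugin.apply_settings(project_settings)
-    else:
-        plugin.apply_settings(project_settings, system_settings)
-```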
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Load Assembly fix invalid imports #4859 - -Refactors imports so they are now correct. - - -___ - -
- - -
-Maya: Skipping rendersetup for members. #4973 - -When publishing a `rendersetup`, the objectset is and should be empty. - - -___ - -
- - -
-Maya: Validate Rig Output IDs #5016 - -Absolute node names were not used, so the plugin did not fetch the nodes properly. A pymel command was also missing. - - -___ - -</details>
- - -
-Deadline: escape rootless path in publish job #4910 - -If the publish path on Deadline job contains spaces or other characters, command was failing because the path wasn't properly escaped. This is fixing it. - - -___ - -
- - -
-General: Company name and URL changed #4974 - -The records in inno_setup were obsolete; they were changed to up-to-date ones. -___ - -</details>
- - -
-Unreal: Fix usage of 'get_full_path' function #5014 - -This PR changes all the occurrences of `get_full_path` functions to alternatives to get the path of the objects. - - -___ - -
- - -
-Unreal: Fix sequence frames validator to use correct data #5021 - -Fix sequence frames validator to use clipIn and clipOut data instead of frameStart and frameEnd. - - -___ - -
- - -
-Unreal: Fix render instances collection to use correct data #5023 - -Fix render instances collection to use `frameStart` and `frameEnd` from the Project Manager, instead of the sequence's ones. - - -___ - -
- - -
-Resolve: loader is opening even if no timeline in project #5025 - -The Loader now opens even if no timeline is available in a project. - - -___ - -</details>
- - -
-nuke: callback for dirmapping is on demand #5030 - -Nuke processing was slowed down due to this callback. Since it is disabled by default, it made sense to add it only on demand. - - -___ - -</details>
- - -
-Publisher: UI works with instances without label #5032 - -The Publisher UI does not crash if an instance doesn't have the 'label' key filled in its instance data. - - -___ - -</details>
- - -
-Publisher: Call explicitly prepared tab methods #5044 - -It is not possible to go to Create tab during publishing from OpenPype menu. - - -___ - -
- - -
-Ftrack: Role names are not case sensitive in ftrack event server status action #5058 - -Event server status action is not case sensitive for role names of user. - - -___ - -
- - -
-Publisher: Fix border widget #5063 - -Fixed border lines in Publisher UI to be painted correctly with correct indentation and size. - - -___ - -
- - -
-Unreal: Fix Commandlet Project and Permissions #5066 - -Fix problem when creating an Unreal Project when Commandlet Project is in a protected location. - - -___ - -
- - -
-Unreal: Added verification for Unreal app name format #5070 - -The Unreal app name is used to determine the Unreal version folder, so it is necessary that it follows the format `x-x`, where `x` is any integer. This PR adds a verification that the app name follows that format. - - -___ - -</details>
- -### **📃 Documentation** - - -</details>
-Docs: Display wrong image in ExtractOIIOTranscode #5045 - -A wrong image was displayed in `https://openpype.io/docs/project_settings/settings_project_global#extract-oiio-transcode`. - - -___ - -</details>
- -### **Merged pull requests** - - -
-Drop-down menu to list all families in create placeholder #4928 - -Currently in the create placeholder window, we need to write the family manually. This replaces the text field with an enum field listing all families for the current software. - - -___ - -</details>
- - -
-add sync to specific projects or listen only #4919 - -Extend kitsu sync service with additional arguments to sync specific projects. - - -___ - -
- - - - -## [3.15.8](https://github.com/ynput/OpenPype/tree/3.15.8) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.7...3.15.8) - -### **🆕 New features** - - -<details>
-Publisher: Show instances in report page #4915 - -Show publish instances in the report page. Also added a basic log view with logs grouped by instance. The validation error detail now has 2 columns: one with error details, the second with logs. The crashed state shows fast access to report action buttons. Success shows only logs. The publish frame is shrunk automatically on publish stop. - - -___ - -</details>
- - -
-Fusion - Loader plugins updates #4920 - -Updates to some Fusion loader plugins: The sequence loader can now load footage from the image and online families. The FBX loader can now import all formats Fusion's FBX node can read. You can now import the content of another workfile into your current comp with the workfile loader. - - -___ - -</details>
- - -
-Fusion: deadline farm rendering #4955 - -Enabling Fusion for deadline farm rendering. - - -___ - -
- - -
-AfterEffects: set frame range and resolution #4983 - -Frame information (frame start, duration, fps) and resolution (width and height) are applied to the selected composition from the Asset Management System (Ftrack or DB) automatically when a published instance is created. It is also possible to explicitly propagate both values from the DB to the selected composition via newly added menu buttons. - - -___ - -</details>
- - -
-Publish: Enhance automated publish plugin settings #4986 - -Added plugins option to define settings category where to look for settings of a plugin and added public helper functions to apply settings `get_plugin_settings` and `apply_plugin_settings_automatically`. - - -___ - -
- -### **🚀 Enhancements** - - -</details>
-Load Rig References - Change Rig to Animation in Animation instance #4877 - -We are using the template builder to build an animation scene. All the rig placeholders are imported correctly, but the automatically created animation instances retain the rig family in their names and subsets. In our example, we need animationMain instead of rigMain, because this name will be used in the following steps like lighting. I checked, and it's not a template builder problem, because even if I load a rig as a reference, the result is the same. Since we are in the animation instance, it makes more sense to have animation instead of rig in the name. The naming is just fine if we use create from the Openpype menu. - - -___ - -</details>
- - -
-Enhancement: Resolve prelaunch code refactoring and update defaults #4916 - -The main reason for this PR is wrong default settings in `openpype/settings/defaults/system_settings/applications.json` for the Resolve host. The `bin` folder should not be a part of the macOS and Linux `RESOLVE_PYTHON3_PATH` variable. The rest of this PR is some code cleanup for the Resolve prelaunch hook to simplify further development. Also added a .gitignore entry for vscode workspace files. - - -___ - -
- - -
-Unreal: ๐Ÿšš move Unreal plugin to separate repository #4980 - -To support Epic Marketplace have to move AYON Unreal integration plugins to separate repository. This is replacing current files with git submodule, so the change should be functionally without impact.New repository lives here: https://github.com/ynput/ayon-unreal-plugin - - -___ - -
- - -
-General: Lib code cleanup #5003 - -Small cleanup in lib files in openpype. - - -___ - -
- - -
-Allow to open with djv by extension instead of representation name #5004 - -Filter the open-in-DJV action by extension instead of representation name. - - -___ - -
- - -
-DJV open action `extensions` as `set` #5005 - -Change `extensions` attribute to `set`. - - -___ - -
- - -
-Nuke: extract thumbnail with multiple reposition nodes #5011 - -Added support for multiple reposition nodes. - - -___ - -
- - -
-Enhancement: Improve logging levels and messages for artist facing publish reports #5018 - -Tweak the logging levels and messages to try and only show those logs that an artist should see and could understand. Move anything that's slightly more involved into a "debug" message instead. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Bugfix/frame variable fix #4978 - -Renamed variables to match OpenPype terminology to reduce confusion and add consistency. -___ - -
- - -
-Global: plugins cleanup plugin will leave beauty rendered files #4790 - -Attempt to explicitly mark more files for cleanup in the intermediate `renders` folder in the work area for farm jobs. - - -___ - -
- - -
-Fix: Download last workfile doesn't work if not already downloaded #4942 - -An optimization condition was breaking the feature: if the published workfile was not already downloaded, it wouldn't download it... - - -___ - -
- - -
-Unreal: Fix transform when loading layout to match existing assets #4972 - -Fixed transform when loading layout to match existing assets. - - -___ - -
- - -
-fix the bug of fbx loaders in Max #4977 - -Bug fix for FBX loaders not being able to parent to the CON instances while importing cameras (and models) published from other DCCs such as Maya. - - -___ - -
- - -
-AfterEffects: allow returning stub with not saved workfile #4984 - -Allows using the Workfile app to save the first, empty workfile. - - -___ - -
- - -
-Blender: Fix Alembic loading #4985 - -Fixed problem occurring when trying to load an Alembic model in Blender. - - -___ - -
- - -
-Unreal: Addon Py2 compatibility #4994 - -Fixed Python 2 compatibility of unreal addon. - - -___ - -
- - -
-Nuke: fixed missing files key in representation #4999 - -Fixed an issue with missing `files` keys when the rendering target is set to existing frames. The instance has to be evaluated for missing files during validation. - - -___ - -
- - -
-Unreal: Fix the frame range when loading camera #5002 - -The keyframes of the camera, when loaded, were not using the correct frame range. - - -___ - -
- - -
-Fusion: fixing frame range targeting #5013 - -Frame range targeting on rendering instances now follows the configured options. - - -___ - -
- - -
-Deadline: fix selection from multiple webservices #5015 - -Multiple different Deadline webservices can be configured. First they must be configured in System Settings, then they can be configured per project in `project_settings/deadline/deadline_servers`. Only a single webservice can be the target of a publish though.
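-
-Illustrative only: how a per-project publish target could be resolved from that settings layout (the exact schema is an assumption):
-
-```python
-system_deadline_urls = {
-    "default": "http://deadline.local:8082",
-    "gpu_farm": "http://deadline-gpu.local:8082",
-}
-
-project_settings = {"deadline": {"deadline_servers": ["gpu_farm"]}}
-
-def get_publish_deadline_url(project_settings, system_urls):
-    """Return the single webservice URL used as the publish target."""
-    servers = project_settings["deadline"]["deadline_servers"]
-    # Only a single webservice can be the target of a publish.
-    server_name = servers[0] if servers else "default"
-    return system_urls[server_name]
-
-print(get_publish_deadline_url(project_settings, system_deadline_urls))
-```
-
-
-___
-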
- -### **Merged pull requests** - - -
-3dsmax: Refactored publish plugins to use proper implementation of pymxs #4988 - - -___ - -
- - - - -## [3.15.7](https://github.com/ynput/OpenPype/tree/3.15.7) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.6...3.15.7) - -### **๐Ÿ†• New features** - - -
-Addons directory #4893 - -This adds a directory for Addons, for easier distribution of studio specific code. - - -___ - -
- - -
-Kitsu - Add "image", "online" and "plate" to review families #4923 - -This PR adds "image", "online" and "plate" to the review families so they also can be uploaded to Kitsu.It also adds the `Add review to Kitsu` tag to the default png review. Without it the user would manually need to add it for single image uploads to Kitsu and might confuse users (it confused me first for a while as movies did work). - - -___ - -
- - -
-Feature/remove and load inv action #4930 - -Added the ability to remove and load a container, as a way to reset it. This can be useful in cases where a container breaks in a way that can be fixed by removing it, then reloading it. Also added the ability to add `InventoryAction` plugins by placing them in `openpype/plugins/inventory`. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Load Rig References - Change Rig to Animation in Animation instance #4877 - -We are using the template builder to build an animation scene. All the rig placeholders are imported correctly, but the automatically created animation instances retain the rig family in their names and subsets. In our example, we need animationMain instead of rigMain, because this name will be used in the following steps like lighting. Here is the result we need. I checked, and it's not a template builder problem, because even if I load a rig as a reference, the result is the same. For me, since we are in the animation instance, it makes more sense to have animation instead of rig in the name. The naming is just fine if we use create from the OpenPype menu. - - -___ - -
- - -
-Maya template builder - preserve all references when importing a template #4797 - -When building a template with the Maya template builder, we import the template and also the references inside the template file. This causes some problems: -- We cannot use the references to version assets imported by the template. -- When we import the file, the internal reference files are also imported. As a side effect, Maya complains about a reference that no longer exists: `// Error: file: /xxx/maya/2023.3/linux/scripts/AETemplates/AEtransformRelated.mel line 58: Reference node 'turntable_mayaSceneMain_01_RN' is not associated with a reference file.` - - -___ - -
- - -
-Unreal: Renaming the integration plugin to Ayon. #4646 - -Renamed the .h and .cpp files to Ayon. Also renamed the classes to use the Ayon keyword. - - -___ - -
- - -
-3dsMax: render dialogue needs to be closed #4729 - -Make sure the render setup dialog is in a closed state when updating the resolution and other render settings. - - -___ - -
- - -
-Maya Template Builder - Remove default cameras from renderable cameras #4815 - -When we build an asset workfile with build workfile from template inside Maya, we load our turntable camera. But then we end up with two renderable cameras: **persp** and the one imported from the template. We need to remove the **persp** camera (or any other default camera) from the renderable cameras when building the work file. - - -___ - -
- - -
-Validators for Frame Range in Max #4914 - -- Switch Render Frame Range Type to 3 for specific ranges (the initial setup for the range type is 4); see the sketch below. -- Reset Frame Range will also set the frame range for render settings. -- The Render Collector won't take the frame range from context data but takes the range directly from the render settings. -- Added validators for render frame range type and frame range respectively, with repair actions.
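-
-A rough pymxs illustration of the range-type switch described above (3 = explicit range, 4 = frame pickup); only runnable inside 3ds Max:
-
-```python
-from pymxs import runtime as rt
-
-def set_render_frame_range(start, end):
-    """Switch the render dialog to an explicit range and apply it."""
-    rt.rendTimeType = 3  # 3 = use explicit start/end range
-    rt.rendStart = start
-    rt.rendEnd = end
-
-set_render_frame_range(1001, 1100)
-```
-
-
-___
-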
- - -
-Fusion: Saver creator settings #4943 - -Adds Saver creator settings and an enhanced rendering path with a template. - - -___ - -
- - -
-General: Project Anatomy on creators #4962 - -The Anatomy object of the current project is available on `CreateContext` and create plugins. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Validate shader name - OP-5903 #4971 - -Running the plugin would error with: -``` -// TypeError: 'str' object cannot be interpreted as an integer -``` -Fixed and added the setting `active`. - - -___ - -
- - -
-Houdini: Fix slow Houdini launch due to shelves generation #4829 - -Shelf generation during Houdini startup could add a huge delay before the Houdini UI launched. Deferring the shelf generation removes that 5+ minute delay.
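-
-The deferral idea, sketched with Houdini's bundled hdefereval module (generate_shelves stands in for the real shelf logic):
-
-```python
-import hou
-
-def generate_shelves():
-    """Placeholder for the (potentially slow) shelf generation."""
-    pass
-
-if hou.isUIAvailable():
-    import hdefereval
-    # Run once the event loop is idle, so the UI can appear immediately.
-    hdefereval.executeDeferred(generate_shelves)
-else:
-    generate_shelves()
-```
-
-
-___
-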
- - -
-Fusion - Fixed "optional validation" #4912 - -Added OptionalPyblishPluginMixin and is_active checks for all publish tools that should be optional - - -___ - -
- - -
-Bug: add missing `pyblish.util` import #4937 - -Remote publishing was missing the import of `remote_publish`. This adds it back. - - -___ - -
- - -
-Unreal: Fix missing 'object_path' property #4938 - -Epic removed the `object_path` property from `AssetData`. This PR fixes usages of that property.Fixes #4936 - - -___ - -
- - -
-Remove obsolete global validator #4939 - -Removing the `Validate Sequence Frames` validator from global plugins, as it wasn't handling many cases correctly and was enabled by mistake, breaking functionality on Deadline. - - -___ - -
- - -
-General: fix build_workfile get_linked_assets missing project_name arg #4940 - -Linked assets collection didn't work within `build_workfile` because the `get_linked_assets` function call was missing the `project_name` argument. -- Added the `project_name` arg to the `get_linked_assets` function call. - - -___ - -
- - -
-General: fix Scene Inventory switch version error dialog missing parent arg on init #4941 - -Quick fix for the switch version error dialog to set the inventory widget as its parent. - - -___ - -
- - -
-Unreal: Fix camera frame range #4956 - -Fix the frame range of the level sequence for the Camera in Unreal. - - -___ - -
- - -
-Unreal: Fix missing parameter when updating Alembic StaticMesh #4957 - -Fix an error when updating an Alembic StaticMesh in Unreal, due to a missing parameter in a function call. - - -___ - -
- - -
-Unreal: Fix render extraction #4963 - -Fix a problem with the extraction of renders in Unreal. - - -___ - -
- - -
-Unreal: Remove Python 3.8 syntax from addon #4965 - -Removed Python 3.8 syntax from addon. - - -___ - -
- - -
-Ftrack: Fix editorial task creation #4966 - -Fix key assignment on instance data during editorial publishing in ftrack hierarchy integration. - - -___ - -
- -### **Merged pull requests** - - -
-Add "shortcut" to Scripts Menu Definition #4927 - -Add the possibility to associate a shorcut for an entry in the script menu definition with the key "shortcut" - - -___ - -
- - - - -## [3.15.6](https://github.com/ynput/OpenPype/tree/3.15.6) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.5...3.15.6) - -### **๐Ÿ†• New features** - - -
-Substance Painter Integration #4283 - -This implements a part of #4205 by implementing a Substance Painter integration - -Status: -- [x] Implement Host -- [x] start substance with last workfile using `AddLastWorkfileToLaunchArgs` prelaunch hook -- [x] Implement Qt tools -- [x] Implement loaders -- [x] Implemented a Set project mesh loader (this is a relatively special case because a Project will always have exactly one mesh - a Substance Painter project cannot exist without a mesh). -- [x] Implement project open callback -- [x] On project open it notifies the user if the loaded model is outdated -- [x] Implement publishing logic -- [x] Workfile publishing -- [x] Export Texture Sets -- [x] Support OCIO using #4195 (draft branch is set up - see comment) -- [ ] Likely needs more testing on the OCIO front -- [x] Validate all outputs of the Export template are exported/generated -- [x] Allow validation to be optional **(issue: there's no API method to detect what maps will be exported without doing an actual export to disk)** -- [x] Support extracting/integration if not all outputs are generated -- [x] Support multiple materials/texture sets per instance -- [ ] Add validator that can enforce only a single texture set output if studio prefers that. -- [ ] Implement Export File Format (extensions) override in Creator -- [ ] Add settings so Admin can choose which extensions are available. - - -___ - -
- - -
-Data Exchange: Geometry in 3dsMax #4555 - -Introduces a new creator, extractors and loaders for the model family, while adding model families into the existing max scene loader and extractor -- [x] creators -- [x] adding model family into max scene loader and extractor -- [x] fbx loader -- [x] fbx extractor -- [x] usd loader -- [x] usd extractor -- [x] validator for model family -- [x] obj loader (update function) -- [x] fix the update function of the loader as #4675 -- [x] Add documentation - - -___ - -
- - -
-AfterEffects: add review flag to each instance #4884 - -Adds a `mark_for_review` flag to the Creator to allow artists to disable review if necessary. Exposed this flag in Settings, by default set to True (e.g. same behavior as previously). - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Houdini: Fix Validate Output Node (VDB) #4819 - -- Removes plug-in that was a duplicate of this plug-in. -- Optimize logging of many prims slightly -- Fix error reporting like https://github.com/ynput/OpenPype/pull/4818 did - - -___ - -
- - -
-Houdini: Add null node as output indicator when using TAB search #4834 - - -___ - -
- - -
-Houdini: Don't error in collect review if camera is not set correctly #4874 - -Do not raise an error in collector when invalid path is set as camera path. Allow camera path to not be set correctly in review instance until validation so it's nicely shown in a validation report. - - -___ - -
- - -
-Project packager: Backup and restore can store only database #4879 - -The pack project functionality has an option to zip only the project database without project files. Unpack project can skip the project copy if the folder is not found. Added helper functions to `openpype.client.mongo` that can also be used in tests as a replacement for mongo dump. - - -___ - -
- - -
-Houdini: ExtractOpenGL for Review instance not optional #4881 - -Don't make ExtractOpenGL for the review instance optional. - - -___ - -
- - -
-Publisher: Small style changes #4894 - -Small changes in styles and form of publisher UI. - - -___ - -
- - -
-Houdini: Workfile icon in new publisher #4898 - -Fix icon for the workfile instance in new publisher - - -___ - -
- - -
-Fusion: Simplify creator icons code #4899 - -Simplify code for setting the icons for the Fusion creators - - -___ - -
- - -
-Enhancement: Fix PySide 6.5 support for loader #4900 - -Fixes PySide 6.5 support in Loader. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Validate Attributes #4917 - -This plugin was broken due to bad fetching of data and wrong repair action. - - -___ - -
- - -
-Fix: Locally copied version of last published workfile is not incremented #4722 - -### Fix 1 -When copied, the local workfile version keeps the published version number, while it must be +1 to follow OP's naming convention (see the sketch below). - -### Fix 2 -The local workfile version's name is built from anatomy. This avoids getting workfiles named with the publish template. - -### Fix 3 -In the case a subset has at least two tasks with published workfiles, for example `Modeling` and `Rigging`, launching `Rigging` was getting the first one with `next` and trying to find representations, therefore `workfileModeling`, and trying to match the current `task_name` (`Rigging`) with the `representation["context"]["task"]["name"]` of a Modeling representation, which ended up setting `workfile_representation` to `None` and exiting the process. - -Trying to find the `task_name` in the `subset['name']` fixes it. - -### Fix 4 -Fetch input dependencies of the workfile. - -Replacing https://github.com/ynput/OpenPype/pull/4102 for changes to bring this home.
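-
-A standalone illustration of Fix 1's version bump (OpenPype has its own version-up helpers; this regex variant is illustrative only):
-
-```python
-import re
-
-def next_workfile_name(published_name):
-    """'sh010_compositing_v012.nk' -> 'sh010_compositing_v013.nk'."""
-    match = re.search(r"v(\d+)", published_name)
-    version = int(match.group(1)) + 1
-    padding = len(match.group(1))
-    return (published_name[:match.start()]
-            + "v{:0{}d}".format(version, padding)
-            + published_name[match.end():])
-
-print(next_workfile_name("sh010_compositing_v012.nk"))
-# sh010_compositing_v013.nk
-```
-___
-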
- - -
-Maya: soft-fail when pan/zoom locked on camera when playblasting #4929 - -When the pan/zoom enabled attribute on a camera is locked, playblasting with pan/zoom fails because the plugin tries to restore it. This fixes it by skipping the attribute with a warning. - - -___ - -
- -### **Merged pull requests** - - -
-Maya Load References - Add Display Handle Setting #4904 - -When we load a reference in Maya using the OpenPype loader, display handle is checked by default and prevents us from easily selecting the object in the viewport. I understand that some productions like to keep this option, so I propose to add display handle to the reference loader settings. - - -___ - -
- - -
-Photoshop: add autocreators for review and flat image #4871 - -Review and flatten image (produced when no instance of the `image` family was created) were created somewhat magically. This PR introduces two new auto creators which allow artists to disable review or flatten image. For all `image` instances a `Review` flag was added to provide functionality to create a separate review per `image` instance. Previously it was only possible to have a separate instance of the `review` family. Review is not enabled on the `image` family by default (e.g. follows original behavior). The review auto creator is enabled by default as it was before. The flatten image creator must be set in Settings in `project_settings/photoshop/create/AutoImageCreator`. - - -___ - -
- - - - -## [3.15.5](https://github.com/ynput/OpenPype/tree/3.15.5) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.4...3.15.5) - -### **๐Ÿš€ Enhancements** - - -
-Maya: Playblast profiles #4777 - -Support playblast profiles. This enables studios to customize what playblast settings should be used on a per-task and/or per-subset basis. For example, `modeling` should have `Wireframe On Shaded` enabled, while all other tasks should have it disabled. - - -___ - -
- - -
-Maya: Support .abc files directly for Arnold standin look assignment #4856 - -If an `.abc` file is loaded into an Arnold standin, support look assignment through the `cbId` attributes in the Alembic file. - - -___ - -
- - -
-Maya: Hide animation instance in creator #4872 - -- Hide animation instance in creator -- Add inventory action to recreate animation publish instance for loaded rigs - - -___ - -
- - -
-Unreal: Render Creator enhancements #4477 - -Improvements to the creator for render family - -This PR introduces some enhancements to the creator for the render family in Unreal Engine: -- Added the option to create a new, empty sequence for the render. -- Added the option to not include the whole hierarchy for the selected sequence. -- Improvements of the error messages. - - -___ - -
- - -
-Unreal: Added settings for rendering #4575 - -Added settings for rendering in Unreal Engine. - -Two settings have been added: -- Pre roll frames, to set how many frames are used to load the scene before starting the actual rendering. -- Configuration path, to allow saving a preset of settings from Unreal and using it for rendering. - - -___ - -
- - -
-Global: Optimize anatomy formatting by only formatting used templates instead #4784 - -Optimization to not format the full anatomy when only a single template is used. Instead, only the single template is formatted. - - -___ - -
- - -
-Patchelf version locked #4853 - -For the CentOS dockerfile it is necessary to lock the patchelf version to an older one, otherwise the build process fails. - -___ - -
- - -
-Houdini: Implement `switch` method on loaders #4866 - -Implement `switch` method on loaders - - -___ - -
- - -
-Code: Tweak docstrings and return type hints #4875 - -Tweak docstrings and return type hints for functions in `openpype.client.entities`. - - -___ - -
- - -
-Publisher: Clear comment on successful publish and on window close #4885 - -Clear comment text field on successful publish and on window close. - - -___ - -
- - -
-Publisher: Make sure to reset asset widget when hidden and reshown #4886 - -Make sure to reset the asset widget when hidden and reshown. Without this, the asset list would never refresh in the set asset widget when changing context on an existing instance, and thus would not show assets added after the first time that widget was launched. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Fix nested model instances. #4852 - -Fix nested model instance under review instance, where data collection was not including "Display Lights" and "Focal Length". - - -___ - -
- - -
-Maya: Make default namespace naming backwards compatible #4873 - -Namespaces of loaded references are now _by default_ back to what they were before #4511 - - -___ - -
- - -
-Nuke: Legacy convertor skips deprecation warnings #4846 - -The Nuke legacy convertor was triggering a deprecated function, causing a lot of logs which slowed down the whole process. Changed the convertor to skip all nodes without `AVALON_TAB` to avoid the warnings. - - -___ - -
- - -
-3dsmax: move startup script logic to hook #4849 - -The startup script for OpenPype was interfering with the Open Last Workfile feature. Moving this logic from a simple command line argument in the Settings to a pre-launch hook solves the order of command line arguments and makes both features work. - - -___ - -
- - -
-Maya: Don't change time slider ranges in `get_frame_range` #4858 - -Don't change time slider ranges in `get_frame_range` - - -___ - -
- - -
-Maya: Looks - calculate hash for tx texture #4878 - -A texture hash is calculated for textures used in a published look and is used as a key in a dictionary. In recent changes, this hash was not calculated for TX files, resulting in a `None` value as the key in the dictionary, crashing publishing. This PR adds the texture hash for TX files to solve that issue. - - -___ - -
- - -
-Houdini: Collect `currentFile` context data separate from workfile instance #4883 - -Fix publishing without an active workfile instance due to missing `currentFile` data. Now `currentFile` is collected into the context in Houdini through a context plugin, no matter the active instances. - - -___ - -
- - -
-Nuke: fixed broken slate workflow once published on deadline #4887 - -The slate workflow is now working as expected and Validate Sequence Frames is not raising once the slate frame is included. - - -___ - -
- - -
-Add fps as instance.data in collect review in Houdini. #4888 - -Fix the bug of failing to publish extract review in Houdini. Original error: -```python - File "OpenPype\build\exe.win-amd64-3.9\openpype\plugins\publish\extract_review.py", line 516, in prepare_temp_data - "fps": float(instance.data["fps"]), -KeyError: 'fps' -```
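-
-A sketch of the fix's shape (the plugin body is illustrative; hou.fps() is the real Houdini call):
-
-```python
-import hou
-import pyblish.api
-
-class CollectReviewFPS(pyblish.api.InstancePlugin):
-    """Make sure "fps" is present before Extract Review runs."""
-    label = "Collect Review FPS"
-    order = pyblish.api.CollectorOrder + 0.1
-    hosts = ["houdini"]
-    families = ["review"]
-
-    def process(self, instance):
-        if "fps" not in instance.data:
-            instance.data["fps"] = hou.fps()
-```
-
-
-___
-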
- - -
-TrayPublisher: Fill missing data for instances with review #4891 - -Fill required data on an instance in traypublisher if the instance has the review family. The data is required by ExtractReview and it would be complicated to do a proper fix at this moment! The collector does for review instances what https://github.com/ynput/OpenPype/pull/4383 did. - - -___ - -
- - -
-Publisher: Keep track about current context and fix context selection widget #4892 - -Change selected context to current context on reset. Fix bug when context widget is re-enabled. - - -___ - -
- - -
-Scene inventory: Model refresh fix with cherry picking #4895 - -Fix cherry pick issue in scene inventory. - - -___ - -
- - -
-Nuke: Pre-render and missing review flag on instance causing crash #4897 - -If an instance created in Nuke was missing the `review` flag, the collector crashed. - - -___ - -
- -### **Merged pull requests** - - -
-After Effects: fix handles KeyError #4727 - -Sometimes when publishing with AE (we only saw this error on AE 2023), we got a KeyError for the handles in the "Collect Workfile" step. So I get the handles from the context if there are no handles in the asset entity. - - -___ - -
- - - - -## [3.15.4](https://github.com/ynput/OpenPype/tree/3.15.4) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.3...3.15.4) - -### **๐Ÿ†• New features** - - -
-Maya: Cant assign shaders to the ass file - OP-4859 #4460 - -Support AiStandIn nodes for look assignment. - -Using operators we assign shaders and attributes/parameters to nodes within standins. Initially there is only support for a limited amount of attributes but we can add support as needed: -``` -primaryVisibility -castsShadows -receiveShadows -aiSelfShadows -aiOpaque -aiMatte -aiVisibleInDiffuseTransmission -aiVisibleInSpecularTransmission -aiVisibleInVolume -aiVisibleInDiffuseReflection -aiVisibleInSpecularReflection -aiSubdivUvSmoothing -aiDispHeight -aiDispPadding -aiDispZeroValue -aiStepSize -aiVolumePadding -aiSubdivType -aiSubdivIterations -``` - - -___ - -
- - -
-Maya: GPU cache representation #4649 - -Implement GPU cache for model, animation and pointcache. - - -___ - -
- - -
-Houdini: Implement review family with opengl node #3839 - -Implements a first pass for Reviews publishing in Houdini. Resolves #2720 - -Uses the `opengl` ROP node to produce PNG images. - - -___ - -
- - -
-Maya: Camera focal length visible in review - OP-3278 #4531 - -Camera focal length visible in review. - -Support camera focal length in review; static and dynamic. Resolves #3220 - - -___ - -
- - -
-Maya: Defining plugins to load on Maya start - OP-4994 #4714 - -Feature to define plugins to load on Maya launch. - - -___ - -
- - -
-Nuke, DL: Returning Suspended Publishing attribute #4715 - -Old Nuke Publisher's feature for suspended publishing job on render farm was added back to the current Publisher. - - -___ - -
- - -
-Settings UI: Allow setting a size hint for text fields #4821 - -The text entity has `minimum_lines_count`, which allows changing the minimum size hint of the UI input. - - -___ - -
- - -
-TrayPublisher: Move 'BatchMovieCreator' settings to 'create' subcategory #4827 - -Moved settings for `BatchMovieCreator` into the subcategory `create` in settings. The changes are made to match other hosts' settings schema and structure. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya looks: support for native Redshift texture format #2971 - -Add support for native Redshift textures handling. Closes #2599 - -Uses Redshift's Texture Processor executable to convert textures being used in renders to the Redshift ".rstexbin" format. - - -___ - -
- - -
-Maya: custom namespace for references #4511 - -Adding an option in Project Settings > Maya > Loader plugins to set custom namespace. If no namespace is set, the default one is used. - - -___ - -
- - -
-Maya: Set correct framerange with handles on file opening #4664 - -Set the range of playback from the asset data, counting handles, to get the correct data when calling the "collect_animation_data" function. - - -___ - -
- - -
-Maya: Fix camera update #4751 - -Fix resetting any modelPanel to a different camera when loading a camera and updating. - - -___ - -
- - -
-Maya: Remove single assembly validation for animation instances #4840 - -Rig groups may now be parented to other groups when the `includeParentHierarchy` attribute on the instance is "off". - - -___ - -
- - -
-Maya: Optional control of display lights on playblast. #4145 - -Optional control of display lights on playblast. - -Gives control over which display lights are used in the playblasts. - - -___ - -
- - -
-Kitsu: note family requirements #4551 - -Allowing to add family requirements to `IntegrateKitsuNote` task status change. - -Adds a `Family requirements` setting to `Integrate Kitsu Note`, so you can add requirements to determine if kitsu task status should be changed based on which families are published or not. For instance you could have the status change only if another subset than workfile is published (but workfile can still be included) by adding an item set to `Not equal` and `workfile`. - - -___ - -
- - -
-Deactivate closed Kitsu projects on OP #4619 - -Deactivate project on OP when the project is closed on Kitsu. - - -___ - -
- - -
-Maya: Suggestion to change capture labels. #4691 - -Change capture labels. - - -___ - -
- - -
-Houdini: Change node type for OpenPypeContext `null` -> `subnet` #4745 - -Change the node type for OpenPype's hidden context node in Houdini from `null` to `subnet`. This fixes #4734 - - -___ - -
- - -
-General: Extract burnin hosts filters #4749 - -Removed the hosts filter from the ExtractBurnin plugin. An instance without representations won't cause a crash; such an instance is just skipped. We discovered this because Blender already has review but did not create burnins. - - -___ - -
- - -
-Global: Improve speed of Collect Custom Staging Directory #4768 - -Improve speed of Collect Custom Staging Directory. - - -___ - -
- - -
-General: Anatomy templates formatting #4773 - -Added an option to format only a single template from anatomy instead of formatting all of them all the time. Formatting all templates causes slowdowns, e.g. during publishing of hundreds of instances. - - -___ - -
- - -
-Harmony: Handle zip files with deeper structure #4782 - -External Harmony zip files might contain one additional level with scene name. - - -___ - -
- - -
-Unreal: Use common logic to configure executable #4788 - -The Unreal Editor location and version were autodetected. This eased configuration in some cases but was not flexible enough. This PR changes the way the Unreal Editor location is set, unifying it with the logic other hosts are using. - - -___ - -
- - -
-Github: Grammar tweaks + uppercase issue title #4813 - -Tweak some of the grammar in the issue form templates. - - -___ - -
- - -
-Houdini: Allow creation of publish instances via Houdini TAB menu #4831 - -Register the available Creators as Houdini tools so an artist can add publish instances via the Houdini TAB node search menu from within the network editor. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Fix Collect Render for V-Ray, Redshift and Renderman for missing colorspace #4650 - -Fix Collect Render not working for Redshift, V-Ray and Renderman due to missing `colorspace` argument to `RenderProduct` dataclass. - - -___ - -
- - -
-Maya: Xgen fixes #4707 - -Fix for Xgen extraction of world parented nodes and validation for required namespace. - - -___ - -
- - -
-Maya: Fix extract review and thumbnail for Maya 2020 #4744 - -Fix playblasting in Maya 2020 with override viewport options enabled. Fixes #4730. - - -___ - -
- - -
-Maya: local variable 'arnold_standins' referenced before assignment - OP-5542 #4778 - -MayaLookAssigner erroring when MTOA is not loaded: -``` -# Traceback (most recent call last): -# File "\openpype\hosts\maya\tools\mayalookassigner\app.py", line 272, in on_process_selected -# nodes = list(set(item["nodes"]).difference(arnold_standins)) -# UnboundLocalError: local variable 'arnold_standins' referenced before assignment -``` - - -___ - -
- - -
-Maya: Fix getting view and display in Maya 2020 - OP-5035 #4795 - -The `view_transform` returns a different format in Maya 2020. Fixes #4540 (hopefully). - - -___ - -
- - -
-Maya: Fix Look Maya 2020 Py2 support for Extract Look #4808 - -Fix Extract Look to support Python 2.7 for Maya 2020. - - -___ - -
- - -
-Maya: Fix Validate Mesh Overlapping UVs plugin #4816 - -Fix typo in the code where a maya command returns a `list` instead of `str`. - - -___ - -
- - -
-Maya: Fix tile rendering with Vray - OP-5566 #4832 - -Fixes tile rendering with Vray. - - -___ - -
- - -
-Deadline: checking existing frames fails when there is number in file name #4698 - -The previous implementation of the validator failed on files with any other number in the rendered file names. The regular expression pattern used now handles numbers in the file names (e.g. "Main_beauty.v001.1001.exr", "Main_beauty_v001.1001.exr", "Main_beauty.1001.1001.exr") but not numbers behind frames (e.g. "Main_beauty.1001.v001.exr").
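-
-An illustrative pattern in the same spirit (not the exact regex from the PR): treat only the last run of digits right before the extension as the frame. The final name demonstrates the documented limitation, where a version token after the frame is misread as the frame:
-
-```python
-import re
-
-FRAME_RE = re.compile(r"^(?P<head>.+?)(?P<frame>\d+)\.(?P<ext>\w+)$")
-
-for name in ("Main_beauty.v001.1001.exr",
-             "Main_beauty_v001.1001.exr",
-             "Main_beauty.1001.1001.exr",
-             "Main_beauty.1001.v001.exr"):
-    match = FRAME_RE.match(name)
-    print(name, "->", match.group("frame") if match else None)
-```
-
-
-___
-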
- - -
-Maya: Validate Render Settings. #4735 - -Fixes error message when using attribute validation. - - -___ - -
- - -
-General: Hero version sites recalculation #4737 - -Sites recalculation in integrate hero version expected that exactly the same amount of files is integrated as in the previous integration. This is often not the case, so the sites recalculation now happens in a different way: first, all sites from the previous representation files are prepared, and all of them are added to each file in the new representation. - - -___ - -
- - -
-Houdini: Fix collect current file #4739 - -Fixes the Workfile publishing getting added into every instance being published from Houdini - - -___ - -
- - -
-Global: Fix Extract Burnin + Colorspace functions for conflicting python environments with PYTHONHOME #4740 - -This fixes the running of openpype processes from e.g. a host with conflicting python versions that had `PYTHONHOME` set in addition to `PYTHONPATH`, like e.g. Houdini Py3.7 together with OpenPype Py3.9 when using Extract Burnin for a review in #3839. This fix applies to Extract Burnin and some of the colorspace functions that use `run_openpype_process`. - - -___ - -
- - -
-Harmony: render what is in timeline in Harmony locally #4741 - -Previously it wasn't possible to render according to what was set in the Timeline's scene start/end, only by what was set in the whole timeline. This allows artists to override what is in the DB with what they require (with `Validate Scene Settings` disabled). Now an artist can extend the scene by additional frames that shouldn't be rendered, but which might be desired. Removed explicitly setting scene settings (e.g. applying frames and resolution directly to the scene after launch); added a separate menu item to allow artists to do it themselves. - - -___ - -
- - -
-Maya: Extract Review settings add Use Background Gradient #4747 - -Add Display Gradient Background toggle in settings to fix support for setting flat background color for reviews. - - -___ - -
- - -
-Nuke: publisher is offering review on write families on demand #4755 - -The original idea, where the reviewable toggle would be offered in the publisher on demand, is fixed; the `review` attribute can now be disabled in settings. - - -___ - -
- - -
-Workfiles: keep Browse always enabled #4766 - -Browse might make sense even if there are no workfiles present; actually, in that case it makes the most sense (e.g. I want to locate a workfile from outside, from the Desktop for example). - - -___ - -
- - -
-Global: label key in instance data is optional #4779 - -Collect OTIO review plugin is not crashing if `label` key is missing in instance data. - - -___ - -
- - -
-Loader: Fix missing variable #4781 - -A variable `handles` was missing in the loader tool after https://github.com/ynput/OpenPype/pull/4746. The variable was renamed to `handles_label` and is initialized to `None` if handles are not available. - - -___ - -
- - -
-Nuke: Workfile Template builder fixes #4783 - -The popup window after Nuke start is no longer showing. Knobs with X/Y coordinates on nodes which were converted from placeholders are not added if `keepPlaceholders` is switched off. - - -___ - -
- - -
-Maya: Add family filter 'review' to burnin profile with focal length #4791 - -Avoid profile burnin with `focalLength` key for renders, but use only for playblast reviews. - - -___ - -
- - -
-add farm instance to the render collector in 3dsMax #4794 - -Bug fix for the failure of submitting a publish job in 3dsMax. - - -___ - -
- - -
-Publisher: Plugin active attribute is respected #4798 - -The Publisher considers a plugin's `active` attribute, so the plugin is not processed when `active` is set to `False`. But we use the attribute in `OptionalPyblishPluginMixin` for different purposes, so I've added a hack bypass of the active state validation when a plugin inherits from the mixin. This is a temporary solution which cannot be changed until all hosts use the Publisher, otherwise global plugins would be broken. Also, plugins which have `enabled` set to `False` are filtered out; this happened only when automated settings were applied and the settings contained an `"enabled"` key set to `False`. - - -___ - -
- - -
-Nuke: settings and optional attribute in publisher for some validators #4811 - -The new publisher supports an optional switch for plugins, which is offered in the Publisher's right panel. Some plugins were missing this switch and also the settings which would offer the optionality. - - -___ - -
- - -
-Settings: Version settings popup fix #4822 - -The version completer popup has issues on some platforms; this should fix those edge cases. Also fixed an issue where the completer stayed shown after reset (save). - - -___ - -
- - -
-Hiero/Nuke: adding monitorOut key to settings #4826 - -New versions of Hiero introduced a new colorspace property for Monitor Out. It has been added into project settings. Also added new config names into the settings enumerator option. - - -___ - -
- - -
-Nuke: removed default workfile template builder preset #4835 - -Default for workfile template builder should have been empty. - - -___ - -
- - -
-TVPaint: Review can be made from any instance #4843 - -Add the `"review"` tag to the output of extract sequence if the instance is marked for review. Until now, only instances with family `"review"` were able to define input for the `ExtractReview` plugin, which is not right. - - -___ - -
- -### **๐Ÿ”€ Refactored code** - - -
-Deadline: Remove unused FramesPerTask job info submission #4657 - -Remove unused `FramesPerTask` job info submission to Deadline. - - -___ - -
- - -
-Maya: Remove pymel dependency #4724 - -Refactors code written using `pymel` to use standard maya python libraries instead like `maya.cmds` or `maya.api.OpenMaya` - - -___ - -
- - -
-Remove "preview" data from representation #4759 - -Remove "preview" data from representation - - -___ - -
- - -
-Maya: Collect Review cleanup code for attached subsets #4720 - -Refactor some code for Maya: Collect Review for attached subsets. - - -___ - -
- - -
-Refactor: Remove `handles`, `edit_in` and `edit_out` backwards compatibility #4746 - -Removes the backward compatibility fallback for data called `handles`, `edit_in` and `edit_out`. - - -___ - -
- -### **๐Ÿ“ƒ Documentation** - - -
-Bump webpack from 5.69.1 to 5.76.1 in /website #4624 - -Bumps [webpack](https://github.com/webpack/webpack) from 5.69.1 to 5.76.1. -
-Release notes (sourced from webpack's releases; truncated):
-
-- v5.76.1: Fixed: added assert/strict built-in to NodeTargetPlugin; includes a revert.
-- v5.76.0: Bugfixes, features, security fixes, repo changes and new contributors. Full changelog: https://github.com/webpack/webpack/compare/v5.75.0...v5.76.0
-- v5.75.0: Bugfixes (normalize `experiments.*` to false when opted out, avoid NaN%, show the correct error when using a conflicting chunk name in code, HMR code tests existence of window before trying to access it, fix eval-nosources-* to actually exclude sources, fix race condition where no module is returned from processing a module, fix position of standalone semicolon in runtime code) and features (add support for @import to external CSS when using experimental CSS in node).
-
-Maintainer changes: this version was pushed to npm by evilebottnawi, a new releaser for webpack since your current version.
- - -[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=webpack&package-manager=npm_and_yarn&previous-version=5.69.1&new-version=5.76.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) - -Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. - -[//]: # (dependabot-automerge-start) -[//]: # (dependabot-automerge-end) - ---- - -
-Dependabot commands and options -
- -You can trigger Dependabot actions by commenting on this PR: -- `@dependabot rebase` will rebase this PR -- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it -- `@dependabot merge` will merge this PR after your CI passes on it -- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it -- `@dependabot cancel merge` will cancel a previously requested merge and block automerging -- `@dependabot reopen` will reopen this PR if it is closed -- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually -- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) -- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) -- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) -- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language -- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language -- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language -- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language - -You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts). - -
-___ - -
- - -
-Documentation: Add Extract Burnin documentation #4765 - -Add documentation for Extract Burnin global plugin settings. - - -___ - -
- - -
-Documentation: Move publisher related tips to publisher area #4772 - -Move publisher related tips for After Effects artist documentation to the correct position. - - -___ - -
- - -
-Documentation: Add extra terminology to the key concepts glossary #4838 - -Tweak some of the key concepts in the documentation. - - -___ - -
- -### **Merged pull requests** - - -
-Maya: Refactor Extract Look with dedicated processors for maketx #4711 - -Refactor Maya extract look to fix some issues: -- [x] Allow Extraction with maketx with OCIO Color Management enabled in Maya. -- [x] Fix file hashing so it includes arguments to maketx, so that when arguments change it correctly generates a new hash -- [x] Fix maketx destination colorspace when OCIO is enabled -- [x] Use pre-collected colorspaces of the resources instead of trying to retrieve again in Extract Look -- [x] Fix colorspace attributes being reinterpreted by maya on export (fix remapping) - goal is to resolve #2337 -- [x] Fix support for checking config path of maya default OCIO config (due to using `lib.get_color_management_preferences` which remaps that path) -- [x] Merged in #2971 to refactor MakeTX into TextureProcessor and also support generating Redshift `.rstexbin` files. - goal is to resolve #2599 -- [x] Allow custom arguments to `maketx` from OpenPype Settings, as mentioned here by @fabiaserra, for arguments like: `--monochrome-detect`, `--opaque-detect`, `--checknan` (see the sketch below). -- [x] Actually fix the code and make it work. :) (I'll try to keep below checkboxes in sync with my code changes) -- [x] Publishing without texture processor should work (no maketx + no rstexbin) -- [x] Publishing with maketx should work -- [x] Publishing with rstexbin should work -- [x] Test it. (This is just me doing some test-runs, please still test the PR!)
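-
-A hedged sketch of invoking maketx with settings-driven extra arguments (the listed flags are real maketx options; the helper and call shape are illustrative):
-
-```python
-import subprocess
-
-def run_maketx(maketx_exe, source, destination, extra_args=None):
-    """Convert a texture to a .tx file with optional custom arguments."""
-    cmd = [maketx_exe, "-v", source, "-o", destination]
-    cmd.extend(extra_args or [])
-    subprocess.run(cmd, check=True)
-
-run_maketx(
-    "maketx",
-    "textures/diffuse.png",
-    "publish/diffuse.tx",
-    extra_args=["--monochrome-detect", "--opaque-detect", "--checknan"],
-)
-```
-
-
-___
-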
- - -
-Maya template builder load all assets linked to the shot #4761 - -Problem -All the assets of the ftrack project are loaded and not those linked to the shot - -How to get the error -Open Maya in the context of a shot, then build a new scene with the "Build Workfile from template" button in the "OpenPype" menu. -![image](https://user-images.githubusercontent.com/7068597/229124652-573a23d7-a2b2-4d50-81bf-7592c00d24dc.png) - - -___ - -
- - -
-Global: Do not force instance data with frame ranges of the asset #4383 - -This aims to resolve #4317 - - -___ - -
- - -
-Cosmetics: Fix some grammar in docstrings and messages (and some code) #4752 - -Tweak some grammar in codebase - - -___ - -
- - -
-Deadline: Submit publish job fails due root work hardcode - OP-5528 #4775 - -Generating config templates was hardcoded to `root[work]`. This PR fixes that. - - -___ - -
- - -
-CreateContext: Added option to remove Unknown attributes #4776 - -Added an option to remove attributes with UnknownAttrDef on instances. Popping the key will also remove the attribute definition from attribute values, so they're not recreated again. - - -___ - -
- - - -## [3.15.3](https://github.com/ynput/OpenPype/tree/3.15.3) - - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.2...3.15.3) - -### **๐Ÿ†• New features** - - -
-Blender: Extract Review #3616 - -Added Review to Blender. - -This implementation is based on #3508 but made compatible for the current implementation of OpenPype for Blender. - - -___ - -
- - -
-Data Exchanges: Point Cloud for 3dsMax #4532 - -Publish PRT format with tyFlow in 3dsmax - -Publish PRT format with tyFlow in 3dsmax and possibly set up loader to load the format too. -- [x] creator -- [x] extractor -- [x] validator -- [x] loader - - -___ - -
- - -
-Global: persistent staging directory for renders #4583 - -Allows configuring whether the staging directory (`stagingDir`) should be persistent, with the use of profiles. - -With this feature, users can specify a transient data folder path based on presets, which can be used during the creation and publishing stages. In some cases, DCCs automatically add a rendering path during the creation stage, which is then used in publishing. One of the key advantages of this feature is that it allows users to take advantage of faster storage for rendering, which can help improve workflow efficiency. Additionally, this feature allows users to keep their rendered data persistent and use their own infrastructure for regular cleaning. However, it should be noted that some productions may want to use this feature without persistency. Furthermore, there may be a need for retargeting the rendering folder to faster storage, which is also not supported at the moment. It is the studio's responsibility to clean up obsolete folders with data. The location of the folder is configured in `project_anatomy/templates/others` (a 'transient' key is expected, with a 'folder' key; there could be more templates). Which family/task type/subset is applicable is configured in `project_settings/global/tools/publish/transient_dir_profiles`.
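-
-An illustrative shape of that configuration, written as Python data (keys follow the paths named above; the exact schema may differ):
-
-```python
-# project_anatomy/templates/others
-anatomy_templates_others = {
-    "transient": {
-        # Regular anatomy keys are available in the template.
-        "folder": "{root[work]}/{project[name]}/transient/{asset}/{subset}",
-    },
-}
-
-# project_settings/global/tools/publish/transient_dir_profiles
-transient_dir_profiles = [
-    {
-        "families": ["render", "prerender"],
-        "task_types": ["Compositing"],
-        "subsets": [],  # empty means "any subset"
-        "template_name": "transient",  # hypothetical key name
-    },
-]
-```
-
-
-___
-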
- - -
-Kitsu custom comment template #4599 - -Kitsu allows writing markdown in its comment field. This can be something very powerful to deliver dynamic comments with the help of the data from the instance. This feature defaults to off, so the admin has to manually set up the comment field the way they want. I have added a basic example of how the comment can look as the comment field's default value. I also want to add some documentation, but that's on its way once the code itself looks good to the reviewers. - - -___ - -
- - -
-MaxScene Family #4615 - -Introduction of the Max Scene Family - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya: Multiple values on single render attribute - OP-4131 #4631 - -When validating render attributes, this adds support for multiple values. When repairing, the first value in the list is used. - - -___ - -
- - -
-Maya: enable 2D Pan/Zoom for playblasts - OP-5213 #4687 - -Setting for enabling 2D Pan/Zoom on reviews. - - -___ - -
- - -
-Copy existing or generate new Fusion profile on prelaunch #4572 - -Fusion preferences will be copied to the predefined `~/.openpype/hosts/fusion/prefs` folder (or any other folder set in system settings) on launch. - -The idea is to create a copy of an existing Fusion profile, adding an OpenPype menu to the Fusion instance. By default the copy setting is turned off, so no file copying is performed. Instead a clean Fusion profile is created by Fusion in the predefined folder. The default location is set to `~/.openpype/hosts/fusion/prefs`, to better comply with the other OS platforms. After creating the default profile, some modifications are applied: -- forced Python 3 -- forced English interface -- setup of OpenPype-specific path maps. -If the `copy_prefs` checkbox is toggled, a copy of the existing Fusion profile folder will be placed in the mentioned location. Then they are altered the same way as described above. The operation is run only once, on the first launch, unless `force_sync [Resync profile on each launch]` is toggled. The English interface is forced because the `FUSION16_PROFILE_DIR` environment variable is not read otherwise (seems to be a Fusion bug). - - -___ - -
- - -
-Houdini: Create button open new publisher's "create" tab #4601 - -During a talk with @maxpareschi he mentioned that the new publisher in Houdini felt super confusing due to "Create" going to the older creator (now being completely empty) while the publish button went directly to the publish tab. This resolves that by fixing the Create button to open the new publisher on the Create tab. Also made the publish button enforce going to the "publish" tab for consistency in usage. @antirotor I think changing the Create button's callback was just missed in this commit, or was there a specific reason to not change that around yet? - - -___ - -
- - -
-Clockify: refresh and fix the integration #4607 - -Due to recent API changes, Clockify requires `user_id` to operate with the timers. I updated this part and currently it is a WIP to make it fully functional. Most functions, such as start and stop timer, and project sync, are currently working. For the rate limiting task a new dependency was added: https://pypi.org/project/ratelimiter/ - - -___ - -
- - -
-Fusion publish existing frames #4611 - -This PR adds the ability to publish existing frames instead of having to re-render all of them for each new publish. I have split the render_locally plugin so the review part is its own plugin now. I also changed the saver creator plugin's label from Saver to Render (saver), as I intend to add a Prerender creator like in Nuke. - - -___ - -
- - -
-Resolution settings referenced from DB record for 3dsMax #4652 - -- Add a callback for setting the resolution according to the DB after a new scene is created. -- Add a new action into the OpenPype menu which allows the user to reset the resolution in 3dsMax - - -___ - -
- - -
-3dsmax: render instance settings in Publish tab #4658 - -Allows the user to preset the pools, group and use_published settings in the Render Creator in the Max host. The user can set the settings before or after creating an instance in the new publisher. - - -___ - -
- - -
-scene length setting referenced from DB record for 3dsMax #4665 - -Setting the timeline length based on the DB record in the 3dsMax host. - - -___ - -
- - -
-Publisher: Windows reduce command window pop-ups during Publishing #4672 - -Reduce the command line pop-ups that show on Windows during publishing. - - -___ - -
- - -
-Publisher: Explicit save #4676 - -The Publisher has an explicit button to save changes, so a reset can happen without saving any changes. Saving still happens automatically when publishing is started or on publisher window close, but a popup is shown if the context of the host has changed. The important context was enhanced by the workfile path (if the host integration supports it), so workfile changes are captured too. In that case a dialog with a confirmation is shown to the user. All callbacks that may require a save of context were moved to the main window, to be able to handle showing the dialog in one place. Save changes now returns success, so the rest of the logic is skipped: publishing won't start when saving of instances fails. Save and reset buttons have shortcuts (Ctrl+S and Ctrl+R). - - -___ - -
- - -
-CelAction: conditional workfile parameters from settings #4677 - -Since some productions were requesting excluding some workfile parameters from publishing submission, we needed to move them to settings so those could be altered per project. - - -___ - -
- - -
-Improve logging of used app + tool envs on application launch #4682 - -Improve logging of what apps + tool environments got loaded for an application launch. - - -___ - -
- - -
-Fix name and docstring for Create Workdir Extra Folders prelaunch hook #4683 - -Fix the class name and docstring for the Create Workdir Extra Folders prelaunch hook. The class name and docstring were originally copied from another plug-in and didn't match the plug-in logic. This also fixes potentially seeing this twice in your logs, where it was actually running both this prelaunch hook and the actual `AddLastWorkfileToLaunchArgs` plugin. - - -___ - -
- - -
-Application launch context: Include app group name in logger #4684 - -Clarify better in logs what app group the ApplicationLaunchContext belongs to and what application is being launched. - - -___ - -
- - -
-increment workfile version 3dsmax #4685 - -Increment the workfile version in 3dsMax, as in the Blender and Maya hosts. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-Maya: Fix getting non-active model panel. #2968 - -When capturing multiple cameras with image planes that have file sequences playing, only the active (first) camera will play through the file sequence. - - -___ - -
- - -
-Maya: Fix broken review publishing. #4549 - -Resolves #4547 - - -___ - -
- - -
-Maya: Avoid error on right click in Loader if `mtoa` is not loaded #4616 - -Fix an error on right clicking in the Loader when `mtoa` is not a loaded plug-in. Additionally, if `mtoa` isn't loaded the loader will now load the plug-in before trying to create the Arnold standin. - - -___ - -
- - -
-Maya: Fix extract look colorspace detection #4618 - -Fix the logic which guesses the colorspace using the `arnold` python library. -- Previously it'd error if `mtoa` was not available on the path, so it still required `mtoa` to be available. -- The colorspace guessing logic doesn't actually require `mtoa` to be loaded, but just the `arnold` python library to be available. This changes the logic so it doesn't require the `mtoa` plugin to get loaded to guess the colorspace. -- The if/else branch was likely not doing what was intended: `cmds.loadPlugin("mtoa", quiet=True)` returns None if the plug-in was already loaded. So this would only ever be true if it ends up loading the `mtoa` plugin the first time. -```python -# Tested in Maya 2022.1 -print(cmds.loadPlugin("mtoa", quiet=True)) -# ['mtoa'] -print(cmds.loadPlugin("mtoa", quiet=True)) -# None -```
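-
-An unambiguous "is mtoa loaded" check that avoids the loadPlugin return-value pitfall (cmds.pluginInfo is the standard Maya call for this; the helper name is illustrative):
-
-```python
-from maya import cmds
-
-def ensure_mtoa_loaded():
-    # pluginInfo reports load state without side effects.
-    if not cmds.pluginInfo("mtoa", query=True, loaded=True):
-        cmds.loadPlugin("mtoa", quiet=True)
-```
-
-
-___
-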
- - -
-Maya: Maya Playblast Options overrides - OP-3847 #4634 - -When publishing a review in Maya, the extractor would fail due to wrong (long) panel name. - - -___ - -
- - -
-Bugfix/op 2834 fix extract playblast #4701 - - -___ - -
- - -
-Bugfix/op 2834 fix extract playblast #4704 - - -___ - -
- - -
-Maya: bug fix for passing zoom settings if review is attached to subset #4716 - -Fix for attaching review to subset with pan/zoom option. - - -___ - -
- - -
-Maya: tile assembly fail in draft - OP-4820 #4416 - -Tile assembly in Deadline was broken. - -Initial bug report revealed other areas of the tile assembly that needed fixing. - - -___ - -
- - -
-Maya: Yeti Validate Rig Input - OP-3454 #4554 - -Fix Yeti Validate Rig Input - -Existing workflow was broken due to this #3297. - - -___ - -
- - -
-Scene inventory: Fix code errors when "not found" entries are found #4594 - -Whenever a "NOT FOUND" entry is present a lot of errors happened in the Scene Inventory: -- It started spamming a lot of errors for the VersionDelegate since it had no numeric version (no version at all). Error reported on Discord: -```python -Traceback (most recent call last): - File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 65, in paint - text = self.displayText( - File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 33, in displayText - assert isinstance(value, numbers.Integral), ( -AssertionError: Version is not integer. "None" -``` -- The right click menu would error on NOT FOUND entries, and thus not show. With this PR it will now _disregard_ not found items for "Set version" and "Remove" but still allow actions. This PR resolves those. - - -___ - -
- - -
-Kitsu: Sync OP with zou, make sure value-data is int or float #4596 - -Currently the data zou pulls is a string and not a number, causing some bugs in the pipe where a value is expected (like `Set frame range` in Fusion). - -This PR makes sure each value is set with int() or float() so these bugs can't happen later on. - -_(A request has also been sent to cgwire to allow forcing values only for some metadata columns, but currently the user can enter whatever they want in there.)_
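-
-A minimal sketch of the coercion described above (a hypothetical helper, not the PR's exact code):
-```python
-def to_number(raw):
-    """Zou returns metadata as strings; coerce to int or float where possible."""
-    try:
-        return int(raw)
-    except (TypeError, ValueError):
-        try:
-            return float(raw)
-        except (TypeError, ValueError):
-            return raw
-```
-
-
-___
-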
- - -
-Max: fix the bug of removing an instance #4617 - -Fix the bug of removing an instance in 3dsMax. - - -___ - -
- - -
-Global | Nuke: fixing farm publishing workflow #4623 - -After Nuke adopted the new publisher with new creators, new issues were introduced; this PR addresses them. For example, publishing of reviewable video files was broken when publishing via the farm. Local publishing was fixed as well. - - -___ - -
- - -
-Ftrack: Ftrack additional families filtering #4633 - -Ftrack family collector makes sure the subset family is also in instance families for additional families filtering. - - -___ - -
- - -
-Ftrack: Hierarchical <> Non-Hierarchical attributes sync fix #4635 - -Sync between hierarchical and non-hierarchical attributes should now work as expected. The action syncs the values as expected, and the event handler does too, only on newly created entities. - - -___ - -
- - -
-bugfix for 3dsmax publishing error #4637 - -Fix the bug of a failing publishing job in 3dsMax. - - -___ - -
- - -
-General: Use right validation for ffmpeg executable #4640 - -Use ffmpeg executable validation for ffmpeg executables instead of oiio executable validation. The validation is used as the last possible source of ffmpeg from the `PATH` environment variable, which is an edge case but can cause issues. - - -___ - -
- - -
-3dsmax: opening last workfile #4644 - -Supports opening the last saved workfile in the 3dsmax host. - - -___ - -
- - -
-Fixed a bug where a QThread in the splash screen could be destroyed before finishing execution #4647 - -This should fix the occasional behavior of the QThread being destroyed before its worker even returns from the `run()` function. After quitting, it now waits for the QThread object to properly close itself. - - -___ - -
- - -
-General: Use right plugin class for Collect Comment #4653 - -The Collect Comment plugin is an instance plugin, so it should inherit from `InstancePlugin` instead of `ContextPlugin`.
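-
-For illustration, a minimal sketch of the difference (the real plug-in does more than this):
-```python
-import pyblish.api
-
-
-class CollectComment(pyblish.api.InstancePlugin):
-    """Instance plug-ins receive each instance; context plug-ins get the whole context."""
-    order = pyblish.api.CollectorOrder
-
-    def process(self, instance):
-        instance.data.setdefault("comment", "")
-```
-
-
-___
-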
- - -
-Global: add tags field to thumbnail representation #4660 - -The thumbnail representation might be missing the tags field. - - -___ - -
- - -
-Integrator: Enforce unique destination transfers, disallow overwrites in queued transfers #4662 - -Fix #4656 by enforcing unique destination transfers in the Integrator. It is now disallowed to queue a destination in the file transaction queue with a new source path during the publish.
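-
-A hedged sketch of the uniqueness check, under assumed names (the Integrator's actual queue implementation may differ):
-```python
-class FileTransactionError(Exception):
-    pass
-
-
-transfers = {}  # destination path -> source path
-
-
-def queue_transfer(src, dst):
-    # Refuse to re-queue the same destination with a different source path.
-    if dst in transfers and transfers[dst] != src:
-        raise FileTransactionError(
-            "Destination already queued from another source: {}".format(dst)
-        )
-    transfers[dst] = src
-```
-
-
-___
-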
- - -
-Hiero: Creator with correct workfile numeric padding input #4666 - -The Creator was showing 99 in the workfile input for a long time, even if users set the default value to 1001 in studio settings. This has been fixed now. - - -___ - -
- - -
-Nuke: Nukenodes family instance without frame range #4669 - -No need to add frame range data into `nukenodes` (backdrop) family publishes - since those are timeless. - - -___ - -
- - -
-TVPaint: Optional Validation plugins can be de/activated by user #4674 - -Added `OptionalPyblishPluginMixin` to TVPaint plugins that can be optional. - - -___ - -
- - -
-Kitsu: Slightly less strict with instance data #4678 - -- Allow taking the task name from context if the asset doesn't have any. Fixes an issue with Photoshop's review instance not having `task` in data. -- Allow matching "review" against both `instance.data["family"]` and `instance.data["families"]` because some instances don't have the primary family in families, e.g. in Photoshop and TVPaint. -- Do not error in Integrate Kitsu Review whenever, for whatever reason, Integrate Kitsu Note did not create a comment; just log a message that it was unable to connect a review. - - -___ - -
- - -
-Publisher: Fix reset shortcut sequence #4694 - -Fix a bug created in https://github.com/ynput/OpenPype/pull/4676 where the key sequence was checked using an unsupported method. The check was changed to convert the event into a `QKeySequence` object which can be compared to the prepared sequence.
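-
-A minimal sketch of that comparison, assuming a hypothetical reset shortcut (the actual sequence lives in the Publisher code):
-```python
-from qtpy import QtGui
-
-RESET_SEQUENCE = QtGui.QKeySequence("Ctrl+R")  # hypothetical shortcut
-
-
-def is_reset_shortcut(event):
-    # Build a QKeySequence from the key event and compare it to the
-    # prepared sequence instead of calling an unsupported method.
-    sequence = QtGui.QKeySequence(int(event.modifiers()) | event.key())
-    return sequence == RESET_SEQUENCE
-```
-
-
-___
-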
- - -
-Refactor _capture #4702 - -Refactor `_capture`. - - -___ - -
- - -
-Hiero: correct container colors if UpToDate #4708 - -Colors on loaded containers now correctly identify the real state of the version: `red` for out of date and `green` for up to date. - - -___ - -
- -### **๐Ÿ”€ Refactored code** - - -
-Look Assigner: Move Look Assigner tool since it's Maya only #4604 - -Fix #4357: Move the Look Assigner tool to Maya since it's Maya only. - - -___ - -
- - -
-Maya: Remove unused functions from Extract Look #4671 - -Remove unused functions from Maya Extract Look plug-in - - -___ - -
- - -
-Extract Review code refactor #3930 - -Trying to reduce the complexity of the Extract Review plug-in: -- Re-use profile filtering from lib -- Remove "combination families" additional filtering which supposedly was from OP v2 -- Simplify 'formatting' for filling gaps -- Use `legacy_io.Session` over `os.environ` - - -___ - -
- - -
-Maya: Replace last usages of Qt module #4610 - -Replace last usage of `Qt` module with `qtpy`. This change is needed for `PySide6` support. All changes happened in Maya loader plugins. - - -___ - -
- - -
-Update tests and documentation for `ColormanagedPyblishPluginMixin` #4612 - -Refactor `ExtractorColormanaged` to `ColormanagedPyblishPluginMixin` in tests and documentation. - - -___ - -
- - -
-Improve logging of used app + tool envs on application launch (minor tweak) #4686 - -Use `app.full_name` for the change done in #4682. - - -___ - -
- -### **๐Ÿ“ƒ Documentation** - - -
-Docs/add architecture document #4344 - -Add `ARCHITECTURE.md` document. - -This document attempts to give a quick overview of the project to help onboarding. It's not extensive documentation, but more of an elevator pitch: one-line descriptions of files/directories and what they attempt to do. - - -___ - -
- - -
-Documentation: Tweak grammar and fix some typos #4613 - -This resolves some grammar issues and typos in the documentation. Also fixes the extension of some images in the After Effects docs, which used an uppercase extension even though the files had a lowercase extension. - - -___ - -
- - -
-Docs: Fix some minor grammar/typos #4680 - -Typo/grammar fixes in documentation. - - -___ - -
- -### **Merged pull requests** - - -
-Maya: Implement image file node loader #4313 - -Implements a loader for loading a texture image into a `file` node in Maya. - -Similar to Maya's Hypershade creation of textures, on load you have the option to choose between three modes of creating: -- Texture -- Projection -- Stencil - -These should match what Maya generates if you create those in Maya. -- [x] Load and manage file nodes -- [x] Apply color spaces after #4195 -- [x] Support for _either_ UDIM or image sequence - currently it seems to always load sequences as UDIM automatically. -- [ ] Add support for animation sequences of UDIM textures using the `..exr` path format? - - -___ - -
- - -
-Maya Look Assigner: Don't rely on containers for get all assets #4600 - -This resolves #4044 by not actually relying on containers in the scene but instead just relying on finding nodes with `cbId` attributes. As such, imported nodes would also be found and a shader can be assigned (similar to when using get from selection). - -**Please take into consideration the potential downsides below.** - -Potential downsides would be: -- If an already loaded look has any dagNodes, say a 3D Projection node, then that will also show up as a loaded asset, where previously nodes from loaded looks were ignored. -- If any dag nodes were created locally, they would have gotten `cbId` attributes on scene save, and thus the current asset would almost always show? - - -___ - -
- - -
-Maya: Unify menu labels for "Set Frame Range" and "Set Resolution" #4605 - -Fix #4109: Unify menu labels for "Set Frame Range" and "Set Resolution". This also tweaks it in Houdini from Reset Frame Range to Set Frame Range. - - -___ - -
- - -
-Resolve missing OPENPYPE_MONGO in deadline global job preload #4484 - -In the GlobalJobPreLoad plugin, the Deadline SpawnProcess is used to start the OpenPype process. The problem is that the SpawnProcess does not pass environment variables, including OPENPYPE_MONGO, to the process when running under CentOS 7 Linux, and the process gets stuck. We propose to replace it with a subprocess and to pass the environment variables in the parameters.
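-
-A hedged sketch of the replacement, with placeholder values (the real plug-in resolves the executable, arguments and Mongo URL from the Deadline job):
-```python
-import os
-import subprocess
-
-# Placeholder values; the real plug-in resolves these from the Deadline job.
-openpype_executable = "/opt/openpype/openpype_console"
-job_arguments = ["publish", "/path/to/metadata.json"]
-mongo_url = "mongodb://server:27017"
-
-# Pass the environment explicitly so OPENPYPE_MONGO reaches the child
-# process; Deadline's SpawnProcess drops it under CentOS 7.
-env = dict(os.environ)
-env["OPENPYPE_MONGO"] = mongo_url
-subprocess.run([openpype_executable] + job_arguments, env=env, check=True)
-```
-
-
-___
-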
- - -
-Tests: Added setup_only to tests #4591 - -Allows downloading the test zip, unzipping it and restoring the DB in preparation for a new test. - - -___ - -
- - -
-Maya: Arnold don't reset maya timeline frame range on render creation (or setting render settings) #4603 - -Fix #4429: Do not reset fps or playback timeline on applying or creating render settings - - -___ - -
- - -
-Bump @sideway/formula from 3.0.0 to 3.0.1 in /website #4609 - -Bumps [@sideway/formula](https://github.com/sideway/formula) from 3.0.0 to 3.0.1. -
-Commits
-
-Maintainer changes: This version was pushed to npm by marsup, a new releaser for @sideway/formula since your current version.
-
- - -[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@sideway/formula&package-manager=npm_and_yarn&previous-version=3.0.0&new-version=3.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) - -Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. - -[//]: # (dependabot-automerge-start) -[//]: # (dependabot-automerge-end) - ---- - -
-Dependabot commands and options -
- -You can trigger Dependabot actions by commenting on this PR: -- `@dependabot rebase` will rebase this PR -- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it -- `@dependabot merge` will merge this PR after your CI passes on it -- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it -- `@dependabot cancel merge` will cancel a previously requested merge and block automerging -- `@dependabot reopen` will reopen this PR if it is closed -- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually -- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) -- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) -- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) -- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language -- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language -- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language -- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language - -You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts). - -
-___ - -
- - -
-Update artist_hosts_maya_arnold.md #4626 - -Correct Arnold docs. -___ - -
- - -
-Maya: Add "Include Parent Hierarchy" option in animation creator plugin #4645 - -Add an option in Project Settings > Maya > Creator Plugins > Create Animation to include (or not) parent hierarchy. This is to avoid artists to check manually the option for all create animation. - - -___ - -
- - -
-General: Filter available applications #4667 - -Added an option to filter out applications that don't have a valid executable available in settings, both in the launcher and in ftrack actions. This option can be disabled in the new settings category `Applications`. The filtering is disabled by default. - - -___ - -
- - -
-3dsmax: make sure that startup script executes #4695 - -Fixing reliability of OpenPype startup in 3dsmax. - - -___ - -
- - -
-Project Manager: Change minimum frame start/end to '0' #4719 - -Project manager can have frame start/end set to `0`. - - -___ - -
- - - -## [3.15.2](https://github.com/ynput/OpenPype/tree/3.15.2) - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.1...3.15.2) - -### **๐Ÿ†• New features** - - -
-maya gltf texture convertor and validator #4261 - -Continuation of the glTF extractor implementation. - -Continuation of the glTF extractor https://github.com/pypeclub/OpenPype/pull/4192. UPDATE: **Validator for GLSL Shader**: Validates whether the mesh uses a GLSL shader. If not, it errors out. The user can choose to perform the repair action, which assigns a GLSL shader. If the mesh uses Stingray PBS, the repair action also checks whether there are any linked textures such as Color, Occlusion and Normal Map. If yes, it relinks the related textures to the GLSL shader. If the mesh uses the PBS Shader, - - -___ - -
- - -
-Unreal: New Publisher #4370 - -Implementation of the new publisher for Unreal. - -The implementation of the new publisher for Unreal. This PR includes the changes for all the existing creators to be compatible with the new publisher. The basic creator has been split into two distinct creators: -- `UnrealAssetCreator`, which works with assets in the Content Browser. -- `UnrealActorCreator`, which works with actors in the scene. - - -___ - -
- - -
-Implementation of a new splash screen #4592 - -Implemented a new splash screen widget to reflect a process running in the background. This widget can be used for tasks other than UE. **Also fixed the compilation error in AssetContainer.cpp when trying to build the plugin in UE 5.0.** - - -___ - -
- - -
-Deadline for 3dsMax #4439 - -Setting up Deadline for 3dsMax. - -Sets up Deadline for 3dsMax by setting render outputs and the viewport camera. - - -___ - -
- - -
-Nuke: adding nukeassist #4494 - -Adding support for NukeAssist. - -To support NukeAssist we had to limit some Nuke features, since NukeAssist itself is Nuke with limitations. We do not support the Creator and Publisher. Users can only load versions with version control, and can also set the frame range and colorspace. - - -___ - -
- -### **๐Ÿš€ Enhancements** - - -
-Maya: OP-2630 acescg maya #4340 - -Resolves #2712 - - -___ - -
- - -
-Default Ftrack Family on RenderLayer #4458 - -With default settings, renderlayers in Maya were not being tagged with the Ftrack family, leading to confusion when doing reviews. - - -___ - -
- - -
-Maya: Maya Playblast Options - OP-3783 #4487 - -Replacement PR for #3912. Adds more options for playblasts to preferences/settings. - -Adds the following as options in generating playblasts, matching viewport settings. -- Use default material -- Wireframe on shaded -- X-ray -- X-ray Joints -- X-ray active component - - -___ - -
- - -
-Maya: Passing custom attributes to alembic - OP-4111 #4516 - -Passing custom attributes to alembic - -This PR makes it possible to pass all user defined attributes along to the alembic representation. - - -___ - -
- - -
-Maya: Options for VrayProxy output - OP-2010 #4525 - -Options for output of VrayProxy. - -Client requested more granular control of output from VrayProxy instance. Exposed options on the instance and settings for vrmesh and alembic. - - -___ - -
- - -
-Maya: Validate missing instance attributes #4559 - -Validate missing instance attributes. - -New attributes can be introduced as new features come in. Old instances need to be updated with these attributes so the documentation makes sense and users do not have to recreate the instances. - - -___ - -
- - -
-Refactored Generation of UE Projects, installation of plugins moved to the engine #4369 - -Improved the way OpenPype generates UE projects. The installation of the plugin has also been altered to install into the engine. - -OpenPype now uses the appropriate tools to generate UE projects. Unreal Build Tool (UBT) and a "Commandlet Project" are needed to properly generate a BP project (or C++ code in case `dev_mode = True`), folders, the .uproject file and many other resources. On the plugin's side, it is built separately with the Unreal Automation Tool (UAT) and then its contents are moved under the `Engine/Plugins/Marketplace/OpenPype` directory. - - -___ - -
- - -
-Unreal: Use client functions in Layout loader #4578 - -Use 'get_representations' instead of a 'legacy_io' query in the layout loader. - -This removes the usage of `find_one` called on `legacy_io` and uses client functions instead, as preparation for the AYON connection. Also, all representations are queried at once instead of one by one. - - -___ - -
- - -
-General: Support for extensions filtering in loaders #4492 - -Added extensions filtering support to loader plugins. - -To avoid a possible backwards compatibility break, the filtering stays exactly the same, and filtering by extensions is enabled only if the class attribute 'extensions' is set. - - -___ - -
- - -
-Nuke: multiple reformat in baking review profiles #4514 - -Added support for multiple reformat nodes in baking profiles. - -The old settings for a single reformat node are supported and prioritised in case studios are using them and backward compatibility is needed. Warnings in the Nuke terminal notify users to switch settings to the new workflow. The settings also explain the migration path. - - -___ - -
- - -
-Nuke: Add option to use new creating system in workfile template builder #4545 - -The Nuke workfile template builder can use new creators instead of legacy creators. - -Modified the workfile template builder to have an option saying whether legacy creators or new creators should be used. Legacy creators are disabled by default, so the value has been changed for Maya. - - -___ - -
- - -
-Global, Nuke: Workfile first version with template processing #4579 - -Supporting the new template workfile builder, with a toggle for creating the first version of a workfile in case there is none yet. - - -___ - -
- - -
-Fusion: New Publisher #4523 - -This is an updated PR for @BigRoy's old PR (https://github.com/ynput/OpenPype/pull/3892). I have merged it with code from OP 3.15.1-nightly.6 and made sure it works as expected. This converts the old publishing system to the new one and implements Fusion as a new host addon. - -- Create button removed in OpenPype menu in favor of the new Publisher -- Draft refactor of validations to raise PublishValidationError -- Implement Creator for New Publisher -- Implement Fusion as Host addon - - -___ - -
- - -
-TVPaint: Use Publisher tool #4471 - -Use the Publisher tool and new creation system in the TVPaint integration. - -Using the new creation system makes the TVPaint integration a little easier to maintain for artists. Removed the unneeded Creator and Subset Manager tools. The goal is to keep the integration working as close as possible to the previous integration; some changes were made, but primarily because they were not right under the previous system. All creators create instances with the final family instead of changing the family during extraction. Render passes are not related to a group id but to a render layer instance; a render layer is still related to a group. Workfile, review and scene render instances are created using autocreators instead of auto-collection during publishing. Subset names are fully filled during publishing, but instance labels are filled on refresh with the last known right value. Implemented the basics of a legacy convertor which should convert render layers and render passes. - - -___ - -
- - -
-TVPaint: Auto-detect render creation #4496 - -Create plugin which creates Render Layer and Render Pass instances based on information in the scene. - -Added a new creator that must be triggered by the artist. The create plugin first creates Render Layer instances if they were not created yet; the color group name is used as the variant. The creator has an option to rename color groups by a template defined in settings; the template may use the index of the group by its usage in the scene (from bottom to top). After the Render Layers it creates Render Passes. A Render Pass is created for each individual TVPaint layer in any group that has a created Render Layer; the layer's name is used as the variant (pass). - - -___ - -
- - -
-TVPaint: Small enhancements #4501 - -Small enhancements in the TVPaint integration which did not get into https://github.com/ynput/OpenPype/pull/4471. - -It was found that the `opacity` returned from `tv_layerinfo` is always empty, and it is dangerous to add it to the layer information. Added information about the "current" layer to the layers information. Disabled review of Render Layer and Render Pass instances by default; in most productions only the "scene review" is used. Skipped usage of the `"enabled"` key from settings in automated layer/pass creation. - - -___ - -
- - -
-Global: color v3 global oiio transcoder plugin #4291 - -Implements the possibility to use `oiiotool` to transcode image sequences from one color space to one or more others. - -Uses the collected `colorspaceData` information about source color spaces; this information needs to be collected beforehand in each DCC interested in color management. Uses profiles configured in Settings to create single or multiple new representations (and file extensions) with different color spaces. New representations might replace the existing one, and each new representation might contain different tags and custom tags to control its integration step.
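-
-For illustration, a transcode of this kind could be driven like the sketch below; the paths and colorspace names are hypothetical, only the `--colorconvert` usage is standard `oiiotool`:
-```python
-import subprocess
-
-# Convert one frame from a source color space to a target color space.
-subprocess.run([
-    "oiiotool", "render.1001.exr",
-    "--colorconvert", "ACEScg", "sRGB",
-    "-o", "review.1001.exr",
-], check=True)
-```
-
-
-___
-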
- - -
-Deadline: Added support for multiple install dirs in Deadline #4451 - -SearchDirectoryList returns the FIRST existing directory, so with multiple OP install dirs it wouldn't search for the appropriate version in the later ones. - - -___ - -
- - -
-Ftrack: Upload reviewables with original name #4483 - -Ftrack can integrate reviewables with original filenames. - -As ftrack has restrictions on the names of components, the only way to achieve the result was to upload the same file twice: once with the required name and once with the original name. - - -___ - -
- - -
-TVPaint: Ignore transparency in Render Pass #4499 - -It is possible to ignore layer transparency during Render Pass extraction. - -Render Pass extraction can now disregard the opacity of TVPaint layers set in the scene during extraction. It can be enabled/disabled in settings. - - -___ - -
- - -
-Anatomy: Preparation for different root overrides #4521 - -Prepare Anatomy to handle only the 'studio' site override on its own. - -Changed how Anatomy fills root overrides based on the requested site name. The logic which decides what the active site is was moved to the sync server addon, and the same goes for receiving root overrides of the local site. Anatomy resolves only studio site overrides; anything else is handled by the sync server. BaseAnatomy only expects a root overrides value and does not need a site name. Validation of the site name happens in the sync server, same as resolving whether the site name is local or not. - - -___ - -
- - -
-Nuke | Global: colormanaged plugin in collection #4556 - -The colormanaged extractor was changed to a Mixin class so it can be added to any stage of publishing rather than just to extraction. Nuke is now collecting colorspaceData into the representation collected on already rendered images. - -The Mixin class can now be used as a secondary parent in publishing plugins. - - -___ - -
- -### **๐Ÿ› Bug fixes** - - -
-look publishing and srgb colorspace in maya #4276 - -Check that OCIO color management is enabled before linearizing the colorspace when converting texture maps into tx files. - - -___ - -
- - -
-Maya: extract Thumbnail "No active model panel found" - OP-3849 #4421 - -Error when extracting a playblast with no model panel. - -If `project_settings/maya/publish/ExtractPlayblast/capture_preset/Viewport Options/override_viewport_options` was off and publishing was done without showing any model panel, the extraction would fail. - - -___ - -
- - -
-Maya: Fix setting scene fps with float input #4488 - -Setting the scene fps with a float input on integer values would return a float. - -This PR fixes the case of switching between integer fps values, for example 24 > 25. The issue was that when setting the scene fps, the original float value was used, which made it unpredictable whether the value is float or integer when mapping the fps values. - - -___ - -
- - -
-Maya: Multipart fix #4497 - -Fix multipart logic in render products. - -Each renderer has a different way of defining whether the output images are multipart, so we need to define it for each renderer. Also, the `multipart` class variable was previously defined multiple times in several places, which made it tricky to debug where `multipart` was defined. Now it's created on initialization and referenced as `self.multipart`. - - -___ - -
- - -
-Maya: Set pool on tile assembly - OP-2012 #4520 - -Set pool on tile assembly. - -Publishing and tiling jobs need to use the pool from the settings (`project_settings/deadline/publish/ProcessSubmittedJobOnFarm/deadline_pool`), else fall back on the primary pool (`project_settings/deadline/publish/CollectDeadlinePools/primary_pool`). - - -___ - -
- - -
-Maya: Extract review with handles #4527 - -Review was not extracting properly with/without handles. - -The review instance was not created properly, resulting in the frame range on the instance including handles. - - -___ - -
- - -
-Maya: Fix broken lib. #4529 - -Fix broken lib. - -This commit from this PR broke the Maya lib module. - - -___ - -
- - -
-Maya: Validate model name - OP-4983 #4539 - -Validate model name issues. - -A couple of issues with Validate Model Name: -- missing platform extraction from settings -- map function should be a list comprehension -- code cosmetics - - -___ - -
- - -
-Maya: SkeletalMesh family loadable as reference #4573 - -In Maya, fix the SkeletalMesh family not being loadable as a reference. - - -___ - -
- - -
-Unreal: fix loaders because of missing AssetContainer #4536 - -Fixing Unreal loaders, where changes in the OpenPype Unreal integration plugin deleted AssetContainer. - -`AssetContainer` and `AssetContainerFactory` are still used to mark loaded instances. Because of optimizations in the integration plugin we accidentally removed them, which broke the loader. - - -___ - -
- - -
-3dsmax unable to delete loaded asset in the scene inventory #4507 - -Fix the bug of being unable to delete a loaded asset in the Scene Inventory. - - -___ - -
- - -
-Hiero/Nuke: originalBasename editorial publishing and loading #4453 - -Publishing and loading `originalBasename` is working as expected. - -The frame range on the version document is now correctly defined to fit the original media frame range which is published. This means loading now correctly identifies frame start and end in the clip loader in Nuke. - - -___ - -
- - -
-Nuke: Fix workfile template placeholder creation #4512 - -Template placeholder creation was erroring out in Nuke because the workfile template builder was not able to find any of the plugins for the Nuke host. - -Moved the `get_workfile_build_placeholder_plugins` function to the NukeHost class, as the workfile template builder expects. - - -___ - -
- - -
-Nuke: creator farm attributes from deadline submit plugin settings #4519 - -Defaults in farm attributes are sourced from settings. - -Settings for the Deadline Nuke submitter are now used in the Nuke render and prerender creator plugins. - - -___ - -
- - -
-Nuke: fix clip sequence loading #4574 - -Nuke now correctly loads a clip from an image sequence created without the "{originalBasename}" token in the anatomy template. - - -___ - -
- - -
-Fusion: Fix files collection and small bug-fixes #4423 - -Fixed Fusion review-representation generation and small bug-fixes. - -This fixes the problem with review-file generation that stopped the publishing on the second publish before the fix. The problem was that Fusion simply looked at all the files in the render folder instead of only gathering the needed frames for the review. Also includes a fix to get the handle start/end, which previously threw an error if the data didn't exist (like from a Kitsu sync). - - -___ - -
- - -
-Fusion: Updated render_local.py to not only process the first instance #4522 - -Moved the `__hasRun` check to `render_once()` so the check only happens with the rendering. Previously only the first render node got the representations added. Critical PR. - - -___ - -
- - -
-Fusion: Load sequence fix filepath resolving from representation #4580 - -Resolves an issue mentioned on Discord by @movalex: the loader was incorrectly trying to find the file in the publish folder, which resulted in just picking 'any first file'. - -This gets the filepath from the representation instead of taking the first file from listing the files in the publish folder. - - -___ - -
- - -
-Fusion: Fix review burnin start and end frame #4590 - -Fix the burnin start and end frame for reviews. Without this, the asset document's start and end handles would have been added to the _burnin_ frame range, even though that would have been incorrect since the handles are based on the comp saver's render range instead. - - -___ - -
- - -
-Harmony: missing set of frame range when opening scene #4485 - -The frame range gets set from the DB every time a scene is opened. - -Also added a check for loaded containers that are not up to date. - - -___ - -
- - -
-Photoshop: context is not changed in publisher #4570 - -When PS is already open and an artist launches a new task, only the already opened PS should stay open, but with a changed context. - -The problem was occurring in the Workfile app, where files from the old task were shown under the new task. This fixes that and adds opening of the last workfile for the new context if a workfile exists. - - -___ - -
- - -
-hiero: fix effect item node class #4543 - -The collected effect name after renaming now saves the correct class name. - - -___ - -
- - -
-Bugfix/OP-4616 vray multipart #4297 - -This fixes a bug where multipart vray renders would not make a review in Ftrack. - - -___ - -
- - -
-Maya: Fix changed location of reset_frame_range #4491 - -The location in commands caused a cyclic import. - - -___ - -
- - -
-global: source template fixed frame duplication #4503 - -Duplication is no longer happening. - -The template uses `originalBasename`, which already assumes all necessary elements are part of the file name, so there was no need for additional optional name elements. - - -___ - -
- - -
-Deadline: Hint to use Python 3 #4518 - -Added a shebang to give Deadline a hint which Python should be used. - -Deadline has issues with Python 2 (especially with `os.scandir`). When a shebang is added to the file header, Deadline will use Python 3 mode instead of Python 2, which fixes the issue.
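-
-For illustration, the hint is just an interpreter line at the very top of the plugin file (the exact interpreter path is illustrative):
-```python
-#!/usr/bin/env python3
-# Deadline reads this shebang as a hint to execute the script in Python 3 mode.
-```
-
-
-___
-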
- - -
-Publisher: Prevent access to create tab after publish start #4528 - -Prevent access to the create tab after publish start. - -The create button in the instance view is disabled on publish start and enabled again on reset. On top of that, it is made impossible to go to the create tab while the tab is disabled. - - -___ - -
- - -
-Color Transcoding: store target_colorspace as new colorspace #4544 - -When transcoding into a new colorspace, the representation must carry this information instead of the original color space. - - -___ - -
- - -
-Deadline: fix submit_publish_job #4552 - -Fix submit_publish_job - -Resolves #4541 - - -___ - -
- - -
-Kitsu: Fix task iteration in update-op-with-zou #4577 - -In the last PR (https://github.com/ynput/OpenPype/pull/4425) a last-second comment commit messed up the code, resulting in two lines being the same and crashing the script. This PR fixes that. -___ - -
- - -
-AttrDefs: Fix type for PySide6 #4584 - -Use the right type in the signal emit for value changes of attribute definitions. - -Changed the `UUID` type to `str`. This is not an issue with PySide2, but it is with PySide6. - - -___ - -
- -### **๐Ÿ”€ Refactored code** - - -
-Scene Inventory: Avoid using ObjectId #4524 - -Avoid using conversion to the ObjectId type in the scene inventory tool. - -Preparation for AYON compatibility, where ObjectId won't be used for ids. Representation ids from loaded containers are not converted to ObjectId but kept as strings, which also required some changes when working with representation documents. - - -___ - -
- -### **Merged pull requests** - - -
-SiteSync: host dirmap is not working properly #4563 - -If an artist uses SiteSync with a real remote drive (gdrive, dropbox, sftp), Local Settings were throwing the error `string indices must be integers`. - -The logic was reworked to provide only `local_drive` values to be overridden by Local Settings. If the remote site is `gdrive` etc., a mapping to `studio` is provided, as it is expected that workfiles will have been imported from the `studio` location and not from a `gdrive` folder. Also, the Nuke dirmap was reworked to be less verbose and much faster. - - -___ - -
- - -
-General: Input representation ids are not ObjectIds #4576 - -Don't use `ObjectId` as representation ids during publishing. - -Representation ids are kept as strings during publishing instead of converting them to `ObjectId`. This change is a prerequisite for the AYON connection. Inputs are used for integration of links and for farm publishing (or at least it looks like it). - - -___ - -
- - -
-Shotgrid: Fixes on Deadline submissions #4498 - -A few other bug fixes for getting Nuke submissions to Deadline to work smoothly using the Shotgrid integration. - -Continuing the work done in this other PR, this fixes a few other bugs I came across with further tests. - - -___ - -
- - -
-Fusion: New Publisher #3892 - -This converts the old publishing system to the new one. It implements Fusion as a new host addon. - - -- Create button removed in OpenPype menu in favor of the new Publisher -- Draft refactor validations to raise `PublishValidationError` -- Implement Creator for New Publisher -- Implement Fusion as Host addon - - -___ - -
- - -
-Make Kitsu work with Tray Publisher, added kitsureview tag, fixed sync-problems. #4425 - -Make Kitsu work with Tray Publisher, add a kitsureview tag, fix sync problems. - -This PR updates the way the module gathers info for the current publish, so it now works with Tray Publisher. It fixes the data that gets synced from Kitsu to OP so all needed data gets registered even if it doesn't exist on Kitsu's side. It also adds the tag "Add review to Kitsu" and adds it to Burn In, so previews get generated to Kitsu by default. - - -___ - -
- - -
-Maya: V-Ray Set Image Format from settings #4566 - -Resolves #4565 - -Set V-Ray Image Format using settings. - - -___ - -
- - - - -## [3.15.1](https://github.com/ynput/OpenPype/tree/3.15.1) - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.0...3.15.1) - -### **๐Ÿ†• New features** - - - - -
-Maya: Xgen (3d / maya ) - #4256 - -___ - -#### Brief description - -Initial Xgen implementation. - -#### Description - -Client request for an Xgen pipeline. - - -___ - -
- - - -
-Data exchange cameras for 3d Studio Max (3d / 3dsmax ) - #4376 - -___ - -#### Brief description - -Add the Camera family to 3ds Max. - -#### Description - -Adds camera extractors (extract abc camera and extract fbx camera) and validators (for camera contents) to 3ds Max. Also adds the extractor for exporting the 3ds Max raw scene (which is also related to the Max Scene family) for the camera family. - - -___ - -
- - -### **๐Ÿš€ Enhancements** - - - - -
-Adding path validator for non-maya nodes (3d / maya ) - #4271 - -___ - -#### Brief description - -Adding a path validator for filepaths from non-maya nodes, which are created by plugins such as Renderman, Yeti and abcImport. - -#### Description - -As the File Path Editor cannot catch wrong filepaths from non-maya nodes such as AlembicNodes, it is necessary to have a new validator to ensure the existence of the filepaths from those nodes. - - -___ - -
- - - -
-Deadline: Allow disabling strict error check in Maya submissions (3d / maya / deadline ) - #4420 - -___ - -#### Brief description - -DL by default has strict error checking, but some errors are not fatal. - -#### Description - -This allows setting a profile based on Task and Subset values to temporarily disable Strict Error Checks. Subset and task names should support regular expressions (not wildcard notation though). - - -___ - -
- - - -
-Houdini: New publisher code tweak (3d / houdini ) - #4374 - -___ - -#### Brief description - -This is cosmetics only - the previous code to me felt quite unreadable due to the lengthy strings being used. - - - -#### Description - -Code should do roughly the same, but just be reformatted. - - - - -___ - -
- - - -
-3dsmax: enhance alembic loader update function (3d / 3dsmax ) - #4387 - -___ - -## Enhancement - -This PR adds update/switch ability to the pointcache/alembic loader in 3dsmax and fixes the wrong tool being shown when clicking the "Manage" item in the OpenPype menu; it is now correctly the Scene Inventory (it was the Subset Manager). - -Alembic update still has one caveat: it doesn't cope with a changed number of objects inside the alembic, since loading an alembic in Max involves creating all those objects as first-class nodes. So it will keep the objects in the scene and just update the path to the alembic file on them. -___ - -
- - - -
-Global: supporting `OPENPYPE_TMPDIR` in staging dir maker (editorial / hiero ) - #4398 - -___ - -#### Brief description - -Productions can use OPENPYPE_TMPDIR for the staging temp publishing directory. - -#### Description - -Studios were asking to be able to configure their own shared storages as temporary staging directories. Template formatting is also supported, with optional keys formatting and the following anatomy keys: `root[work]`, `project[name]`, `project[code]`. - - -___ - -
- - - -
-General: Functions for current context (other ) - #4324 - -___ - -#### Brief description - -Defined more functions to receive current context information and added the methods to the host integration so the host can affect the result. - -#### Description - -This is one of the steps to reduce usage of `legacy_io.Session`. This change defines how to receive current context information -> call functions instead of accessing `legacy_io.Session` or `os.environ` directly. Plus, direct access to the session or environments is unfortunately not enough for some DCCs where multiple workfiles can be opened at one time, which can heavily affect the context, but the host integration sometimes can't affect that at all. `HostBase` already had `get_current_context` implemented; that was enhanced by adding the more specific methods `get_current_project_name`, `get_current_asset_name` and `get_current_task_name`. The same functions were added to `~/openpype/pipeline/context_tools.py`. The functions in context tools call the host integration methods (if available), otherwise they use environment variables as the default implementation does. Also added `get_current_host_name` to receive the host name from the registered host if available, or from an environment variable.
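-
-A short sketch of the intended call pattern (function names are from the description above):
-```python
-from openpype.pipeline.context_tools import (
-    get_current_project_name,
-    get_current_asset_name,
-    get_current_task_name,
-)
-
-# Each call asks the registered host integration first and falls back to
-# environment variables when the host does not provide the information.
-project_name = get_current_project_name()
-asset_name = get_current_asset_name()
-task_name = get_current_task_name()
-```
-
-
-___
-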
- - - -
-Houdini: Do not visualize the hidden OpenPypeContext node (other / houdini ) - #4382 - -___ - -#### Brief description - -Using the new publisher UI would generate a visible 'null' locator at the origin. It's confusing to the user since it's supposed to be 'hidden'. - -#### Description - -Before this PR the user would see a locator/null at the origin, which was the 'hidden' `/obj/OpenPypeContext` node. This null would suddenly appear if the user had ever opened the Publisher UI once. After this PR it will not show. Nice and tidy. - - -___ - -
- - - -
-Maya + Blender: Pyblish plugins removed unused `version` and `category` attributes (other ) - #4402 - -___ - -#### Brief description - -Once upon a time in a land far far away there lived a few plug-ins who felt like they didn't belong in generic boxes and felt they needed to be versioned well above others. They tried, but with no success. - -#### Description - -Even though they now lived in a universe with elaborate `version` and `category` attributes embedded into their tiny little plug-in DNA, this particular deviation has gone greatly unused. There is nothing special about the version, nothing special about the category. It does nothing. - - -___ - -
- - - -
-General: Fix original basename frame issues (other ) - #4452 - -___ - -#### Brief description - -Treat `{originalBasename}` in a different way than standard file processing. In case the template uses `{originalBasename}`, the transfers will use the files as they are, without any changes or handling of frames. - -#### Description - -Frame handling is problematic with original basename because its padding can't be defined to match the padding in source filenames. It also limits the usage of the functionality to "must have frame at end of filename". This is a proposal for how that could be solved by simply ignoring frame handling and using filenames as they are on the representation. The first frame is still stored in the representation context but is not used in the formatting part. This way we don't have to care about frame padding at all. - - -___ - -
- - - -
-Publisher: Report also crashed creators and convertors (other ) - #4473 - -___ - -#### Brief description - -Added crashes of creator and convertor discovery to the report (lazy solution). - -#### Description - -The report in Publisher now also contains information about crashed files during creator plugin discovery and convertor plugin discovery. They're not separated into categories and there is no other information in the report about them, but this helps a lot during development. This change does not need to change the format/schema of the report nor the UI logic. - - -___ - -
- - -### **๐Ÿ› Bug fixes** - - - - -
-Maya: Fix Validate Attributes plugin (3d / maya ) - #4401 - -___ - -#### Brief description - -Code was broken, so either the plug-in was unused or it had gone unnoticed. - -#### Description - -Looking at the commit history of the plug-in itself, it seems this might have been broken for somewhere between two and three years. I think it has been broken for two years, since this commit. Should this plug-in be removed completely? @tokejepsen Is there still a use case where we should have this plug-in? (You created the original one.) - - -___ - -
- - - -
-Maya: Ignore workfile lock in Untitled scene (3d / maya ) - #4414 - -___ - -#### Brief description - -Skip workfile lock check if current scene is 'Untitled'. - - - - -___ - -
- - - -
-Maya: fps rounding - OP-2549 (3d / maya ) - #4424 - -___ - -#### Brief description - -When FPS is registered in, for example, Ftrack and rounded either down or up (floor/ceil), comparing to Maya FPS can fail. Example: 23.97 (Ftrack/Mongo) != 23.976023976023978 (Maya). - -#### Description - -Since Maya only has a select number of supported framerates, I've taken the approach of converting any fps to a supported framerate in Maya. We validate the input fps to make sure it is supported in Maya in two ways. Whole numbers are validated straight against the supported framerates in Maya. For decimal numbers, we find the closest supported framerate in Maya; if the difference to the closest supported framerate is more than 0.5, we throw an error. If Maya ever supports arbitrary framerates, then we might have a problem, but I'm not holding my breath...
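-
-A minimal sketch of the matching described above (the supported-rate list is illustrative, not Maya's full set):
-```python
-MAYA_SUPPORTED_FPS = [
-    23.976023976023978, 24.0, 25.0, 29.97002997002997, 30.0, 48.0, 50.0, 60.0,
-]
-
-
-def convert_to_maya_fps(fps, tolerance=0.5):
-    if float(fps).is_integer():
-        # Whole numbers must match a supported framerate exactly.
-        if float(fps) in MAYA_SUPPORTED_FPS:
-            return float(fps)
-        raise ValueError("Unsupported whole-number fps: {}".format(fps))
-    # Decimal numbers snap to the closest supported framerate.
-    closest = min(MAYA_SUPPORTED_FPS, key=lambda rate: abs(rate - fps))
-    if abs(closest - fps) > tolerance:
-        raise ValueError("No supported framerate close enough to {}".format(fps))
-    return closest
-```
-
-
-___
-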
- - - -
-Strict Error Checking Default (3d / maya ) - #4457 - -___ - -#### Brief description - -Provide a default for strict error checking for instances created prior to this PR. - - -___ - -
- - - -
-Create: Enhance instance & context changes (3d / houdini,after effects,3dsmax ) - #4375 - -___ - -#### Brief description - -Changes of instances and context have complex, hard to get structure. The structure did not change but instead of complex dictionaries are used objected data. - - - -#### Description - -This is poposal of changes data improvement for creators. Implemented `TrackChangesItem` which handles the changes for us. The item is creating changes based on old and new value and can provide information about changed keys or access to full old or new value. Can give the values on any "sub-dictionary".Used this new approach to fix change in houdini and 3ds max and also modified one aftereffects plugin using changes. - - - - -___ - -
- - - -
-Houdini: hotfix condition (3d / houdini ) - #4391 - -___ - -## Hotfix - -This is fixing a bug introduced in #4374. -___ - -
- - - -
-Houdini: Houdini shelf tools fixes (3d / houdini ) - #4428 - -___ - -#### Brief description - -Fix Houdini shelf tools. - -#### Description - -Use `label` as a mandatory key instead of `name`. Changed how shelves are created. If the script is empty, it is gracefully skipped instead of crashing. - - -___ - -
- - - -
-3dsmax: startup fixes (3d / 3dsmax ) - #4412 - -___ - -#### Brief description - -This fixes various issues that can occur in some 3ds Max versions. - -#### Description - -On displays with 4K+ resolution the UI was broken, and some 3ds Max versions couldn't process `PYTHONPATH` correctly. This PR forces `sys.path` and disables `QT_AUTO_SCREEN_SCALE_FACTOR`. - - -___ - -
- - - -
-Fix features for gizmo menu (2d / nuke ) - #4280 - -___ - -#### Brief description - -Fix features for the Gizmo Menu project settings (shortcut for python type of usage and file type of usage functionality) - - - - -___ - -
- - - -
-Photoshop: fix missing legacy io for legacy instances (2d / photoshop,after effects ) - #4467 - -___ - -#### Brief description - -The `legacy_io` import was removed, but its usage stayed. - -#### Description - -Usage of `legacy_io` should be eradicated; in creators it should be replaced by `self.create_context.get_current_project_name/asset_name/task_name`.
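-
-For illustration, a sketch of the replacement pattern inside a creator (the method names are taken from the description above; `create_context` stands for the creator's `self.create_context`):
-```python
-def current_context_names(create_context):
-    # Replacement for legacy_io.Session lookups inside creators.
-    return (
-        create_context.get_current_project_name(),
-        create_context.get_current_asset_name(),
-        create_context.get_current_task_name(),
-    )
-```
-
-
-___
-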
- - - -
-Fix - addSite loader handles hero version (other / sitesync ) - #4359 - -___ - -#### Brief description - -If adding a site to a representation, the presence of a hero version is checked; if found, the hero version is marked to be downloaded too. Replacing https://github.com/ynput/OpenPype/pull/4191. - - -___ - -
- - - -
-Remove OIIO build for macos (other ) - #4381 - -___ - -## Fix - -Since we are not able to provide OpenImageIO tools binaries for macOS, we should remove the item from the `pyproject.toml`. This PR takes care of it. - -It also changes the way the `fetch_thirdparty_libs` script works so that it doesn't crash when a lib cannot be processed; it only issues a warning. - -Resolves #3858 -___ - -
- - - -
-General: Attribute definitions fixes (other ) - #4392 - -___ - -#### Brief description - -Fix possible issues with attribute definitions in the publisher if there is an unknown attribute on an instance. - -#### Description - -The source of the issue is that attribute definitions from a creator plugin could be "expanded" during `CreatedInstance` initialization, which would affect all other instances using the same list of attributes - literally the same list object. If the same list object is used in a "BaseClass" for other creators, it would affect all instances (because of 1 instance). Other changes had to be implemented to fix the issue and keep the behavior the same. An object of `CreatedInstance` can be created without a reference to a creator object. `CreatedInstance` is responsible for giving the UI attribute definitions (technically it is prepared for cases when each instance may have different attribute definitions - not yet). Attribute definitions gained more conditions in the `__eq__` method and an implemented `__ne__` method (which is required for Python 2 compatibility). Renamed `AbtractAttrDef` to `AbstractAttrDef` (fixed typo).
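-
-A hedged sketch of the Python 2 compatible inequality mentioned above (the `key` attribute is an assumption for illustration):
-```python
-class AbstractAttrDef(object):
-    def __init__(self, key):
-        self.key = key  # hypothetical identifying attribute
-
-    def __eq__(self, other):
-        return type(self) is type(other) and self.key == other.key
-
-    def __ne__(self, other):
-        # Python 2 does not derive __ne__ from __eq__, so it must be explicit.
-        return not self.__eq__(other)
-```
-
-
-___
-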
- - - -
-Ftrack: Don't force ftrackapp endpoint (other / ftrack ) - #4411 - -___ - -#### Brief description - -Auto-fill of the Ftrack URL no longer breaks custom URLs. Custom URLs couldn't be used, as `ftrackapp.com` was appended if not present in the URL. - -#### Description - -The code was changed so that auto-fill is still supported, but before `ftrackapp` is added, it tries to use the URL as is. If the connection works as is, it is used. - - -___ - -
- - - -
-Fix: DL on MacOS (other ) - #4418 - -___ - -#### Brief description - -This works if the DL OpenPype plugin Installation Directories setting is set to the level of the app bundle (e.g. '/Applications/OpenPype 3.15.0.app'). - - -___ - -
- - - -
-Photoshop: make usage of layer name in subset name more controllable (other ) - #4432 - -___ - -#### Brief description - -The layer name was previously used in the subset name only if multiple instances were being created in a single step. This adds an explicit toggle. - -#### Description - -Toggling this button allows using the layer name in the created subset name even if a single instance is being created. This more closely follows the implementation in AE. - - -___ - -
- - - -
-SiteSync: fix dirmap (other ) - #4436 - -___ - -#### Brief description - -Fixed an issue with dirmap in Maya and Nuke. - -#### Description - -Loads of errors were thrown in the Nuke console about a dictionary value: `AttributeError: 'dict' object has no attribute 'lower'` - - -___ - -
- - - -
-General: Ignore decode error of stdout/stderr in run_subprocess (other ) - #4446 - -___ - -#### Brief description - -Ignore decode errors and replace the invalid character (byte) with an escaped byte character. - -#### Description - -Calling `run_subprocess` may cause crashes if the output contains a unicode character which can't be decoded (for example the Polish name of an encoder handler).
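-
-A minimal sketch of the tolerant decode described above; the "backslashreplace" error handler escapes undecodable bytes instead of raising:
-```python
-raw_output = b"frame 1 ok \xb3"  # e.g. a byte from a legacy encoding
-text = raw_output.decode("utf-8", errors="backslashreplace")
-print(text)  # frame 1 ok \xb3
-```
-
-
-___
-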
- - - -
-Publisher: Fix reopen bug (other ) - #4463 - -___ - -#### Brief description - -Use the right name of the constant: 'ActiveWindow' -> 'WindowActive'. - - -___ - -
- - - -
-Publisher: Fix compatibility of QAction in Publisher (other ) - #4474 - -___ - -#### Brief description - -Fix `QAction` for older versions of Qt bindings where QAction requires a parent on initialization. - -#### Description - -This bug was discovered in Nuke 11. Fixed by creating the QAction when the QMenu is already available and can be used as parent. - - -___ - -
- - -### **๐Ÿ”€ Refactored code** - - - - -
-General: Remove 'openpype.api' (other ) - #4413 - -___ - -#### Brief description - -This PR removes the `openpype/api.py` file, which was causing a lot of trouble and cross-imports. - -#### Description - -I wanted to remove the file slowly, function by function, but it always reappeared somewhere in the codebase, even though most of the functionality imported from there triggers deprecation warnings. This is a small change which may have a huge impact. There shouldn't be anything in the OpenPype codebase which uses `openpype.api` anymore, so the only possible issues are in customized repositories or custom addons. - - -___ - -
- - -### **๐Ÿ“ƒ Documentation** - - - - -
-docs-user-Getting Started adjustments (other ) - #4365 - -___ - -#### Brief description - -Small typo fixes here and there, and additional info on installing/running OP. - - -___ - -
- - -### **Merged pull requests** - - - - -
-Renderman support for sample and display filters (3d / maya ) - #4003 - -___ - -#### Brief description - -Users can set up both sample and display filters in OpenPype settings if they are using Renderman as the renderer. - -#### Description - -You can preset which sample and display filters Renderman uses, including the cryptomatte renderpass, in OpenPype settings. Once you select which filters to include in OpenPype settings and then create a render instance for your camera in Maya, it automatically tells the system to generate your selected filters in render settings. The place where you can set up the filters: _Maya > Render Settings > Renderman Renderer > Display Filters / Sample Filters_ - - -___ - -
- - - -
-Maya: Create Arnold options on repair. (3d / maya ) - #4448 - -___ - -#### Brief description - -When validating/repairing we previously required users to open render settings to create the Arnold options. This is done through code now. - - - - -___ - -
- - - -
-Update Asset field of creator Instances in Maya Template Builder (3d / maya ) - #4470 - -___ - -#### Brief description - -When we build a template with Maya Template Builder, it will update the asset field of the sets (creator instances) that are imported from the template. - - - -#### Description - -When building a template, we also want to define the publishable content in advance: create an instance of a model, or look, etc., to speed up the workflow and reduce the number of questions we are asked. After building a work file from a saved template that contains pre-created instances, the template builder should update the asset field to the current asset. - - - - -___ - -
- - - -
-Blender: fix import workfile all families (3d / blender ) - #4405 - -___ - -#### Brief description - -Having this feature related to workfile available for any family is absurd. - - - - -___ - -
- - - -
-Nuke: update rendered frames in latest version (2d / nuke ) - #4362 - -___ - -#### Brief description - -Introduced a new field to insert frame(s) to re-render only. - -#### Description - -Rendering is expensive; sometimes it is helpful to re-render only changed frames and reuse the existing ones. Artists can fill in the Publisher which frame(s) should be re-rendered. If there is already a published version of the currently publishing subset, all representation files are collected (currently for the `render` family only), and then when Nuke is rendering (locally only for now), the old published files are copied into a temporary render folder where only the frames explicitly set in the new field are rewritten. That way the review/burnin process can also reuse old files and recreate reviews/burnins. A new version is produced during this process! - - -___ - -
- - - -
-Feature: Keep synced hero representations up-to-date. (other ) - #4343 - -___ - -#### Brief description - -Keep previously synchronized sites up-to-date by comparing old and new sites and adding old sites if missing in the new ones. Fix #4331. - - -___ - -
- - - -
-Maya: Fix template builder bug where assets are not put in the right hierarchy (other ) - #4367 - -___ - -#### Brief description - -When building a scene from a template, the assets loaded from the placeholders are not put in the hierarchy. Plus, the assets are loaded twice. - - -___ - -
- - - -
-Bump ua-parser-js from 0.7.31 to 0.7.33 in /website (other ) - #4371 - -___ - -Bumps [ua-parser-js](https://github.com/faisalman/ua-parser-js) from 0.7.31 to 0.7.33. -
-Changelog
-
-Sourced from ua-parser-js's changelog.
-
-Version 0.7.31 / 1.0.2
-- Fix OPPO Reno A5 incorrect detection
-- Fix TypeError Bug
-- Use AST to extract regexes and verify them with safe-regex
-
-Version 0.7.32 / 1.0.32
-- Add new browser : DuckDuckGo, Huawei Browser, LinkedIn
-- Add new OS : HarmonyOS
-- Add some Huawei models
-- Add Sharp Aquos TV
-- Improve detection Xiaomi Mi CC9
-- Fix Sony Xperia 1 III misidentified as Acer tablet
-- Fix Detect Sony BRAVIA as SmartTV
-- Fix Detect Xiaomi Mi TV as SmartTV
-- Fix Detect Galaxy Tab S8 as tablet
-- Fix WeGame mistakenly identified as WeChat
-- Fix included commas in Safari / Mobile Safari version
-- Increase UA_MAX_LENGTH to 350
-
-Version 0.7.33 / 1.0.33
-- Add new browser : Cobalt
-- Identify Macintosh as an Apple device
-- Fix ReDoS vulnerability
-
-Version 0.8
-
-Version 0.8 was created by accident. This version is now deprecated and no longer maintained, please update to version 0.7 / 1.0.
-
-Commits
-
- - -[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ua-parser-js&package-manager=npm_and_yarn&previous-version=0.7.31&new-version=0.7.33)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) - -Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. - -[//]: # (dependabot-automerge-start) -[//]: # (dependabot-automerge-end) - ---- - -
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts).
___
Docs: Question about renaming in Kitsu (other) - #4384

___

#### Brief description

To keep a record of this discussion: https://discord.com/channels/517362899170230292/563751989075378201/1068112668491255818

___
New Publisher: Fix Creator error typo (other) - #4396

___

#### Brief description

Fixes a typo in an error message.

___
Chore: pyproject.toml version because of Poetry (other) - #4408

___

#### Brief description

The automation injects a wrongly formatted version string.

___
Fix - remove minor part in toml (other) - #4437

___

#### Brief description

The minor version part in the toml caused issues in create_env and the new Poetry.

___
General: Add project code to anatomy (other) - #4445

___

#### Brief description

Added attribute `project_code` to the `Anatomy` object.

#### Description

`Anatomy` already has access to almost all attributes from the project anatomy except the project code. This PR changes that. Technically, `Anatomy` then carries everything needed to get the fill data of a project.

```
{
    "project": {
        "name": anatomy.project_name,
        "code": anatomy.project_code
    }
}
```
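For illustration, such fill data can resolve a simple template string - a runnable sketch with a stand-in for the `Anatomy` object and made-up values:

```python
from types import SimpleNamespace

# Stand-in for an Anatomy object (hypothetical values).
anatomy = SimpleNamespace(project_name="my_project", project_code="myp")

fill_data = {
    "project": {
        "name": anatomy.project_name,
        "code": anatomy.project_code,
    },
}

# str.format can index the nested dict directly.
print("{project[code]}_{project[name]}".format(**fill_data))
# -> "myp_my_project"
```

___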
Maya: Arnold Scene Source overhaul - OP-4865 (other / maya) - #4449

___

#### Brief description

General overhaul of the Arnold Scene Source (ASS) workflow.

#### Description

This originally set out to support publishing static (non-sequential) ASS files, but digging deeper, the whole workflow needed an update to be ready for further issues. During this overhaul the following changes were made:

- Generalized the Arnold Standin workflow to a single loader.
- Support multiple nodes as proxies.
- Support proxies for the `pointcache` family.
- Generalized the approach to proxies as resources, so they can be the same file format as the original.

This workflow should allow further expansion to utilize operators and eventually USD.

___
- - - - -## [3.15.0](https://github.com/ynput/OpenPype/tree/3.15.0) - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...3.15.0) - -**Deprecated:** - -- General: Fill default values of new publish template profiles [\#4245](https://github.com/ynput/OpenPype/pull/4245) - -### ๐Ÿ“– Documentation - -- documentation: Split tools into separate entries [\#4342](https://github.com/ynput/OpenPype/pull/4342) -- Documentation: Fix harmony docs [\#4301](https://github.com/ynput/OpenPype/pull/4301) -- Remove staging logic set by OpenPype version [\#3979](https://github.com/ynput/OpenPype/pull/3979) - -**๐Ÿ†• New features** - -- General: Push to studio library [\#4284](https://github.com/ynput/OpenPype/pull/4284) -- Colorspace Management and Distribution [\#4195](https://github.com/ynput/OpenPype/pull/4195) -- Nuke: refactor to latest publisher workfow [\#4006](https://github.com/ynput/OpenPype/pull/4006) -- Update to Python 3.9 [\#3546](https://github.com/ynput/OpenPype/pull/3546) - -**๐Ÿš€ Enhancements** - -- Unreal: Don't use mongo queries in 'ExistingLayoutLoader' [\#4356](https://github.com/ynput/OpenPype/pull/4356) -- General: Loader and Creator plugins can be disabled [\#4310](https://github.com/ynput/OpenPype/pull/4310) -- General: Unbind poetry version [\#4306](https://github.com/ynput/OpenPype/pull/4306) -- General: Enhanced enum def items [\#4295](https://github.com/ynput/OpenPype/pull/4295) -- Git: add pre-commit hooks [\#4289](https://github.com/ynput/OpenPype/pull/4289) -- Tray Publisher: Improve Online family functionality [\#4263](https://github.com/ynput/OpenPype/pull/4263) -- General: Update MacOs to PySide6 [\#4255](https://github.com/ynput/OpenPype/pull/4255) -- Build: update to Gazu in toml [\#4208](https://github.com/ynput/OpenPype/pull/4208) -- Global: adding imageio to settings [\#4158](https://github.com/ynput/OpenPype/pull/4158) -- Blender: added project settings for validator no colons in name [\#4149](https://github.com/ynput/OpenPype/pull/4149) -- Dockerfile for Debian Bullseye [\#4108](https://github.com/ynput/OpenPype/pull/4108) -- AfterEffects: publish multiple compositions [\#4092](https://github.com/ynput/OpenPype/pull/4092) -- AfterEffects: make new publisher default [\#4056](https://github.com/ynput/OpenPype/pull/4056) -- Photoshop: make new publisher default [\#4051](https://github.com/ynput/OpenPype/pull/4051) -- Feature/multiverse [\#4046](https://github.com/ynput/OpenPype/pull/4046) -- Tests: add support for deadline for automatic tests [\#3989](https://github.com/ynput/OpenPype/pull/3989) -- Add version to shortcut name [\#3906](https://github.com/ynput/OpenPype/pull/3906) -- TrayPublisher: Removed from experimental tools [\#3667](https://github.com/ynput/OpenPype/pull/3667) - -**๐Ÿ› Bug fixes** - -- change 3.7 to 3.9 in folder name [\#4354](https://github.com/ynput/OpenPype/pull/4354) -- PushToProject: Fix hierarchy of project change [\#4350](https://github.com/ynput/OpenPype/pull/4350) -- Fix photoshop workfile save-as [\#4347](https://github.com/ynput/OpenPype/pull/4347) -- Nuke Input process node sourcing improvements [\#4341](https://github.com/ynput/OpenPype/pull/4341) -- New publisher: Some validation plugin tweaks [\#4339](https://github.com/ynput/OpenPype/pull/4339) -- Harmony: fix unable to change workfile on Mac [\#4334](https://github.com/ynput/OpenPype/pull/4334) -- Global: fixing in-place source publishing for editorial [\#4333](https://github.com/ynput/OpenPype/pull/4333) -- General: Use class constants of QMessageBox 
[\#4332](https://github.com/ynput/OpenPype/pull/4332) -- TVPaint: Fix plugin for TVPaint 11.7 [\#4328](https://github.com/ynput/OpenPype/pull/4328) -- Exctract OTIO review has improved quality [\#4325](https://github.com/ynput/OpenPype/pull/4325) -- Ftrack: fix typos causing bugs in sync [\#4322](https://github.com/ynput/OpenPype/pull/4322) -- General: Python 2 compatibility of instance collector [\#4320](https://github.com/ynput/OpenPype/pull/4320) -- Slack: user groups speedup [\#4318](https://github.com/ynput/OpenPype/pull/4318) -- Maya: Bug - Multiverse extractor executed on plain animation family [\#4315](https://github.com/ynput/OpenPype/pull/4315) -- Fix run\_documentation.ps1 [\#4312](https://github.com/ynput/OpenPype/pull/4312) -- Nuke: new creators fixes [\#4308](https://github.com/ynput/OpenPype/pull/4308) -- General: missing comment on standalone and tray publisher [\#4303](https://github.com/ynput/OpenPype/pull/4303) -- AfterEffects: Fix for audio from mp4 layer [\#4296](https://github.com/ynput/OpenPype/pull/4296) -- General: Update gazu in poetry lock [\#4247](https://github.com/ynput/OpenPype/pull/4247) -- Bug: Fixing version detection and filtering in Igniter [\#3914](https://github.com/ynput/OpenPype/pull/3914) -- Bug: Create missing version dir [\#3903](https://github.com/ynput/OpenPype/pull/3903) - -**๐Ÿ”€ Refactored code** - -- Remove redundant export\_alembic method. [\#4293](https://github.com/ynput/OpenPype/pull/4293) -- Igniter: Use qtpy modules instead of Qt [\#4237](https://github.com/ynput/OpenPype/pull/4237) - -**Merged pull requests:** - -- Sort families by alphabetical order in the Create plugin [\#4346](https://github.com/ynput/OpenPype/pull/4346) -- Global: Validate unique subsets [\#4336](https://github.com/ynput/OpenPype/pull/4336) -- Maya: Collect instances preserve handles even if frameStart + frameEnd matches context [\#3437](https://github.com/ynput/OpenPype/pull/3437) - -## [3.14.10](https://github.com/ynput/OpenPype/tree/HEAD) - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.9...3.14.10) - -**๐Ÿ†• New features** - -- Global | Nuke: Creator placeholders in workfile template builder [\#4266](https://github.com/ynput/OpenPype/pull/4266) -- Slack: Added dynamic message [\#4265](https://github.com/ynput/OpenPype/pull/4265) -- Blender: Workfile Loader [\#4234](https://github.com/ynput/OpenPype/pull/4234) -- Unreal: Publishing and Loading for UAssets [\#4198](https://github.com/ynput/OpenPype/pull/4198) -- Publish: register publishes without copying them [\#4157](https://github.com/ynput/OpenPype/pull/4157) - -**๐Ÿš€ Enhancements** - -- General: Added install method with docstring to HostBase [\#4298](https://github.com/ynput/OpenPype/pull/4298) -- Traypublisher: simple editorial multiple edl [\#4248](https://github.com/ynput/OpenPype/pull/4248) -- General: Extend 'IPluginPaths' to have more available methods [\#4214](https://github.com/ynput/OpenPype/pull/4214) -- Refactorization of folder coloring [\#4211](https://github.com/ynput/OpenPype/pull/4211) -- Flame - loading multilayer with controlled layer names [\#4204](https://github.com/ynput/OpenPype/pull/4204) - -**๐Ÿ› Bug fixes** - -- Unreal: fix missing `maintained_selection` call [\#4300](https://github.com/ynput/OpenPype/pull/4300) -- Ftrack: Fix receive of host ip on MacOs [\#4288](https://github.com/ynput/OpenPype/pull/4288) -- SiteSync: sftp connection failing when shouldnt be tested [\#4278](https://github.com/ynput/OpenPype/pull/4278) -- Deadline: fix default value for passing 
mongo url [\#4275](https://github.com/ynput/OpenPype/pull/4275) -- Scene Manager: Fix variable name [\#4268](https://github.com/ynput/OpenPype/pull/4268) -- Slack: notification fails because of missing published path [\#4264](https://github.com/ynput/OpenPype/pull/4264) -- hiero: creator gui with min max [\#4257](https://github.com/ynput/OpenPype/pull/4257) -- NiceCheckbox: Fix checker positioning in Python 2 [\#4253](https://github.com/ynput/OpenPype/pull/4253) -- Publisher: Fix 'CreatorType' not equal for Python 2 DCCs [\#4249](https://github.com/ynput/OpenPype/pull/4249) -- Deadline: fix dependencies [\#4242](https://github.com/ynput/OpenPype/pull/4242) -- Houdini: hotfix instance data access [\#4236](https://github.com/ynput/OpenPype/pull/4236) -- bugfix/image plane load error [\#4222](https://github.com/ynput/OpenPype/pull/4222) -- Hiero: thumbnail from multilayer exr [\#4209](https://github.com/ynput/OpenPype/pull/4209) - -**๐Ÿ”€ Refactored code** - -- Resolve: Use qtpy in Resolve [\#4254](https://github.com/ynput/OpenPype/pull/4254) -- Houdini: Use qtpy in Houdini [\#4252](https://github.com/ynput/OpenPype/pull/4252) -- Max: Use qtpy in Max [\#4251](https://github.com/ynput/OpenPype/pull/4251) -- Maya: Use qtpy in Maya [\#4250](https://github.com/ynput/OpenPype/pull/4250) -- Hiero: Use qtpy in Hiero [\#4240](https://github.com/ynput/OpenPype/pull/4240) -- Nuke: Use qtpy in Nuke [\#4239](https://github.com/ynput/OpenPype/pull/4239) -- Flame: Use qtpy in flame [\#4238](https://github.com/ynput/OpenPype/pull/4238) -- General: Legacy io not used in global plugins [\#4134](https://github.com/ynput/OpenPype/pull/4134) - -**Merged pull requests:** - -- Bump json5 from 1.0.1 to 1.0.2 in /website [\#4292](https://github.com/ynput/OpenPype/pull/4292) -- Maya: Fix validate frame range repair + fix create render with deadline disabled [\#4279](https://github.com/ynput/OpenPype/pull/4279) - - -## [3.14.9](https://github.com/pypeclub/OpenPype/tree/3.14.9) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.8...3.14.9) - -### ๐Ÿ“– Documentation - -- Documentation: Testing on Deadline [\#4185](https://github.com/pypeclub/OpenPype/pull/4185) -- Consistent Python version [\#4160](https://github.com/pypeclub/OpenPype/pull/4160) - -**๐Ÿ†• New features** - -- Feature/op 4397 gl tf extractor for maya [\#4192](https://github.com/pypeclub/OpenPype/pull/4192) -- Maya: Extractor for Unreal SkeletalMesh [\#4174](https://github.com/pypeclub/OpenPype/pull/4174) -- 3dsmax: integration [\#4168](https://github.com/pypeclub/OpenPype/pull/4168) -- Blender: Extract Alembic Animations [\#4128](https://github.com/pypeclub/OpenPype/pull/4128) -- Unreal: Load Alembic Animations [\#4127](https://github.com/pypeclub/OpenPype/pull/4127) - -**๐Ÿš€ Enhancements** - -- Houdini: Use new interface class name for publish host [\#4220](https://github.com/pypeclub/OpenPype/pull/4220) -- General: Default command for headless mode is interactive [\#4203](https://github.com/pypeclub/OpenPype/pull/4203) -- Maya: Enhanced ASS publishing [\#4196](https://github.com/pypeclub/OpenPype/pull/4196) -- Feature/op 3924 implement ass extractor [\#4188](https://github.com/pypeclub/OpenPype/pull/4188) -- File transactions: Source path is destination path [\#4184](https://github.com/pypeclub/OpenPype/pull/4184) -- Deadline: improve environment processing [\#4182](https://github.com/pypeclub/OpenPype/pull/4182) -- General: Comment per instance in Publisher [\#4178](https://github.com/pypeclub/OpenPype/pull/4178) -- Ensure Mongo 
database directory exists in Windows. [\#4166](https://github.com/pypeclub/OpenPype/pull/4166) -- Note about unrestricted execution on Windows. [\#4161](https://github.com/pypeclub/OpenPype/pull/4161) -- Maya: Enable thumbnail transparency on extraction. [\#4147](https://github.com/pypeclub/OpenPype/pull/4147) -- Maya: Disable viewport Pan/Zoom on playblast extraction. [\#4146](https://github.com/pypeclub/OpenPype/pull/4146) -- Maya: Optional viewport refresh on pointcache extraction [\#4144](https://github.com/pypeclub/OpenPype/pull/4144) -- CelAction: refactory integration to current openpype [\#4140](https://github.com/pypeclub/OpenPype/pull/4140) -- Maya: create and publish bounding box geometry [\#4131](https://github.com/pypeclub/OpenPype/pull/4131) -- Changed the UOpenPypePublishInstance to use the UDataAsset class [\#4124](https://github.com/pypeclub/OpenPype/pull/4124) -- General: Collection Audio speed up [\#4110](https://github.com/pypeclub/OpenPype/pull/4110) -- Maya: keep existing AOVs when creating render instance [\#4087](https://github.com/pypeclub/OpenPype/pull/4087) -- General: Oiio conversion multipart fix [\#4060](https://github.com/pypeclub/OpenPype/pull/4060) - -**๐Ÿ› Bug fixes** - -- Publisher: Signal type issues in Python 2 DCCs [\#4230](https://github.com/pypeclub/OpenPype/pull/4230) -- Blender: Fix Layout Family Versioning [\#4228](https://github.com/pypeclub/OpenPype/pull/4228) -- Blender: Fix Create Camera "Use selection" [\#4226](https://github.com/pypeclub/OpenPype/pull/4226) -- TrayPublisher - join needs list [\#4224](https://github.com/pypeclub/OpenPype/pull/4224) -- General: Event callbacks pass event to callbacks as expected [\#4210](https://github.com/pypeclub/OpenPype/pull/4210) -- Build:Revert .toml update of Gazu [\#4207](https://github.com/pypeclub/OpenPype/pull/4207) -- Nuke: fixed imageio node overrides subset filter [\#4202](https://github.com/pypeclub/OpenPype/pull/4202) -- Maya: pointcache [\#4201](https://github.com/pypeclub/OpenPype/pull/4201) -- Unreal: Support for Unreal Engine 5.1 [\#4199](https://github.com/pypeclub/OpenPype/pull/4199) -- General: Integrate thumbnail looks for thumbnail to multiple places [\#4181](https://github.com/pypeclub/OpenPype/pull/4181) -- Various minor bugfixes [\#4172](https://github.com/pypeclub/OpenPype/pull/4172) -- Nuke/Hiero: Remove tkinter library paths before launch [\#4171](https://github.com/pypeclub/OpenPype/pull/4171) -- Flame: vertical alignment of layers [\#4169](https://github.com/pypeclub/OpenPype/pull/4169) -- Nuke: correct detection of viewer and display [\#4165](https://github.com/pypeclub/OpenPype/pull/4165) -- Settings UI: Don't create QApplication if already exists [\#4156](https://github.com/pypeclub/OpenPype/pull/4156) -- General: Extract review handle start offset of sequences [\#4152](https://github.com/pypeclub/OpenPype/pull/4152) -- Maya: Maintain time connections on Alembic update. 
[\#4143](https://github.com/pypeclub/OpenPype/pull/4143) - -**๐Ÿ”€ Refactored code** - -- General: Use qtpy in modules and hosts UIs which are running in OpenPype process [\#4225](https://github.com/pypeclub/OpenPype/pull/4225) -- Tools: Use qtpy instead of Qt in standalone tools [\#4223](https://github.com/pypeclub/OpenPype/pull/4223) -- General: Use qtpy in settings UI [\#4215](https://github.com/pypeclub/OpenPype/pull/4215) - -**Merged pull requests:** - -- layout publish more than one container issue [\#4098](https://github.com/pypeclub/OpenPype/pull/4098) - -## [3.14.8](https://github.com/pypeclub/OpenPype/tree/3.14.8) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.7...3.14.8) - -**๐Ÿš€ Enhancements** - -- General: Refactored extract hierarchy plugin [\#4139](https://github.com/pypeclub/OpenPype/pull/4139) -- General: Find executable enhancement [\#4137](https://github.com/pypeclub/OpenPype/pull/4137) -- Ftrack: Reset session before instance processing [\#4129](https://github.com/pypeclub/OpenPype/pull/4129) -- Ftrack: Editorial asset sync issue [\#4126](https://github.com/pypeclub/OpenPype/pull/4126) -- Deadline: Build version resolving [\#4115](https://github.com/pypeclub/OpenPype/pull/4115) -- Houdini: New Publisher [\#3046](https://github.com/pypeclub/OpenPype/pull/3046) -- Fix: Standalone Publish Directories [\#4148](https://github.com/pypeclub/OpenPype/pull/4148) - -**๐Ÿ› Bug fixes** - -- Ftrack: Fix occational double parents issue [\#4153](https://github.com/pypeclub/OpenPype/pull/4153) -- General: Maketx executable issue [\#4136](https://github.com/pypeclub/OpenPype/pull/4136) -- Maya: Looks - add all connections [\#4135](https://github.com/pypeclub/OpenPype/pull/4135) -- General: Fix variable check in collect anatomy instance data [\#4117](https://github.com/pypeclub/OpenPype/pull/4117) - -## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.6...3.14.7) - -**๐Ÿ†• New features** - -- Hiero: loading effect family to timeline [\#4055](https://github.com/pypeclub/OpenPype/pull/4055) - -**๐Ÿš€ Enhancements** - -- Photoshop: bug with pop-up window on Instance Creator [\#4121](https://github.com/pypeclub/OpenPype/pull/4121) -- Publisher: Open on specific tab [\#4120](https://github.com/pypeclub/OpenPype/pull/4120) -- Publisher: Hide unknown publish values [\#4116](https://github.com/pypeclub/OpenPype/pull/4116) -- Ftrack: Event server status give more information about version locations [\#4112](https://github.com/pypeclub/OpenPype/pull/4112) -- General: Allow higher numbers in frames and clips [\#4101](https://github.com/pypeclub/OpenPype/pull/4101) -- Publisher: Settings for validate frame range [\#4097](https://github.com/pypeclub/OpenPype/pull/4097) -- Publisher: Ignore escape button [\#4090](https://github.com/pypeclub/OpenPype/pull/4090) -- Flame: Loading clip with native colorspace resolved from mapping [\#4079](https://github.com/pypeclub/OpenPype/pull/4079) -- General: Extract review single frame output [\#4064](https://github.com/pypeclub/OpenPype/pull/4064) -- Publisher: Prepared common function for instance data cache [\#4063](https://github.com/pypeclub/OpenPype/pull/4063) -- Publisher: Easy access to publish page from create page [\#4058](https://github.com/pypeclub/OpenPype/pull/4058) -- General/TVPaint: Attribute defs dialog [\#4052](https://github.com/pypeclub/OpenPype/pull/4052) -- Publisher: Better reset defer 
[\#4048](https://github.com/pypeclub/OpenPype/pull/4048) -- Publisher: Add thumbnail sources [\#4042](https://github.com/pypeclub/OpenPype/pull/4042) - -**๐Ÿ› Bug fixes** - -- General: Move default settings for template name [\#4119](https://github.com/pypeclub/OpenPype/pull/4119) -- Slack: notification fail in new tray publisher [\#4118](https://github.com/pypeclub/OpenPype/pull/4118) -- Nuke: loaded nodes set to first tab [\#4114](https://github.com/pypeclub/OpenPype/pull/4114) -- Nuke: load image first frame [\#4113](https://github.com/pypeclub/OpenPype/pull/4113) -- Files Widget: Ignore case sensitivity of extensions [\#4096](https://github.com/pypeclub/OpenPype/pull/4096) -- Webpublisher: extension is lowercased in Setting and in uploaded files [\#4095](https://github.com/pypeclub/OpenPype/pull/4095) -- Publish Report Viewer: Fix small bugs [\#4086](https://github.com/pypeclub/OpenPype/pull/4086) -- Igniter: fix regex to match semver better [\#4085](https://github.com/pypeclub/OpenPype/pull/4085) -- Maya: aov filtering [\#4083](https://github.com/pypeclub/OpenPype/pull/4083) -- Flame/Flare: Loading to multiple batches [\#4080](https://github.com/pypeclub/OpenPype/pull/4080) -- hiero: creator from settings with set maximum [\#4077](https://github.com/pypeclub/OpenPype/pull/4077) -- Nuke: resolve hashes in file name only for frame token [\#4074](https://github.com/pypeclub/OpenPype/pull/4074) -- Publisher: Fix cache of asset docs [\#4070](https://github.com/pypeclub/OpenPype/pull/4070) -- Webpublisher: cleanup wp extract thumbnail [\#4067](https://github.com/pypeclub/OpenPype/pull/4067) -- Settings UI: Locked setting can't bypass lock [\#4066](https://github.com/pypeclub/OpenPype/pull/4066) -- Loader: Fix comparison of repre name [\#4053](https://github.com/pypeclub/OpenPype/pull/4053) -- Deadline: Extract environment subprocess failure [\#4050](https://github.com/pypeclub/OpenPype/pull/4050) - -**๐Ÿ”€ Refactored code** - -- General: Collect entities plugin minor changes [\#4089](https://github.com/pypeclub/OpenPype/pull/4089) -- General: Direct interfaces import [\#4065](https://github.com/pypeclub/OpenPype/pull/4065) - -**Merged pull requests:** - -- Bump loader-utils from 1.4.1 to 1.4.2 in /website [\#4100](https://github.com/pypeclub/OpenPype/pull/4100) -- Online family for Tray Publisher [\#4093](https://github.com/pypeclub/OpenPype/pull/4093) -- Bump loader-utils from 1.4.0 to 1.4.1 in /website [\#4081](https://github.com/pypeclub/OpenPype/pull/4081) -- remove underscore from subset name [\#4059](https://github.com/pypeclub/OpenPype/pull/4059) -- Alembic Loader as Arnold Standin [\#4047](https://github.com/pypeclub/OpenPype/pull/4047) - -## [3.14.6](https://github.com/pypeclub/OpenPype/tree/3.14.6) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...3.14.6) - -### ๐Ÿ“– Documentation - -- Documentation: Minor updates to dev\_requirements.md [\#4025](https://github.com/pypeclub/OpenPype/pull/4025) - -**๐Ÿ†• New features** - -- Nuke: add 13.2 variant [\#4041](https://github.com/pypeclub/OpenPype/pull/4041) - -**๐Ÿš€ Enhancements** - -- Publish Report Viewer: Store reports locally on machine [\#4040](https://github.com/pypeclub/OpenPype/pull/4040) -- General: More specific error in burnins script [\#4026](https://github.com/pypeclub/OpenPype/pull/4026) -- General: Extract review does not crash with old settings overrides [\#4023](https://github.com/pypeclub/OpenPype/pull/4023) -- Publisher: Convertors for legacy instances 
[\#4020](https://github.com/pypeclub/OpenPype/pull/4020) -- workflows: adding milestone creator and assigner [\#4018](https://github.com/pypeclub/OpenPype/pull/4018) -- Publisher: Catch creator errors [\#4015](https://github.com/pypeclub/OpenPype/pull/4015) - -**๐Ÿ› Bug fixes** - -- Hiero - effect collection fixes [\#4038](https://github.com/pypeclub/OpenPype/pull/4038) -- Nuke - loader clip correct hash conversion in path [\#4037](https://github.com/pypeclub/OpenPype/pull/4037) -- Maya: Soft fail when applying capture preset [\#4034](https://github.com/pypeclub/OpenPype/pull/4034) -- Igniter: handle missing directory [\#4032](https://github.com/pypeclub/OpenPype/pull/4032) -- StandalonePublisher: Fix thumbnail publishing [\#4029](https://github.com/pypeclub/OpenPype/pull/4029) -- Experimental Tools: Fix publisher import [\#4027](https://github.com/pypeclub/OpenPype/pull/4027) -- Houdini: fix wrong path in ASS loader [\#4016](https://github.com/pypeclub/OpenPype/pull/4016) - -**๐Ÿ”€ Refactored code** - -- General: Import lib functions from lib [\#4017](https://github.com/pypeclub/OpenPype/pull/4017) - -## [3.14.5](https://github.com/pypeclub/OpenPype/tree/3.14.5) (2022-10-24) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.4...3.14.5) - -**๐Ÿš€ Enhancements** - -- Maya: add OBJ extractor to model family [\#4021](https://github.com/pypeclub/OpenPype/pull/4021) -- Publish report viewer tool [\#4010](https://github.com/pypeclub/OpenPype/pull/4010) -- Nuke | Global: adding custom tags representation filtering [\#4009](https://github.com/pypeclub/OpenPype/pull/4009) -- Publisher: Create context has shared data for collection phase [\#3995](https://github.com/pypeclub/OpenPype/pull/3995) -- Resolve: updating to v18 compatibility [\#3986](https://github.com/pypeclub/OpenPype/pull/3986) - -**๐Ÿ› Bug fixes** - -- TrayPublisher: Fix missing argument [\#4019](https://github.com/pypeclub/OpenPype/pull/4019) -- General: Fix python 2 compatibility of ffmpeg and oiio tools discovery [\#4011](https://github.com/pypeclub/OpenPype/pull/4011) - -**๐Ÿ”€ Refactored code** - -- Maya: Removed unused imports [\#4008](https://github.com/pypeclub/OpenPype/pull/4008) -- Unreal: Fix import of moved function [\#4007](https://github.com/pypeclub/OpenPype/pull/4007) -- Houdini: Change import of RepairAction [\#4005](https://github.com/pypeclub/OpenPype/pull/4005) -- Nuke/Hiero: Refactor openpype.api imports [\#4000](https://github.com/pypeclub/OpenPype/pull/4000) -- TVPaint: Defined with HostBase [\#3994](https://github.com/pypeclub/OpenPype/pull/3994) - -**Merged pull requests:** - -- Unreal: Remove redundant Creator stub [\#4012](https://github.com/pypeclub/OpenPype/pull/4012) -- Unreal: add `uproject` extension to Unreal project template [\#4004](https://github.com/pypeclub/OpenPype/pull/4004) -- Unreal: fix order of includes [\#4002](https://github.com/pypeclub/OpenPype/pull/4002) -- Fusion: Implement backwards compatibility \(+/- Fusion 17.2\) [\#3958](https://github.com/pypeclub/OpenPype/pull/3958) - -## [3.14.4](https://github.com/pypeclub/OpenPype/tree/3.14.4) (2022-10-19) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.3...3.14.4) - -**๐Ÿ†• New features** - -- Webpublisher: use max next published version number for all items in batch [\#3961](https://github.com/pypeclub/OpenPype/pull/3961) -- General: Control Thumbnail integration via explicit configuration profiles [\#3951](https://github.com/pypeclub/OpenPype/pull/3951) - -**๐Ÿš€ Enhancements** - -- Publisher: 
Multiselection in card view [\#3993](https://github.com/pypeclub/OpenPype/pull/3993) -- TrayPublisher: Original Basename cause crash too early [\#3990](https://github.com/pypeclub/OpenPype/pull/3990) -- Tray Publisher: add `originalBasename` data to simple creators [\#3988](https://github.com/pypeclub/OpenPype/pull/3988) -- General: Custom paths to ffmpeg and OpenImageIO tools [\#3982](https://github.com/pypeclub/OpenPype/pull/3982) -- Integrate: Preserve existing subset group if instance does not set it for new version [\#3976](https://github.com/pypeclub/OpenPype/pull/3976) -- Publisher: Prepare publisher controller for remote publishing [\#3972](https://github.com/pypeclub/OpenPype/pull/3972) -- Maya: new style dataclasses in maya deadline submitter plugin [\#3968](https://github.com/pypeclub/OpenPype/pull/3968) -- Maya: Define preffered Qt bindings for Qt.py and qtpy [\#3963](https://github.com/pypeclub/OpenPype/pull/3963) -- Settings: Move imageio from project anatomy to project settings \[pypeclub\] [\#3959](https://github.com/pypeclub/OpenPype/pull/3959) -- TrayPublisher: Extract thumbnail for other families [\#3952](https://github.com/pypeclub/OpenPype/pull/3952) -- Publisher: Pass instance to subset name method on update [\#3949](https://github.com/pypeclub/OpenPype/pull/3949) -- General: Set root environments before DCC launch [\#3947](https://github.com/pypeclub/OpenPype/pull/3947) -- Refactor: changed legacy way to update database for Hero version integrate [\#3941](https://github.com/pypeclub/OpenPype/pull/3941) -- Maya: Moved plugin from global to maya [\#3939](https://github.com/pypeclub/OpenPype/pull/3939) -- Publisher: Create dialog is part of main window [\#3936](https://github.com/pypeclub/OpenPype/pull/3936) -- Fusion: Implement Alembic and FBX mesh loader [\#3927](https://github.com/pypeclub/OpenPype/pull/3927) - -**๐Ÿ› Bug fixes** - -- TrayPublisher: Disable sequences in batch mov creator [\#3996](https://github.com/pypeclub/OpenPype/pull/3996) -- Fix - tags might be missing on representation [\#3985](https://github.com/pypeclub/OpenPype/pull/3985) -- Resolve: Fix usage of functions from lib [\#3983](https://github.com/pypeclub/OpenPype/pull/3983) -- Maya: remove invalid prefix token for non-multipart outputs [\#3981](https://github.com/pypeclub/OpenPype/pull/3981) -- Ftrack: Fix schema cache for Python 2 [\#3980](https://github.com/pypeclub/OpenPype/pull/3980) -- Maya: add object to attr.s declaration [\#3973](https://github.com/pypeclub/OpenPype/pull/3973) -- Maya: Deadline OutputFilePath hack regression for Renderman [\#3950](https://github.com/pypeclub/OpenPype/pull/3950) -- Houdini: Fix validate workfile paths for non-parm file references [\#3948](https://github.com/pypeclub/OpenPype/pull/3948) -- Photoshop: missed sync published version of workfile with workfile [\#3946](https://github.com/pypeclub/OpenPype/pull/3946) -- Maya: Set default value for RenderSetupIncludeLights option [\#3944](https://github.com/pypeclub/OpenPype/pull/3944) -- Maya: fix regression of Renderman Deadline hack [\#3943](https://github.com/pypeclub/OpenPype/pull/3943) -- Kitsu: 2 fixes, nb\_frames and Shot type error [\#3940](https://github.com/pypeclub/OpenPype/pull/3940) -- Tray: Change order of attribute changes [\#3938](https://github.com/pypeclub/OpenPype/pull/3938) -- AttributeDefs: Fix crashing multivalue of files widget [\#3937](https://github.com/pypeclub/OpenPype/pull/3937) -- General: Fix links query on hero version [\#3900](https://github.com/pypeclub/OpenPype/pull/3900) -- 
Publisher: Files Drag n Drop cleanup [\#3888](https://github.com/pypeclub/OpenPype/pull/3888) - -**๐Ÿ”€ Refactored code** - -- Flame: Import lib functions from lib [\#3992](https://github.com/pypeclub/OpenPype/pull/3992) -- General: Fix deprecated warning in legacy creator [\#3978](https://github.com/pypeclub/OpenPype/pull/3978) -- Blender: Remove openpype api imports [\#3977](https://github.com/pypeclub/OpenPype/pull/3977) -- General: Use direct import of resources [\#3964](https://github.com/pypeclub/OpenPype/pull/3964) -- General: Direct settings imports [\#3934](https://github.com/pypeclub/OpenPype/pull/3934) -- General: import 'Logger' from 'openpype.lib' [\#3926](https://github.com/pypeclub/OpenPype/pull/3926) -- General: Remove deprecated functions from lib [\#3907](https://github.com/pypeclub/OpenPype/pull/3907) - -**Merged pull requests:** - -- Maya + Yeti: Load Yeti Cache fix frame number recognition [\#3942](https://github.com/pypeclub/OpenPype/pull/3942) -- Fusion: Implement callbacks to Fusion's event system thread [\#3928](https://github.com/pypeclub/OpenPype/pull/3928) -- Photoshop: create single frame image in Ftrack as review [\#3908](https://github.com/pypeclub/OpenPype/pull/3908) - -## [3.14.3](https://github.com/pypeclub/OpenPype/tree/3.14.3) (2022-10-03) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.2...3.14.3) - -**๐Ÿš€ Enhancements** - -- Publisher: Enhancement proposals [\#3897](https://github.com/pypeclub/OpenPype/pull/3897) - -**๐Ÿ› Bug fixes** - -- Maya: Fix Render single camera validator [\#3929](https://github.com/pypeclub/OpenPype/pull/3929) -- Flame: loading multilayer exr to batch/reel is working [\#3901](https://github.com/pypeclub/OpenPype/pull/3901) -- Hiero: Fix inventory check on launch [\#3895](https://github.com/pypeclub/OpenPype/pull/3895) -- WebPublisher: Fix import after refactor [\#3891](https://github.com/pypeclub/OpenPype/pull/3891) - -**๐Ÿ”€ Refactored code** - -- Maya: Remove unused 'openpype.api' imports in plugins [\#3925](https://github.com/pypeclub/OpenPype/pull/3925) -- Resolve: Use new Extractor location [\#3918](https://github.com/pypeclub/OpenPype/pull/3918) -- Unreal: Use new Extractor location [\#3917](https://github.com/pypeclub/OpenPype/pull/3917) -- Flame: Use new Extractor location [\#3916](https://github.com/pypeclub/OpenPype/pull/3916) -- Houdini: Use new Extractor location [\#3894](https://github.com/pypeclub/OpenPype/pull/3894) -- Harmony: Use new Extractor location [\#3893](https://github.com/pypeclub/OpenPype/pull/3893) - -**Merged pull requests:** - -- Maya: Fix Scene Inventory possibly starting off-screen due to maya preferences [\#3923](https://github.com/pypeclub/OpenPype/pull/3923) - -## [3.14.2](https://github.com/pypeclub/OpenPype/tree/3.14.2) (2022-09-12) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.1...3.14.2) - -### ๐Ÿ“– Documentation - -- Documentation: Anatomy templates [\#3618](https://github.com/pypeclub/OpenPype/pull/3618) - -**๐Ÿ†• New features** - -- Nuke: Build workfile by template [\#3763](https://github.com/pypeclub/OpenPype/pull/3763) -- Houdini: Publishing workfiles [\#3697](https://github.com/pypeclub/OpenPype/pull/3697) -- Global: making collect audio plugin global [\#3679](https://github.com/pypeclub/OpenPype/pull/3679) - -**๐Ÿš€ Enhancements** - -- Flame: Adding Creator's retimed shot and handles switch [\#3826](https://github.com/pypeclub/OpenPype/pull/3826) -- Flame: OpenPype submenu to batch and media manager 
[\#3825](https://github.com/pypeclub/OpenPype/pull/3825) -- General: Better pixmap scaling [\#3809](https://github.com/pypeclub/OpenPype/pull/3809) -- Photoshop: attempt to speed up ExtractImage [\#3793](https://github.com/pypeclub/OpenPype/pull/3793) -- SyncServer: Added cli commands for sync server [\#3765](https://github.com/pypeclub/OpenPype/pull/3765) -- Kitsu: Drop 'entities root' setting. [\#3739](https://github.com/pypeclub/OpenPype/pull/3739) -- git: update gitignore [\#3722](https://github.com/pypeclub/OpenPype/pull/3722) -- Blender: Publisher collect workfile representation [\#3670](https://github.com/pypeclub/OpenPype/pull/3670) -- Maya: move set render settings menu entry [\#3669](https://github.com/pypeclub/OpenPype/pull/3669) -- Scene Inventory: Maya add actions to select from or to scene [\#3659](https://github.com/pypeclub/OpenPype/pull/3659) -- Scene Inventory: Add subsetGroup column [\#3658](https://github.com/pypeclub/OpenPype/pull/3658) - -**๐Ÿ› Bug fixes** - -- General: Fix Pattern access in client code [\#3828](https://github.com/pypeclub/OpenPype/pull/3828) -- Launcher: Skip opening last work file works for groups [\#3822](https://github.com/pypeclub/OpenPype/pull/3822) -- Maya: Publishing data key change [\#3811](https://github.com/pypeclub/OpenPype/pull/3811) -- Igniter: Fix status handling when version is already installed [\#3804](https://github.com/pypeclub/OpenPype/pull/3804) -- Resolve: Addon import is Python 2 compatible [\#3798](https://github.com/pypeclub/OpenPype/pull/3798) -- Hiero: retimed clip publishing is working [\#3792](https://github.com/pypeclub/OpenPype/pull/3792) -- nuke: validate write node is not failing due wrong type [\#3780](https://github.com/pypeclub/OpenPype/pull/3780) -- Fix - changed format of version string in pyproject.toml [\#3777](https://github.com/pypeclub/OpenPype/pull/3777) -- Ftrack status fix typo prgoress -\> progress [\#3761](https://github.com/pypeclub/OpenPype/pull/3761) -- Fix version resolution [\#3757](https://github.com/pypeclub/OpenPype/pull/3757) -- Maya: `containerise` dont skip empty values [\#3674](https://github.com/pypeclub/OpenPype/pull/3674) - -**๐Ÿ”€ Refactored code** - -- Photoshop: Use new Extractor location [\#3789](https://github.com/pypeclub/OpenPype/pull/3789) -- Blender: Use new Extractor location [\#3787](https://github.com/pypeclub/OpenPype/pull/3787) -- AfterEffects: Use new Extractor location [\#3784](https://github.com/pypeclub/OpenPype/pull/3784) -- General: Remove unused teshost [\#3773](https://github.com/pypeclub/OpenPype/pull/3773) -- General: Copied 'Extractor' plugin to publish pipeline [\#3771](https://github.com/pypeclub/OpenPype/pull/3771) -- General: Move queries of asset and representation links [\#3770](https://github.com/pypeclub/OpenPype/pull/3770) -- General: Move create project folders to pipeline [\#3768](https://github.com/pypeclub/OpenPype/pull/3768) -- General: Create project function moved to client code [\#3766](https://github.com/pypeclub/OpenPype/pull/3766) -- Maya: Refactor submit deadline to use AbstractSubmitDeadline [\#3759](https://github.com/pypeclub/OpenPype/pull/3759) -- General: Change publish template settings location [\#3755](https://github.com/pypeclub/OpenPype/pull/3755) -- General: Move hostdirname functionality into host [\#3749](https://github.com/pypeclub/OpenPype/pull/3749) -- General: Move publish utils to pipeline [\#3745](https://github.com/pypeclub/OpenPype/pull/3745) -- Houdini: Define houdini as addon 
[\#3735](https://github.com/pypeclub/OpenPype/pull/3735) -- Fusion: Defined fusion as addon [\#3733](https://github.com/pypeclub/OpenPype/pull/3733) -- Flame: Defined flame as addon [\#3732](https://github.com/pypeclub/OpenPype/pull/3732) -- Resolve: Define resolve as addon [\#3727](https://github.com/pypeclub/OpenPype/pull/3727) - -**Merged pull requests:** - -- Standalone Publisher: Ignore empty labels, then still use name like other asset models [\#3779](https://github.com/pypeclub/OpenPype/pull/3779) -- Kitsu - sync\_all\_project - add list ignore\_projects [\#3776](https://github.com/pypeclub/OpenPype/pull/3776) - -## [3.14.1](https://github.com/pypeclub/OpenPype/tree/3.14.1) (2022-08-30) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.0...3.14.1) - -### ๐Ÿ“– Documentation - -- Documentation: Few updates [\#3698](https://github.com/pypeclub/OpenPype/pull/3698) -- Documentation: Settings development [\#3660](https://github.com/pypeclub/OpenPype/pull/3660) - -**๐Ÿ†• New features** - -- Webpublisher:change create flatten image into tri state [\#3678](https://github.com/pypeclub/OpenPype/pull/3678) -- Blender: validators code correction with settings and defaults [\#3662](https://github.com/pypeclub/OpenPype/pull/3662) - -**๐Ÿš€ Enhancements** - -- General: Thumbnail can use project roots [\#3750](https://github.com/pypeclub/OpenPype/pull/3750) -- Settings: Remove settings lock on tray exit [\#3720](https://github.com/pypeclub/OpenPype/pull/3720) -- General: Added helper getters to modules manager [\#3712](https://github.com/pypeclub/OpenPype/pull/3712) -- Unreal: Define unreal as module and use host class [\#3701](https://github.com/pypeclub/OpenPype/pull/3701) -- Settings: Lock settings UI session [\#3700](https://github.com/pypeclub/OpenPype/pull/3700) -- General: Benevolent context label collector [\#3686](https://github.com/pypeclub/OpenPype/pull/3686) -- Ftrack: Store ftrack entities on hierarchy integration to instances [\#3677](https://github.com/pypeclub/OpenPype/pull/3677) -- Ftrack: More logs related to auto sync value change [\#3671](https://github.com/pypeclub/OpenPype/pull/3671) -- Blender: ops refresh manager after process events [\#3663](https://github.com/pypeclub/OpenPype/pull/3663) - -**๐Ÿ› Bug fixes** - -- Maya: Fix typo in getPanel argument `with_focus` -\> `withFocus` [\#3753](https://github.com/pypeclub/OpenPype/pull/3753) -- General: Smaller fixes of imports [\#3748](https://github.com/pypeclub/OpenPype/pull/3748) -- General: Logger tweaks [\#3741](https://github.com/pypeclub/OpenPype/pull/3741) -- Nuke: missing job dependency if multiple bake streams [\#3737](https://github.com/pypeclub/OpenPype/pull/3737) -- Nuke: color-space settings from anatomy is working [\#3721](https://github.com/pypeclub/OpenPype/pull/3721) -- Settings: Fix studio default anatomy save [\#3716](https://github.com/pypeclub/OpenPype/pull/3716) -- Maya: Use project name instead of project code [\#3709](https://github.com/pypeclub/OpenPype/pull/3709) -- Settings: Fix project overrides save [\#3708](https://github.com/pypeclub/OpenPype/pull/3708) -- Workfiles tool: Fix published workfile filtering [\#3704](https://github.com/pypeclub/OpenPype/pull/3704) -- PS, AE: Provide default variant value for workfile subset [\#3703](https://github.com/pypeclub/OpenPype/pull/3703) -- RoyalRender: handle host name that is not set [\#3695](https://github.com/pypeclub/OpenPype/pull/3695) -- Flame: retime is working on clip publishing 
[\#3684](https://github.com/pypeclub/OpenPype/pull/3684) -- Webpublisher: added check for empty context [\#3682](https://github.com/pypeclub/OpenPype/pull/3682) - -**๐Ÿ”€ Refactored code** - -- General: Move delivery logic to pipeline [\#3751](https://github.com/pypeclub/OpenPype/pull/3751) -- General: Host addons cleanup [\#3744](https://github.com/pypeclub/OpenPype/pull/3744) -- Webpublisher: Webpublisher is used as addon [\#3740](https://github.com/pypeclub/OpenPype/pull/3740) -- Photoshop: Defined photoshop as addon [\#3736](https://github.com/pypeclub/OpenPype/pull/3736) -- Harmony: Defined harmony as addon [\#3734](https://github.com/pypeclub/OpenPype/pull/3734) -- General: Module interfaces cleanup [\#3731](https://github.com/pypeclub/OpenPype/pull/3731) -- AfterEffects: Move AE functions from general lib [\#3730](https://github.com/pypeclub/OpenPype/pull/3730) -- Blender: Define blender as module [\#3729](https://github.com/pypeclub/OpenPype/pull/3729) -- AfterEffects: Define AfterEffects as module [\#3728](https://github.com/pypeclub/OpenPype/pull/3728) -- General: Replace PypeLogger with Logger [\#3725](https://github.com/pypeclub/OpenPype/pull/3725) -- Nuke: Define nuke as module [\#3724](https://github.com/pypeclub/OpenPype/pull/3724) -- General: Move subset name functionality [\#3723](https://github.com/pypeclub/OpenPype/pull/3723) -- General: Move creators plugin getter [\#3714](https://github.com/pypeclub/OpenPype/pull/3714) -- General: Move constants from lib to client [\#3713](https://github.com/pypeclub/OpenPype/pull/3713) -- Loader: Subset groups using client operations [\#3710](https://github.com/pypeclub/OpenPype/pull/3710) -- TVPaint: Defined as module [\#3707](https://github.com/pypeclub/OpenPype/pull/3707) -- StandalonePublisher: Define StandalonePublisher as module [\#3706](https://github.com/pypeclub/OpenPype/pull/3706) -- TrayPublisher: Define TrayPublisher as module [\#3705](https://github.com/pypeclub/OpenPype/pull/3705) -- General: Move context specific functions to context tools [\#3702](https://github.com/pypeclub/OpenPype/pull/3702) - -**Merged pull requests:** - -- Hiero: Define hiero as module [\#3717](https://github.com/pypeclub/OpenPype/pull/3717) -- Deadline: better logging for DL webservice failures [\#3694](https://github.com/pypeclub/OpenPype/pull/3694) -- Photoshop: resize saved images in ExtractReview for ffmpeg [\#3676](https://github.com/pypeclub/OpenPype/pull/3676) -- Nuke: Validation refactory to new publisher [\#3567](https://github.com/pypeclub/OpenPype/pull/3567) - -## [3.14.0](https://github.com/pypeclub/OpenPype/tree/3.14.0) (2022-08-18) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...3.14.0) - -**๐Ÿ†• New features** - -- Maya: Build workfile by template [\#3578](https://github.com/pypeclub/OpenPype/pull/3578) -- Maya: Implementation of JSON layout for Unreal workflow [\#3353](https://github.com/pypeclub/OpenPype/pull/3353) -- Maya: Build workfile by template [\#3315](https://github.com/pypeclub/OpenPype/pull/3315) - -**๐Ÿš€ Enhancements** - -- Ftrack: Addiotional component metadata [\#3685](https://github.com/pypeclub/OpenPype/pull/3685) -- Ftrack: Set task status on farm publishing [\#3680](https://github.com/pypeclub/OpenPype/pull/3680) -- Ftrack: Set task status on task creation in integrate hierarchy [\#3675](https://github.com/pypeclub/OpenPype/pull/3675) -- Maya: Disable rendering of all lights for render instances submitted through Deadline. 
[\#3661](https://github.com/pypeclub/OpenPype/pull/3661) -- General: Optimized OCIO configs [\#3650](https://github.com/pypeclub/OpenPype/pull/3650) - -**๐Ÿ› Bug fixes** - -- General: Switch from hero version to versioned works [\#3691](https://github.com/pypeclub/OpenPype/pull/3691) -- General: Fix finding of last version [\#3656](https://github.com/pypeclub/OpenPype/pull/3656) -- General: Extract Review can scale with pixel aspect ratio [\#3644](https://github.com/pypeclub/OpenPype/pull/3644) -- Maya: Refactor moved usage of CreateRender settings [\#3643](https://github.com/pypeclub/OpenPype/pull/3643) -- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) -- Nuke: color settings for render write node is working now [\#3632](https://github.com/pypeclub/OpenPype/pull/3632) -- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) - -**๐Ÿ”€ Refactored code** - -- General: Use client projects getter [\#3673](https://github.com/pypeclub/OpenPype/pull/3673) -- Resolve: Match folder structure to other hosts [\#3653](https://github.com/pypeclub/OpenPype/pull/3653) -- Maya: Hosts as modules [\#3647](https://github.com/pypeclub/OpenPype/pull/3647) -- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) -- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) -- General: Workfiles builder using query functions [\#3598](https://github.com/pypeclub/OpenPype/pull/3598) - -**Merged pull requests:** - -- Deadline: Global job pre load is not Pype 2 compatible [\#3666](https://github.com/pypeclub/OpenPype/pull/3666) -- Maya: Remove unused get current renderer logic [\#3645](https://github.com/pypeclub/OpenPype/pull/3645) -- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) -- fix the bug of failing to extract look when UDIMs format used in AiImage [\#3628](https://github.com/pypeclub/OpenPype/pull/3628) - -## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.2...3.13.0) - -**๐Ÿ†• New features** - -- Support for mutliple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) -- Traypublisher: simple editorial publishing [\#3492](https://github.com/pypeclub/OpenPype/pull/3492) - -**๐Ÿš€ Enhancements** - -- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) -- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) -- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612) -- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) -- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) -- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) -- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) -- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) -- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) -- Maya: Render Creator has configurable options. 
[\#3097](https://github.com/pypeclub/OpenPype/pull/3097) - -**๐Ÿ› Bug fixes** - -- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) -- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) -- Ftrack: Sync hierarchical attributes can handle new created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) -- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) -- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617) -- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) -- AfterEffects: refactored integrate doesnt work formulti frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) -- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) -- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) -- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. [\#3590](https://github.com/pypeclub/OpenPype/pull/3590) -- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) -- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) -- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) -- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) -- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) -- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) - -**๐Ÿ”€ Refactored code** - -- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) -- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) -- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) -- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) -- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) - -**Merged pull requests:** - -- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) -- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) -- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) - -## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...3.12.2) - -### ๐Ÿ“– Documentation - -- Update website with more studios [\#3554](https://github.com/pypeclub/OpenPype/pull/3554) -- Documentation: Update publishing dev docs [\#3549](https://github.com/pypeclub/OpenPype/pull/3549) - -**๐Ÿš€ Enhancements** - -- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) -- Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) -- General: Interactive console in cli 
[\#3526](https://github.com/pypeclub/OpenPype/pull/3526) -- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) -- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) -- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506) -- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505) -- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) -- General: Event system [\#3499](https://github.com/pypeclub/OpenPype/pull/3499) -- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) -- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) -- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) -- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469) -- Enhance powershell build scripts [\#1827](https://github.com/pypeclub/OpenPype/pull/1827) - -**๐Ÿ› Bug fixes** - -- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) -- Maya: Fix animated attributes \(ie. overscan\) on loaded cameras breaking review publishing. [\#3562](https://github.com/pypeclub/OpenPype/pull/3562) -- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) -- Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) -- General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) -- Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) -- Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) -- Workfiles tool: Show of tool and it's flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) -- General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) -- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) -- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) -- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) -- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) -- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) -- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) -- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) -- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) -- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) - -**๐Ÿ”€ Refactored code** - -- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) -- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) -- Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) -- General: Client docstrings cleanup 
[\#3529](https://github.com/pypeclub/OpenPype/pull/3529) -- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) -- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) -- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496) -- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495) -- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466) -- Refactor Integrate Asset [\#2898](https://github.com/pypeclub/OpenPype/pull/2898) - -**Merged pull requests:** - -- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566) - -## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.0...3.12.1) - -### ๐Ÿ“– Documentation - -- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) - -**๐Ÿ†• New features** - -- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433) - -**๐Ÿš€ Enhancements** - -- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494) -- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487) -- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484) -- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) -- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) -- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) -- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445) -- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426) -- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425) -- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400) -- Maya: Add additional playblast options to review Extractor. [\#3384](https://github.com/pypeclub/OpenPype/pull/3384) -- Maya: Ability to set resolution for playblasts from asset, and override through review instance. 
[\#3360](https://github.com/pypeclub/OpenPype/pull/3360) -- Maya: Redshift Volume Loader Implement update, remove, switch + fix vdb sequence support [\#3197](https://github.com/pypeclub/OpenPype/pull/3197) -- Maya: Implement `iter_visible_nodes_in_range` for extracting Alembics [\#3100](https://github.com/pypeclub/OpenPype/pull/3100) - -**🐛 Bug fixes** - -- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493) -- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491) -- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485) -- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479) -- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478) -- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474) -- Kitsu: bugfix with sync-service and publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473) -- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470) -- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468) -- Resolve: removed a few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464) -- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462) -- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455) -- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450) -- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447) -- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443) -- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427) -- Maya: Camera extra data - additional fix for \#3304 [\#3386](https://github.com/pypeclub/OpenPype/pull/3386) -- Maya: Handle excluding `model` family from frame range validator.
[\#3370](https://github.com/pypeclub/OpenPype/pull/3370) - -**🔀 Refactored code** - -- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461) -- Maya: Re-use `maintained_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460) -- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459) -- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458) -- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457) -- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454) -- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446) -- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442) -- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436) -- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435) -- Fusion: Use client query functions [\#3380](https://github.com/pypeclub/OpenPype/pull/3380) -- Resolve: Use client query functions [\#3379](https://github.com/pypeclub/OpenPype/pull/3379) -- General: Host implementation defined with class [\#3337](https://github.com/pypeclub/OpenPype/pull/3337) - -## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.1...3.12.0) - -### 📖 Documentation - -- Fix typo in documentation: pyenv on mac [\#3417](https://github.com/pypeclub/OpenPype/pull/3417) -- Linux: update OIIO package [\#3401](https://github.com/pypeclub/OpenPype/pull/3401) - -**🆕 New features** - -- Shotgrid: Add production beta of shotgrid integration [\#2921](https://github.com/pypeclub/OpenPype/pull/2921) - -**🚀 Enhancements** - -- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422) -- Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411) -- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366) -- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357) -- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350) -- Maya: Allow more data to be published along camera 🎥 [\#3304](https://github.com/pypeclub/OpenPype/pull/3304) -- Add root keys and project keys to create starting folder [\#2755](https://github.com/pypeclub/OpenPype/pull/2755) - -**🐛 Bug fixes** - -- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420) -- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418) -- Nuke: Fix keyword argument in query function [\#3414](https://github.com/pypeclub/OpenPype/pull/3414) -- Houdini: fix loading and updating vdb/bgeo sequences [\#3408](https://github.com/pypeclub/OpenPype/pull/3408) -- Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407) -- General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398) -- Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392) -- TVPaint:
Make sure exit code is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382) -- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381) -- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377) -- Harmony: added UNC path to zipfile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372) -- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355) -- Nuke: Load full model hierarchy by default [\#3328](https://github.com/pypeclub/OpenPype/pull/3328) -- Nuke: multiple baking streams with correct slate [\#3245](https://github.com/pypeclub/OpenPype/pull/3245) -- Maya: fix image prefix warning in validator [\#3128](https://github.com/pypeclub/OpenPype/pull/3128) - -**🔀 Refactored code** - -- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421) -- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419) -- Kitsu: renaming to plural func sync\_all\_projects [\#3397](https://github.com/pypeclub/OpenPype/pull/3397) -- Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395) -- Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393) -- Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391) -- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385) -- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378) -- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376) -- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375) -- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374) -- TVPaint: Use client query functions [\#3340](https://github.com/pypeclub/OpenPype/pull/3340) -- Ftrack: Use client query functions [\#3339](https://github.com/pypeclub/OpenPype/pull/3339) -- Standalone Publisher: Use client query functions [\#3330](https://github.com/pypeclub/OpenPype/pull/3330) - -**Merged pull requests:** - -- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371) -- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369) -- Houdini: Implement Redshift Proxy Export [\#3196](https://github.com/pypeclub/OpenPype/pull/3196) - -## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.0...3.11.1) - -**🆕 New features** - -- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346) -- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344) - -**🚀 Enhancements** - -- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367) -- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354) -- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352) -- Ftrack: Open browser from tray [\#3320](https://github.com/pypeclub/OpenPype/pull/3320) -- Enhancement: More control over thumbnail processing.
[\#3259](https://github.com/pypeclub/OpenPype/pull/3259) - -**🐛 Bug fixes** - -- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368) -- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364) -- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363) -- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361) -- AE- fix validate\_scene\_settings and renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358) -- deadline: fixing misidentification of reviewables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356) -- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351) -- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347) -- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345) -- Deadline: added OPENPYPE\_MONGO to filter [\#3336](https://github.com/pypeclub/OpenPype/pull/3336) -- Nuke: fixing farm publishing if review is disabled [\#3306](https://github.com/pypeclub/OpenPype/pull/3306) -- Maya: Fix Yeti errors on Create, Publish and Load [\#3198](https://github.com/pypeclub/OpenPype/pull/3198) - -**🔀 Refactored code** - -- Webpublisher: Use client query functions [\#3333](https://github.com/pypeclub/OpenPype/pull/3333) - -## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.10.0...3.11.0) - -### 📖 Documentation - -- Documentation: Add app key to template documentation [\#3299](https://github.com/pypeclub/OpenPype/pull/3299) -- doc: adding royal render and multiverse to the website [\#3285](https://github.com/pypeclub/OpenPype/pull/3285) -- Module: Kitsu module [\#2650](https://github.com/pypeclub/OpenPype/pull/2650) - -**🆕 New features** - -- Multiverse: fixed composition write, full docs, cosmetics [\#3178](https://github.com/pypeclub/OpenPype/pull/3178) - -**🚀 Enhancements** - -- Settings: Settings can be extracted from UI [\#3323](https://github.com/pypeclub/OpenPype/pull/3323) -- updated poetry installation source [\#3316](https://github.com/pypeclub/OpenPype/pull/3316) -- Ftrack: Action to easily create daily review session [\#3310](https://github.com/pypeclub/OpenPype/pull/3310) -- TVPaint: Extractor use mark in/out range to render [\#3309](https://github.com/pypeclub/OpenPype/pull/3309) -- Ftrack: Delivery action can work on ReviewSessions [\#3307](https://github.com/pypeclub/OpenPype/pull/3307) -- Maya: Look assigner UI improvements [\#3298](https://github.com/pypeclub/OpenPype/pull/3298) -- Ftrack: Action to transfer values of hierarchical attributes [\#3284](https://github.com/pypeclub/OpenPype/pull/3284) -- Maya: better handling of legacy review subsets names [\#3269](https://github.com/pypeclub/OpenPype/pull/3269) -- General: Updated windows oiio tool [\#3268](https://github.com/pypeclub/OpenPype/pull/3268) -- Unreal: add support for skeletalMesh and staticMesh to loaders [\#3267](https://github.com/pypeclub/OpenPype/pull/3267) -- Maya: reference loaders could store placeholder in referenced url [\#3264](https://github.com/pypeclub/OpenPype/pull/3264) -- TVPaint: Init file for TVPaint worker also handle guideline images [\#3250](https://github.com/pypeclub/OpenPype/pull/3250) -- Nuke: Change default icon path in settings
[\#3247](https://github.com/pypeclub/OpenPype/pull/3247) -- Maya: publishing of animation and pointcache on a farm [\#3225](https://github.com/pypeclub/OpenPype/pull/3225) -- Maya: Look assigner UI improvements [\#3208](https://github.com/pypeclub/OpenPype/pull/3208) -- Nuke: add pointcache and animation to loader [\#3186](https://github.com/pypeclub/OpenPype/pull/3186) -- Nuke: Add a gizmo menu [\#3172](https://github.com/pypeclub/OpenPype/pull/3172) -- Support for Unreal 5 [\#3122](https://github.com/pypeclub/OpenPype/pull/3122) - -**🐛 Bug fixes** - -- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342) -- Houdini: Fix Houdini VDB manage update wrong file attribute name [\#3322](https://github.com/pypeclub/OpenPype/pull/3322) -- Nuke: anatomy compatibility issue hacks [\#3321](https://github.com/pypeclub/OpenPype/pull/3321) -- hiero: otio p3 compatibility issue - metadata on effect use update 3.11 [\#3314](https://github.com/pypeclub/OpenPype/pull/3314) -- General: Vendorized modules for Python 2 and update poetry lock [\#3305](https://github.com/pypeclub/OpenPype/pull/3305) -- Fix - added local targets to install host [\#3303](https://github.com/pypeclub/OpenPype/pull/3303) -- Settings: Add missing default settings for nuke gizmo [\#3301](https://github.com/pypeclub/OpenPype/pull/3301) -- Maya: Fix swapped width and height in reviews [\#3300](https://github.com/pypeclub/OpenPype/pull/3300) -- Maya: point cache publish handles Maya instances [\#3297](https://github.com/pypeclub/OpenPype/pull/3297) -- Global: extract review slate issues [\#3286](https://github.com/pypeclub/OpenPype/pull/3286) -- Webpublisher: return only active projects in ProjectsEndpoint [\#3281](https://github.com/pypeclub/OpenPype/pull/3281) -- Hiero: add support for task tags 3.10.x [\#3279](https://github.com/pypeclub/OpenPype/pull/3279) -- General: Fix Oiio tool path resolving [\#3278](https://github.com/pypeclub/OpenPype/pull/3278) -- Maya: Fix udim support for e.g.
uppercase \<UDIM\> tag [\#3266](https://github.com/pypeclub/OpenPype/pull/3266) -- Nuke: bake reformat was failing on string type [\#3261](https://github.com/pypeclub/OpenPype/pull/3261) -- Maya: hotfix Pxr multitexture in looks [\#3260](https://github.com/pypeclub/OpenPype/pull/3260) -- Unreal: Fix Camera Loading if Layout is missing [\#3255](https://github.com/pypeclub/OpenPype/pull/3255) -- Unreal: Fixed Animation loading in UE5 [\#3240](https://github.com/pypeclub/OpenPype/pull/3240) -- Unreal: Fixed Render creation in UE5 [\#3239](https://github.com/pypeclub/OpenPype/pull/3239) -- Unreal: Fixed Camera loading in UE5 [\#3238](https://github.com/pypeclub/OpenPype/pull/3238) -- Flame: debugging [\#3224](https://github.com/pypeclub/OpenPype/pull/3224) -- add silent audio to slate [\#3162](https://github.com/pypeclub/OpenPype/pull/3162) -- Add timecode to slate [\#2929](https://github.com/pypeclub/OpenPype/pull/2929) - -**🔀 Refactored code** - -- Blender: Use client query functions [\#3331](https://github.com/pypeclub/OpenPype/pull/3331) -- General: Define query functions [\#3288](https://github.com/pypeclub/OpenPype/pull/3288) - -**Merged pull requests:** - -- Maya: add pointcache family to gpu cache loader [\#3318](https://github.com/pypeclub/OpenPype/pull/3318) -- Maya look: skip empty file attributes [\#3274](https://github.com/pypeclub/OpenPype/pull/3274) - -## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.8...3.10.0) - -### 📖 Documentation - -- Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094) -- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052) - -**🆕 New features** - -- General: OpenPype modules publish plugins are registered in host [\#3180](https://github.com/pypeclub/OpenPype/pull/3180) -- General: Creator plugins from addons can be registered [\#3179](https://github.com/pypeclub/OpenPype/pull/3179) -- Ftrack: Single image reviewable [\#3157](https://github.com/pypeclub/OpenPype/pull/3157) -- Nuke: Expose write attributes to settings [\#3123](https://github.com/pypeclub/OpenPype/pull/3123) -- Hiero: Initial frame publish support [\#3106](https://github.com/pypeclub/OpenPype/pull/3106) -- Unreal: Render Publishing [\#2917](https://github.com/pypeclub/OpenPype/pull/2917) -- AfterEffects: Implemented New Publisher [\#2838](https://github.com/pypeclub/OpenPype/pull/2838) -- Unreal: Rendering implementation [\#2410](https://github.com/pypeclub/OpenPype/pull/2410) - -**🚀 Enhancements** - -- Maya: FBX camera export [\#3253](https://github.com/pypeclub/OpenPype/pull/3253) -- General: updating common vendor `scriptmenu` to 1.5.2 [\#3246](https://github.com/pypeclub/OpenPype/pull/3246) -- Project Manager: Allow to paste Tasks into multiple assets at the same time [\#3226](https://github.com/pypeclub/OpenPype/pull/3226) -- Project manager: Sped up project load [\#3216](https://github.com/pypeclub/OpenPype/pull/3216) -- Loader UI: Speed issues of loader with sync server [\#3199](https://github.com/pypeclub/OpenPype/pull/3199) -- Looks: add basic support for Renderman [\#3190](https://github.com/pypeclub/OpenPype/pull/3190) -- Maya: added clean\_import option to Import loader [\#3181](https://github.com/pypeclub/OpenPype/pull/3181) -- Add the scripts menu definition to nuke [\#3168](https://github.com/pypeclub/OpenPype/pull/3168) -- Maya: add maya 2023 to default applications
[\#3167](https://github.com/pypeclub/OpenPype/pull/3167) -- Compressed bgeo publishing in SAP and Houdini loader [\#3153](https://github.com/pypeclub/OpenPype/pull/3153) -- General: Add 'dataclasses' to required python modules [\#3149](https://github.com/pypeclub/OpenPype/pull/3149) -- Hooks: Tweak logging grammar [\#3147](https://github.com/pypeclub/OpenPype/pull/3147) -- Nuke: settings for reformat node in CreateWriteRender node [\#3143](https://github.com/pypeclub/OpenPype/pull/3143) -- Houdini: Add loader for alembic through Alembic Archive node [\#3140](https://github.com/pypeclub/OpenPype/pull/3140) -- Publisher: UI Modifications and fixes [\#3139](https://github.com/pypeclub/OpenPype/pull/3139) -- General: Simplified OP modules/addons import [\#3137](https://github.com/pypeclub/OpenPype/pull/3137) -- Terminal: Tweak coloring of TrayModuleManager logging enabled states [\#3133](https://github.com/pypeclub/OpenPype/pull/3133) -- General: Cleanup some Loader docstrings [\#3131](https://github.com/pypeclub/OpenPype/pull/3131) -- Nuke: render instance with subset name filtered overrides [\#3117](https://github.com/pypeclub/OpenPype/pull/3117) -- Unreal: Layout and Camera update and remove functions reimplemented and improvements [\#3116](https://github.com/pypeclub/OpenPype/pull/3116) -- Settings: Remove environment groups from settings [\#3115](https://github.com/pypeclub/OpenPype/pull/3115) -- TVPaint: Match renderlayer key with other hosts [\#3110](https://github.com/pypeclub/OpenPype/pull/3110) -- Ftrack: AssetVersion status on publish [\#3108](https://github.com/pypeclub/OpenPype/pull/3108) -- Tray publisher: Simple families from settings [\#3105](https://github.com/pypeclub/OpenPype/pull/3105) -- Local Settings UI: Overlay messages on save and reset [\#3104](https://github.com/pypeclub/OpenPype/pull/3104) -- General: Remove repos related logic [\#3087](https://github.com/pypeclub/OpenPype/pull/3087) -- Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080) -- Houdini: Fix FPS + outdated content pop-ups [\#3079](https://github.com/pypeclub/OpenPype/pull/3079) -- General: Add global log verbose arguments [\#3070](https://github.com/pypeclub/OpenPype/pull/3070) -- Flame: extract presets distribution [\#3063](https://github.com/pypeclub/OpenPype/pull/3063) -- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055) -- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983) -- Maya: Implement Hardware Renderer 2.0 support for Render Products [\#2611](https://github.com/pypeclub/OpenPype/pull/2611) - -**๐Ÿ› Bug fixes** - -- nuke: use framerange issue [\#3254](https://github.com/pypeclub/OpenPype/pull/3254) -- Ftrack: Chunk sizes for queries has minimal condition [\#3244](https://github.com/pypeclub/OpenPype/pull/3244) -- Maya: renderman displays needs to be filtered [\#3242](https://github.com/pypeclub/OpenPype/pull/3242) -- Ftrack: Validate that the user exists on ftrack [\#3237](https://github.com/pypeclub/OpenPype/pull/3237) -- Maya: Fix support for multiple resolutions [\#3236](https://github.com/pypeclub/OpenPype/pull/3236) -- TVPaint: Look for more groups than 12 [\#3228](https://github.com/pypeclub/OpenPype/pull/3228) -- Hiero: debugging frame range and other 3.10 [\#3222](https://github.com/pypeclub/OpenPype/pull/3222) -- Project Manager: Fix persistent editors on project change 
[\#3218](https://github.com/pypeclub/OpenPype/pull/3218) -- Deadline: instance data overwrite fix [\#3214](https://github.com/pypeclub/OpenPype/pull/3214) -- Ftrack: Push hierarchical attributes action works [\#3210](https://github.com/pypeclub/OpenPype/pull/3210) -- Standalone Publisher: Always create new representation for thumbnail [\#3203](https://github.com/pypeclub/OpenPype/pull/3203) -- Photoshop: skip collector when automatic testing [\#3202](https://github.com/pypeclub/OpenPype/pull/3202) -- Nuke: render/workfile version sync doesn't work on farm [\#3185](https://github.com/pypeclub/OpenPype/pull/3185) -- Ftrack: Review image only if there are no mp4 reviews [\#3183](https://github.com/pypeclub/OpenPype/pull/3183) -- Ftrack: Locations deepcopy issue [\#3177](https://github.com/pypeclub/OpenPype/pull/3177) -- General: Avoid creating multiple thumbnails [\#3176](https://github.com/pypeclub/OpenPype/pull/3176) -- General/Hiero: better clip duration calculation [\#3169](https://github.com/pypeclub/OpenPype/pull/3169) -- General: Oiio conversion for ffmpeg checks for invalid characters [\#3166](https://github.com/pypeclub/OpenPype/pull/3166) -- Fix for attaching render to subset [\#3164](https://github.com/pypeclub/OpenPype/pull/3164) -- Harmony: fixed missing task name in render instance [\#3163](https://github.com/pypeclub/OpenPype/pull/3163) -- Ftrack: Action delete old versions formatting works [\#3152](https://github.com/pypeclub/OpenPype/pull/3152) -- Deadline: fix the output directory [\#3144](https://github.com/pypeclub/OpenPype/pull/3144) -- General: New Session schema [\#3141](https://github.com/pypeclub/OpenPype/pull/3141) -- General: Missing version on headless mode crash properly [\#3136](https://github.com/pypeclub/OpenPype/pull/3136) -- TVPaint: Composite layers in reversed order [\#3135](https://github.com/pypeclub/OpenPype/pull/3135) -- Nuke: fixing default settings for workfile builder loaders [\#3120](https://github.com/pypeclub/OpenPype/pull/3120) -- Nuke: fix anatomy imageio regex default [\#3119](https://github.com/pypeclub/OpenPype/pull/3119) -- General: Python 3 compatibility in queries [\#3112](https://github.com/pypeclub/OpenPype/pull/3112) -- General: TemplateResult can be copied [\#3099](https://github.com/pypeclub/OpenPype/pull/3099) -- General: Collect loaded versions skips not existing representations [\#3095](https://github.com/pypeclub/OpenPype/pull/3095) -- RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091) -- Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089) -- Maya: Collect Render fix any render cameras check [\#3088](https://github.com/pypeclub/OpenPype/pull/3088) -- Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083) -- Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077) -- General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076) -- Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068) -- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060) -- Fix support for Renderman in Maya [\#3006](https://github.com/pypeclub/OpenPype/pull/3006) - -**๐Ÿ”€ Refactored code** - -- Avalon repo removed from Jobs workflow [\#3193](https://github.com/pypeclub/OpenPype/pull/3193) -- General: 
Remove remaining imports from avalon [\#3130](https://github.com/pypeclub/OpenPype/pull/3130) -- General: Move mongo db logic and remove avalon repository [\#3066](https://github.com/pypeclub/OpenPype/pull/3066) -- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009) - -**Merged pull requests:** - -- Harmony: message length in 21.1 [\#3257](https://github.com/pypeclub/OpenPype/pull/3257) -- Harmony: 21.1 fix [\#3249](https://github.com/pypeclub/OpenPype/pull/3249) -- Maya: added jpg to filter for Image Plane Loader [\#3223](https://github.com/pypeclub/OpenPype/pull/3223) -- Webpublisher: replace space by underscore in subset names [\#3160](https://github.com/pypeclub/OpenPype/pull/3160) -- StandalonePublisher: removed Extract Background plugins [\#3093](https://github.com/pypeclub/OpenPype/pull/3093) -- Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078) -- Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065) -- SiteSync: Download all workfile inputs [\#2966](https://github.com/pypeclub/OpenPype/pull/2966) -- Photoshop: New Publisher [\#2933](https://github.com/pypeclub/OpenPype/pull/2933) -- Bump pillow from 9.0.0 to 9.0.1 [\#2880](https://github.com/pypeclub/OpenPype/pull/2880) -- AfterEffects: Allow configuration of default variant via Settings [\#2856](https://github.com/pypeclub/OpenPype/pull/2856) - -## [3.9.8](https://github.com/pypeclub/OpenPype/tree/3.9.8) (2022-05-19) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.7...3.9.8) - -## [3.9.7](https://github.com/pypeclub/OpenPype/tree/3.9.7) (2022-05-11) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.6...3.9.7) - -## [3.9.6](https://github.com/pypeclub/OpenPype/tree/3.9.6) (2022-05-03) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.5...3.9.6) - -## [3.9.5](https://github.com/pypeclub/OpenPype/tree/3.9.5) (2022-04-25) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...3.9.5) - -## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.3...3.9.4) - -### 📖 Documentation - -- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062) -- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035) -- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974) - -**🆕 New features** - -- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045) -- Flame: Flare integration preparation [\#2928](https://github.com/pypeclub/OpenPype/pull/2928) - -**🚀 Enhancements** - -- TVPaint: Added init file for worker to trigger missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053) -- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036) -- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008) - -**🐛 Bug fixes** - -- GitHub: Updated push-protected action in github workflow [\#3064](https://github.com/pypeclub/OpenPype/pull/3064) -- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061) -- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059) -- General: Extract Review
handle invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050) -- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049) -- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048) -- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044) -- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043) -- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042) -- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041) -- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040) -- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018) -- Maya: invalid review flag on rendered AOVs [\#2915](https://github.com/pypeclub/OpenPype/pull/2915) - -**Merged pull requests:** - -- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051) -- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987) - -## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.2...3.9.3) - -### ๐Ÿ“– Documentation - -- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) -- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) -- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) -- Documentation: New publisher develop docs [\#2896](https://github.com/pypeclub/OpenPype/pull/2896) - -**๐Ÿ†• New features** - -- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) -- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) -- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988) -- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) -- Multiverse: Initial Support [\#2908](https://github.com/pypeclub/OpenPype/pull/2908) - -**๐Ÿš€ Enhancements** - -- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) -- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) -- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) -- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) -- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) -- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) -- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) -- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) -- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) -- General: `METADATA_KEYS` constant as `frozenset` for optimal immutable lookup 
[\#2980](https://github.com/pypeclub/OpenPype/pull/2980) -- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) -- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) -- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945) -- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943) -- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942) -- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937) -- Workfiles: Open published workfiles [\#2925](https://github.com/pypeclub/OpenPype/pull/2925) -- General: Default modules loaded dynamically [\#2923](https://github.com/pypeclub/OpenPype/pull/2923) -- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919) -- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916) -- Nuke: Add no-audio Tag [\#2911](https://github.com/pypeclub/OpenPype/pull/2911) -- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906) -- Nuke: improving readability [\#2903](https://github.com/pypeclub/OpenPype/pull/2903) -- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901) - -**🐛 Bug fixes** - -- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029) -- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) -- Settings UI: Version column can be extended so versions are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) -- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) -- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024) -- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) -- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022) -- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) -- Ftrack: multiple reviewable components [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) -- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) -- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) -- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) -- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) -- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) -- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) -- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) -- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) -- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986) -- Settings UI: Fix version
completer on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981) -- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969) -- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965) -- General: OIIO conversion for ffmpeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) -- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) -- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) -- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949) -- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948) -- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947) -- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944) -- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941) -- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939) -- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936) -- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934) -- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932) -- General: Don't print log record on OSError [\#2926](https://github.com/pypeclub/OpenPype/pull/2926) -- Hiero: Fix import of 'register\_event\_callback' [\#2924](https://github.com/pypeclub/OpenPype/pull/2924) -- Flame: centos related debugging [\#2922](https://github.com/pypeclub/OpenPype/pull/2922) -- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905) -- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875) - -**🔀 Refactored code** - -- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935) -- General: Move Attribute Definitions from pipeline [\#2931](https://github.com/pypeclub/OpenPype/pull/2931) -- General: Removed silo references and terminal splash [\#2927](https://github.com/pypeclub/OpenPype/pull/2927) -- General: Move pipeline constants to OpenPype [\#2918](https://github.com/pypeclub/OpenPype/pull/2918) -- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914) -- General: Move remaining plugins from avalon [\#2912](https://github.com/pypeclub/OpenPype/pull/2912) - -**Merged pull requests:** - -- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) -- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) -- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) -- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) -- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) - -## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) - -[Full
Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...3.9.2) - -## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1) - -**🚀 Enhancements** - -- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) -- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) -- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) -- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879) -- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869) - -**🐛 Bug fixes** - -- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) -- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) -- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) -- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) -- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891) -- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885) -- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884) -- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874) -- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826) -- Flame Babypublisher optimization [\#2806](https://github.com/pypeclub/OpenPype/pull/2806) -- hotfix: OIIO tool path - add extension on windows [\#2618](https://github.com/pypeclub/OpenPype/pull/2618) - -**🔀 Refactored code** - -- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) -- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886) - -## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...3.9.0) - -**Deprecated:** - -- Houdini: Remove unused code [\#2779](https://github.com/pypeclub/OpenPype/pull/2779) -- Loader: Remove default family states for hosts from code [\#2706](https://github.com/pypeclub/OpenPype/pull/2706) -- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845) - -### 📖 Documentation - -- Documentation: fixed broken links [\#2799](https://github.com/pypeclub/OpenPype/pull/2799) -- Documentation: broken link fix [\#2785](https://github.com/pypeclub/OpenPype/pull/2785) -- Documentation: link fixes [\#2772](https://github.com/pypeclub/OpenPype/pull/2772) -- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760) -- Various testing updates [\#2726](https://github.com/pypeclub/OpenPype/pull/2726) -- documentation: add example to `repack-version` command [\#2669](https://github.com/pypeclub/OpenPype/pull/2669) -- Update docusaurus [\#2639](https://github.com/pypeclub/OpenPype/pull/2639) -- Documentation: Fixed relative links [\#2621](https://github.com/pypeclub/OpenPype/pull/2621) -- Documentation: Change Photoshop & AfterEffects plugin path
[\#2878](https://github.com/pypeclub/OpenPype/pull/2878) - -**๐Ÿ†• New features** - -- Flame: loading clips to reels [\#2622](https://github.com/pypeclub/OpenPype/pull/2622) -- General: Store settings by OpenPype version [\#2570](https://github.com/pypeclub/OpenPype/pull/2570) - -**๐Ÿš€ Enhancements** - -- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841) -- General: Set context environments for non host applications [\#2803](https://github.com/pypeclub/OpenPype/pull/2803) -- Houdini: Remove duplicate ValidateOutputNode plug-in [\#2780](https://github.com/pypeclub/OpenPype/pull/2780) -- Tray publisher: New Tray Publisher host \(beta\) [\#2778](https://github.com/pypeclub/OpenPype/pull/2778) -- Slack: Added regex for filtering on subset names [\#2775](https://github.com/pypeclub/OpenPype/pull/2775) -- Houdini: Implement Reset Frame Range [\#2770](https://github.com/pypeclub/OpenPype/pull/2770) -- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758) -- Flame: use Shot Name on segment for asset name [\#2751](https://github.com/pypeclub/OpenPype/pull/2751) -- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746) -- Work Files: Preserve subversion comment of current filename by default [\#2734](https://github.com/pypeclub/OpenPype/pull/2734) -- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733) -- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732) -- Project Manager: Disable add task, add asset and save button when not in a project [\#2727](https://github.com/pypeclub/OpenPype/pull/2727) -- dropbox handle big file [\#2718](https://github.com/pypeclub/OpenPype/pull/2718) -- Fusion Move PR: Minor tweaks to Fusion integration [\#2716](https://github.com/pypeclub/OpenPype/pull/2716) -- RoyalRender: Minor enhancements [\#2700](https://github.com/pypeclub/OpenPype/pull/2700) -- Nuke: prerender with review knob [\#2691](https://github.com/pypeclub/OpenPype/pull/2691) -- Maya configurable unit validator [\#2680](https://github.com/pypeclub/OpenPype/pull/2680) -- General: Add settings for CleanUpFarm and disable the plugin by default [\#2679](https://github.com/pypeclub/OpenPype/pull/2679) -- Project Manager: Only allow scroll wheel edits when spinbox is active [\#2678](https://github.com/pypeclub/OpenPype/pull/2678) -- Ftrack: Sync description to assets [\#2670](https://github.com/pypeclub/OpenPype/pull/2670) -- Houdini: Moved to OpenPype [\#2658](https://github.com/pypeclub/OpenPype/pull/2658) -- Maya: Move implementation to OpenPype [\#2649](https://github.com/pypeclub/OpenPype/pull/2649) -- General: FFmpeg conversion also check attribute string length [\#2635](https://github.com/pypeclub/OpenPype/pull/2635) -- Houdini: Load Arnold .ass procedurals into Houdini [\#2606](https://github.com/pypeclub/OpenPype/pull/2606) -- Deadline: Simplify GlobalJobPreLoad logic [\#2605](https://github.com/pypeclub/OpenPype/pull/2605) -- Houdini: Implement Arnold .ass standin extraction from Houdini \(also support .ass.gz\) [\#2603](https://github.com/pypeclub/OpenPype/pull/2603) -- New Publisher: New features and preparations for new standalone publisher [\#2556](https://github.com/pypeclub/OpenPype/pull/2556) -- Fix Maya 2022 Python 3 compatibility [\#2445](https://github.com/pypeclub/OpenPype/pull/2445) -- TVPaint: Use new 
publisher exceptions in validators [\#2435](https://github.com/pypeclub/OpenPype/pull/2435) -- Harmony: Added new style validations for New Publisher [\#2434](https://github.com/pypeclub/OpenPype/pull/2434) -- Aftereffects: New style validations for New publisher [\#2430](https://github.com/pypeclub/OpenPype/pull/2430) -- Farm publishing: New cleanup plugin for Maya renders on farm [\#2390](https://github.com/pypeclub/OpenPype/pull/2390) -- General: Subset name filtering in ExtractReview outputs [\#2872](https://github.com/pypeclub/OpenPype/pull/2872) -- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867) -- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863) -- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859) -- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837) -- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836) -- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822) -- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817) -- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812) -- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811) -- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805) -- Houdini: Move Houdini Save Current File to beginning of ExtractorOrder [\#2747](https://github.com/pypeclub/OpenPype/pull/2747) -- Global: adding studio name/code to anatomy template formatting data [\#2630](https://github.com/pypeclub/OpenPype/pull/2630) - -**🐛 Bug fixes** - -- Settings UI: Search case sensitivity [\#2810](https://github.com/pypeclub/OpenPype/pull/2810) -- resolve: fixing fusion module loading [\#2802](https://github.com/pypeclub/OpenPype/pull/2802) -- Ftrack: Unset task ids from asset versions before tasks are removed [\#2800](https://github.com/pypeclub/OpenPype/pull/2800) -- Slack: fail gracefully if slack exception [\#2798](https://github.com/pypeclub/OpenPype/pull/2798) -- Flame: Fix version string in default settings [\#2783](https://github.com/pypeclub/OpenPype/pull/2783) -- After Effects: Fix typo in name `afftereffects` -\> `aftereffects` [\#2768](https://github.com/pypeclub/OpenPype/pull/2768) -- Houdini: Fix open last workfile [\#2767](https://github.com/pypeclub/OpenPype/pull/2767) -- Avoid renaming udim indexes [\#2765](https://github.com/pypeclub/OpenPype/pull/2765) -- Maya: Fix `unique_namespace` when in a namespace that is empty [\#2759](https://github.com/pypeclub/OpenPype/pull/2759) -- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757) -- Harmony: Rendering in Deadline didn't work in other machines than submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754) -- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748) -- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745) -- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744) -- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741) -- Loader UI: Multiple
asset selection and underline colors fixed [\#2731](https://github.com/pypeclub/OpenPype/pull/2731) -- General: Fix loading of unused chars in xml format [\#2729](https://github.com/pypeclub/OpenPype/pull/2729) -- TVPaint: Set objectName with members [\#2725](https://github.com/pypeclub/OpenPype/pull/2725) -- General: Don't use 'objectName' from loaded references [\#2715](https://github.com/pypeclub/OpenPype/pull/2715) -- Settings: Studio Project anatomy is queried using right keys [\#2711](https://github.com/pypeclub/OpenPype/pull/2711) -- Local Settings: Additional applications don't break UI [\#2710](https://github.com/pypeclub/OpenPype/pull/2710) -- Maya: Remove some unused code [\#2709](https://github.com/pypeclub/OpenPype/pull/2709) -- Houdini: Fix refactor of Houdini host move for CreateArnoldAss [\#2704](https://github.com/pypeclub/OpenPype/pull/2704) -- LookAssigner: Fix imports after moving code to OpenPype repository [\#2701](https://github.com/pypeclub/OpenPype/pull/2701) -- Multiple hosts: unify menu style across hosts [\#2693](https://github.com/pypeclub/OpenPype/pull/2693) -- Maya Redshift fixes [\#2692](https://github.com/pypeclub/OpenPype/pull/2692) -- Maya: fix fps validation popup [\#2685](https://github.com/pypeclub/OpenPype/pull/2685) -- Houdini: Explicitly collect correct frame name even in case of single frame render when `frameStart` is provided [\#2676](https://github.com/pypeclub/OpenPype/pull/2676) -- hiero: fix effect collector name and order [\#2673](https://github.com/pypeclub/OpenPype/pull/2673) -- Maya: Fix menu callbacks [\#2671](https://github.com/pypeclub/OpenPype/pull/2671) -- hiero: removing obsolete unsupported plugin [\#2667](https://github.com/pypeclub/OpenPype/pull/2667) -- Launcher: Fix access to 'data' attribute on actions [\#2659](https://github.com/pypeclub/OpenPype/pull/2659) -- Maya `vrscene` loader fixes [\#2633](https://github.com/pypeclub/OpenPype/pull/2633) -- Houdini: fix usd family in loader and integrators [\#2631](https://github.com/pypeclub/OpenPype/pull/2631) -- Maya: Add only reference node to look family container like with other families [\#2508](https://github.com/pypeclub/OpenPype/pull/2508) -- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877) -- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868) -- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866) -- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864) -- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860) -- WebPublisher: Video file was published with one too many frames [\#2858](https://github.com/pypeclub/OpenPype/pull/2858) -- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857) -- General: Fix getattr callback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855) -- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853) -- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852) -- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851) -- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847) -- Nuke: fix multiple baking profile farm publishing
[\#2842](https://github.com/pypeclub/OpenPype/pull/2842) -- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840) -- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832) -- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828) -- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827) -- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825) -- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824) -- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821) -- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820) -- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819) -- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818) -- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816) - -**๐Ÿ”€ Refactored code** - -- Ftrack: Moved module one hierarchy level higher [\#2792](https://github.com/pypeclub/OpenPype/pull/2792) -- SyncServer: Moved module one hierarchy level higher [\#2791](https://github.com/pypeclub/OpenPype/pull/2791) -- Royal render: Move module one hierarchy level higher [\#2790](https://github.com/pypeclub/OpenPype/pull/2790) -- Deadline: Move module one hierarchy level higher [\#2789](https://github.com/pypeclub/OpenPype/pull/2789) -- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876) -- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854) -- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848) -- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846) -- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839) -- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829) -- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) -- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766) - -**Merged pull requests:** - -- Fusion: Moved implementation into OpenPype [\#2713](https://github.com/pypeclub/OpenPype/pull/2713) -- TVPaint: Plugin build without dependencies [\#2705](https://github.com/pypeclub/OpenPype/pull/2705) -- Webpublisher: Photoshop create a beauty png [\#2689](https://github.com/pypeclub/OpenPype/pull/2689) -- Ftrack: Hierarchical attributes are queried properly [\#2682](https://github.com/pypeclub/OpenPype/pull/2682) -- Maya: Add Validate Frame Range settings [\#2661](https://github.com/pypeclub/OpenPype/pull/2661) -- Harmony: move to Openpype [\#2657](https://github.com/pypeclub/OpenPype/pull/2657) -- Maya: cleanup duplicate rendersetup code [\#2642](https://github.com/pypeclub/OpenPype/pull/2642) -- Deadline: Be able to pass Mongo url to job [\#2616](https://github.com/pypeclub/OpenPype/pull/2616) - -## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.1...3.8.2) - -### ๐Ÿ“– Documentation - -- 
Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617) - -**๐Ÿš€ Enhancements** - -- TVPaint: Image loaders also work on review family [\#2638](https://github.com/pypeclub/OpenPype/pull/2638) -- General: Project backup tools [\#2629](https://github.com/pypeclub/OpenPype/pull/2629) -- nuke: adding clear button to write nodes [\#2627](https://github.com/pypeclub/OpenPype/pull/2627) -- Ftrack: Family to Asset type mapping is in settings [\#2602](https://github.com/pypeclub/OpenPype/pull/2602) -- Nuke: load color space from representation data [\#2576](https://github.com/pypeclub/OpenPype/pull/2576) - -**๐Ÿ› Bug fixes** - -- Fix pulling of cx\_freeze 6.10 [\#2628](https://github.com/pypeclub/OpenPype/pull/2628) -- Global: fix broken otio review extractor [\#2590](https://github.com/pypeclub/OpenPype/pull/2590) - -**Merged pull requests:** - -- WebPublisher: fix instance duplicates [\#2641](https://github.com/pypeclub/OpenPype/pull/2641) -- Fix - safer pulling of task name for webpublishing from PS [\#2613](https://github.com/pypeclub/OpenPype/pull/2613) - -## [3.8.1](https://github.com/pypeclub/OpenPype/tree/3.8.1) (2022-02-01) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.0...3.8.1) - -**๐Ÿš€ Enhancements** - -- Webpublisher: Thumbnail extractor [\#2600](https://github.com/pypeclub/OpenPype/pull/2600) -- Loader: Allow to toggle default family filters between "include" or "exclude" filtering [\#2541](https://github.com/pypeclub/OpenPype/pull/2541) -- Launcher: Added context menu to to skip opening last workfile [\#2536](https://github.com/pypeclub/OpenPype/pull/2536) -- Unreal: JSON Layout Loading support [\#2066](https://github.com/pypeclub/OpenPype/pull/2066) - -**๐Ÿ› Bug fixes** - -- Release/3.8.0 [\#2619](https://github.com/pypeclub/OpenPype/pull/2619) -- Settings: Enum does not store empty string if has single item to select [\#2615](https://github.com/pypeclub/OpenPype/pull/2615) -- switch distutils to sysconfig for `get_platform()` [\#2594](https://github.com/pypeclub/OpenPype/pull/2594) -- Fix poetry index and speedcopy update [\#2589](https://github.com/pypeclub/OpenPype/pull/2589) -- Webpublisher: Fix - subset names from processed .psd used wrong value for task [\#2586](https://github.com/pypeclub/OpenPype/pull/2586) -- `vrscene` creator Deadline webservice URL handling [\#2580](https://github.com/pypeclub/OpenPype/pull/2580) -- global: track name was failing if duplicated root word in name [\#2568](https://github.com/pypeclub/OpenPype/pull/2568) -- Validate Maya Rig produces no cycle errors [\#2484](https://github.com/pypeclub/OpenPype/pull/2484) - -**Merged pull requests:** - -- Bump pillow from 8.4.0 to 9.0.0 [\#2595](https://github.com/pypeclub/OpenPype/pull/2595) -- Webpublisher: Skip version collect [\#2591](https://github.com/pypeclub/OpenPype/pull/2591) -- build\(deps\): bump pillow from 8.4.0 to 9.0.0 [\#2523](https://github.com/pypeclub/OpenPype/pull/2523) - -## [3.8.0](https://github.com/pypeclub/OpenPype/tree/3.8.0) (2022-01-24) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.7.0...3.8.0) - -### ๐Ÿ“– Documentation - -- Variable in docs renamed to proper name [\#2546](https://github.com/pypeclub/OpenPype/pull/2546) - -**๐Ÿ†• New features** - -- Flame: extracting segments with trans-coding [\#2547](https://github.com/pypeclub/OpenPype/pull/2547) -- Maya : V-Ray Proxy - load all ABC files via proxy [\#2544](https://github.com/pypeclub/OpenPype/pull/2544) -- Maya to Unreal: Extended 
static mesh workflow [\#2537](https://github.com/pypeclub/OpenPype/pull/2537) -- Flame: collecting publishable instances [\#2519](https://github.com/pypeclub/OpenPype/pull/2519) -- Flame: create publishable clips [\#2495](https://github.com/pypeclub/OpenPype/pull/2495) -- Flame: OpenTimelineIO Export Modul [\#2398](https://github.com/pypeclub/OpenPype/pull/2398) - -**๐Ÿš€ Enhancements** - -- Webpublisher: Moved error at the beginning of the log [\#2559](https://github.com/pypeclub/OpenPype/pull/2559) -- Ftrack: Use ApplicationManager to get DJV path [\#2558](https://github.com/pypeclub/OpenPype/pull/2558) -- Webpublisher: Added endpoint to reprocess batch through UI [\#2555](https://github.com/pypeclub/OpenPype/pull/2555) -- Settings: PathInput strip passed string [\#2550](https://github.com/pypeclub/OpenPype/pull/2550) -- Global: Exctract Review anatomy fill data with output name [\#2548](https://github.com/pypeclub/OpenPype/pull/2548) -- Cosmetics: Clean up some cosmetics / typos [\#2542](https://github.com/pypeclub/OpenPype/pull/2542) -- General: Validate if current process OpenPype version is requested version [\#2529](https://github.com/pypeclub/OpenPype/pull/2529) -- General: Be able to use anatomy data in ffmpeg output arguments [\#2525](https://github.com/pypeclub/OpenPype/pull/2525) -- Expose toggle publish plug-in settings for Maya Look Shading Engine Naming [\#2521](https://github.com/pypeclub/OpenPype/pull/2521) -- Photoshop: Move implementation to OpenPype [\#2510](https://github.com/pypeclub/OpenPype/pull/2510) -- TimersManager: Move module one hierarchy higher [\#2501](https://github.com/pypeclub/OpenPype/pull/2501) -- Slack: notifications are sent with Openpype logo and bot name [\#2499](https://github.com/pypeclub/OpenPype/pull/2499) -- Slack: Add review to notification message [\#2498](https://github.com/pypeclub/OpenPype/pull/2498) -- Ftrack: Event handlers settings [\#2496](https://github.com/pypeclub/OpenPype/pull/2496) -- Tools: Fix style and modality of errors in loader and creator [\#2489](https://github.com/pypeclub/OpenPype/pull/2489) -- Maya: Collect 'fps' animation data only for "review" instances [\#2486](https://github.com/pypeclub/OpenPype/pull/2486) -- Project Manager: Remove project button cleanup [\#2482](https://github.com/pypeclub/OpenPype/pull/2482) -- Tools: Be able to change models of tasks and assets widgets [\#2475](https://github.com/pypeclub/OpenPype/pull/2475) -- Publish pype: Reduce publish process defering [\#2464](https://github.com/pypeclub/OpenPype/pull/2464) -- Maya: Improve speed of Collect History logic [\#2460](https://github.com/pypeclub/OpenPype/pull/2460) -- Maya: Validate Rig Controllers - fix Error: in script editor [\#2459](https://github.com/pypeclub/OpenPype/pull/2459) -- Maya: Validate NGONs simplify and speed-up [\#2458](https://github.com/pypeclub/OpenPype/pull/2458) -- Maya: Optimize Validate Locked Normals speed for dense polymeshes [\#2457](https://github.com/pypeclub/OpenPype/pull/2457) -- Maya: Refactor missing \_get\_reference\_node method [\#2455](https://github.com/pypeclub/OpenPype/pull/2455) -- Houdini: Remove broken unique name counter [\#2450](https://github.com/pypeclub/OpenPype/pull/2450) -- Maya: Improve lib.polyConstraint performance when Select tool is not the active tool context [\#2447](https://github.com/pypeclub/OpenPype/pull/2447) -- General: Validate third party before build [\#2425](https://github.com/pypeclub/OpenPype/pull/2425) -- Maya : add option to not group reference in ReferenceLoader 
[\#2383](https://github.com/pypeclub/OpenPype/pull/2383) - -**๐Ÿ› Bug fixes** - -- AfterEffects: Fix - removed obsolete import [\#2577](https://github.com/pypeclub/OpenPype/pull/2577) -- General: OpenPype version updates [\#2575](https://github.com/pypeclub/OpenPype/pull/2575) -- Ftrack: Delete action revision [\#2563](https://github.com/pypeclub/OpenPype/pull/2563) -- Webpublisher: ftrack shows incorrect user names [\#2560](https://github.com/pypeclub/OpenPype/pull/2560) -- General: Do not validate version if build does not support it [\#2557](https://github.com/pypeclub/OpenPype/pull/2557) -- Webpublisher: Fixed progress reporting [\#2553](https://github.com/pypeclub/OpenPype/pull/2553) -- Fix Maya AssProxyLoader version switch [\#2551](https://github.com/pypeclub/OpenPype/pull/2551) -- General: Fix install thread in igniter [\#2549](https://github.com/pypeclub/OpenPype/pull/2549) -- Houdini: vdbcache family preserve frame numbers on publish integration + enable validate version for Houdini [\#2535](https://github.com/pypeclub/OpenPype/pull/2535) -- Maya: Fix Load VDB to V-Ray [\#2533](https://github.com/pypeclub/OpenPype/pull/2533) -- Maya: ReferenceLoader fix not unique group name error for attach to root [\#2532](https://github.com/pypeclub/OpenPype/pull/2532) -- Maya: namespaced context go back to original namespace when started from inside a namespace [\#2531](https://github.com/pypeclub/OpenPype/pull/2531) -- Fix create zip tool - path argument [\#2522](https://github.com/pypeclub/OpenPype/pull/2522) -- Maya: Fix Extract Look with space in names [\#2518](https://github.com/pypeclub/OpenPype/pull/2518) -- Fix published frame content for sequence starting with 0 [\#2513](https://github.com/pypeclub/OpenPype/pull/2513) -- Maya: reset empty string attributes correctly to "" instead of "None" [\#2506](https://github.com/pypeclub/OpenPype/pull/2506) -- Improve FusionPreLaunch hook errors [\#2505](https://github.com/pypeclub/OpenPype/pull/2505) -- General: Settings work if OpenPypeVersion is available [\#2494](https://github.com/pypeclub/OpenPype/pull/2494) -- General: PYTHONPATH may break OpenPype dependencies [\#2493](https://github.com/pypeclub/OpenPype/pull/2493) -- General: Modules import function output fix [\#2492](https://github.com/pypeclub/OpenPype/pull/2492) -- AE: fix hiding of alert window below Publish [\#2491](https://github.com/pypeclub/OpenPype/pull/2491) -- Workfiles tool: Files widget show files on first show [\#2488](https://github.com/pypeclub/OpenPype/pull/2488) -- General: Custom template paths filter fix [\#2483](https://github.com/pypeclub/OpenPype/pull/2483) -- Loader: Remove always on top flag in tray [\#2480](https://github.com/pypeclub/OpenPype/pull/2480) -- General: Anatomy does not return root envs as unicode [\#2465](https://github.com/pypeclub/OpenPype/pull/2465) -- Maya: Validate Shape Zero do not keep fixed geometry vertices selected/active after repair [\#2456](https://github.com/pypeclub/OpenPype/pull/2456) - -**Merged pull requests:** - -- AfterEffects: Move implementation to OpenPype [\#2543](https://github.com/pypeclub/OpenPype/pull/2543) -- Maya: Remove Maya Look Assigner check on startup [\#2540](https://github.com/pypeclub/OpenPype/pull/2540) -- build\(deps\): bump shelljs from 0.8.4 to 0.8.5 in /website [\#2538](https://github.com/pypeclub/OpenPype/pull/2538) -- build\(deps\): bump follow-redirects from 1.14.4 to 1.14.7 in /website [\#2534](https://github.com/pypeclub/OpenPype/pull/2534) -- Nuke: Merge avalon's implementation into OpenPype 
[\#2514](https://github.com/pypeclub/OpenPype/pull/2514) -- Maya: Vray fix proxies look assignment [\#2392](https://github.com/pypeclub/OpenPype/pull/2392) -- Bump algoliasearch-helper from 3.4.4 to 3.6.2 in /website [\#2297](https://github.com/pypeclub/OpenPype/pull/2297) - -## [3.7.0](https://github.com/pypeclub/OpenPype/tree/3.7.0) (2022-01-04) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.4...3.7.0) - -**Deprecated:** - -- General: Default modules hierarchy n2 [\#2368](https://github.com/pypeclub/OpenPype/pull/2368) - -### ๐Ÿ“– Documentation - -- docs\[website\]: Add Ellipse Studio \(logo\) as an OpenPype contributor [\#2324](https://github.com/pypeclub/OpenPype/pull/2324) - -**๐Ÿ†• New features** - -- Settings UI use OpenPype styles [\#2296](https://github.com/pypeclub/OpenPype/pull/2296) -- Store typed version dependencies for workfiles [\#2192](https://github.com/pypeclub/OpenPype/pull/2192) -- OpenPypeV3: add key task type, task shortname and user to path templating construction [\#2157](https://github.com/pypeclub/OpenPype/pull/2157) -- Nuke: Alembic model workflow [\#2140](https://github.com/pypeclub/OpenPype/pull/2140) -- TVPaint: Load workfile from published. [\#1980](https://github.com/pypeclub/OpenPype/pull/1980) - -**๐Ÿš€ Enhancements** - -- General: Workdir extra folders [\#2462](https://github.com/pypeclub/OpenPype/pull/2462) -- Photoshop: New style validations for New publisher [\#2429](https://github.com/pypeclub/OpenPype/pull/2429) -- General: Environment variables groups [\#2424](https://github.com/pypeclub/OpenPype/pull/2424) -- Unreal: Dynamic menu created in Python [\#2422](https://github.com/pypeclub/OpenPype/pull/2422) -- Settings UI: Hyperlinks to settings [\#2420](https://github.com/pypeclub/OpenPype/pull/2420) -- Modules: JobQueue module moved one hierarchy level higher [\#2419](https://github.com/pypeclub/OpenPype/pull/2419) -- TimersManager: Start timer post launch hook [\#2418](https://github.com/pypeclub/OpenPype/pull/2418) -- General: Run applications as separate processes under linux [\#2408](https://github.com/pypeclub/OpenPype/pull/2408) -- Ftrack: Check existence of object type on recreation [\#2404](https://github.com/pypeclub/OpenPype/pull/2404) -- Enhancement: Global cleanup plugin that explicitly remove paths from context [\#2402](https://github.com/pypeclub/OpenPype/pull/2402) -- General: MongoDB ability to specify replica set groups [\#2401](https://github.com/pypeclub/OpenPype/pull/2401) -- Flame: moving `utility_scripts` to api folder also with `scripts` [\#2385](https://github.com/pypeclub/OpenPype/pull/2385) -- Centos 7 dependency compatibility [\#2384](https://github.com/pypeclub/OpenPype/pull/2384) -- Enhancement: Settings: Use project settings values from another project [\#2382](https://github.com/pypeclub/OpenPype/pull/2382) -- Blender 3: Support auto install for new blender version [\#2377](https://github.com/pypeclub/OpenPype/pull/2377) -- Maya add render image path to settings [\#2375](https://github.com/pypeclub/OpenPype/pull/2375) -- Settings: Webpublisher in hosts enum [\#2367](https://github.com/pypeclub/OpenPype/pull/2367) -- Hiero: python3 compatibility [\#2365](https://github.com/pypeclub/OpenPype/pull/2365) -- Burnins: Be able recognize mxf OPAtom format [\#2361](https://github.com/pypeclub/OpenPype/pull/2361) -- Maya: Add is\_static\_image\_plane and is\_in\_all\_views option in imagePlaneLoader [\#2356](https://github.com/pypeclub/OpenPype/pull/2356) -- Local settings: Copyable studio paths 
[\#2349](https://github.com/pypeclub/OpenPype/pull/2349) -- Assets Widget: Clear model on project change [\#2345](https://github.com/pypeclub/OpenPype/pull/2345) -- General: OpenPype default modules hierarchy [\#2338](https://github.com/pypeclub/OpenPype/pull/2338) -- TVPaint: Move implementation to OpenPype [\#2336](https://github.com/pypeclub/OpenPype/pull/2336) -- General: FFprobe error exception contain original error message [\#2328](https://github.com/pypeclub/OpenPype/pull/2328) -- Resolve: Add experimental button to menu [\#2325](https://github.com/pypeclub/OpenPype/pull/2325) -- Hiero: Add experimental tools action [\#2323](https://github.com/pypeclub/OpenPype/pull/2323) -- Input links: Cleanup and unification of differences [\#2322](https://github.com/pypeclub/OpenPype/pull/2322) -- General: Don't validate vendor bin with executing them [\#2317](https://github.com/pypeclub/OpenPype/pull/2317) -- General: Multilayer EXRs support [\#2315](https://github.com/pypeclub/OpenPype/pull/2315) -- General: Run process log stderr as info log level [\#2309](https://github.com/pypeclub/OpenPype/pull/2309) -- General: Reduce vendor imports [\#2305](https://github.com/pypeclub/OpenPype/pull/2305) -- Tools: Cleanup of unused classes [\#2304](https://github.com/pypeclub/OpenPype/pull/2304) -- Project Manager: Added ability to delete project [\#2298](https://github.com/pypeclub/OpenPype/pull/2298) -- Ftrack: Synchronize input links [\#2287](https://github.com/pypeclub/OpenPype/pull/2287) -- StandalonePublisher: Remove unused plugin ExtractHarmonyZip [\#2277](https://github.com/pypeclub/OpenPype/pull/2277) -- Ftrack: Support multiple reviews [\#2271](https://github.com/pypeclub/OpenPype/pull/2271) -- Ftrack: Remove unused clean component plugin [\#2269](https://github.com/pypeclub/OpenPype/pull/2269) -- Royal Render: Support for rr channels in separate dirs [\#2268](https://github.com/pypeclub/OpenPype/pull/2268) -- Houdini: Add experimental tools action [\#2267](https://github.com/pypeclub/OpenPype/pull/2267) -- Nuke: extract baked review videos presets [\#2248](https://github.com/pypeclub/OpenPype/pull/2248) -- TVPaint: Workers rendering [\#2209](https://github.com/pypeclub/OpenPype/pull/2209) -- OpenPypeV3: Add key parent asset to path templating construction [\#2186](https://github.com/pypeclub/OpenPype/pull/2186) - -**๐Ÿ› Bug fixes** - -- TVPaint: Create render layer dialog is in front [\#2471](https://github.com/pypeclub/OpenPype/pull/2471) -- Short Pyblish plugin path [\#2428](https://github.com/pypeclub/OpenPype/pull/2428) -- PS: Introduced settings for invalid characters to use in ValidateNaming plugin [\#2417](https://github.com/pypeclub/OpenPype/pull/2417) -- Settings UI: Breadcrumbs path does not create new entities [\#2416](https://github.com/pypeclub/OpenPype/pull/2416) -- AfterEffects: Variant 2022 is in defaults but missing in schemas [\#2412](https://github.com/pypeclub/OpenPype/pull/2412) -- Nuke: baking representations was not additive [\#2406](https://github.com/pypeclub/OpenPype/pull/2406) -- General: Fix access to environments from default settings [\#2403](https://github.com/pypeclub/OpenPype/pull/2403) -- Fix: Placeholder Input color set fix [\#2399](https://github.com/pypeclub/OpenPype/pull/2399) -- Settings: Fix state change of wrapper label [\#2396](https://github.com/pypeclub/OpenPype/pull/2396) -- Flame: fix ftrack publisher [\#2381](https://github.com/pypeclub/OpenPype/pull/2381) -- hiero: solve custom ocio path [\#2379](https://github.com/pypeclub/OpenPype/pull/2379) 
-- hiero: fix workio and flatten [\#2378](https://github.com/pypeclub/OpenPype/pull/2378) -- Nuke: fixing menu re-drawing during context change [\#2374](https://github.com/pypeclub/OpenPype/pull/2374) -- Webpublisher: Fix assignment of families of TVpaint instances [\#2373](https://github.com/pypeclub/OpenPype/pull/2373) -- Nuke: fixing node name based on switched asset name [\#2369](https://github.com/pypeclub/OpenPype/pull/2369) -- JobQueue: Fix loading of settings [\#2362](https://github.com/pypeclub/OpenPype/pull/2362) -- Tools: Placeholder color [\#2359](https://github.com/pypeclub/OpenPype/pull/2359) -- Launcher: Minimize button on MacOs [\#2355](https://github.com/pypeclub/OpenPype/pull/2355) -- StandalonePublisher: Fix import of constant [\#2354](https://github.com/pypeclub/OpenPype/pull/2354) -- Houdini: Fix HDA creation [\#2350](https://github.com/pypeclub/OpenPype/pull/2350) -- Adobe products show issue [\#2347](https://github.com/pypeclub/OpenPype/pull/2347) -- Maya Look Assigner: Fix Python 3 compatibility [\#2343](https://github.com/pypeclub/OpenPype/pull/2343) -- Remove wrongly used host for hook [\#2342](https://github.com/pypeclub/OpenPype/pull/2342) -- Tools: Use Qt context on tools show [\#2340](https://github.com/pypeclub/OpenPype/pull/2340) -- Flame: Fix default argument value in custom dictionary [\#2339](https://github.com/pypeclub/OpenPype/pull/2339) -- Timers Manager: Disable auto stop timer on linux platform [\#2334](https://github.com/pypeclub/OpenPype/pull/2334) -- nuke: bake preset single input exception [\#2331](https://github.com/pypeclub/OpenPype/pull/2331) -- Hiero: fixing multiple templates at a hierarchy parent [\#2330](https://github.com/pypeclub/OpenPype/pull/2330) -- Fix - provider icons are pulled from a folder [\#2326](https://github.com/pypeclub/OpenPype/pull/2326) -- InputLinks: Typo in "inputLinks" key [\#2314](https://github.com/pypeclub/OpenPype/pull/2314) -- Deadline timeout and logging [\#2312](https://github.com/pypeclub/OpenPype/pull/2312) -- nuke: do not multiply representation on class method [\#2311](https://github.com/pypeclub/OpenPype/pull/2311) -- Workfiles tool: Fix task formatting [\#2306](https://github.com/pypeclub/OpenPype/pull/2306) -- Delivery: Fix delivery paths created on windows [\#2302](https://github.com/pypeclub/OpenPype/pull/2302) -- Maya: Deadline - fix limit groups [\#2295](https://github.com/pypeclub/OpenPype/pull/2295) -- Royal Render: Fix plugin order and OpenPype auto-detection [\#2291](https://github.com/pypeclub/OpenPype/pull/2291) -- New Publisher: Fix mapping of indexes [\#2285](https://github.com/pypeclub/OpenPype/pull/2285) -- Alternate site for site sync doesnt work for sequences [\#2284](https://github.com/pypeclub/OpenPype/pull/2284) -- FFmpeg: Execute ffprobe using list of arguments instead of string command [\#2281](https://github.com/pypeclub/OpenPype/pull/2281) -- Nuke: Anatomy fill data use task as dictionary [\#2278](https://github.com/pypeclub/OpenPype/pull/2278) -- Bug: fix variable name \_asset\_id in workfiles application [\#2274](https://github.com/pypeclub/OpenPype/pull/2274) -- Version handling fixes [\#2272](https://github.com/pypeclub/OpenPype/pull/2272) - -**Merged pull requests:** - -- Maya: Replaced PATH usage with vendored oiio path for maketx utility [\#2405](https://github.com/pypeclub/OpenPype/pull/2405) -- \[Fix\]\[MAYA\] Handle message type attribute within CollectLook [\#2394](https://github.com/pypeclub/OpenPype/pull/2394) -- Add validator to check correct version of extension for 
PS and AE [\#2387](https://github.com/pypeclub/OpenPype/pull/2387) -- Maya: configurable model top level validation [\#2321](https://github.com/pypeclub/OpenPype/pull/2321) -- Create test publish class for After Effects [\#2270](https://github.com/pypeclub/OpenPype/pull/2270) - -## [3.6.4](https://github.com/pypeclub/OpenPype/tree/3.6.4) (2021-11-23) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.3...3.6.4) - -**๐Ÿ› Bug fixes** - -- Nuke: inventory update removes all loaded read nodes [\#2294](https://github.com/pypeclub/OpenPype/pull/2294) - -## [3.6.3](https://github.com/pypeclub/OpenPype/tree/3.6.3) (2021-11-19) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.2...3.6.3) - -**๐Ÿ› Bug fixes** - -- Deadline: Fix publish targets [\#2280](https://github.com/pypeclub/OpenPype/pull/2280) - -## [3.6.2](https://github.com/pypeclub/OpenPype/tree/3.6.2) (2021-11-18) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.1...3.6.2) - -**๐Ÿš€ Enhancements** - -- Tools: Assets widget [\#2265](https://github.com/pypeclub/OpenPype/pull/2265) -- SceneInventory: Choose loader in asset switcher [\#2262](https://github.com/pypeclub/OpenPype/pull/2262) -- Style: New fonts in OpenPype style [\#2256](https://github.com/pypeclub/OpenPype/pull/2256) -- Tools: SceneInventory in OpenPype [\#2255](https://github.com/pypeclub/OpenPype/pull/2255) -- Tools: Tasks widget [\#2251](https://github.com/pypeclub/OpenPype/pull/2251) -- Tools: Creator in OpenPype [\#2244](https://github.com/pypeclub/OpenPype/pull/2244) -- Added endpoint for configured extensions [\#2221](https://github.com/pypeclub/OpenPype/pull/2221) - -**๐Ÿ› Bug fixes** - -- Tools: Parenting of tools in Nuke and Hiero [\#2266](https://github.com/pypeclub/OpenPype/pull/2266) -- limiting validator to specific editorial hosts [\#2264](https://github.com/pypeclub/OpenPype/pull/2264) -- Tools: Select Context dialog attribute fix [\#2261](https://github.com/pypeclub/OpenPype/pull/2261) -- Maya: Render publishing fails on linux [\#2260](https://github.com/pypeclub/OpenPype/pull/2260) -- LookAssigner: Fix tool reopen [\#2259](https://github.com/pypeclub/OpenPype/pull/2259) -- Standalone: editorial not publishing thumbnails on all subsets [\#2258](https://github.com/pypeclub/OpenPype/pull/2258) -- Burnins: Support mxf metadata [\#2247](https://github.com/pypeclub/OpenPype/pull/2247) -- Maya: Support for configurable AOV separator characters [\#2197](https://github.com/pypeclub/OpenPype/pull/2197) -- Maya: texture colorspace modes in looks [\#2195](https://github.com/pypeclub/OpenPype/pull/2195) - -## [3.6.1](https://github.com/pypeclub/OpenPype/tree/3.6.1) (2021-11-16) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.0...3.6.1) - -**๐Ÿ› Bug fixes** - -- Loader doesn't allow changing of version before loading [\#2254](https://github.com/pypeclub/OpenPype/pull/2254) - -## [3.6.0](https://github.com/pypeclub/OpenPype/tree/3.6.0) (2021-11-15) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.5.0...3.6.0) - -### ๐Ÿ“– Documentation - -- Add alternative sites for Site Sync [\#2206](https://github.com/pypeclub/OpenPype/pull/2206) -- Add command line way of running site sync server [\#2188](https://github.com/pypeclub/OpenPype/pull/2188) - -**๐Ÿ†• New features** - -- Add validate active site button to sync queue on a project [\#2176](https://github.com/pypeclub/OpenPype/pull/2176) -- Maya : Colorspace configuration [\#2170](https://github.com/pypeclub/OpenPype/pull/2170) -- 
Blender: Added support for audio [\#2168](https://github.com/pypeclub/OpenPype/pull/2168) -- Flame: a host basic integration [\#2165](https://github.com/pypeclub/OpenPype/pull/2165) -- Houdini: simple HDA workflow [\#2072](https://github.com/pypeclub/OpenPype/pull/2072) -- Basic Royal Render Integration โœจ [\#2061](https://github.com/pypeclub/OpenPype/pull/2061) -- Camera handling between Blender and Unreal [\#1988](https://github.com/pypeclub/OpenPype/pull/1988) -- switch PyQt5 for PySide2 [\#1744](https://github.com/pypeclub/OpenPype/pull/1744) - -**๐Ÿš€ Enhancements** - -- Tools: Subset manager in OpenPype [\#2243](https://github.com/pypeclub/OpenPype/pull/2243) -- General: Skip module directories without init file [\#2239](https://github.com/pypeclub/OpenPype/pull/2239) -- General: Static interfaces [\#2238](https://github.com/pypeclub/OpenPype/pull/2238) -- Style: Fix transparent image in style [\#2235](https://github.com/pypeclub/OpenPype/pull/2235) -- Add a "following workfile versioning" option on publish [\#2225](https://github.com/pypeclub/OpenPype/pull/2225) -- Modules: Module can add cli commands [\#2224](https://github.com/pypeclub/OpenPype/pull/2224) -- Webpublisher: Separate webpublisher logic [\#2222](https://github.com/pypeclub/OpenPype/pull/2222) -- Add both side availability on Site Sync sites to Loader [\#2220](https://github.com/pypeclub/OpenPype/pull/2220) -- Tools: Center loader and library loader on show [\#2219](https://github.com/pypeclub/OpenPype/pull/2219) -- Maya : Validate shape zero [\#2212](https://github.com/pypeclub/OpenPype/pull/2212) -- Maya : validate unique names [\#2211](https://github.com/pypeclub/OpenPype/pull/2211) -- Tools: OpenPype stylesheet in workfiles tool [\#2208](https://github.com/pypeclub/OpenPype/pull/2208) -- Ftrack: Replace Queue with deque in event handlers logic [\#2204](https://github.com/pypeclub/OpenPype/pull/2204) -- Tools: New select context dialog [\#2200](https://github.com/pypeclub/OpenPype/pull/2200) -- Maya : Validate mesh ngons [\#2199](https://github.com/pypeclub/OpenPype/pull/2199) -- Dirmap in Nuke [\#2198](https://github.com/pypeclub/OpenPype/pull/2198) -- Delivery: Check 'frame' key in template for sequence delivery [\#2196](https://github.com/pypeclub/OpenPype/pull/2196) -- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193) -- Usage of tools code [\#2185](https://github.com/pypeclub/OpenPype/pull/2185) -- Settings: Dictionary based on project roots [\#2184](https://github.com/pypeclub/OpenPype/pull/2184) -- Subset name: Be able to pass asset document to get subset name [\#2179](https://github.com/pypeclub/OpenPype/pull/2179) -- Tools: Experimental tools [\#2167](https://github.com/pypeclub/OpenPype/pull/2167) -- Loader: Refactor and use OpenPype stylesheets [\#2166](https://github.com/pypeclub/OpenPype/pull/2166) -- Add loader for linked smart objects in photoshop [\#2149](https://github.com/pypeclub/OpenPype/pull/2149) -- Burnins: DNxHD profiles handling [\#2142](https://github.com/pypeclub/OpenPype/pull/2142) -- Tools: Single access point for host tools [\#2139](https://github.com/pypeclub/OpenPype/pull/2139) - -**๐Ÿ› Bug fixes** - -- Ftrack: Sync project ftrack id cache issue [\#2250](https://github.com/pypeclub/OpenPype/pull/2250) -- Ftrack: Session creation and Prepare project [\#2245](https://github.com/pypeclub/OpenPype/pull/2245) -- Added queue for studio processing in PS [\#2237](https://github.com/pypeclub/OpenPype/pull/2237) -- Python 2: Unicode to 
string conversion [\#2236](https://github.com/pypeclub/OpenPype/pull/2236) -- Fix - enum for color coding in PS [\#2234](https://github.com/pypeclub/OpenPype/pull/2234) -- Pyblish Tool: Fix targets handling [\#2232](https://github.com/pypeclub/OpenPype/pull/2232) -- Ftrack: Base event fix of 'get\_project\_from\_entity' method [\#2214](https://github.com/pypeclub/OpenPype/pull/2214) -- Maya : multiple subsets review broken [\#2210](https://github.com/pypeclub/OpenPype/pull/2210) -- Fix - different command used for Linux and Mac OS [\#2207](https://github.com/pypeclub/OpenPype/pull/2207) -- Tools: Workfiles tool don't use avalon widgets [\#2205](https://github.com/pypeclub/OpenPype/pull/2205) -- Ftrack: Fill missing ftrack id on mongo project [\#2203](https://github.com/pypeclub/OpenPype/pull/2203) -- Project Manager: Fix copying of tasks [\#2191](https://github.com/pypeclub/OpenPype/pull/2191) -- StandalonePublisher: Source validator don't expect representations [\#2190](https://github.com/pypeclub/OpenPype/pull/2190) -- Blender: Fix trying to pack an image when the shader node has no texture [\#2183](https://github.com/pypeclub/OpenPype/pull/2183) -- Maya: review viewport settings [\#2177](https://github.com/pypeclub/OpenPype/pull/2177) -- MacOS: Launching of applications may cause Permissions error [\#2175](https://github.com/pypeclub/OpenPype/pull/2175) -- Maya: Aspect ratio [\#2174](https://github.com/pypeclub/OpenPype/pull/2174) -- Blender: Fix 'Deselect All' with object not in 'Object Mode' [\#2163](https://github.com/pypeclub/OpenPype/pull/2163) -- Tools: Stylesheets are applied after tool show [\#2161](https://github.com/pypeclub/OpenPype/pull/2161) -- Maya: Collect render - fix UNC path support ๐Ÿ› [\#2158](https://github.com/pypeclub/OpenPype/pull/2158) -- Maya: Fix hotbox broken by scriptsmenu [\#2151](https://github.com/pypeclub/OpenPype/pull/2151) -- Ftrack: Ignore save warnings exception in Prepare project action [\#2150](https://github.com/pypeclub/OpenPype/pull/2150) -- Loader thumbnails with smooth edges [\#2147](https://github.com/pypeclub/OpenPype/pull/2147) -- Added validator for source files for Standalone Publisher [\#2138](https://github.com/pypeclub/OpenPype/pull/2138) - -**Merged pull requests:** - -- Bump pillow from 8.2.0 to 8.3.2 [\#2162](https://github.com/pypeclub/OpenPype/pull/2162) -- Bump axios from 0.21.1 to 0.21.4 in /website [\#2059](https://github.com/pypeclub/OpenPype/pull/2059) - -## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.1...3.5.0) - -**Deprecated:** - -- Maya: Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106) - -**๐Ÿ†• New features** - -- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131) -- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124) -- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114) -- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091) -- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073) -- Maya: Validate setdress top group [\#2068](https://github.com/pypeclub/OpenPype/pull/2068) -- Maya: Enable publishing render attrib sets \(e.g. 
V-Ray Displacement\) with model [\#1955](https://github.com/pypeclub/OpenPype/pull/1955) - -**๐Ÿš€ Enhancements** - -- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137) -- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132) -- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128) -- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104) -- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093) -- Added choosing different dirmap mapping if workfile synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088) -- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084) -- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080) -- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079) -- Global: add global validators to settings [\#2078](https://github.com/pypeclub/OpenPype/pull/2078) -- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070) -- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069) -- Nuke: Adding `still` image family workflow [\#2064](https://github.com/pypeclub/OpenPype/pull/2064) -- Maya: validate authorized loaded plugins [\#2062](https://github.com/pypeclub/OpenPype/pull/2062) -- Tools: add support for pyenv on windows [\#2051](https://github.com/pypeclub/OpenPype/pull/2051) -- SyncServer: Dropbox Provider [\#1979](https://github.com/pypeclub/OpenPype/pull/1979) -- Burnin: Get data from context with defined keys. [\#1897](https://github.com/pypeclub/OpenPype/pull/1897) -- Timers manager: Get task time [\#1896](https://github.com/pypeclub/OpenPype/pull/1896) -- TVPaint: Option to stop timer on application exit. [\#1887](https://github.com/pypeclub/OpenPype/pull/1887) - -**๐Ÿ› Bug fixes** - -- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130) -- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129) -- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120) -- Hiero: publishing effect first time makes wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115) -- Add startup script for Houdini Core. 
[\#2110](https://github.com/pypeclub/OpenPype/pull/2110) -- TVPaint: Behavior name of loop also accept repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109) -- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103) -- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101) -- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100) -- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097) -- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096) -- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095) -- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087) -- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085) -- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083) -- Deadline: Collect deadline server does not check existence of deadline key [\#2082](https://github.com/pypeclub/OpenPype/pull/2082) -- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081) -- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077) -- Maya: Fix multi-camera renders [\#2065](https://github.com/pypeclub/OpenPype/pull/2065) -- Fix Sync Queue when project disabled [\#2063](https://github.com/pypeclub/OpenPype/pull/2063) - -**Merged pull requests:** - -- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086) - -## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.0...3.4.1) - -**๐Ÿ†• New features** - -- Settings: Flag project as deactivated and hide from tools' view [\#2008](https://github.com/pypeclub/OpenPype/pull/2008) - -**๐Ÿš€ Enhancements** - -- General: Startup validations [\#2054](https://github.com/pypeclub/OpenPype/pull/2054) -- Nuke: proxy mode validator [\#2052](https://github.com/pypeclub/OpenPype/pull/2052) -- Ftrack: Removed ftrack interface [\#2049](https://github.com/pypeclub/OpenPype/pull/2049) -- Settings UI: Deffered set value on entity [\#2044](https://github.com/pypeclub/OpenPype/pull/2044) -- Loader: Families filtering [\#2043](https://github.com/pypeclub/OpenPype/pull/2043) -- Settings UI: Project view enhancements [\#2042](https://github.com/pypeclub/OpenPype/pull/2042) -- Settings for Nuke IncrementScriptVersion [\#2039](https://github.com/pypeclub/OpenPype/pull/2039) -- Loader & Library loader: Use tools from OpenPype [\#2038](https://github.com/pypeclub/OpenPype/pull/2038) -- Adding predefined project folders creation in PM [\#2030](https://github.com/pypeclub/OpenPype/pull/2030) -- WebserverModule: Removed interface of webserver module [\#2028](https://github.com/pypeclub/OpenPype/pull/2028) -- TimersManager: Removed interface of timers manager [\#2024](https://github.com/pypeclub/OpenPype/pull/2024) -- Feature Maya import asset from scene inventory [\#2018](https://github.com/pypeclub/OpenPype/pull/2018) - -**๐Ÿ› Bug fixes** - -- Timers manger: Typo fix [\#2058](https://github.com/pypeclub/OpenPype/pull/2058) -- Hiero: Editorial fixes [\#2057](https://github.com/pypeclub/OpenPype/pull/2057) -- Differentiate jpg 
sequences from thumbnail [\#2056](https://github.com/pypeclub/OpenPype/pull/2056) -- FFmpeg: Split command to list does not work [\#2046](https://github.com/pypeclub/OpenPype/pull/2046) -- Removed shell flag in subprocess call [\#2045](https://github.com/pypeclub/OpenPype/pull/2045) - -**Merged pull requests:** - -- Bump prismjs from 1.24.0 to 1.25.0 in /website [\#2050](https://github.com/pypeclub/OpenPype/pull/2050) - -## [3.4.0](https://github.com/pypeclub/OpenPype/tree/3.4.0) (2021-09-17) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...3.4.0) - -### ๐Ÿ“– Documentation - -- Documentation: Ftrack launch argsuments update [\#2014](https://github.com/pypeclub/OpenPype/pull/2014) -- Nuke Quick Start / Tutorial [\#1952](https://github.com/pypeclub/OpenPype/pull/1952) -- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821) - -**๐Ÿ†• New features** - -- Nuke: Compatibility with Nuke 13 [\#2003](https://github.com/pypeclub/OpenPype/pull/2003) -- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947) -- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876) -- Blender: Improved assets handling [\#1615](https://github.com/pypeclub/OpenPype/pull/1615) - -**๐Ÿš€ Enhancements** - -- Added possibility to configure of synchronization of workfile versionโ€ฆ [\#2041](https://github.com/pypeclub/OpenPype/pull/2041) -- General: Task types in profiles [\#2036](https://github.com/pypeclub/OpenPype/pull/2036) -- Console interpreter: Handle invalid sizes on initialization [\#2022](https://github.com/pypeclub/OpenPype/pull/2022) -- Ftrack: Show OpenPype versions in event server status [\#2019](https://github.com/pypeclub/OpenPype/pull/2019) -- General: Staging icon [\#2017](https://github.com/pypeclub/OpenPype/pull/2017) -- Ftrack: Sync to avalon actions have jobs [\#2015](https://github.com/pypeclub/OpenPype/pull/2015) -- Modules: Connect method is not required [\#2009](https://github.com/pypeclub/OpenPype/pull/2009) -- Settings UI: Number with configurable steps [\#2001](https://github.com/pypeclub/OpenPype/pull/2001) -- Moving project folder structure creation out of ftrack module \#1989 [\#1996](https://github.com/pypeclub/OpenPype/pull/1996) -- Configurable items for providers without Settings [\#1987](https://github.com/pypeclub/OpenPype/pull/1987) -- Global: Example addons [\#1986](https://github.com/pypeclub/OpenPype/pull/1986) -- Standalone Publisher: Extract harmony zip handle workfile template [\#1982](https://github.com/pypeclub/OpenPype/pull/1982) -- Settings UI: Number sliders [\#1978](https://github.com/pypeclub/OpenPype/pull/1978) -- Workfiles: Support more workfile templates [\#1966](https://github.com/pypeclub/OpenPype/pull/1966) -- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964) -- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963) -- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962) -- Global: Settings defined by Addons/Modules [\#1959](https://github.com/pypeclub/OpenPype/pull/1959) -- CI: change release numbering triggers [\#1954](https://github.com/pypeclub/OpenPype/pull/1954) -- Global: Avalon Host name collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949) -- Global: Define hosts in CollectSceneVersion 
[\#1948](https://github.com/pypeclub/OpenPype/pull/1948) -- Add face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942) -- OpenPype: Add version validation and `--headless` mode and update progress ๐Ÿ”„ [\#1939](https://github.com/pypeclub/OpenPype/pull/1939) -- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915) -- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910) -- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888) -- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872) - -**๐Ÿ› Bug fixes** - -- Workfiles tool: Task selection [\#2040](https://github.com/pypeclub/OpenPype/pull/2040) -- Ftrack: Delete old versions missing settings key [\#2037](https://github.com/pypeclub/OpenPype/pull/2037) -- Nuke: typo on a button [\#2034](https://github.com/pypeclub/OpenPype/pull/2034) -- Hiero: Fix "none" named tags [\#2033](https://github.com/pypeclub/OpenPype/pull/2033) -- FFmpeg: Subprocess arguments as list [\#2032](https://github.com/pypeclub/OpenPype/pull/2032) -- General: Fix Python 2 breaking line [\#2016](https://github.com/pypeclub/OpenPype/pull/2016) -- Bugfix/webpublisher task type [\#2006](https://github.com/pypeclub/OpenPype/pull/2006) -- Nuke thumbnails generated from middle of the sequence [\#1992](https://github.com/pypeclub/OpenPype/pull/1992) -- Nuke: last version from path gets correct version [\#1990](https://github.com/pypeclub/OpenPype/pull/1990) -- nuke, resolve, hiero: precollector order lest then 0.5 [\#1984](https://github.com/pypeclub/OpenPype/pull/1984) -- Last workfile with multiple work templates [\#1981](https://github.com/pypeclub/OpenPype/pull/1981) -- Collectors order [\#1977](https://github.com/pypeclub/OpenPype/pull/1977) -- Stop timer was within validator order range. 
[\#1975](https://github.com/pypeclub/OpenPype/pull/1975) -- Ftrack: arrow submodule has https url source [\#1974](https://github.com/pypeclub/OpenPype/pull/1974) -- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972) -- Deadline: Houdini plugins in different hierarchy [\#1970](https://github.com/pypeclub/OpenPype/pull/1970) -- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967) -- Global: ExtractJpeg can handle filepaths with spaces [\#1961](https://github.com/pypeclub/OpenPype/pull/1961) -- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960) - -**Merged pull requests:** - -- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958) -- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933) - -## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.0...3.3.1) - -**๐Ÿ› Bug fixes** - -- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946) -- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945) -- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941) -- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928) - -## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...3.3.0) - -### ๐Ÿ“– Documentation - -- Standalone Publish of textures family [\#1834](https://github.com/pypeclub/OpenPype/pull/1834) - -**๐Ÿ†• New features** - -- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932) -- Maya: Scene patching ๐Ÿฉนon submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923) -- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901) - -**๐Ÿš€ Enhancements** - -- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940) -- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927) -- Check for missing โœจ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925) -- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920) -- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919) -- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911) -- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900) -- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899) -- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898) -- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892) -- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891) -- Expose stop timer through rest api. [\#1886](https://github.com/pypeclub/OpenPype/pull/1886) -- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885) -- Allow Multiple Notes to run on tasks. 
[\#1882](https://github.com/pypeclub/OpenPype/pull/1882) -- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869) -- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868) -- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867) -- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865) -- Anatomy schema validation [\#1864](https://github.com/pypeclub/OpenPype/pull/1864) -- Ftrack prepare project structure [\#1861](https://github.com/pypeclub/OpenPype/pull/1861) -- Maya: support for configurable `dirmap` ๐Ÿ—บ๏ธ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859) -- Independent general environments [\#1853](https://github.com/pypeclub/OpenPype/pull/1853) -- TVPaint Start Frame [\#1844](https://github.com/pypeclub/OpenPype/pull/1844) -- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843) -- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838) -- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829) -- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823) -- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819) -- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815) -- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797) -- Maya: Shader name validation [\#1762](https://github.com/pypeclub/OpenPype/pull/1762) - -**๐Ÿ› Bug fixes** - -- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935) -- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930) -- Maya: Validate Model Name - repair accident deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929) -- Nuke: submit to farm failed due `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926) -- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922) -- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917) -- Nuke: update video file crassing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916) -- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914) -- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906) -- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904) -- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903) -- Burnins: Use input's bitrate in h624 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902) -- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893) -- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890) -- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889) -- Normalize path returned from Workfiles. 
[\#1880](https://github.com/pypeclub/OpenPype/pull/1880) -- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862) -- imageio: fix grouping [\#1856](https://github.com/pypeclub/OpenPype/pull/1856) -- Maya: don't add reference members as connections to the container set ๐Ÿ“ฆ [\#1855](https://github.com/pypeclub/OpenPype/pull/1855) -- publisher: missing version in subset prop [\#1849](https://github.com/pypeclub/OpenPype/pull/1849) -- Ftrack type error fix in sync to avalon event handler [\#1845](https://github.com/pypeclub/OpenPype/pull/1845) -- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841) -- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836) -- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813) -- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809) -- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806) -- Houdini colector formatting keys fix [\#1802](https://github.com/pypeclub/OpenPype/pull/1802) -- Settings error dialog on show [\#1798](https://github.com/pypeclub/OpenPype/pull/1798) -- Application launch stdout/stderr in GUI build [\#1684](https://github.com/pypeclub/OpenPype/pull/1684) -- Nuke: re-use instance nodes output path [\#1577](https://github.com/pypeclub/OpenPype/pull/1577) - -**Merged pull requests:** - -- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937) -- Add support for multiple Deadline โ˜ ๏ธโž– servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905) -- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space ๐Ÿš€ [\#1863](https://github.com/pypeclub/OpenPype/pull/1863) -- Maya: expected files -\> render products โš™๏ธ overhaul [\#1812](https://github.com/pypeclub/OpenPype/pull/1812) -- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811) - -## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.4...3.2.0) - -### ๐Ÿ“– Documentation - -- Fix: staging and `--use-version` option [\#1786](https://github.com/pypeclub/OpenPype/pull/1786) -- Subset template and TVPaint subset template docs [\#1717](https://github.com/pypeclub/OpenPype/pull/1717) -- Overscan color extract review [\#1701](https://github.com/pypeclub/OpenPype/pull/1701) - -**๐Ÿš€ Enhancements** - -- Nuke: ftrack family plugin settings preset [\#1805](https://github.com/pypeclub/OpenPype/pull/1805) -- Standalone publisher last project [\#1799](https://github.com/pypeclub/OpenPype/pull/1799) -- Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795) -- Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777) -- Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776) -- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769) -- Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766) -- Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763) -- Application executables with environment variables [\#1757](https://github.com/pypeclub/OpenPype/pull/1757) -- Deadline: Nuke submission additional attributes 
[\#1756](https://github.com/pypeclub/OpenPype/pull/1756) -- Settings schema without prefill [\#1753](https://github.com/pypeclub/OpenPype/pull/1753) -- Settings Hosts enum [\#1739](https://github.com/pypeclub/OpenPype/pull/1739) -- Validate containers settings [\#1736](https://github.com/pypeclub/OpenPype/pull/1736) -- PS - added loader from sequence [\#1726](https://github.com/pypeclub/OpenPype/pull/1726) -- Autoupdate launcher [\#1725](https://github.com/pypeclub/OpenPype/pull/1725) -- Toggle Ftrack upload in StandalonePublisher [\#1708](https://github.com/pypeclub/OpenPype/pull/1708) -- Nuke: Prerender Frame Range by default [\#1699](https://github.com/pypeclub/OpenPype/pull/1699) -- Smoother edges of color triangle [\#1695](https://github.com/pypeclub/OpenPype/pull/1695) - -**๐Ÿ› Bug fixes** - -- nuke: fixing wrong name of family folder when `used existing frames` [\#1803](https://github.com/pypeclub/OpenPype/pull/1803) -- Collect ftrack family bugs [\#1801](https://github.com/pypeclub/OpenPype/pull/1801) -- Invitee email can be None which break the Ftrack commit. [\#1788](https://github.com/pypeclub/OpenPype/pull/1788) -- Otio unrelated error on import [\#1782](https://github.com/pypeclub/OpenPype/pull/1782) -- FFprobe streams order [\#1775](https://github.com/pypeclub/OpenPype/pull/1775) -- Fix - single file files are str only, cast it to list to count properly [\#1772](https://github.com/pypeclub/OpenPype/pull/1772) -- Environments in app executable for MacOS [\#1768](https://github.com/pypeclub/OpenPype/pull/1768) -- Project specific environments [\#1767](https://github.com/pypeclub/OpenPype/pull/1767) -- Settings UI with refresh button [\#1764](https://github.com/pypeclub/OpenPype/pull/1764) -- Standalone publisher thumbnail extractor fix [\#1761](https://github.com/pypeclub/OpenPype/pull/1761) -- Anatomy others templates don't cause crash [\#1758](https://github.com/pypeclub/OpenPype/pull/1758) -- Backend acre module commit update [\#1745](https://github.com/pypeclub/OpenPype/pull/1745) -- hiero: precollect instances failing when audio selected [\#1743](https://github.com/pypeclub/OpenPype/pull/1743) -- Hiero: creator instance error [\#1742](https://github.com/pypeclub/OpenPype/pull/1742) -- Nuke: fixing render creator for no selection format failing [\#1741](https://github.com/pypeclub/OpenPype/pull/1741) -- StandalonePublisher: failing collector for editorial [\#1738](https://github.com/pypeclub/OpenPype/pull/1738) -- Local settings UI crash on missing defaults [\#1737](https://github.com/pypeclub/OpenPype/pull/1737) -- TVPaint white background on thumbnail [\#1735](https://github.com/pypeclub/OpenPype/pull/1735) -- Ftrack missing custom attribute message [\#1734](https://github.com/pypeclub/OpenPype/pull/1734) -- Launcher project changes [\#1733](https://github.com/pypeclub/OpenPype/pull/1733) -- Ftrack sync status [\#1732](https://github.com/pypeclub/OpenPype/pull/1732) -- TVPaint use layer name for default variant [\#1724](https://github.com/pypeclub/OpenPype/pull/1724) -- Default subset template for TVPaint review and workfile families [\#1716](https://github.com/pypeclub/OpenPype/pull/1716) -- Maya: Extract review hotfix [\#1714](https://github.com/pypeclub/OpenPype/pull/1714) -- Settings: Imageio improving granularity [\#1711](https://github.com/pypeclub/OpenPype/pull/1711) -- Application without executables [\#1679](https://github.com/pypeclub/OpenPype/pull/1679) -- Unreal: launching on Linux [\#1672](https://github.com/pypeclub/OpenPype/pull/1672) - -**Merged pull 
requests:** - -- Bump prismjs from 1.23.0 to 1.24.0 in /website [\#1773](https://github.com/pypeclub/OpenPype/pull/1773) -- TVPaint ftrack family [\#1755](https://github.com/pypeclub/OpenPype/pull/1755) - -## [2.18.4](https://github.com/pypeclub/OpenPype/tree/2.18.4) (2021-06-24) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.3...2.18.4) - -## [2.18.3](https://github.com/pypeclub/OpenPype/tree/2.18.3) (2021-06-23) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.2...2.18.3) - -## [2.18.2](https://github.com/pypeclub/OpenPype/tree/2.18.2) (2021-06-16) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.1.0...2.18.2) - -## [3.1.0](https://github.com/pypeclub/OpenPype/tree/3.1.0) (2021-06-15) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.0.0...3.1.0) - -### 📖 Documentation - -- Feature Slack integration [\#1657](https://github.com/pypeclub/OpenPype/pull/1657) - -**🚀 Enhancements** - -- Log Viewer with OpenPype style [\#1703](https://github.com/pypeclub/OpenPype/pull/1703) -- Scrolling in OpenPype info widget [\#1702](https://github.com/pypeclub/OpenPype/pull/1702) -- OpenPype style in modules [\#1694](https://github.com/pypeclub/OpenPype/pull/1694) -- Sort applications and tools alphabetically in Settings UI [\#1689](https://github.com/pypeclub/OpenPype/pull/1689) -- \#683 - Validate Frame Range in Standalone Publisher [\#1683](https://github.com/pypeclub/OpenPype/pull/1683) -- Hiero: old container versions identify with red color [\#1682](https://github.com/pypeclub/OpenPype/pull/1682) -- Project Manager: Default name column width [\#1669](https://github.com/pypeclub/OpenPype/pull/1669) -- Remove outline in stylesheet [\#1667](https://github.com/pypeclub/OpenPype/pull/1667) -- TVPaint: Creator take layer name as default value for subset variant [\#1663](https://github.com/pypeclub/OpenPype/pull/1663) -- TVPaint custom subset template [\#1662](https://github.com/pypeclub/OpenPype/pull/1662) -- Editorial: conform assets validator [\#1659](https://github.com/pypeclub/OpenPype/pull/1659) -- Nuke - Publish simplification [\#1653](https://github.com/pypeclub/OpenPype/pull/1653) -- \#1333 - added tooltip hints to Pyblish buttons [\#1649](https://github.com/pypeclub/OpenPype/pull/1649) - -**🐛 Bug fixes** - -- Nuke: broken publishing rendered frames [\#1707](https://github.com/pypeclub/OpenPype/pull/1707) -- Standalone publisher Thumbnail export args [\#1705](https://github.com/pypeclub/OpenPype/pull/1705) -- Bad zip can break OpenPype start [\#1691](https://github.com/pypeclub/OpenPype/pull/1691) -- Hiero: published whole edit mov [\#1687](https://github.com/pypeclub/OpenPype/pull/1687) -- Ftrack subprocess handle of stdout/stderr [\#1675](https://github.com/pypeclub/OpenPype/pull/1675) -- Settings list race condition and mutable dict list conversion [\#1671](https://github.com/pypeclub/OpenPype/pull/1671) -- Mac launch arguments fix [\#1660](https://github.com/pypeclub/OpenPype/pull/1660) -- Fix missing dbm python module [\#1652](https://github.com/pypeclub/OpenPype/pull/1652) -- Transparent branches in view on Mac [\#1648](https://github.com/pypeclub/OpenPype/pull/1648) -- Add asset on task item [\#1646](https://github.com/pypeclub/OpenPype/pull/1646) -- Project manager save and queue [\#1645](https://github.com/pypeclub/OpenPype/pull/1645) -- New project anatomy values [\#1644](https://github.com/pypeclub/OpenPype/pull/1644) -- Farm publishing: check if published items do exist
[\#1573](https://github.com/pypeclub/OpenPype/pull/1573) - -**Merged pull requests:** - -- Bump normalize-url from 4.5.0 to 4.5.1 in /website [\#1686](https://github.com/pypeclub/OpenPype/pull/1686) - - -## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.1...3.0.0) - -### Configuration -- Studio Settings GUI: no more json configuration files. -- OpenPype Modules can be turned on and off. -- Easy to add Application versions. -- Per Project Environment and plugin management. -- Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family. -- Configurable publish plugins. -- Options to make any validator or extractor optional or disabled. -- Color Management is now unified under anatomy settings. -- Subset naming and grouping are fully configurable. -- All project attributes can now be set directly in OpenPype settings. -- Studio Settings can be locked to prevent unwanted artist changes. -- You can now add per project and per task type templates for workfile initialization in most hosts. -- Too many other individual configurable options to list in this changelog :) - -### Local Settings -- Local Settings GUI where users can change certain options on an individual basis. - - Application executables. - - Project roots. - - Project site sync settings. - -### Build, Installation and Deployments -- No requirements on artist machine. -- Fully distributed workflow possible. -- Self-contained installation. -- Available on all three major platforms. -- Automatic artist OpenPype updates. -- Studio OpenPype repository for updates distribution. -- Robust Build system. -- Safe studio update versioning with staging and production options. -- MacOS build generates .app and .dmg installer. -- Windows build with installer creation script. - -### Misc -- System and diagnostic info tool in the tray. -- Launching application from Launcher indicates activity. -- All project roots are now named. Single root projects are now achieved by having a single named root in the project anatomy. -- Every project root is cast into an environment variable as well, so it can be used in DCC instead of an absolute path (depends on DCC support for env vars). -- Basic support for task types, on top of task names. -- Timers now change automatically when the context is switched inside a running application. -- "Master" versions have been renamed to "Hero". -- Extract Burnins now supports file sequences and color settings. -- Extract Review supports overscan cropping, better letterboxes and background color fill. -- Delivery tool for copying and renaming any published assets in bulk. -- Harmony, Photoshop and After Effects now connect directly with the OpenPype tray instead of spawning their own terminal. - -### Project Manager GUI -- Create Projects. -- Create Shots and Assets. -- Create Tasks and assign task types. -- Fill required asset attributes. -- Validations for duplicated or unsupported names. -- Archive Assets. -- Move Asset within hierarchy. - -### Site Sync (beta) -- Synchronization of published files between workstations and central storage. -- Ability to add arbitrary storage providers to the Site Sync system. -- Default setup includes Disk and Google Drive providers as examples. -- Access to availability information from Loader and Scene Manager. -- Sync queue GUI with filtering, error and status reporting. -- Site sync can be configured on a per-project basis. -- Bulk upload and download from the loader.
- -### Ftrack -- Actions have customisable roles. -- Settings on all actions are updated live and don't need an OpenPype restart. -- Ftrack module can now be turned off completely. -- It is enough to specify the Ftrack server name and the URL will be formed correctly. So instead of mystudio.ftrackapp.com, it's possible to use simply: "mystudio". - -### Editorial -- Fully OTIO based editorial publishing. -- Completely re-done Hiero publishing to be a lot simpler and faster. -- Consistent conforming from Resolve, Hiero and Standalone Publisher. - -### Backend -- OpenPype and Avalon now always share the same database (in 2.x it was possible to split them). -- Major codebase refactoring to allow for better CI, versioning and control of individual integrations. -- OTIO is bundled with build. -- OIIO is bundled with build. -- FFMPEG is bundled with build. -- Rest API and host WebSocket servers have been unified into a single local webserver. -- Maya look assigner has been integrated into the main codebase. -- Publish GUI has been integrated into the main codebase. -- Studio and Project settings overrides are now stored in Mongo. -- Too many other backend fixes and tweaks to list :); you can see the full changelog on GitHub for those. -- OpenPype uses Poetry to manage its virtual environment when running from code. -- All applications can be marked as Python 2 or 3 compatible to make the switch a bit easier. - - -### Pull Requests since 3.0.0-rc.6 - - -**Implemented enhancements:** - -- settings: task types enum entity [\#1605](https://github.com/pypeclub/OpenPype/issues/1605) -- Settings: ignore keys in referenced schema [\#1600](https://github.com/pypeclub/OpenPype/issues/1600) -- Maya: support for frame steps and frame lists [\#1585](https://github.com/pypeclub/OpenPype/issues/1585) -- TVPaint: Publish workfile.
[\#1548](https://github.com/pypeclub/OpenPype/issues/1548) -- Loader: Current Asset Button [\#1448](https://github.com/pypeclub/OpenPype/issues/1448) -- Hiero: publish with retiming [\#1377](https://github.com/pypeclub/OpenPype/issues/1377) -- Ask user to restart after changing global environments in settings [\#910](https://github.com/pypeclub/OpenPype/issues/910) -- add option to define path to workfile template [\#895](https://github.com/pypeclub/OpenPype/issues/895) -- Harmony: move server console to system tray [\#676](https://github.com/pypeclub/OpenPype/issues/676) -- Standalone style [\#1630](https://github.com/pypeclub/OpenPype/pull/1630) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Faster hierarchical values push [\#1627](https://github.com/pypeclub/OpenPype/pull/1627) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Launcher tool style [\#1624](https://github.com/pypeclub/OpenPype/pull/1624) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Loader and Library loader enhancements [\#1623](https://github.com/pypeclub/OpenPype/pull/1623) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Tray style [\#1622](https://github.com/pypeclub/OpenPype/pull/1622) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Maya schemas cleanup [\#1610](https://github.com/pypeclub/OpenPype/pull/1610) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Settings: ignore keys in referenced schema [\#1608](https://github.com/pypeclub/OpenPype/pull/1608) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- settings: task types enum entity [\#1606](https://github.com/pypeclub/OpenPype/pull/1606) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Openpype style [\#1604](https://github.com/pypeclub/OpenPype/pull/1604) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- TVPaint: Publish workfile.
[\#1597](https://github.com/pypeclub/OpenPype/pull/1597) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Nuke: add option to define path to workfile template [\#1571](https://github.com/pypeclub/OpenPype/pull/1571) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Crop overscan in Extract Review [\#1569](https://github.com/pypeclub/OpenPype/pull/1569) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Unreal and Blender: Material Workflow [\#1562](https://github.com/pypeclub/OpenPype/pull/1562) ([simonebarbieri](https://github.com/simonebarbieri)) -- Harmony: move server console to system tray [\#1560](https://github.com/pypeclub/OpenPype/pull/1560) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Ask user to restart after changing global environments in settings [\#1550](https://github.com/pypeclub/OpenPype/pull/1550) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Hiero: publish with retiming [\#1545](https://github.com/pypeclub/OpenPype/pull/1545) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) - -**Fixed bugs:** - -- Library loader load asset documents on OpenPype start [\#1603](https://github.com/pypeclub/OpenPype/issues/1603) -- Resolve: unable to load the same footage twice [\#1317](https://github.com/pypeclub/OpenPype/issues/1317) -- Resolve: unable to load footage [\#1316](https://github.com/pypeclub/OpenPype/issues/1316) -- Add required Python 2 modules [\#1291](https://github.com/pypeclub/OpenPype/issues/1291) -- GUI scaling with hires displays [\#705](https://github.com/pypeclub/OpenPype/issues/705) -- Maya: non unicode string in publish validation [\#673](https://github.com/pypeclub/OpenPype/issues/673) -- Nuke: Rendered Frame validation is triggered by multiple collections [\#156](https://github.com/pypeclub/OpenPype/issues/156) -- avalon-core debugging failing [\#80](https://github.com/pypeclub/OpenPype/issues/80) -- Only check arnold shading group if arnold is used [\#72](https://github.com/pypeclub/OpenPype/issues/72) -- Sync server Qt layout fix [\#1621](https://github.com/pypeclub/OpenPype/pull/1621) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Console Listener on Python 2 fix [\#1620](https://github.com/pypeclub/OpenPype/pull/1620) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Bug: Initialize blessed term only in console mode [\#1619](https://github.com/pypeclub/OpenPype/pull/1619) ([antirotor](https://github.com/antirotor)) -- Settings template skip paths support wrappers [\#1618](https://github.com/pypeclub/OpenPype/pull/1618) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Maya capture 'isolate\_view' fix + minor corrections [\#1617](https://github.com/pypeclub/OpenPype/pull/1617) ([2-REC](https://github.com/2-REC)) -- MacOS Fix launch of standalone publisher [\#1616](https://github.com/pypeclub/OpenPype/pull/1616) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- 'Delivery action' report fix + typos [\#1612](https://github.com/pypeclub/OpenPype/pull/1612) ([2-REC](https://github.com/2-REC)) -- List append fix in mutable dict settings [\#1599](https://github.com/pypeclub/OpenPype/pull/1599) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Documentation: Maya: fix review [\#1598](https://github.com/pypeclub/OpenPype/pull/1598) ([antirotor](https://github.com/antirotor)) -- Bugfix: Set certifi CA bundle for all platforms [\#1596](https://github.com/pypeclub/OpenPype/pull/1596) ([antirotor](https://github.com/antirotor)) - -**Merged pull
requests:** - -- Bump dns-packet from 1.3.1 to 1.3.4 in /website [\#1611](https://github.com/pypeclub/OpenPype/pull/1611) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Maya: Render workflow fixes [\#1607](https://github.com/pypeclub/OpenPype/pull/1607) ([antirotor](https://github.com/antirotor)) -- Maya: support for frame steps and frame lists [\#1586](https://github.com/pypeclub/OpenPype/pull/1586) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- 3.0.0 - curated changelog [\#1284](https://github.com/pypeclub/OpenPype/pull/1284) ([mkolar](https://github.com/mkolar)) - -## [2.18.1](https://github.com/pypeclub/openpype/tree/2.18.1) (2021-06-03) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...2.18.1) - -**Enhancements:** - -- Faster hierarchical values push [\#1626](https://github.com/pypeclub/OpenPype/pull/1626) -- Feature Delivery in library loader [\#1549](https://github.com/pypeclub/OpenPype/pull/1549) -- Hiero: Initial frame publish support. [\#1172](https://github.com/pypeclub/OpenPype/pull/1172) - -**Fixed bugs:** - -- Maya capture 'isolate\_view' fix + minor corrections [\#1614](https://github.com/pypeclub/OpenPype/pull/1614) -- 'Delivery action' report fix +typos [\#1613](https://github.com/pypeclub/OpenPype/pull/1613) -- Delivery in LibraryLoader - fixed sequence issue [\#1590](https://github.com/pypeclub/OpenPype/pull/1590) -- FFmpeg filters in quote marks [\#1588](https://github.com/pypeclub/OpenPype/pull/1588) -- Ftrack delete action cause circular error [\#1581](https://github.com/pypeclub/OpenPype/pull/1581) -- Fix Maya playblast. [\#1566](https://github.com/pypeclub/OpenPype/pull/1566) -- More failsafes prevent errored runs. [\#1554](https://github.com/pypeclub/OpenPype/pull/1554) -- Celaction publishing [\#1539](https://github.com/pypeclub/OpenPype/pull/1539) -- celaction: app not starting [\#1533](https://github.com/pypeclub/OpenPype/pull/1533) - -**Merged pull requests:** - -- Maya: Render workflow fixes - 2.0 backport [\#1609](https://github.com/pypeclub/OpenPype/pull/1609) -- Maya Hardware support [\#1553](https://github.com/pypeclub/OpenPype/pull/1553) - - -## [CI/3.0.0-rc.6](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.6) (2021-05-27) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.5...CI/3.0.0-rc.6) - -**Implemented enhancements:** - -- Hiero: publish color and transformation soft-effects [\#1376](https://github.com/pypeclub/OpenPype/issues/1376) -- Get rid of `AVALON\_HIERARCHY` and `hiearchy` key on asset [\#432](https://github.com/pypeclub/OpenPype/issues/432) -- Sync to avalon do not store hierarchy key [\#1582](https://github.com/pypeclub/OpenPype/pull/1582) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Tools: launcher scripts for project manager [\#1557](https://github.com/pypeclub/OpenPype/pull/1557) ([antirotor](https://github.com/antirotor)) -- Simple tvpaint publish [\#1555](https://github.com/pypeclub/OpenPype/pull/1555) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Feature Delivery in library loader [\#1546](https://github.com/pypeclub/OpenPype/pull/1546) ([kalisp](https://github.com/kalisp)) -- Documentation: Dev and system build documentation [\#1543](https://github.com/pypeclub/OpenPype/pull/1543) ([antirotor](https://github.com/antirotor)) -- Color entity [\#1542](https://github.com/pypeclub/OpenPype/pull/1542) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Extract review bg color [\#1534](https://github.com/pypeclub/OpenPype/pull/1534) 
([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- TVPaint loader settings [\#1530](https://github.com/pypeclub/OpenPype/pull/1530) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Blender can initialize different user script paths [\#1528](https://github.com/pypeclub/OpenPype/pull/1528) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Blender and Unreal: Improved Animation Workflow [\#1514](https://github.com/pypeclub/OpenPype/pull/1514) ([simonebarbieri](https://github.com/simonebarbieri)) -- Hiero: publish color and transformation soft-effects [\#1511](https://github.com/pypeclub/OpenPype/pull/1511) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) - -**Fixed bugs:** - -- OpenPype specific version issues [\#1583](https://github.com/pypeclub/OpenPype/issues/1583) -- Ftrack login server can't work without stderr [\#1576](https://github.com/pypeclub/OpenPype/issues/1576) -- Mac application launch [\#1575](https://github.com/pypeclub/OpenPype/issues/1575) -- Settings are not propagated to Nuke write nodes [\#1538](https://github.com/pypeclub/OpenPype/issues/1538) -- Subset names settings not applied for publishing [\#1537](https://github.com/pypeclub/OpenPype/issues/1537) -- Nuke: callback at start not setting colorspace [\#1412](https://github.com/pypeclub/OpenPype/issues/1412) -- Pype 3: Missing icon for Settings [\#1272](https://github.com/pypeclub/OpenPype/issues/1272) -- Blender: cannot initialize Avalon if BLENDER\_USER\_SCRIPTS is already used [\#1050](https://github.com/pypeclub/OpenPype/issues/1050) -- Ftrack delete action cause circular error [\#206](https://github.com/pypeclub/OpenPype/issues/206) -- Build: stop cleaning of pyc files in build directory [\#1592](https://github.com/pypeclub/OpenPype/pull/1592) ([antirotor](https://github.com/antirotor)) -- Ftrack login server can't work without stderr [\#1591](https://github.com/pypeclub/OpenPype/pull/1591) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- FFmpeg filters in quote marks [\#1589](https://github.com/pypeclub/OpenPype/pull/1589) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- OpenPype specific version issues [\#1584](https://github.com/pypeclub/OpenPype/pull/1584) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Mac application launch [\#1580](https://github.com/pypeclub/OpenPype/pull/1580) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Ftrack delete action cause circular error [\#1579](https://github.com/pypeclub/OpenPype/pull/1579) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Hiero: publishing issues [\#1578](https://github.com/pypeclub/OpenPype/pull/1578) ([jezscha](https://github.com/jezscha)) -- Nuke: callback at start not setting colorspace [\#1561](https://github.com/pypeclub/OpenPype/pull/1561) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Bugfix PS subset and quick review [\#1541](https://github.com/pypeclub/OpenPype/pull/1541) ([kalisp](https://github.com/kalisp)) -- Settings are not propagated to Nuke write nodes [\#1540](https://github.com/pypeclub/OpenPype/pull/1540) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- OpenPype: Powershell scripts polishing [\#1536](https://github.com/pypeclub/OpenPype/pull/1536) ([antirotor](https://github.com/antirotor)) -- Host name collecting fix [\#1535](https://github.com/pypeclub/OpenPype/pull/1535) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Handle duplicated task names in project manager
[\#1531](https://github.com/pypeclub/OpenPype/pull/1531) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Validate is file attribute in settings schema [\#1529](https://github.com/pypeclub/OpenPype/pull/1529) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) - -**Merged pull requests:** - -- Bump postcss from 8.2.8 to 8.3.0 in /website [\#1593](https://github.com/pypeclub/OpenPype/pull/1593) ([dependabot[bot]](https://github.com/apps/dependabot)) -- User installation documentation [\#1532](https://github.com/pypeclub/OpenPype/pull/1532) ([64qam](https://github.com/64qam)) - -## [CI/3.0.0-rc.5](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.5) (2021-05-19) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...CI/3.0.0-rc.5) - -**Implemented enhancements:** - -- OpenPype: Build - Add progress bars [\#1524](https://github.com/pypeclub/OpenPype/pull/1524) ([antirotor](https://github.com/antirotor)) -- Default environments per host implementation [\#1522](https://github.com/pypeclub/OpenPype/pull/1522) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- OpenPype: use `semver` module for version resolution [\#1513](https://github.com/pypeclub/OpenPype/pull/1513) ([antirotor](https://github.com/antirotor)) -- Feature Aftereffects setting cleanup documentation [\#1510](https://github.com/pypeclub/OpenPype/pull/1510) ([kalisp](https://github.com/kalisp)) -- Feature Sync server settings enhancement [\#1501](https://github.com/pypeclub/OpenPype/pull/1501) ([kalisp](https://github.com/kalisp)) -- Project manager [\#1396](https://github.com/pypeclub/OpenPype/pull/1396) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) - -**Fixed bugs:** - -- Unified schema definition [\#874](https://github.com/pypeclub/OpenPype/issues/874) -- Maya: fix look assignment [\#1526](https://github.com/pypeclub/OpenPype/pull/1526) ([antirotor](https://github.com/antirotor)) -- Bugfix Sync server local site issues [\#1523](https://github.com/pypeclub/OpenPype/pull/1523) ([kalisp](https://github.com/kalisp)) -- Store as list dictionary check initial value with right type [\#1520](https://github.com/pypeclub/OpenPype/pull/1520) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Maya: wrong collection of playblasted frames [\#1515](https://github.com/pypeclub/OpenPype/pull/1515) ([mkolar](https://github.com/mkolar)) -- Convert pyblish logs to string at the moment of logging [\#1512](https://github.com/pypeclub/OpenPype/pull/1512) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- 3.0 | nuke: fixing start\_at with option gui [\#1509](https://github.com/pypeclub/OpenPype/pull/1509) ([jezscha](https://github.com/jezscha)) -- Tests: fix pype -\> openpype to make tests work again [\#1508](https://github.com/pypeclub/OpenPype/pull/1508) ([antirotor](https://github.com/antirotor)) - -**Merged pull requests:** - -- OpenPype: disable submodule update with `--no-submodule-update` [\#1525](https://github.com/pypeclub/OpenPype/pull/1525) ([antirotor](https://github.com/antirotor)) -- Ftrack without autosync in Pype 3 [\#1519](https://github.com/pypeclub/OpenPype/pull/1519) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Feature Harmony setting cleanup documentation [\#1506](https://github.com/pypeclub/OpenPype/pull/1506) ([kalisp](https://github.com/kalisp)) -- Sync Server beginning of documentation [\#1471](https://github.com/pypeclub/OpenPype/pull/1471) ([kalisp](https://github.com/kalisp)) -- Blender: publish layout json [\#1348](https://github.com/pypeclub/OpenPype/pull/1348)
([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) - -## [2.18.0](https://github.com/pypeclub/openpype/tree/2.18.0) (2021-05-18) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.4...2.18.0) - -**Implemented enhancements:** - -- Default environments per host implementation [\#1405](https://github.com/pypeclub/OpenPype/issues/1405) -- Blender: publish layout json [\#1346](https://github.com/pypeclub/OpenPype/issues/1346) -- Ftrack without autosync in Pype 3 [\#1128](https://github.com/pypeclub/OpenPype/issues/1128) -- Launcher: started action indicator [\#1102](https://github.com/pypeclub/OpenPype/issues/1102) -- Launch arguments of applications [\#1094](https://github.com/pypeclub/OpenPype/issues/1094) -- Publish: instance info [\#724](https://github.com/pypeclub/OpenPype/issues/724) -- Review: ability to control review length [\#482](https://github.com/pypeclub/OpenPype/issues/482) -- Colorized recognition of creator result [\#394](https://github.com/pypeclub/OpenPype/issues/394) -- event assign user to started task [\#49](https://github.com/pypeclub/OpenPype/issues/49) -- rebuild containers from reference in maya [\#55](https://github.com/pypeclub/OpenPype/issues/55) -- nuke Load metadata [\#66](https://github.com/pypeclub/OpenPype/issues/66) -- Maya: Safer handling of expected render output names [\#1496](https://github.com/pypeclub/OpenPype/pull/1496) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- TVPaint: Increment workfile version on successful publish. [\#1489](https://github.com/pypeclub/OpenPype/pull/1489) ([tokejepsen](https://github.com/tokejepsen)) -- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1484](https://github.com/pypeclub/OpenPype/pull/1484) ([tokejepsen](https://github.com/tokejepsen)) -- Maya: Use of multiple deadline servers [\#1483](https://github.com/pypeclub/OpenPype/pull/1483) ([antirotor](https://github.com/antirotor)) - -**Fixed bugs:** - -- Igniter version resolution doesn't consider its own version [\#1505](https://github.com/pypeclub/OpenPype/issues/1505) -- Maya: Safer handling of expected render output names [\#1159](https://github.com/pypeclub/OpenPype/issues/1159) -- Harmony: Invalid render output from non-conventionally named instance [\#871](https://github.com/pypeclub/OpenPype/issues/871) -- Existing subsets hints in creator [\#1503](https://github.com/pypeclub/OpenPype/pull/1503) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- nuke: space in node name breaking process [\#1494](https://github.com/pypeclub/OpenPype/pull/1494) ([jezscha](https://github.com/jezscha)) -- Maya: wrong collection of playblasted frames [\#1517](https://github.com/pypeclub/OpenPype/pull/1517) ([mkolar](https://github.com/mkolar)) -- Existing subsets hints in creator [\#1502](https://github.com/pypeclub/OpenPype/pull/1502) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Use instance frame start instead of timeline.
[\#1486](https://github.com/pypeclub/OpenPype/pull/1486) ([tokejepsen](https://github.com/tokejepsen)) -- Maya: Redshift - set proper start frame on proxy [\#1480](https://github.com/pypeclub/OpenPype/pull/1480) ([antirotor](https://github.com/antirotor)) - -**Closed issues:** - -- Nuke: wrong "star at" value on render load [\#1352](https://github.com/pypeclub/OpenPype/issues/1352) -- DV Resolve - loading/updating - image video [\#915](https://github.com/pypeclub/OpenPype/issues/915) - -**Merged pull requests:** - -- nuke: fixing start\_at with option gui [\#1507](https://github.com/pypeclub/OpenPype/pull/1507) ([jezscha](https://github.com/jezscha)) - -## [CI/3.0.0-rc.4](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.4) (2021-05-12) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.3...CI/3.0.0-rc.4) - -**Implemented enhancements:** - -- Resolve: documentation [\#1490](https://github.com/pypeclub/OpenPype/issues/1490) -- Hiero: audio to review [\#1378](https://github.com/pypeclub/OpenPype/issues/1378) -- nks color clips after publish [\#44](https://github.com/pypeclub/OpenPype/issues/44) -- Store data from modifiable dict as list [\#1504](https://github.com/pypeclub/OpenPype/pull/1504) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1497](https://github.com/pypeclub/OpenPype/pull/1497) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Hiero: publish audio and add to review [\#1493](https://github.com/pypeclub/OpenPype/pull/1493) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Resolve: documentation [\#1491](https://github.com/pypeclub/OpenPype/pull/1491) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Change integratenew template profiles setting [\#1487](https://github.com/pypeclub/OpenPype/pull/1487) ([kalisp](https://github.com/kalisp)) -- Settings tool cleanup [\#1477](https://github.com/pypeclub/OpenPype/pull/1477) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Sorted Applications and Tools in Custom attribute [\#1476](https://github.com/pypeclub/OpenPype/pull/1476) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- PS - group all published instances [\#1416](https://github.com/pypeclub/OpenPype/pull/1416) ([kalisp](https://github.com/kalisp)) -- OpenPype: Support for Docker [\#1289](https://github.com/pypeclub/OpenPype/pull/1289) ([antirotor](https://github.com/antirotor)) - -**Fixed bugs:** - -- Harmony: palettes publishing [\#1439](https://github.com/pypeclub/OpenPype/issues/1439) -- Photoshop: validation for already created images [\#1435](https://github.com/pypeclub/OpenPype/issues/1435) -- Nuke Extracts Thumbnail from frame out of shot range [\#963](https://github.com/pypeclub/OpenPype/issues/963) -- Instance in same Context repairing [\#390](https://github.com/pypeclub/OpenPype/issues/390) -- User Inactivity - Start timers sets wrong time [\#91](https://github.com/pypeclub/OpenPype/issues/91) -- Use instance frame start instead of timeline [\#1499](https://github.com/pypeclub/OpenPype/pull/1499) ([mkolar](https://github.com/mkolar)) -- Various smaller fixes [\#1498](https://github.com/pypeclub/OpenPype/pull/1498) ([mkolar](https://github.com/mkolar)) -- nuke: space in node name breaking process [\#1495](https://github.com/pypeclub/OpenPype/pull/1495) ([jezscha](https://github.com/jezscha)) -- Codec determination in extract burnin [\#1492](https://github.com/pypeclub/OpenPype/pull/1492) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- 
Undefined constant in subprocess module [\#1485](https://github.com/pypeclub/OpenPype/pull/1485) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- List entity catch add/remove item changes properly [\#1482](https://github.com/pypeclub/OpenPype/pull/1482) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Resolve: additional fixes of publishing workflow [\#1481](https://github.com/pypeclub/OpenPype/pull/1481) ([jezscha](https://github.com/jezscha)) -- Photoshop: validation for already created images [\#1436](https://github.com/pypeclub/OpenPype/pull/1436) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) - -**Merged pull requests:** - -- Maya: Support for looks on VRay Proxies [\#1443](https://github.com/pypeclub/OpenPype/pull/1443) ([antirotor](https://github.com/antirotor)) - -## [2.17.3](https://github.com/pypeclub/openpype/tree/2.17.3) (2021-05-06) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.3...2.17.3) - -**Fixed bugs:** - -- Nuke: workfile version synced to db version always [\#1479](https://github.com/pypeclub/OpenPype/pull/1479) ([jezscha](https://github.com/jezscha)) - -## [CI/3.0.0-rc.3](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.3) (2021-05-05) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.2...CI/3.0.0-rc.3) - -**Implemented enhancements:** - -- Path entity with placeholder [\#1473](https://github.com/pypeclub/OpenPype/pull/1473) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Burnin custom font filepath [\#1472](https://github.com/pypeclub/OpenPype/pull/1472) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Poetry: Move to OpenPype [\#1449](https://github.com/pypeclub/OpenPype/pull/1449) ([antirotor](https://github.com/antirotor)) - -**Fixed bugs:** - -- Mac SSL path needs to be relative to pype\_root [\#1469](https://github.com/pypeclub/OpenPype/issues/1469) -- Resolve: fix loading clips to timeline [\#1421](https://github.com/pypeclub/OpenPype/issues/1421) -- Wrong handling of slashes when loading on mac [\#1411](https://github.com/pypeclub/OpenPype/issues/1411) -- Nuke openpype3 [\#1342](https://github.com/pypeclub/OpenPype/issues/1342) -- Houdini launcher [\#1171](https://github.com/pypeclub/OpenPype/issues/1171) -- Fix SyncServer get\_enabled\_projects should handle global state [\#1475](https://github.com/pypeclub/OpenPype/pull/1475) ([kalisp](https://github.com/kalisp)) -- Igniter buttons enable/disable fix [\#1474](https://github.com/pypeclub/OpenPype/pull/1474) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Mac SSL path needs to be relative to pype\_root [\#1470](https://github.com/pypeclub/OpenPype/pull/1470) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Resolve: 17 compatibility issues and load image sequences [\#1422](https://github.com/pypeclub/OpenPype/pull/1422) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) - -## [CI/3.0.0-rc.2](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.2) (2021-05-04) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.2...CI/3.0.0-rc.2) - -**Implemented enhancements:** - -- Extract burnins with sequences [\#1467](https://github.com/pypeclub/OpenPype/pull/1467) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Extract burnins with color setting [\#1466](https://github.com/pypeclub/OpenPype/pull/1466) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) - -**Fixed bugs:** - -- Fix groups check in Python 2 [\#1468](https://github.com/pypeclub/OpenPype/pull/1468) 
([iLLiCiTiT](https://github.com/iLLiCiTiT)) - -## [2.17.2](https://github.com/pypeclub/openpype/tree/2.17.2) (2021-05-04) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.1...2.17.2) - -**Implemented enhancements:** - -- Forward/Backward compatible apps and tools with OpenPype 3 [\#1463](https://github.com/pypeclub/OpenPype/pull/1463) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) - -## [CI/3.0.0-rc.1](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.1) (2021-05-04) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.1...CI/3.0.0-rc.1) - -**Implemented enhancements:** - -- Only show studio settings to admins [\#1406](https://github.com/pypeclub/OpenPype/issues/1406) -- Ftrack specific settings save warning messages [\#1458](https://github.com/pypeclub/OpenPype/pull/1458) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Faster settings actions [\#1446](https://github.com/pypeclub/OpenPype/pull/1446) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Feature/sync server priority [\#1444](https://github.com/pypeclub/OpenPype/pull/1444) ([kalisp](https://github.com/kalisp)) -- Faster settings UI loading [\#1442](https://github.com/pypeclub/OpenPype/pull/1442) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Igniter re-write [\#1441](https://github.com/pypeclub/OpenPype/pull/1441) ([mkolar](https://github.com/mkolar)) -- Wrap openpype build into installers [\#1419](https://github.com/pypeclub/OpenPype/pull/1419) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Extract review first documentation [\#1404](https://github.com/pypeclub/OpenPype/pull/1404) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Blender PySide2 install guide [\#1403](https://github.com/pypeclub/OpenPype/pull/1403) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Nuke: deadline submission with gpu [\#1394](https://github.com/pypeclub/OpenPype/pull/1394) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Igniter: Reverse item filter for OpenPype version [\#1349](https://github.com/pypeclub/OpenPype/pull/1349) ([antirotor](https://github.com/antirotor)) - -**Fixed bugs:** - -- OpenPype Mongo URL definition [\#1450](https://github.com/pypeclub/OpenPype/issues/1450) -- Various typos and smaller fixes [\#1464](https://github.com/pypeclub/OpenPype/pull/1464) ([mkolar](https://github.com/mkolar)) -- Validation of dynamic items in settings [\#1462](https://github.com/pypeclub/OpenPype/pull/1462) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- List can handle new items correctly [\#1459](https://github.com/pypeclub/OpenPype/pull/1459) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Settings actions process fix [\#1457](https://github.com/pypeclub/OpenPype/pull/1457) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Add to overrides actions fix [\#1456](https://github.com/pypeclub/OpenPype/pull/1456) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- OpenPype Mongo URL definition [\#1455](https://github.com/pypeclub/OpenPype/pull/1455) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Global settings save/load out of system settings [\#1447](https://github.com/pypeclub/OpenPype/pull/1447) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Keep metadata on remove overrides [\#1445](https://github.com/pypeclub/OpenPype/pull/1445) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Nuke: fixing undo for loaded mov and sequence [\#1432](https://github.com/pypeclub/OpenPype/pull/1432) ([jezscha](https://github.com/jezscha)) -- ExtractReview 
skip empty strings from settings [\#1431](https://github.com/pypeclub/OpenPype/pull/1431) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Bugfix Sync server tweaks [\#1430](https://github.com/pypeclub/OpenPype/pull/1430) ([kalisp](https://github.com/kalisp)) -- Hiero: missing thumbnail in review [\#1429](https://github.com/pypeclub/OpenPype/pull/1429) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Bugfix Maya in deadline for OpenPype [\#1428](https://github.com/pypeclub/OpenPype/pull/1428) ([kalisp](https://github.com/kalisp)) -- AE - validation for duration was 1 frame shorter [\#1427](https://github.com/pypeclub/OpenPype/pull/1427) ([kalisp](https://github.com/kalisp)) -- Houdini menu filename [\#1418](https://github.com/pypeclub/OpenPype/pull/1418) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Fix Avalon plugins attribute overrides [\#1413](https://github.com/pypeclub/OpenPype/pull/1413) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Nuke: submit to Deadline fails [\#1409](https://github.com/pypeclub/OpenPype/pull/1409) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- Validate MongoDB Url on start [\#1407](https://github.com/pypeclub/OpenPype/pull/1407) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Nuke: fix set colorspace with new settings [\#1386](https://github.com/pypeclub/OpenPype/pull/1386) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- MacOS build and install issues [\#1380](https://github.com/pypeclub/OpenPype/pull/1380) ([mkolar](https://github.com/mkolar)) - -**Closed issues:** - -- test [\#1452](https://github.com/pypeclub/OpenPype/issues/1452) - -**Merged pull requests:** - -- TVPaint frame range definition [\#1425](https://github.com/pypeclub/OpenPype/pull/1425) ([iLLiCiTiT](https://github.com/iLLiCiTiT)) -- Only show studio settings to admins [\#1420](https://github.com/pypeclub/OpenPype/pull/1420) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch)) -- TVPaint documentation [\#1305](https://github.com/pypeclub/OpenPype/pull/1305) ([64qam](https://github.com/64qam)) - -## [2.17.1](https://github.com/pypeclub/openpype/tree/2.17.1) (2021-04-30) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.0...2.17.1) - -**Enhancements:** - -- Nuke: deadline submission with gpu [\#1414](https://github.com/pypeclub/OpenPype/pull/1414) -- TVPaint frame range definition [\#1424](https://github.com/pypeclub/OpenPype/pull/1424) -- PS - group all published instances [\#1415](https://github.com/pypeclub/OpenPype/pull/1415) -- Add task name to context pop up. [\#1383](https://github.com/pypeclub/OpenPype/pull/1383) -- Enhance review letterbox feature.
[\#1371](https://github.com/pypeclub/OpenPype/pull/1371) - -**Fixed bugs:** - -- Houdini menu filename [\#1417](https://github.com/pypeclub/OpenPype/pull/1417) -- AE - validation for duration was 1 frame shorter [\#1426](https://github.com/pypeclub/OpenPype/pull/1426) - -**Merged pull requests:** - -- Maya: Vray - problem getting all file nodes for look publishing [\#1399](https://github.com/pypeclub/OpenPype/pull/1399) -- Maya: Support for Redshift proxies [\#1360](https://github.com/pypeclub/OpenPype/pull/1360) - -## [2.17.0](https://github.com/pypeclub/openpype/tree/2.17.0) (2021-04-20) - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-beta.2...2.17.0) - -**Enhancements:** - -- Forward compatible ftrack group [\#1243](https://github.com/pypeclub/OpenPype/pull/1243) -- Settings in mongo as dict [\#1221](https://github.com/pypeclub/OpenPype/pull/1221) -- Maya: Make tx option configurable with presets [\#1328](https://github.com/pypeclub/OpenPype/pull/1328) -- TVPaint asset name validation [\#1302](https://github.com/pypeclub/OpenPype/pull/1302) -- TV Paint: Set initial project settings. [\#1299](https://github.com/pypeclub/OpenPype/pull/1299) -- TV Paint: Validate mark in and out. [\#1298](https://github.com/pypeclub/OpenPype/pull/1298) -- Validate project settings [\#1297](https://github.com/pypeclub/OpenPype/pull/1297) -- After Effects: added SubsetManager [\#1234](https://github.com/pypeclub/OpenPype/pull/1234) -- Show error message in pyblish UI [\#1206](https://github.com/pypeclub/OpenPype/pull/1206) - -**Fixed bugs:** - -- Hiero: fixing source frame from correct object [\#1362](https://github.com/pypeclub/OpenPype/pull/1362) -- Nuke: fix colourspace, prerenders and nuke panes opening [\#1308](https://github.com/pypeclub/OpenPype/pull/1308) -- AE remove orphaned instance from workfile - fix self.stub [\#1282](https://github.com/pypeclub/OpenPype/pull/1282) -- Nuke: deadline submission with search replaced env values from preset [\#1194](https://github.com/pypeclub/OpenPype/pull/1194) -- Ftrack custom attributes in bulks [\#1312](https://github.com/pypeclub/OpenPype/pull/1312) -- Ftrack optional pypclub role [\#1303](https://github.com/pypeclub/OpenPype/pull/1303) -- After Effects: remove orphaned instances [\#1275](https://github.com/pypeclub/OpenPype/pull/1275) -- Avalon schema names [\#1242](https://github.com/pypeclub/OpenPype/pull/1242) -- Handle duplication of Task name [\#1226](https://github.com/pypeclub/OpenPype/pull/1226) -- Modified path of plugin loads for Harmony and TVPaint [\#1217](https://github.com/pypeclub/OpenPype/pull/1217) -- Regex checks in profiles filtering [\#1214](https://github.com/pypeclub/OpenPype/pull/1214) -- Bulk mov strict task [\#1204](https://github.com/pypeclub/OpenPype/pull/1204) -- Update custom ftrack session attributes [\#1202](https://github.com/pypeclub/OpenPype/pull/1202) -- Nuke: write node colorspace ignore `default\(\)` label [\#1199](https://github.com/pypeclub/OpenPype/pull/1199) -- Nuke: reverse search to make it more versatile [\#1178](https://github.com/pypeclub/OpenPype/pull/1178) - - - -## [2.16.0](https://github.com/pypeclub/pype/tree/2.16.0) (2021-03-22) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.3...2.16.0) - -**Enhancements:** - -- Nuke: deadline submit limit group filter [\#1167](https://github.com/pypeclub/pype/pull/1167) -- Maya: support for Deadline Group and Limit Groups - backport 2.x [\#1156](https://github.com/pypeclub/pype/pull/1156) -- Maya: fixes for Redshift support 
[\#1152](https://github.com/pypeclub/pype/pull/1152) -- Nuke: adding preset for a Read node name to all img and mov Loaders [\#1146](https://github.com/pypeclub/pype/pull/1146) -- nuke deadline submit with environ var from presets overrides [\#1142](https://github.com/pypeclub/pype/pull/1142) -- Change timers after task change [\#1138](https://github.com/pypeclub/pype/pull/1138) -- Nuke: shortcuts for Pype menu [\#1127](https://github.com/pypeclub/pype/pull/1127) -- Nuke: workfile template [\#1124](https://github.com/pypeclub/pype/pull/1124) -- Sites local settings by site name [\#1117](https://github.com/pypeclub/pype/pull/1117) -- Reset loader's asset selection on context change [\#1106](https://github.com/pypeclub/pype/pull/1106) -- Bulk mov render publishing [\#1101](https://github.com/pypeclub/pype/pull/1101) -- Photoshop: mark publishable instances [\#1093](https://github.com/pypeclub/pype/pull/1093) -- Added ability to define BG color for extract review [\#1088](https://github.com/pypeclub/pype/pull/1088) -- TVPaint extractor enhancement [\#1080](https://github.com/pypeclub/pype/pull/1080) -- Photoshop: added support for .psb in workfiles [\#1078](https://github.com/pypeclub/pype/pull/1078) -- Optionally add task to subset name [\#1072](https://github.com/pypeclub/pype/pull/1072) -- Only extend clip range when collecting. [\#1008](https://github.com/pypeclub/pype/pull/1008) -- Collect audio for farm reviews. [\#1073](https://github.com/pypeclub/pype/pull/1073) - - -**Fixed bugs:** - -- Fix path spaces in jpeg extractor [\#1174](https://github.com/pypeclub/pype/pull/1174) -- Maya: Bugfix: superclass for CreateCameraRig [\#1166](https://github.com/pypeclub/pype/pull/1166) -- Maya: Submit to Deadline - fix typo in condition [\#1163](https://github.com/pypeclub/pype/pull/1163) -- Avoid dot in repre extension [\#1125](https://github.com/pypeclub/pype/pull/1125) -- Fix versions variable usage in standalone publisher [\#1090](https://github.com/pypeclub/pype/pull/1090) -- Collect instance data fix subset query [\#1082](https://github.com/pypeclub/pype/pull/1082) -- Fix getting the camera name. [\#1067](https://github.com/pypeclub/pype/pull/1067) -- Nuke: Ensure "NUKE\_TEMP\_DIR" is not part of the Deadline job environment. 
[\#1064](https://github.com/pypeclub/pype/pull/1064) - -## [2.15.3](https://github.com/pypeclub/pype/tree/2.15.3) (2021-02-26) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.2...2.15.3) - -**Enhancements:** - -- Maya: speedup renderable camera collection [\#1053](https://github.com/pypeclub/pype/pull/1053) -- Harmony - add regex search to filter allowed task names for collectin… [\#1047](https://github.com/pypeclub/pype/pull/1047) - -**Fixed bugs:** - -- Ftrack integrate hierarchy fix [\#1085](https://github.com/pypeclub/pype/pull/1085) -- Explicit subset filter in anatomy instance data [\#1059](https://github.com/pypeclub/pype/pull/1059) -- TVPaint frame offset [\#1057](https://github.com/pypeclub/pype/pull/1057) -- Auto fix unicode strings [\#1046](https://github.com/pypeclub/pype/pull/1046) - -## [2.15.2](https://github.com/pypeclub/pype/tree/2.15.2) (2021-02-19) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.1...2.15.2) - -**Enhancements:** - -- Maya: Vray scene publishing [\#1013](https://github.com/pypeclub/pype/pull/1013) - -**Fixed bugs:** - -- Fix entity move under project [\#1040](https://github.com/pypeclub/pype/pull/1040) -- smaller nuke fixes from production [\#1036](https://github.com/pypeclub/pype/pull/1036) -- TVPaint thumbnail extract fix [\#1031](https://github.com/pypeclub/pype/pull/1031) - -## [2.15.1](https://github.com/pypeclub/pype/tree/2.15.1) (2021-02-12) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.0...2.15.1) - -**Enhancements:** - -- Delete version as loader action [\#1011](https://github.com/pypeclub/pype/pull/1011) -- Delete old versions [\#445](https://github.com/pypeclub/pype/pull/445) - -**Fixed bugs:** - -- PS - remove obsolete functions from pywin32 [\#1006](https://github.com/pypeclub/pype/pull/1006) -- Clone description of review session objects. [\#922](https://github.com/pypeclub/pype/pull/922) - -## [2.15.0](https://github.com/pypeclub/pype/tree/2.15.0) (2021-02-09) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.6...2.15.0) - -**Enhancements:** - -- Resolve - loading and updating clips [\#932](https://github.com/pypeclub/pype/pull/932) -- Release/2.15.0 [\#926](https://github.com/pypeclub/pype/pull/926) -- Photoshop: add option for template.psd and prelaunch hook [\#894](https://github.com/pypeclub/pype/pull/894) -- Nuke: deadline presets [\#993](https://github.com/pypeclub/pype/pull/993) -- Maya: Alembic only set attributes that exists. [\#986](https://github.com/pypeclub/pype/pull/986) -- Harmony: render local and handle fixes [\#981](https://github.com/pypeclub/pype/pull/981) -- PSD Bulk export of ANIM group [\#965](https://github.com/pypeclub/pype/pull/965) -- AE - added prelaunch hook for opening last or workfile from template [\#944](https://github.com/pypeclub/pype/pull/944) -- PS - safer handling of loading of workfile [\#941](https://github.com/pypeclub/pype/pull/941) -- Maya: Handling Arnold referenced AOVs [\#938](https://github.com/pypeclub/pype/pull/938) -- TVPaint: switch layer IDs for layer names during identification [\#903](https://github.com/pypeclub/pype/pull/903) -- TVPaint audio/sound loader [\#893](https://github.com/pypeclub/pype/pull/893) -- Clone review session with children.
[\#891](https://github.com/pypeclub/pype/pull/891) -- Simple compositing data packager for freelancers [\#884](https://github.com/pypeclub/pype/pull/884) -- Harmony deadline submission [\#881](https://github.com/pypeclub/pype/pull/881) -- Maya: Optionally hide image planes from reviews. [\#840](https://github.com/pypeclub/pype/pull/840) -- Maya: handle referenced AOVs for Vray [\#824](https://github.com/pypeclub/pype/pull/824) -- DWAA/DWAB support on windows [\#795](https://github.com/pypeclub/pype/pull/795) -- Unreal: animation, layout and setdress updates [\#695](https://github.com/pypeclub/pype/pull/695) - -**Fixed bugs:** - -- Maya: Looks - disable hardlinks [\#995](https://github.com/pypeclub/pype/pull/995) -- Fix Ftrack custom attribute update [\#982](https://github.com/pypeclub/pype/pull/982) -- Prores ks in burnin script [\#960](https://github.com/pypeclub/pype/pull/960) -- terminal.py crash on import [\#839](https://github.com/pypeclub/pype/pull/839) -- Extract review handle bizarre pixel aspect ratio [\#990](https://github.com/pypeclub/pype/pull/990) -- Nuke: add nuke related env var to submission [\#988](https://github.com/pypeclub/pype/pull/988) -- Nuke: missing preset's variable [\#984](https://github.com/pypeclub/pype/pull/984) -- Get creator by name fix [\#979](https://github.com/pypeclub/pype/pull/979) -- Fix update of project's tasks on Ftrack sync [\#972](https://github.com/pypeclub/pype/pull/972) -- nuke: wrong frame offset in mov loader [\#971](https://github.com/pypeclub/pype/pull/971) -- Create project structure action fix multiroot [\#967](https://github.com/pypeclub/pype/pull/967) -- PS: remove pywin installation from hook [\#964](https://github.com/pypeclub/pype/pull/964) -- Prores ks in burnin script [\#959](https://github.com/pypeclub/pype/pull/959) -- Subset family is now stored in subset document [\#956](https://github.com/pypeclub/pype/pull/956) -- DJV new version arguments [\#954](https://github.com/pypeclub/pype/pull/954) -- TV Paint: Fix single frame Sequence [\#953](https://github.com/pypeclub/pype/pull/953) -- nuke: missing `file` knob update [\#933](https://github.com/pypeclub/pype/pull/933) -- Photoshop: Create from single layer was failing [\#920](https://github.com/pypeclub/pype/pull/920) -- Nuke: baking mov with correct colorspace inherited from write [\#909](https://github.com/pypeclub/pype/pull/909) -- Launcher fix actions discover [\#896](https://github.com/pypeclub/pype/pull/896) -- Get the correct file path for the updated mov.
[\#889](https://github.com/pypeclub/pype/pull/889) -- Maya: Deadline submitter - shared data access violation [\#831](https://github.com/pypeclub/pype/pull/831) -- Maya: Take into account vray master AOV switch [\#822](https://github.com/pypeclub/pype/pull/822) - -**Merged pull requests:** - -- Refactor blender to 3.0 format [\#934](https://github.com/pypeclub/pype/pull/934) - -## [2.14.6](https://github.com/pypeclub/pype/tree/2.14.6) (2021-01-15) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.5...2.14.6) - -**Fixed bugs:** - -- Nuke: improving of hashing path [\#885](https://github.com/pypeclub/pype/pull/885) - -**Merged pull requests:** - -- Hiero: cut videos with correct seconds [\#892](https://github.com/pypeclub/pype/pull/892) -- Faster sync to avalon preparation [\#869](https://github.com/pypeclub/pype/pull/869) - -## [2.14.5](https://github.com/pypeclub/pype/tree/2.14.5) (2021-01-06) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.4...2.14.5) - -**Merged pull requests:** - -- Pype logger refactor [\#866](https://github.com/pypeclub/pype/pull/866) - -## [2.14.4](https://github.com/pypeclub/pype/tree/2.14.4) (2020-12-18) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.3...2.14.4) - -**Merged pull requests:** - -- Fix - AE - added explicit cast to int [\#837](https://github.com/pypeclub/pype/pull/837) - -## [2.14.3](https://github.com/pypeclub/pype/tree/2.14.3) (2020-12-16) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.2...2.14.3) - -**Fixed bugs:** - -- TVPaint repair invalid metadata [\#809](https://github.com/pypeclub/pype/pull/809) -- Feature/push hier value to nonhier action [\#807](https://github.com/pypeclub/pype/pull/807) -- Harmony: fix palette and image sequence loader [\#806](https://github.com/pypeclub/pype/pull/806) - -**Merged pull requests:** - -- respecting space in path [\#823](https://github.com/pypeclub/pype/pull/823) - -## [2.14.2](https://github.com/pypeclub/pype/tree/2.14.2) (2020-12-04) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.1...2.14.2) - -**Enhancements:** - -- Collapsible wrapper in settings [\#767](https://github.com/pypeclub/pype/pull/767) - -**Fixed bugs:** - -- Harmony: template extraction and palettes thumbnails on mac [\#768](https://github.com/pypeclub/pype/pull/768) -- TVPaint store context to workfile metadata \(764\) [\#766](https://github.com/pypeclub/pype/pull/766) -- Extract review audio cut fix [\#763](https://github.com/pypeclub/pype/pull/763) - -**Merged pull requests:** - -- AE: fix publish after background load [\#781](https://github.com/pypeclub/pype/pull/781) -- TVPaint store members key [\#769](https://github.com/pypeclub/pype/pull/769) - -## [2.14.1](https://github.com/pypeclub/pype/tree/2.14.1) (2020-11-27) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.0...2.14.1) - -**Enhancements:** - -- Settings required keys in modifiable dict [\#770](https://github.com/pypeclub/pype/pull/770) -- Extract review may not add audio to output [\#761](https://github.com/pypeclub/pype/pull/761) - -**Fixed bugs:** - -- After Effects: frame range, file format and render source scene fixes [\#760](https://github.com/pypeclub/pype/pull/760) -- Hiero: trimming review with clip event number [\#754](https://github.com/pypeclub/pype/pull/754) -- TVPaint: fix updating of loaded subsets [\#752](https://github.com/pypeclub/pype/pull/752) -- Maya: Vray handling of default aov [\#748](https://github.com/pypeclub/pype/pull/748) -- Maya: multiple
renderable cameras in layer didn't work [\#744](https://github.com/pypeclub/pype/pull/744) -- Ftrack integrate custom attributes fix [\#742](https://github.com/pypeclub/pype/pull/742) - -## [2.14.0](https://github.com/pypeclub/pype/tree/2.14.0) (2020-11-23) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.7...2.14.0) - -**Enhancements:** - -- Render publish plugins abstraction [\#687](https://github.com/pypeclub/pype/pull/687) -- Shot asset build trigger status [\#736](https://github.com/pypeclub/pype/pull/736) -- Maya: add camera rig publishing option [\#721](https://github.com/pypeclub/pype/pull/721) -- Sort instances by label in pyblish gui [\#719](https://github.com/pypeclub/pype/pull/719) -- Synchronize ftrack hierarchical and shot attributes [\#716](https://github.com/pypeclub/pype/pull/716) -- 686 standalonepublisher editorial from image sequences [\#699](https://github.com/pypeclub/pype/pull/699) -- Ask user to select non-default camera from scene or create a new. [\#678](https://github.com/pypeclub/pype/pull/678) -- TVPaint: image loader with options [\#675](https://github.com/pypeclub/pype/pull/675) -- Maya: Camera name can be added to burnins. [\#674](https://github.com/pypeclub/pype/pull/674) -- After Effects: base integration with loaders [\#667](https://github.com/pypeclub/pype/pull/667) -- Harmony: Javascript refactoring and overall stability improvements [\#666](https://github.com/pypeclub/pype/pull/666) - -**Fixed bugs:** - -- Bugfix Hiero Review / Plate representation publish [\#743](https://github.com/pypeclub/pype/pull/743) -- Asset fetch second fix [\#726](https://github.com/pypeclub/pype/pull/726) -- TVPaint extract review fix [\#740](https://github.com/pypeclub/pype/pull/740) -- After Effects: Review were not being sent to ftrack [\#738](https://github.com/pypeclub/pype/pull/738) -- Maya: vray proxy was not loading [\#722](https://github.com/pypeclub/pype/pull/722) -- Maya: Vray expected file fixes [\#682](https://github.com/pypeclub/pype/pull/682) -- Missing audio on farm submission. 
-
-**Deprecated:**
-
-- Removed artist view from pyblish gui [\#717](https://github.com/pypeclub/pype/pull/717)
-- Maya: disable legacy override check for cameras [\#715](https://github.com/pypeclub/pype/pull/715)
-
-**Merged pull requests:**
-
-- Application manager [\#728](https://github.com/pypeclub/pype/pull/728)
-- Feature \#664 3.0 lib refactor [\#706](https://github.com/pypeclub/pype/pull/706)
-- Lib from illicit part 2 [\#700](https://github.com/pypeclub/pype/pull/700)
-- 3.0 lib refactor - path tools [\#697](https://github.com/pypeclub/pype/pull/697)
-
-## [2.13.7](https://github.com/pypeclub/pype/tree/2.13.7) (2020-11-19)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.6...2.13.7)
-
-**Fixed bugs:**
-
-- Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729)
-
-## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6)
-
-**Fixed bugs:**
-
-- Maya workfile version wasn't syncing with renders properly [\#711](https://github.com/pypeclub/pype/pull/711)
-- Maya: Fix for publishing multiple cameras with review from the same scene [\#710](https://github.com/pypeclub/pype/pull/710)
-
-## [2.13.5](https://github.com/pypeclub/pype/tree/2.13.5) (2020-11-12)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.4...2.13.5)
-
-**Enhancements:**
-
-- 3.0 lib refactor [\#664](https://github.com/pypeclub/pype/issues/664)
-
-**Fixed bugs:**
-
-- Wrong thumbnail file was picked when publishing sequence in standalone publisher [\#703](https://github.com/pypeclub/pype/pull/703)
-- Fix: Burnin data pass and FFmpeg tool check [\#701](https://github.com/pypeclub/pype/pull/701)
-
-## [2.13.4](https://github.com/pypeclub/pype/tree/2.13.4) (2020-11-09)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.3...2.13.4)
-
-**Enhancements:**
-
-- AfterEffects integration with Websocket [\#663](https://github.com/pypeclub/pype/issues/663)
-
-**Fixed bugs:**
-
-- Photoshop unhiding hidden layers [\#688](https://github.com/pypeclub/pype/issues/688)
-- \#688 - Fix publishing hidden layers [\#692](https://github.com/pypeclub/pype/pull/692)
-
-**Closed issues:**
-
-- Nuke Favorite directories "shot dir" "project dir" - not working [\#684](https://github.com/pypeclub/pype/issues/684)
-
-**Merged pull requests:**
-
-- Nuke Favorite directories "shot dir" "project dir" - not working \#684 [\#685](https://github.com/pypeclub/pype/pull/685)
-
-## [2.13.3](https://github.com/pypeclub/pype/tree/2.13.3) (2020-11-03)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.2...2.13.3)
-
-**Enhancements:**
-
-- TV paint base integration [\#612](https://github.com/pypeclub/pype/issues/612)
-
-**Fixed bugs:**
-
-- Fix ffmpeg executable path with spaces [\#680](https://github.com/pypeclub/pype/pull/680)
-- Hotfix: Added default version number [\#679](https://github.com/pypeclub/pype/pull/679)
-
-## [2.13.2](https://github.com/pypeclub/pype/tree/2.13.2) (2020-10-28)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.1...2.13.2)
-
-**Fixed bugs:**
-
-- Nuke: wrong conditions when fixing legacy write nodes [\#665](https://github.com/pypeclub/pype/pull/665)
-
-## [2.13.1](https://github.com/pypeclub/pype/tree/2.13.1) (2020-10-23)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.0...2.13.1)
-
-**Enhancements:**
-
-- move maya look assigner to pype menu [\#292](https://github.com/pypeclub/pype/issues/292)
-
-**Fixed bugs:**
-
-- Layer name is not propagating to metadata in Photoshop [\#654](https://github.com/pypeclub/pype/issues/654)
-- Loader in Photoshop fails with "can't set attribute" [\#650](https://github.com/pypeclub/pype/issues/650)
-- Nuke Load mp4 wrong frame range [\#661](https://github.com/pypeclub/pype/issues/661)
-- Hiero: Review video file adding one frame to the end [\#659](https://github.com/pypeclub/pype/issues/659)
-
-## [2.13.0](https://github.com/pypeclub/pype/tree/2.13.0) (2020-10-18)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.5...2.13.0)
-
-**Enhancements:**
-
-- Deadline Output Folder [\#636](https://github.com/pypeclub/pype/issues/636)
-- Nuke Camera Loader [\#565](https://github.com/pypeclub/pype/issues/565)
-- Deadline publish job shows publishing output folder [\#649](https://github.com/pypeclub/pype/pull/649)
-- Get latest version in lib [\#642](https://github.com/pypeclub/pype/pull/642)
-- Improved publishing of multiple representations from SP [\#638](https://github.com/pypeclub/pype/pull/638)
-- Launch TvPaint shot work file from within Ftrack [\#631](https://github.com/pypeclub/pype/pull/631)
-- Add mp4 support for RV action. [\#628](https://github.com/pypeclub/pype/pull/628)
-- Maya: allow renders to have version synced with workfile [\#618](https://github.com/pypeclub/pype/pull/618)
-- Renaming nukestudio host folder to hiero [\#617](https://github.com/pypeclub/pype/pull/617)
-- Harmony: More efficient publishing [\#615](https://github.com/pypeclub/pype/pull/615)
-- Ftrack server action improvement [\#608](https://github.com/pypeclub/pype/pull/608)
-- Deadline user defaults to pype username if present [\#607](https://github.com/pypeclub/pype/pull/607)
-- Standalone publisher now has icon [\#606](https://github.com/pypeclub/pype/pull/606)
-- Nuke render write targeting knob improvement [\#603](https://github.com/pypeclub/pype/pull/603)
-- Animated pyblish gui [\#602](https://github.com/pypeclub/pype/pull/602)
-- Maya: Deadline - make use of asset dependencies optional [\#591](https://github.com/pypeclub/pype/pull/591)
-- Nuke: Publishing, loading and updating alembic cameras [\#575](https://github.com/pypeclub/pype/pull/575)
-- Maya: add look assigner to pype menu even if scriptsmenu is not available [\#573](https://github.com/pypeclub/pype/pull/573)
-- Store task types in the database [\#572](https://github.com/pypeclub/pype/pull/572)
-- Maya: Tiled EXRs to scanline EXRs render option [\#512](https://github.com/pypeclub/pype/pull/512)
-- Fusion basic integration [\#452](https://github.com/pypeclub/pype/pull/452)
-
-**Fixed bugs:**
-
-- Burnin script did not propagate ffmpeg output [\#640](https://github.com/pypeclub/pype/issues/640)
-- Pyblish-pype spacer in terminal wasn't transparent [\#646](https://github.com/pypeclub/pype/pull/646)
-- Lib subprocess without logger [\#645](https://github.com/pypeclub/pype/pull/645)
-- Nuke: prevent crash if we only have single frame in sequence [\#644](https://github.com/pypeclub/pype/pull/644)
-- Burnin script logs better output [\#641](https://github.com/pypeclub/pype/pull/641)
-- Missing audio on farm submission. [\#639](https://github.com/pypeclub/pype/pull/639)
-- review from imagesequence error [\#633](https://github.com/pypeclub/pype/pull/633)
-- Hiero: wrong order of fps clip instance data collecting [\#627](https://github.com/pypeclub/pype/pull/627)
-- Add source for review instances. [\#625](https://github.com/pypeclub/pype/pull/625)
-- Task processing in event sync [\#623](https://github.com/pypeclub/pype/pull/623)
-- sync to avalon doesn't remove renamed task [\#619](https://github.com/pypeclub/pype/pull/619)
-- Intent publish setting wasn't working with default value [\#562](https://github.com/pypeclub/pype/pull/562)
-- Maya: Updating a look where the shader name changed, leaves the geo without a shader [\#514](https://github.com/pypeclub/pype/pull/514)
-
-**Merged pull requests:**
-
-- Avalon module without Qt [\#581](https://github.com/pypeclub/pype/pull/581)
-- Ftrack module without Qt [\#577](https://github.com/pypeclub/pype/pull/577)
-
-## [2.12.5](https://github.com/pypeclub/pype/tree/2.12.5) (2020-10-14)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.4...2.12.5)
-
-**Enhancements:**
-
-- Launch TvPaint shot work file from within Ftrack [\#629](https://github.com/pypeclub/pype/issues/629)
-
-**Merged pull requests:**
-
-- Harmony: Disable application launch logic [\#637](https://github.com/pypeclub/pype/pull/637)
-
-## [2.12.4](https://github.com/pypeclub/pype/tree/2.12.4) (2020-10-08)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.3...2.12.4)
-
-**Enhancements:**
-
-- convert nukestudio to hiero host [\#616](https://github.com/pypeclub/pype/issues/616)
-- Fusion basic integration [\#451](https://github.com/pypeclub/pype/issues/451)
-
-**Fixed bugs:**
-
-- Sync to avalon doesn't remove renamed task [\#605](https://github.com/pypeclub/pype/issues/605)
-- NukeStudio: FPS collecting into clip instances [\#624](https://github.com/pypeclub/pype/pull/624)
-
-**Merged pull requests:**
-
-- NukeStudio: small fixes [\#622](https://github.com/pypeclub/pype/pull/622)
-- NukeStudio: broken order of plugins [\#620](https://github.com/pypeclub/pype/pull/620)
-
-## [2.12.3](https://github.com/pypeclub/pype/tree/2.12.3) (2020-10-06)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.2...2.12.3)
-
-**Enhancements:**
-
-- Nuke Publish Camera [\#567](https://github.com/pypeclub/pype/issues/567)
-- Harmony: open xstage file no matter its name [\#526](https://github.com/pypeclub/pype/issues/526)
-- Stop integration of unwanted data [\#387](https://github.com/pypeclub/pype/issues/387)
-- Move avalon-launcher functionality to pype [\#229](https://github.com/pypeclub/pype/issues/229)
-- avalon workfiles api [\#214](https://github.com/pypeclub/pype/issues/214)
-- Store task types [\#180](https://github.com/pypeclub/pype/issues/180)
-- Avalon Mongo Connection split [\#136](https://github.com/pypeclub/pype/issues/136)
-- nk camera workflow [\#71](https://github.com/pypeclub/pype/issues/71)
-- Hiero integration added [\#590](https://github.com/pypeclub/pype/pull/590)
-- Anatomy instance data collection is substantially faster for many instances [\#560](https://github.com/pypeclub/pype/pull/560)
-
-**Fixed bugs:**
-
-- test issue [\#596](https://github.com/pypeclub/pype/issues/596)
-- Harmony: empty scene contamination [\#583](https://github.com/pypeclub/pype/issues/583)
-- Edit publishing in SP doesn't respect shot selection for publishing [\#542](https://github.com/pypeclub/pype/issues/542)
-- Pathlib breaks compatibility with python2 hosts [\#281](https://github.com/pypeclub/pype/issues/281)
-- Updating a look where the shader name changed leaves the geo without a shader [\#237](https://github.com/pypeclub/pype/issues/237)
-- Better error handling [\#84](https://github.com/pypeclub/pype/issues/84)
-- Harmony: function signature [\#609](https://github.com/pypeclub/pype/pull/609)
-- Nuke: gizmo publishing error [\#594](https://github.com/pypeclub/pype/pull/594)
-- Harmony: fix clashing namespace of called js functions [\#584](https://github.com/pypeclub/pype/pull/584)
-- Maya: fix maya scene type preset exception [\#569](https://github.com/pypeclub/pype/pull/569)
-
-**Closed issues:**
-
-- Nuke Gizmo publishing [\#597](https://github.com/pypeclub/pype/issues/597)
-- nuke gizmo publishing error [\#592](https://github.com/pypeclub/pype/issues/592)
-- Publish EDL [\#579](https://github.com/pypeclub/pype/issues/579)
-- Publish render from SP [\#576](https://github.com/pypeclub/pype/issues/576)
-- rename ftrack custom attribute group to `pype` [\#184](https://github.com/pypeclub/pype/issues/184)
-
-**Merged pull requests:**
-
-- Audio file existence check [\#614](https://github.com/pypeclub/pype/pull/614)
-- NKS small fixes [\#587](https://github.com/pypeclub/pype/pull/587)
-- Standalone publisher editorial plugins interfering [\#580](https://github.com/pypeclub/pype/pull/580)
-
-## [2.12.2](https://github.com/pypeclub/pype/tree/2.12.2) (2020-09-25)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.1...2.12.2)
-
-**Enhancements:**
-
-- pype config GUI [\#241](https://github.com/pypeclub/pype/issues/241)
-
-**Fixed bugs:**
-
-- Harmony: Saving heavy scenes will crash [\#507](https://github.com/pypeclub/pype/issues/507)
-- Extract review a representation name with `\*\_burnin` [\#388](https://github.com/pypeclub/pype/issues/388)
-- Hierarchy data was not considering active instances [\#551](https://github.com/pypeclub/pype/pull/551)
-
-## [2.12.1](https://github.com/pypeclub/pype/tree/2.12.1) (2020-09-15)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.0...2.12.1)
-
-**Fixed bugs:**
-
-- Pype: changelog.md is outdated [\#503](https://github.com/pypeclub/pype/issues/503)
-- dependency security alert! [\#484](https://github.com/pypeclub/pype/issues/484)
-- Maya: RenderSetup is missing update [\#106](https://github.com/pypeclub/pype/issues/106)
-- \ extract effects creates new instance [\#78](https://github.com/pypeclub/pype/issues/78)
-
-## [2.12.0](https://github.com/pypeclub/pype/tree/2.12.0) (2020-09-10)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.8...2.12.0)
-
-**Enhancements:**
-
-- Less mongo connections [\#509](https://github.com/pypeclub/pype/pull/509)
-- Nuke: adding image loader [\#499](https://github.com/pypeclub/pype/pull/499)
-- Move launcher window to top if launcher action is clicked [\#450](https://github.com/pypeclub/pype/pull/450)
-- Maya: better tile rendering support in Pype [\#446](https://github.com/pypeclub/pype/pull/446)
-- Implementation of non QML launcher [\#443](https://github.com/pypeclub/pype/pull/443)
-- Optional skip review on renders. [\#441](https://github.com/pypeclub/pype/pull/441)
-- Ftrack: Option to push status from task to latest version [\#440](https://github.com/pypeclub/pype/pull/440)
-- Properly containerize image plane loads. [\#434](https://github.com/pypeclub/pype/pull/434)
-- Option to keep the review files. [\#426](https://github.com/pypeclub/pype/pull/426)
-- Isolate view on instance members. [\#425](https://github.com/pypeclub/pype/pull/425)
-- Maya: Publishing of tile renderings on Deadline [\#398](https://github.com/pypeclub/pype/pull/398)
-- Feature/little bit better logging gui [\#383](https://github.com/pypeclub/pype/pull/383)
-
-**Fixed bugs:**
-
-- Maya: Fix tile order for Draft Tile Assembler [\#511](https://github.com/pypeclub/pype/pull/511)
-- Remove extra dash [\#501](https://github.com/pypeclub/pype/pull/501)
-- Fix: strip dot from repre names in single frame renders [\#498](https://github.com/pypeclub/pype/pull/498)
-- Better handling of destination during integrating [\#485](https://github.com/pypeclub/pype/pull/485)
-- Fix: allow thumbnail creation for single frame renders [\#460](https://github.com/pypeclub/pype/pull/460)
-- added missing argument to launch\_application in ftrack app handler [\#453](https://github.com/pypeclub/pype/pull/453)
-- Burnins: Copy bit rate of input video to match quality. [\#448](https://github.com/pypeclub/pype/pull/448)
-- Standalone publisher is now independent from tray [\#442](https://github.com/pypeclub/pype/pull/442)
-- Bugfix/empty enumerator attributes [\#436](https://github.com/pypeclub/pype/pull/436)
-- Fixed wrong order of "other" category collapsing in publisher [\#435](https://github.com/pypeclub/pype/pull/435)
-- Multiple reviews were being overwritten to one. [\#424](https://github.com/pypeclub/pype/pull/424)
-- Cleanup plugin fail on instances without staging dir [\#420](https://github.com/pypeclub/pype/pull/420)
-- deprecated `-intra` parameter in ffmpeg replaced with new `-g` (see the sketch after this list) [\#417](https://github.com/pypeclub/pype/pull/417)
-- Delivery action can now work with entered path [\#397](https://github.com/pypeclub/pype/pull/397)
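For the `-intra` → `-g` swap above, a minimal sketch of the equivalent ffmpeg call; intra-only encoding is assumed to be the goal, and the file names plus the subprocess wrapper are illustrative, not the actual extract-review code:

```python
import subprocess

# Sketch only: "-intra" (long deprecated) forced intra-only encoding;
# "-g 1" gets the same result by setting the GOP size to 1, i.e. every
# frame becomes a keyframe. Input/output paths are placeholders.
cmd = [
    "ffmpeg",
    "-i", "review_input.mov",
    "-g", "1",  # replaces the removed "-intra" flag
    "review_output.mp4",
]
subprocess.run(cmd, check=True)
```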
-
-**Merged pull requests:**
-
-- Review on instance.data [\#473](https://github.com/pypeclub/pype/pull/473)
-
-## [2.11.8](https://github.com/pypeclub/pype/tree/2.11.8) (2020-08-27)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.7...2.11.8)
-
-**Enhancements:**
-
-- DWAA support for Maya [\#382](https://github.com/pypeclub/pype/issues/382)
-- Isolate View on Playblast [\#367](https://github.com/pypeclub/pype/issues/367)
-- Maya: Tile rendering [\#297](https://github.com/pypeclub/pype/issues/297)
-- single pype instance running [\#47](https://github.com/pypeclub/pype/issues/47)
-- PYPE-649: projects don't guarantee backwards compatible environment [\#8](https://github.com/pypeclub/pype/issues/8)
-- PYPE-663: separate venv for each deployed version [\#7](https://github.com/pypeclub/pype/issues/7)
-
-**Fixed bugs:**
-
-- pyblish pype - other group is collapsed before plugins are done [\#431](https://github.com/pypeclub/pype/issues/431)
-- Alpha white edges in harmony on PNGs [\#412](https://github.com/pypeclub/pype/issues/412)
-- harmony image loader picks wrong representations [\#404](https://github.com/pypeclub/pype/issues/404)
-- Clockify crash when response contains a symbol not allowed by UTF-8 [\#81](https://github.com/pypeclub/pype/issues/81)
-
-## [2.11.7](https://github.com/pypeclub/pype/tree/2.11.7) (2020-08-21)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.6...2.11.7)
-
-**Fixed bugs:**
-
-- Clean Up Baked Movie [\#369](https://github.com/pypeclub/pype/issues/369)
-- celaction last workfile [\#459](https://github.com/pypeclub/pype/pull/459)
-
-## [2.11.6](https://github.com/pypeclub/pype/tree/2.11.6) (2020-08-18)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.5...2.11.6)
-
-**Enhancements:**
-
-- publisher app [\#56](https://github.com/pypeclub/pype/issues/56)
-
-## [2.11.5](https://github.com/pypeclub/pype/tree/2.11.5) (2020-08-13)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.4...2.11.5)
-
-**Enhancements:**
-
-- Switch from master to equivalent [\#220](https://github.com/pypeclub/pype/issues/220)
-- Standalone publisher now only groups sequence if the extension is known [\#439](https://github.com/pypeclub/pype/pull/439)
-
-**Fixed bugs:**
-
-- Logs have been disabled for editorial by default to speed up publishing [\#433](https://github.com/pypeclub/pype/pull/433)
-- additional fixes for celaction [\#430](https://github.com/pypeclub/pype/pull/430)
-- Harmony: invalid variable scope in validate scene settings [\#428](https://github.com/pypeclub/pype/pull/428)
-- new representation name for audio was not accepted [\#427](https://github.com/pypeclub/pype/pull/427)
-
-## [2.11.4](https://github.com/pypeclub/pype/tree/2.11.4) (2020-08-10)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.3...2.11.4)
-
-**Enhancements:**
-
-- WebSocket server [\#135](https://github.com/pypeclub/pype/issues/135)
-- standalonepublisher: editorial family features expansion \[master branch\] [\#411](https://github.com/pypeclub/pype/pull/411)
-
-## [2.11.3](https://github.com/pypeclub/pype/tree/2.11.3) (2020-08-04)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.2...2.11.3)
-
-**Fixed bugs:**
-
-- Harmony: publishing performance issues [\#408](https://github.com/pypeclub/pype/pull/408)
-
-## [2.11.2](https://github.com/pypeclub/pype/tree/2.11.2) (2020-07-31)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.1...2.11.2)
-
-**Fixed bugs:**
-
-- Ftrack to Avalon bug [\#406](https://github.com/pypeclub/pype/issues/406)
-
-## [2.11.1](https://github.com/pypeclub/pype/tree/2.11.1) (2020-07-29)
-
-[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.0...2.11.1)
-
-**Merged pull requests:**
-
-- Celaction: metadata json folder fixes on path [\#393](https://github.com/pypeclub/pype/pull/393)
-- CelAction - version up method taken from pype.lib [\#391](https://github.com/pypeclub/pype/pull/391)
-
-
-## 2.11.0 ##
-
-_**release date:** 27 July 2020_
-
-**new:**
-- _(blender)_ namespace support [\#341](https://github.com/pypeclub/pype/pull/341)
-- _(blender)_ start end frames [\#330](https://github.com/pypeclub/pype/pull/330)
-- _(blender)_ camera asset [\#322](https://github.com/pypeclub/pype/pull/322)
-- _(pype)_ toggle instances per family in pyblish GUI [\#320](https://github.com/pypeclub/pype/pull/320)
-- _(pype)_ current release version is now shown in the tray menu [#379](https://github.com/pypeclub/pype/pull/379)
-
-
-**improved:**
-- _(resolve)_ tagging for publish [\#239](https://github.com/pypeclub/pype/issues/239)
-- _(pype)_ Support publishing a subset of shots with standalone editorial [\#336](https://github.com/pypeclub/pype/pull/336)
-- _(harmony)_ Basic support for palettes [\#324](https://github.com/pypeclub/pype/pull/324)
-- _(photoshop)_ Flag outdated containers on startup and publish. [\#309](https://github.com/pypeclub/pype/pull/309)
-- _(harmony)_ Flag Outdated containers [\#302](https://github.com/pypeclub/pype/pull/302)
-- _(photoshop)_ Publish review [\#298](https://github.com/pypeclub/pype/pull/298)
-- _(pype)_ Optional Last workfile launch [\#365](https://github.com/pypeclub/pype/pull/365)
-
-
-**fixed:**
-- _(premiere)_ workflow fixes [\#346](https://github.com/pypeclub/pype/pull/346)
-- _(pype)_ pype-setup does not work with space in path [\#327](https://github.com/pypeclub/pype/issues/327)
-- _(ftrack)_ Ftrack delete action causes circular error [\#206](https://github.com/pypeclub/pype/issues/206)
-- _(nuke)_ Priority was forced to 50 [\#345](https://github.com/pypeclub/pype/pull/345)
-- _(nuke)_ Fix ValidateNukeWriteKnobs [\#340](https://github.com/pypeclub/pype/pull/340)
-- _(maya)_ If camera attributes are connected, we can ignore them. [\#339](https://github.com/pypeclub/pype/pull/339)
-- _(pype)_ stop appending of tools environment to existing env [\#337](https://github.com/pypeclub/pype/pull/337)
-- _(ftrack)_ Ftrack timeout needs to look at AVALON\_TIMEOUT [\#325](https://github.com/pypeclub/pype/pull/325)
-- _(harmony)_ Only zip files are supported. [\#310](https://github.com/pypeclub/pype/pull/310)
-- _(pype)_ hotfix/Fix event server mongo uri [\#305](https://github.com/pypeclub/pype/pull/305)
-- _(photoshop)_ Subset was not named or validated correctly. [\#304](https://github.com/pypeclub/pype/pull/304)
-
-
-
-
-## 2.10.0 ##
-
-_**release date:** 17 June 2020_
-
-**new:**
-- _(harmony)_ **Toon Boom Harmony** has been greatly extended to support rigging, scene build, animation and rendering workflows. [#270](https://github.com/pypeclub/pype/issues/270) [#271](https://github.com/pypeclub/pype/issues/271) [#190](https://github.com/pypeclub/pype/issues/190) [#191](https://github.com/pypeclub/pype/issues/191) [#172](https://github.com/pypeclub/pype/issues/172) [#168](https://github.com/pypeclub/pype/issues/168)
-- _(pype)_ Added support for rudimentary **edl publishing** into individual shots. [#265](https://github.com/pypeclub/pype/issues/265)
-- _(celaction)_ Simple **Celaction** integration has been added with support for workfiles and rendering. [#255](https://github.com/pypeclub/pype/issues/255)
-- _(maya)_ Support for multiple job types when submitting to the farm. We can now render Maya or Standalone render jobs for Vray and Arnold (limited support for arnold) [#204](https://github.com/pypeclub/pype/issues/204)
-- _(photoshop)_ Added initial support for Photoshop [#232](https://github.com/pypeclub/pype/issues/232)
-
-**improved:**
-- _(blender)_ Updated support for rigs and added support for the Layout family [#233](https://github.com/pypeclub/pype/issues/233) [#226](https://github.com/pypeclub/pype/issues/226)
-- _(premiere)_ It is now possible to choose different storage root for workfiles of different task types. [#255](https://github.com/pypeclub/pype/issues/255)
-- _(maya)_ Support for unmerged AOVs in Redshift multipart EXRs [#197](https://github.com/pypeclub/pype/issues/197)
-- _(pype)_ Pype repository has been refactored in preparation for 3.0 release [#169](https://github.com/pypeclub/pype/issues/169)
-- _(deadline)_ All file dependencies are now passed to deadline from maya to prevent premature start of rendering if caches or textures haven't been copied over yet. [#195](https://github.com/pypeclub/pype/issues/195)
-- _(nuke)_ Script validation can now be made optional. [#194](https://github.com/pypeclub/pype/issues/194)
-- _(pype)_ Publishing can now be stopped at any time. [#194](https://github.com/pypeclub/pype/issues/194)
-
-**fix:**
-- _(pype)_ Pyblish-lite has been integrated into pype repository, plus various publishing GUI fixes. [#274](https://github.com/pypeclub/pype/issues/274) [#275](https://github.com/pypeclub/pype/issues/275) [#268](https://github.com/pypeclub/pype/issues/268) [#227](https://github.com/pypeclub/pype/issues/227) [#238](https://github.com/pypeclub/pype/issues/238)
-- _(maya)_ Alembic extractor was getting wrong frame range type in certain scenarios [#254](https://github.com/pypeclub/pype/issues/254)
-- _(maya)_ Attaching a render to subset in maya was not passing validation in certain scenarios [#256](https://github.com/pypeclub/pype/issues/256)
-- _(ftrack)_ Various small fixes to ftrack sync [#263](https://github.com/pypeclub/pype/issues/263) [#259](https://github.com/pypeclub/pype/issues/259)
-- _(maya)_ Look extraction is now able to skip invalid connections in shaders [#207](https://github.com/pypeclub/pype/issues/207)
-
-
-
-
-## 2.9.0 ##
-
-_**release date:** 25 May 2020_
-
-**new:**
-- _(pype)_ Support for **Multiroot projects**. You can now store project data on multiple physical or virtual storages and target individual publishes to these locations. For instance render can be stored on a faster storage than the rest of the project. [#145](https://github.com/pypeclub/pype/issues/145), [#38](https://github.com/pypeclub/pype/issues/38)
-- _(harmony)_ Basic implementation of **Toon Boom Harmony** has been added. [#142](https://github.com/pypeclub/pype/issues/142)
-- _(pype)_ OSX support is in public beta now. There are issues to be expected, but the main implementation should be functional. [#141](https://github.com/pypeclub/pype/issues/141)
-
-
-**improved:**
-
-- _(pype)_ **Review extractor** has been completely rebuilt. It now supports granular filtering so you can create **multiple outputs** for different tasks, families or hosts. [#103](https://github.com/pypeclub/pype/issues/103), [#166](https://github.com/pypeclub/pype/issues/166), [#165](https://github.com/pypeclub/pype/issues/165)
-- _(pype)_ **Burnin** generation has been extended to **support the same multi-output filtering** as the review extractor [#103](https://github.com/pypeclub/pype/issues/103)
-- _(pype)_ Publishing file templates can now be specified in config for each individual family [#114](https://github.com/pypeclub/pype/issues/114)
-- _(pype)_ Studio specific plugins can now be appended to pype standard publishing plugins. [#112](https://github.com/pypeclub/pype/issues/112)
-- _(nukestudio)_ Reviewable clips no longer need to be previously cut, exported and re-imported to timeline. **Pype can now dynamically cut reviewable quicktimes** from continuous offline footage during publishing. [#23](https://github.com/pypeclub/pype/issues/23)
-- _(deadline)_ Deadline can now correctly differentiate between staging and production pype. [#154](https://github.com/pypeclub/pype/issues/154)
-- _(deadline)_ `PYPE_PYTHON_EXE` env variable can now be used to direct publishing to an explicit python installation (see the sketch after this list). [#120](https://github.com/pypeclub/pype/issues/120)
-- _(nuke)_ Nuke now checks for new versions of loaded data on file open. [#140](https://github.com/pypeclub/pype/issues/140)
-- _(nuke)_ frame range and limit checkboxes are now exposed on write node. [#119](https://github.com/pypeclub/pype/issues/119)
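A minimal sketch of how a farm worker might use `PYPE_PYTHON_EXE` to pin publishing to one interpreter; the interpreter path and the publish command line are assumptions for illustration, not the actual Deadline plugin logic:

```python
import os
import subprocess

# Hypothetical worker-side setup; the interpreter path is an assumption.
env = dict(os.environ)
env["PYPE_PYTHON_EXE"] = "/opt/studio/python37/bin/python"

# Placeholder publish command: the real farm plugin builds its own
# command line; this only shows the env variable being honoured.
subprocess.run(
    [env["PYPE_PYTHON_EXE"], "-c", "print('publishing...')"],
    env=env,
    check=True,
)
```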
-
-
-**fix:**
-
-- _(nukestudio)_ Project Location was using backslashes which was breaking nukestudio native exporting in certain configurations [#82](https://github.com/pypeclub/pype/issues/82)
-- _(nukestudio)_ Duplicity in hierarchy tags was prone to throwing publishing error [#130](https://github.com/pypeclub/pype/issues/130), [#144](https://github.com/pypeclub/pype/issues/144)
-- _(ftrack)_ multiple stability improvements [#157](https://github.com/pypeclub/pype/issues/157), [#159](https://github.com/pypeclub/pype/issues/159), [#128](https://github.com/pypeclub/pype/issues/128), [#118](https://github.com/pypeclub/pype/issues/118), [#127](https://github.com/pypeclub/pype/issues/127)
-- _(deadline)_ multipart EXRs were stopping review publishing on the farm. They are still not supported for automatic review generation, but the publish will go through correctly without the quicktime. [#155](https://github.com/pypeclub/pype/issues/155)
-- _(deadline)_ If deadline is non-responsive it will no longer freeze host when publishing [#149](https://github.com/pypeclub/pype/issues/149)
-- _(deadline)_ Sometimes deadline was trying to launch render before all the source data was copied over. [#137](https://github.com/pypeclub/pype/issues/137)
-- _(nuke)_ Filepath knob wasn't updated properly. [#131](https://github.com/pypeclub/pype/issues/131)
-- _(maya)_ When extracting animation, the "Write Color Set" options on the instance were not respected. [#108](https://github.com/pypeclub/pype/issues/108)
-- _(maya)_ Attribute overrides for AOV only worked for the legacy render layers. Now it works for new render setup as well [#132](https://github.com/pypeclub/pype/issues/132)
-- _(maya)_ Stability and usability improvements in yeti workflow [#104](https://github.com/pypeclub/pype/issues/104)
-
-
-
-
-## 2.8.0 ##
-
-_**release date:** 20 April 2020_
-
-**new:**
-
-- _(pype)_ Option to generate slates from json templates (see the sketch after this list). [PYPE-628] [#26](https://github.com/pypeclub/pype/issues/26)
-- _(pype)_ It is now possible to automate loading of published subsets into any scene. Documentation will follow :). [PYPE-611] [#24](https://github.com/pypeclub/pype/issues/24)
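To make the slate-from-json idea above concrete, one plausible template shape is a list of labelled fields filled from publish data; every key below is a hypothetical illustration, not the actual pype schema:

```python
# Hypothetical slate template (stored as JSON in production).
# Field names, layout keys and template syntax are assumptions.
slate_template = {
    "background": "#000000",
    "fields": [
        {"label": "Project", "value": "{project}"},
        {"label": "Shot", "value": "{asset}"},
        {"label": "Version", "value": "v{version:0>3}"},
        {"label": "Frame range", "value": "{frame_start}-{frame_end}"},
    ],
}
```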
-
-**fix:**
-
-- _(maya)_ Some Redshift render tokens could break publishing. [PYPE-778] [#33](https://github.com/pypeclub/pype/issues/33)
-- _(maya)_ Publish was not preserving maya file extension. [#39](https://github.com/pypeclub/pype/issues/39)
-- _(maya)_ Rig output validator was failing on nodes without shapes. [#40](https://github.com/pypeclub/pype/issues/40)
-- _(maya)_ Yeti caches can now be properly versioned up in the scene inventory. [#40](https://github.com/pypeclub/pype/issues/40)
-- _(nuke)_ Build first workfiles was not accepting jpeg sequences. [#34](https://github.com/pypeclub/pype/issues/34)
-- _(deadline)_ Trying to generate ffmpeg review from multipart EXRs no longer crashes publishing. [PYPE-781]
-- _(deadline)_ Render publishing is more stable in multiplatform environments. [PYPE-775]
-
-
-
-
-## 2.7.0 ##
-
-_**release date:** 30 March 2020_
-
-**new:**
-
-- _(maya)_ Artist can now choose to load multiple references of the same subset at once [PYPE-646, PYPS-81]
-- _(nuke)_ Option to use named OCIO colorspaces for review colour baking. [PYPS-82]
-- _(pype)_ Pype can now work with `master` versions for publishing and loading. These are non-versioned publishes that are overwritten with the latest version during publish. These are now supported in all the GUIs, but their publishing is deactivated by default. [PYPE-653]
-- _(blender)_ Added support for basic blender workflow. We currently support `rig`, `model` and `animation` families. [PYPE-768]
-- _(pype)_ Source timecode can now be used in burn-ins. [PYPE-777]
-- _(pype)_ Review outputs profiles can now specify delivery resolution different than project setting [PYPE-759]
-- _(nuke)_ Bookmark to current context is now added automatically to all nuke browser windows. [PYPE-712]
-
-**change:**
-
-- _(maya)_ It is now possible to publish camera without baking. Keep in mind that unbaked cameras can't be guaranteed to work in other hosts. [PYPE-595]
-- _(maya)_ All the renders from maya are now grouped in the loader by their Layer name. [PYPE-482]
-- _(nuke/hiero)_ Any publishes from nuke and hiero can now be versioned independently of the workfile. [PYPE-728]
-
-
-**fix:**
-
-- _(nuke)_ Mixed slashes caused issues in ocio config path.
-- _(pype)_ Intent field in pyblish GUI was passing label instead of value to ftrack. [PYPE-733]
-- _(nuke)_ Publishing of pre-renders was inconsistent. [PYPE-766]
-- _(maya)_ Handles and frame ranges were inconsistent in various places during publishing.
-- _(nuke)_ Nuke was crashing if it ran into certain missing knobs. For example DPX output missing `autocrop` [PYPE-774]
-- _(deadline)_ Project overrides were not working properly with farm render publishing.
-- _(hiero)_ Problems with single frame plates publishing.
-- _(maya)_ Redshift RenderPass tokens were breaking render publishing. [PYPE-778]
-- _(nuke)_ Build first workfile was not accepting jpeg sequences.
-- _(maya)_ Multipart (Multilayer) EXRs were breaking review publishing due to FFMPEG incompatibility [PYPE-781]
-
-
-
-## 2.6.0 ##
-
-_**release date:** 9 March 2020_
-
-**change:**
-- _(maya)_ render publishing has been simplified and made more robust. Render setup layers are now automatically added to publishing subsets and `render globals` family has been replaced with simple `render` [PYPE-570]
-- _(avalon)_ change context and workfiles apps have been merged into one, which allows both actions to be performed at the same time. [PYPE-747]
-- _(pype)_ thumbnails are now automatically propagated to asset from the last published subset in the loader
-- _(ftrack)_ publishing comment and intent are now being published to ftrack note as well as description. [PYPE-727]
-- _(pype)_ when overriding an existing version, old representations are now overwritten instead of the new ones just being appended. (To allow this behaviour, the version validator needs to be disabled. [PYPE-690])
-- _(pype)_ burnin preset has been significantly simplified. It no longer requires passing a function to each field, only the actual text template (see the sketch after this list). To use this, all the current burnin PRESETS MUST BE UPDATED for all the projects.
-- _(ftrack)_ credentials are now stored on a per server basis, so it's possible to switch between ftrack servers without having to log in and out. [PYPE-723]
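A minimal sketch of what a text-template burnin preset could look like after this simplification; the position keys are hypothetical, while `{frame_start}`, `{frame_end}` and `{current_frame}` are the keys this changelog introduces in 2.4.0 below:

```python
# Hypothetical burnin preset: each corner is now a plain text template
# instead of a function. Position key names are illustrative only.
burnin_preset = {
    "TOP_LEFT": "{asset}_{task}",
    "TOP_RIGHT": "v{version:0>3}",
    "BOTTOM_LEFT": "frame: {current_frame}",
    "BOTTOM_RIGHT": "{frame_start}-{frame_end}",
}
```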
-
-**new:**
-- _(pype)_ production and development deployments now have different colour of the tray icon. Orange for Dev and Green for production [PYPE-718]
-- _(maya)_ renders can now be attached to a publishable subset rather than creating their own subset. For example it is possible to create a reviewable `look` or `model` render and have it correctly attached as a representation of the subsets [PYPE-451]
-- _(maya)_ after saving current scene into a new context (as a new shot for instance), all the scene publishing subsets data gets re-generated automatically to match the new context [PYPE-532]
-- _(pype)_ we now support project specific publish, load and create plugins [PYPE-740]
-- _(ftrack)_ new action that allows archiving/deleting old published versions. Users can choose how many of the latest versions to keep when the action is run. [PYPE-748, PYPE-715]
-- _(ftrack)_ it is now possible to monitor and restart ftrack event server using ftrack action. [PYPE-658]
-- _(pype)_ validator that prevents accidental overwrites of previously published versions. [PYPE-680]
-- _(avalon)_ avalon core updated to version 5.6.0
-- _(maya)_ added validator to make sure that relative paths are used when publishing arnold standins.
-- _(nukestudio)_ it is now possible to extract and publish audio family from clip in nuke studio [PYPE-682]
-
-**fix**:
-- _(maya)_ maya set framerange button was ignoring handles [PYPE-719]
-- _(ftrack)_ sync to avalon was sometimes crashing when run on an empty project
-- _(nukestudio)_ publishing same shots after they've been previously archived/deleted would result in a crash. [PYPE-737]
-- _(nuke)_ slate workflow was breaking in certain scenarios. [PYPE-730]
-- _(pype)_ rendering publish workflow has been significantly improved to prevent errors resulting from implicit render collection. [PYPE-665, PYPE-746]
-- _(pype)_ launching application on a non-synced project resulted in an obscure error [PYPE-528]
-- _(pype)_ missing keys in burnins no longer result in an error. [PYPE-706]
-- _(ftrack)_ create folder structure action was sometimes failing for project managers due to wrong permissions.
-- _(Nukestudio)_ using `source` in the start frame tag could result in wrong frame range calculation
-- _(ftrack)_ sync to avalon action and event have been improved by catching more edge cases and processing them properly.
-
-
-
-## 2.5.0 ##
-
-_**release date:** 11 Feb 2020_
-
-**change:**
-- _(pype)_ added many logs for easier debugging
-- _(pype)_ review presets can now be separated between 2d and 3d renders [PYPE-693]
-- _(pype)_ anatomy module has been greatly improved to allow for more dynamic publishing and faster debugging [PYPE-685]
-- _(pype)_ avalon schemas have been moved from `pype-config` to `pype` repository, for simplification. [PYPE-670]
-- _(ftrack)_ updated to latest ftrack API
-- _(ftrack)_ publishing comments now appear in ftrack also as a note on version with customisable category [PYPE-645]
-- _(ftrack)_ delete asset/subset action has been improved. It is now able to remove multiple entities and descendants of the selected entities [PYPE-361, PYPS-72]
-- _(workfiles)_ added date field to workfiles app [PYPE-603]
-- _(maya)_ old deprecated loaders have been removed in favour of a single unified reference loader (old scenes will upgrade automatically to the new loader upon opening) [PYPE-633, PYPE-697]
-- _(avalon)_ core updated to 5.5.15 [PYPE-671]
-- _(nuke)_ library loader is now available in nuke [PYPE-698]
-
-**new:**
-- _(pype)_ added pype render wrapper to allow rendering on mixed platform farms. [PYPE-634]
-- _(pype)_ added `pype launch` command. It lets admins run applications with a dynamically built environment based on the given context. [PYPE-634]
-- _(pype)_ added support for extracting review sequences with burnins [PYPE-657]
-- _(publish)_ users can now set intent next to a comment when publishing. This will then be reflected on an attribute in ftrack. [PYPE-632]
-- _(burnin)_ timecode can now be added to burnin
-- _(burnin)_ datetime keys can now be added to burnin and anatomy [PYPE-651]
-- _(burnin)_ anatomy templates can now be used in burnins. [PYPE-626]
-- _(nuke)_ new validator for render resolution
-- _(nuke)_ support for attaching slates to nuke renders [PYPE-630]
-- _(nuke)_ png sequences were added to loaders
-- _(maya)_ added maya 2020 compatibility [PYPE-677]
-- _(maya)_ ability to publish and load .ASS standin sequences [PYPS-54]
-- _(pype)_ thumbnails can now be published and are visible in the loader. `AVALON_THUMBNAIL_ROOT` environment variable needs to be set for this to work [PYPE-573, PYPE-132]
-- _(blender)_ base implementation of blender was added with publishing and loading of .blend files [PYPE-612]
-- _(ftrack)_ new action for preparing deliveries [PYPE-639]
-
-
-**fix**:
-- _(burnin)_ more robust way of finding ffmpeg for burnins.
-- _(pype)_ improved UNC paths remapping when sending to farm.
-- _(pype)_ float frames sometimes made their way to representation context in database, breaking loaders [PYPE-668]
-- _(pype)_ `pype install --force` was failing sometimes [PYPE-600]
-- _(pype)_ padding in published files got calculated wrongly sometimes. It is now instead being always read from project anatomy. [PYPE-667]
-- _(publish)_ comment publishing was failing in certain situations
-- _(ftrack)_ multiple edge case scenario fixes in auto sync and sync-to-avalon action
-- _(ftrack)_ sync to avalon now works on empty projects
-- _(ftrack)_ thumbnail update event was failing when deleting entities [PYPE-561]
-- _(nuke)_ loader applies proper colorspaces from Presets
-- _(nuke)_ publishing handles didn't always work correctly [PYPE-686]
-- _(maya)_ assembly publishing and loading wasn't working correctly
-
-
-
-
-
-## 2.4.0 ##
-
-_**release date:** 9 Dec 2019_
-
-**change:**
-- _(ftrack)_ version to status ftrack event can now be configured from Presets (see the sketch after this list)
-  - based on preset `presets/ftrack/ftrack_config.json["status_version_to_task"]`
-- _(ftrack)_ sync to avalon event has been completely re-written. It now supports most of the project management situations on ftrack including moving, renaming and deleting entities, updating attributes and working with tasks.
-- _(ftrack)_ sync to avalon action has also been re-written. It is now much faster (up to 100 times depending on a project structure), has much better logging and reporting on encountered problems, and is able to handle much more complex situations.
-- _(ftrack)_ sync to avalon can be triggered by checking the `auto-sync` toggle on ftrack [PYPE-504]
-- _(pype)_ various new features in the REST api
-- _(pype)_ new visual identity used across pype
-- _(pype)_ started moving all requirements to pip installation rather than vendorising them in pype repository. Due to a few yet unreleased packages, this means that pype can temporarily be only installed in the offline mode.
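A rough sketch of what the `status_version_to_task` preset could hold, assuming it maps an incoming version status to the status pushed onto the task; the status names and the value shape are hypothetical:

```python
# Hypothetical content of presets/ftrack/ftrack_config.json.
# Status names and the mapping shape are illustrative assumptions.
ftrack_config = {
    "status_version_to_task": {
        "Approved": "Done",
        "In Progress": "In Progress",
        "Pending Review": "Review",
    },
}
```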
-
-**new:**
-- _(nuke)_ support for publishing gizmos and loading them as viewer processes
-- _(nuke)_ support for publishing nuke nodes from backdrops and loading them back
-- _(pype)_ burnins can now work with start and end frames as keys
-  - use keys `{frame_start}`, `{frame_end}` and `{current_frame}` in burnin preset to use them. [PYPS-44, PYPS-73, PYPE-602]
-- _(pype)_ option to filter logs by user and level in logging GUI
-- _(pype)_ image family added to standalone publisher [PYPE-574]
-- _(pype)_ matchmove family added to standalone publisher [PYPE-574]
-- _(nuke)_ validator for comparing arbitrary knobs with values from presets
-- _(maya)_ option to force maya to copy textures in the new look publish rather than hardlinking them
-- _(pype)_ comments from pyblish GUI are now being added to ftrack version
-- _(maya)_ validator for checking outdated containers in the scene
-- _(maya)_ option to publish and load arnold standin sequence [PYPE-579, PYPS-54]
-
-**fix**:
-- _(pype)_ burnins were not respecting codec of the input video
-- _(nuke)_ lots of various nuke and nuke studio fixes across the board [PYPS-45]
-- _(pype)_ workfiles app is not launching with the start of the app by default [PYPE-569]
-- _(ftrack)_ ftrack integration during publishing was failing under certain situations [PYPS-66]
-- _(pype)_ minor fixes in REST api
-- _(ftrack)_ status change event was crashing when the target status was missing [PYPS-68]
-- _(ftrack)_ actions will try to reconnect if they fail for some reason
-- _(maya)_ problems with fps mapping when using float FPS values
-- _(deadline)_ overall improvements to deadline publishing
-- _(setup)_ environment variables are now remapped on the fly based on the platform pype is running on. This fixes many issues in mixed platform environments.
-
-
-## 2.3.6 ##
-
-_**release date:** 27 Nov 2019_
-
-**hotfix**:
-- _(ftrack)_ was hiding important debug logs
-- _(nuke)_ crashes during workfile publishing
-- _(ftrack)_ event server crashes because of signal problems
-- _(muster)_ problems with muster render submissions
-- _(ftrack)_ thumbnail update event syntax errors
-
-
-## 2.3.0 ##
-_release date: 6 Oct 2019_
-
-**new**:
-- _(maya)_ support for yeti rigs and yeti caches
-- _(maya)_ validator for comparing arbitrary attributes against ftrack
-- _(pype)_ burnins can now show current date and time
-- _(muster)_ pools can now be set in render globals in maya
-- _(pype)_ Rest API has been implemented in beta stage
-- _(nuke)_ LUT loader has been added
-- _(pype)_ rudimentary user module has been added as preparation for user management
-- _(pype)_ a simple logging GUI has been added to pype tray
-- _(nuke)_ nuke can now bake input process into mov
-- _(maya)_ imported models now have selection handle displayed by default
-- _(avalon)_ it is now possible to load multiple assets at once using loader
-- _(maya)_ added ability to automatically connect yeti rig to a mesh upon loading
-
-**changed**:
-- _(ftrack)_ event server now runs two parallel processes and is able to keep a queue of events to process.
-- _(nuke)_ task name is now added to all rendered subsets
-- _(pype)_ adding more families to standalone publisher
-- _(pype)_ standalone publisher now uses pyblish-lite
-- _(pype)_ standalone publisher can now create review quicktimes
-- _(ftrack)_ queries to ftrack were sped up
-- _(ftrack)_ multiple ftrack actions have been deprecated
-- _(avalon)_ avalon upstream has been updated to 5.5.0
-- _(nukestudio)_ published transforms can now be animated
-
-**fix**:
-- _(maya)_ fps popup button didn't work in some cases
-- _(maya)_ geometry instances and references in maya were losing shader assignments
-- _(muster)_ muster rendering templates were not working correctly
-- _(maya)_ arnold tx texture conversion wasn't respecting colorspace set by the artist
-- _(pype)_ problems with avalon db sync
-- _(maya)_ ftrack was rounding FPS making it inconsistent
-- _(pype)_ wrong icon names in Creator
-- _(maya)_ scene inventory wasn't showing anything if representation was removed from database after it's been loaded to the scene
-- _(nukestudio)_ multiple bugs squashed
-- _(loader)_ loader was taking a long time to show all the loading actions when first launched in maya
-
-## 2.2.0 ##
-_release date: 8 Sept 2019_
-
-**new**:
-- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts
-- _(nuke)_ option to choose deadline chunk size on write nodes
-- _(nukestudio)_ added option to publish soft effects (subTrackItems) from NukeStudio as subsets including LUT files. These can then be loaded in nuke or NukeStudio
-- _(nuke)_ option to build nuke script from previously published latest versions of plate and render subsets.
-- _(nuke)_ nuke writes now have deadline tab.
-- _(ftrack)_ Prepare Project action can now be used for creating the base folder structure on disk and in ftrack, setting up all the initial project attributes and it automatically prepares `pype_project_config` folder for the given project.
-- _(clockify)_ Added support for time tracking in clockify. This is currently in addition to ftrack time logs, but does not completely replace them.
-- _(pype)_ any attributes in Creator and Loader plugins can now be customised using pype preset system
-
-**changed**:
-- nukestudio now uses workio API for workfiles
-- _(maya)_ "FIX FPS" prompt in maya now appears in the middle of the screen
-- _(muster)_ can now be configured with custom templates
-- _(pype)_ global publishing plugins can now be configured using presets as well as host specific ones
-
-
-**fix**:
-- wrong version retrieval from path in certain scenarios
-- nuke reset resolution wasn't working in certain scenarios
-
-## 2.1.0 ##
-_release date: 6 Aug 2019_
-
-A large cleanup release. Most of the changes are under the hood.
-
-**new**:
-- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts
-- _(pype)_ Added configurable option to add burnins to any generated quicktimes
-- _(ftrack)_ Action that identifies what machines pype is running on.
-- _(system)_ unify subprocess calls
-- _(maya)_ add audio to review quicktimes
-- _(nuke)_ add crop before write node to prevent overscan problems in ffmpeg
-- **Nuke Studio** publishing and workfiles support
-- **Muster** render manager support
-- _(nuke)_ Framerange, FPS and Resolution are set automatically at startup
-- _(maya)_ Ability to load published sequences as image planes
-- _(system)_ Ftrack event that sets asset folder permissions based on task assignees in ftrack.
-- _(maya)_ Pyblish plugin that allows validation of maya attributes
-- _(system)_ added better startup logging to tray debug, including basic connection information
-- _(avalon)_ option to group published subsets to groups in the loader
-- _(avalon)_ loader family filters are working now
-
-**changed**:
-- change multiple key attributes to unify their behaviour across the pipeline (see the sketch after this list)
-  - `frameRate` to `fps`
-  - `startFrame` to `frameStart`
-  - `endFrame` to `frameEnd`
-  - `fstart` to `frameStart`
-  - `fend` to `frameEnd`
-  - `handle_start` to `handleStart`
-  - `handle_end` to `handleEnd`
-  - `resolution_width` to `resolutionWidth`
-  - `resolution_height` to `resolutionHeight`
-  - `pixel_aspect` to `pixelAspect`
-
-- _(nuke)_ write nodes are now created inside group with only some attributes editable by the artist
-- rendered frames are now deleted from temporary location after their publishing is finished.
-- _(ftrack)_ RV action can now be launched from any entity
-- after publishing only refresh button is now available in pyblish UI
-- added context instance pyblish-lite so that artist knows if context plugin fails
-- _(avalon)_ allow opening selected files using enter key
-- _(avalon)_ core updated to v5.2.9 with our forked changes on top
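The rename above is mechanical, so a one-off helper makes it concrete; a sketch assuming the data is a flat dict, not the actual migration code:

```python
# Old-to-new key mapping, taken verbatim from the list above.
KEY_RENAMES = {
    "frameRate": "fps",
    "startFrame": "frameStart",
    "endFrame": "frameEnd",
    "fstart": "frameStart",
    "fend": "frameEnd",
    "handle_start": "handleStart",
    "handle_end": "handleEnd",
    "resolution_width": "resolutionWidth",
    "resolution_height": "resolutionHeight",
    "pixel_aspect": "pixelAspect",
}


def migrate_keys(data):
    """Return a copy of a flat dict with legacy keys renamed."""
    return {KEY_RENAMES.get(key, key): value for key, value in data.items()}
```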
-
-**fix**:
-- faster hierarchy retrieval from db
-- _(nuke)_ A lot of stability enhancements
-- _(nuke studio)_ A lot of stability enhancements
-- _(nuke)_ now only renders a single write node on farm
-- _(ftrack)_ pype would crash when launching a project level task
-- work directory was sometimes not being created correctly
-- major pype.lib cleanup. Removing of unused functions, merging those that were doing the same and general house cleaning.
-- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner
-
-
-\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
deleted file mode 100644
index 567bb92773..0000000000
--- a/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, sex characteristics, gender identity and expression,
-level of experience, education, socio-economic status, nationality, personal
-appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
-  advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
-  address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
-  professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an appointed
-representative at an online or offline event. Representation of a project may be
-further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at info@pype.club. All
-complaints will be reviewed and investigated and will result in a response that
-is deemed necessary and appropriate to the circumstances. The project team is
-obligated to maintain confidentiality with regard to the reporter of an incident.
-Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
-available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
-[homepage]: https://www.contributor-covenant.org
-
-For answers to common questions about this code of conduct, see
-https://www.contributor-covenant.org/faq
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index 644a74c1f7..0000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,53 +0,0 @@
-## How to contribute to Pype
-
-We are always happy about any contributions that improve OpenPype. Before making a PR and starting work on an issue, please read these simple guidelines.
-
-#### **Did you find a bug?**
-
-1. Check the issues and our [bug triage](https://github.com/pypeclub/pype/projects/2) to make sure it wasn't reported already.
-2. Ask on our [discord](http://pype.community/chat). Often, what appears to be a bug might be the intended behaviour for someone else.
-3. Create a new issue.
-4. Please use the issue template for your PR.
-
-
-#### **Did you write a patch that fixes a bug?**
-
-- Open a new GitHub pull request with the patch.
-- Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable.
-
-#### **Do you intend to add a new feature or change an existing one?**
-
-- Open a new thread in the [github discussions](https://github.com/pypeclub/pype/discussions/new)
-- Do not open an issue until the suggestion is discussed. We will convert accepted suggestions into backlog and point them to the relevant discussion thread to keep the context.
-- If you are already working on a new feature and you'd like it eventually merged to the main codebase, please consider making a DRAFT PR as soon as possible. This makes it a lot easier to give feedback, discuss the code and functionality, plus it prevents multiple people tackling the same problem independently.
-
-#### **Do you have questions about the source code?**
-
-Open a new question on [github discussions](https://github.com/pypeclub/pype/discussions/new)
-
-## Branching Strategy
-
-As we move to 3.x as the primary supported version of pype and only keep 2.15 on bugfixes and client sponsored feature requests, we need to be very careful with our merging strategy.
-
-We also use this opportunity to switch the branch naming. The 3.0 production branch will no longer be called MASTER, but will be renamed to MAIN. Develop will stay as it is.
-
-A few important notes about 2.x and 3.x development:
-
-- 3.x features are not backported to 2.x unless specifically requested
-- 3.x bugs and hotfixes can be ported to 2.x if they are relevant or severe
-- 2.x features and bugs MUST be ported to 3.x at the same time
-
-## Pull Requests
-
-- Each 2.x PR MUST have a corresponding 3.x PR in github. Without a 3.x PR, 2.x features will not be merged! Luckily most of the code is compatible, albeit sometimes in a different place after the refactor. Porting from 2.x to 3.x should be really easy.
-- Please keep the corresponding 2 and 3 PR names the same so they can be easily identified from the PR list page.
-- Each 2.x PR should be labeled with the `2.x-dev` label.
-
-Inside each PR, put a link to the corresponding PR for the other version.
-
-Of course if you want to contribute, feel free to make a PR to only 2.x/develop or develop, based on what you are using. While reviewing the PRs, we might convert the code to a corresponding PR for the other release ourselves.
-
-We might also change the target of your PR to an intermediate branch, rather than `develop`, if we feel it requires some extra work on our end. That way, we preserve all your commits so you don't lose out on the contribution credits.
-
-
-If a PR is targeted at a 2.x release it must be labelled with the `2.x-dev` label in Github.
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 46dd9e5c0a..0000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,82 +0,0 @@
-# Build Pype docker image
-FROM ubuntu:focal AS builder
-ARG OPENPYPE_PYTHON_VERSION=3.9.12
-ARG BUILD_DATE
-ARG VERSION
-
-LABEL maintainer="info@openpype.io"
-LABEL description="Docker Image to build and run OpenPype under Ubuntu 20.04"
-LABEL org.opencontainers.image.name="pypeclub/openpype"
-LABEL org.opencontainers.image.title="OpenPype Docker Image"
-LABEL org.opencontainers.image.url="https://openpype.io/"
-LABEL org.opencontainers.image.source="https://github.com/pypeclub/OpenPype"
-LABEL org.opencontainers.image.documentation="https://openpype.io/docs/system_introduction"
-LABEL org.opencontainers.image.created=$BUILD_DATE
-LABEL org.opencontainers.image.version=$VERSION
-
-USER root
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-# update base
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends \
-    ca-certificates \
-    bash \
-    git \
-    cmake \
-    make \
-    curl \
-    wget \
-    build-essential \
-    checkinstall \
-    libssl-dev \
-    zlib1g-dev \
-    libbz2-dev \
-    libreadline-dev \
-    libsqlite3-dev \
-    llvm \
-    libncursesw5-dev \
-    xz-utils \
-    tk-dev \
-    libxml2-dev \
-    libxmlsec1-dev \
-    libffi-dev \
-    liblzma-dev \
-    patchelf
-
-SHELL ["/bin/bash", "-c"]
-
-
-RUN mkdir /opt/openpype
-
-# download and install pyenv
-RUN curl https://pyenv.run | bash \
-    && echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/init_pyenv.sh \
-    && echo 'eval "$(pyenv init -)"' >> $HOME/init_pyenv.sh \
-    && echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/init_pyenv.sh \
-    && echo 'eval "$(pyenv init --path)"' >> $HOME/init_pyenv.sh
-
-# install python with pyenv
-RUN source $HOME/init_pyenv.sh \
-    && pyenv install ${OPENPYPE_PYTHON_VERSION}
-
-COPY . /opt/openpype/
/opt/openpype/ - -RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh - -WORKDIR /opt/openpype - -# set local python version -RUN cd /opt/openpype \ - && source $HOME/init_pyenv.sh \ - && pyenv local ${OPENPYPE_PYTHON_VERSION} - -# fetch third party tools/libraries -RUN source $HOME/init_pyenv.sh \ - && ./tools/create_env.sh \ - && ./tools/fetch_thirdparty_libs.sh - -# build openpype -RUN source $HOME/init_pyenv.sh \ - && bash ./tools/build.sh diff --git a/Dockerfile.centos7 b/Dockerfile.centos7 deleted file mode 100644 index ab1d3f8253..0000000000 --- a/Dockerfile.centos7 +++ /dev/null @@ -1,118 +0,0 @@ -# Build Pype docker image -FROM centos:7 AS builder -ARG OPENPYPE_PYTHON_VERSION=3.9.12 - -LABEL org.opencontainers.image.name="pypeclub/openpype" -LABEL org.opencontainers.image.title="OpenPype Docker Image" -LABEL org.opencontainers.image.url="https://openpype.io/" -LABEL org.opencontainers.image.source="https://github.com/pypeclub/pype" -LABEL org.opencontainers.image.documentation="https://openpype.io/docs/system_introduction" -LABEL org.opencontainers.image.created=$BUILD_DATE -LABEL org.opencontainers.image.version=$VERSION - - -USER root - -# update base -RUN yum -y install deltarpm \ - && yum -y update \ - && yum clean all - -# add tools we need -RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \ - && yum -y install centos-release-scl \ - && yum -y install \ - bash \ - which \ - git \ - make \ - devtoolset-7 \ - cmake \ - curl \ - wget \ - gcc \ - zlib-devel \ - pcre-devel \ - perl-core \ - bzip2 \ - bzip2-devel \ - readline-devel \ - sqlite sqlite-devel \ - openssl-devel \ - openssl-libs \ - openssl11-devel \ - openssl11-libs \ - tk-devel libffi-devel \ - patchelf \ - automake \ - autoconf \ - patch \ - ncurses \ - ncurses-devel \ - qt5-qtbase-devel \ - xcb-util-wm \ - xcb-util-renderutil \ - && yum clean all - -# we need to build our own patchelf -WORKDIR /temp-patchelf -RUN git clone -b 0.17.0 --single-branch https://github.com/NixOS/patchelf.git . \ - && source scl_source enable devtoolset-7 \ - && ./bootstrap.sh \ - && ./configure \ - && make \ - && make install - -RUN mkdir /opt/openpype -# RUN useradd -m pype -# RUN chown pype /opt/openpype -# USER pype - -RUN curl https://pyenv.run | bash -# ENV PYTHON_CONFIGURE_OPTS --enable-shared - -RUN echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/.bashrc \ - && echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc \ - && echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc \ - && echo 'eval "$(pyenv init --path)"' >> $HOME/.bashrc -RUN source $HOME/.bashrc \ - && export CPPFLAGS="-I/usr/include/openssl11" \ - && export LDFLAGS="-L/usr/lib64/openssl11 -lssl -lcrypto" \ - && export PATH=/usr/local/openssl/bin:$PATH \ - && export LD_LIBRARY_PATH=/usr/local/openssl/lib:$LD_LIBRARY_PATH \ - && pyenv install ${OPENPYPE_PYTHON_VERSION} - -COPY . /opt/openpype/ -RUN rm -rf /opt/openpype/.poetry || echo "No Poetry installed yet."
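-# The rm above discards any Poetry environment copied in with the repository;
-# a fresh one is set up later by ./tools/create_env.sh.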
-# USER root -# RUN chown -R pype /opt/openpype -RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh - -# USER pype - -WORKDIR /opt/openpype - -RUN cd /opt/openpype \ - && source $HOME/.bashrc \ - && pyenv local ${OPENPYPE_PYTHON_VERSION} - -RUN source $HOME/.bashrc \ - && ./tools/create_env.sh - -RUN source $HOME/.bashrc \ - && ./tools/fetch_thirdparty_libs.sh - -RUN echo 'export PYTHONPATH="/opt/openpype/vendor/python:$PYTHONPATH"'>> $HOME/.bashrc -RUN source $HOME/.bashrc \ - && bash ./tools/build.sh - -RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.9/lib \ - && cp /usr/lib64/openssl11/libssl* ./build/exe.linux-x86_64-3.9/lib \ - && cp /usr/lib64/openssl11/libcrypto* ./build/exe.linux-x86_64-3.9/lib \ - && ln -sr ./build/exe.linux-x86_64-3.9/lib/libssl.so ./build/exe.linux-x86_64-3.9/lib/libssl.1.1.so \ - && ln -sr ./build/exe.linux-x86_64-3.9/lib/libcrypto.so ./build/exe.linux-x86_64-3.9/lib/libcrypto.1.1.so \ - && cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.9/lib \ - && cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.9/vendor/python/PySide2/Qt/lib - -RUN cd /opt/openpype \ - && rm -rf ./vendor/bin diff --git a/Dockerfile.debian b/Dockerfile.debian deleted file mode 100644 index a53b5aa769..0000000000 --- a/Dockerfile.debian +++ /dev/null @@ -1,81 +0,0 @@ -# Build Pype docker image -FROM debian:bullseye AS builder -ARG OPENPYPE_PYTHON_VERSION=3.9.12 -ARG BUILD_DATE -ARG VERSION - -LABEL maintainer="info@openpype.io" -LABEL description="Docker Image to build and run OpenPype under Debian Bullseye" -LABEL org.opencontainers.image.name="pypeclub/openpype" -LABEL org.opencontainers.image.title="OpenPype Docker Image" -LABEL org.opencontainers.image.url="https://openpype.io/" -LABEL org.opencontainers.image.source="https://github.com/pypeclub/OpenPype" -LABEL org.opencontainers.image.documentation="https://openpype.io/docs/system_introduction" -LABEL org.opencontainers.image.created=$BUILD_DATE -LABEL org.opencontainers.image.version=$VERSION - -USER root - -ARG DEBIAN_FRONTEND=noninteractive - -# update base -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - ca-certificates \ - bash \ - git \ - cmake \ - make \ - curl \ - wget \ - build-essential \ - libssl-dev \ - zlib1g-dev \ - libbz2-dev \ - libreadline-dev \ - libsqlite3-dev \ - llvm \ - libncursesw5-dev \ - xz-utils \ - tk-dev \ - libxml2-dev \ - libxmlsec1-dev \ - libffi-dev \ - liblzma-dev \ - patchelf - -SHELL ["/bin/bash", "-c"] - - -RUN mkdir /opt/openpype - -# download and install pyenv -RUN curl https://pyenv.run | bash \ - && echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/init_pyenv.sh \ - && echo 'eval "$(pyenv init -)"' >> $HOME/init_pyenv.sh \ - && echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/init_pyenv.sh \ - && echo 'eval "$(pyenv init --path)"' >> $HOME/init_pyenv.sh - -# install python with pyenv -RUN source $HOME/init_pyenv.sh \ - && pyenv install ${OPENPYPE_PYTHON_VERSION} - -COPY . 
/opt/openpype/ - -RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh - -WORKDIR /opt/openpype - -# set local python version -RUN cd /opt/openpype \ - && source $HOME/init_pyenv.sh \ - && pyenv local ${OPENPYPE_PYTHON_VERSION} - -# fetch third party tools/libraries -RUN source $HOME/init_pyenv.sh \ - && ./tools/create_env.sh \ - && ./tools/fetch_thirdparty_libs.sh - -# build openpype -RUN source $HOME/init_pyenv.sh \ - && bash ./tools/build.sh diff --git a/HISTORY.md b/HISTORY.md deleted file mode 100644 index 543cf11513..0000000000 --- a/HISTORY.md +++ /dev/null @@ -1,3808 +0,0 @@ -# Changelog - -## [3.15.0](https://github.com/ynput/OpenPype/tree/3.15.0) - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...3.15.0) - -**Deprecated:** - -- General: Fill default values of new publish template profiles [\#4245](https://github.com/ynput/OpenPype/pull/4245) - -### ๐Ÿ“– Documentation - -- documentation: Split tools into separate entries [\#4342](https://github.com/ynput/OpenPype/pull/4342) -- Documentation: Fix harmony docs [\#4301](https://github.com/ynput/OpenPype/pull/4301) -- Remove staging logic set by OpenPype version [\#3979](https://github.com/ynput/OpenPype/pull/3979) - -**๐Ÿ†• New features** - -- General: Push to studio library [\#4284](https://github.com/ynput/OpenPype/pull/4284) -- Colorspace Management and Distribution [\#4195](https://github.com/ynput/OpenPype/pull/4195) -- Nuke: refactor to latest publisher workfow [\#4006](https://github.com/ynput/OpenPype/pull/4006) -- Update to Python 3.9 [\#3546](https://github.com/ynput/OpenPype/pull/3546) - -**๐Ÿš€ Enhancements** - -- Unreal: Don't use mongo queries in 'ExistingLayoutLoader' [\#4356](https://github.com/ynput/OpenPype/pull/4356) -- General: Loader and Creator plugins can be disabled [\#4310](https://github.com/ynput/OpenPype/pull/4310) -- General: Unbind poetry version [\#4306](https://github.com/ynput/OpenPype/pull/4306) -- General: Enhanced enum def items [\#4295](https://github.com/ynput/OpenPype/pull/4295) -- Git: add pre-commit hooks [\#4289](https://github.com/ynput/OpenPype/pull/4289) -- Tray Publisher: Improve Online family functionality [\#4263](https://github.com/ynput/OpenPype/pull/4263) -- General: Update MacOs to PySide6 [\#4255](https://github.com/ynput/OpenPype/pull/4255) -- Build: update to Gazu in toml [\#4208](https://github.com/ynput/OpenPype/pull/4208) -- Global: adding imageio to settings [\#4158](https://github.com/ynput/OpenPype/pull/4158) -- Blender: added project settings for validator no colons in name [\#4149](https://github.com/ynput/OpenPype/pull/4149) -- Dockerfile for Debian Bullseye [\#4108](https://github.com/ynput/OpenPype/pull/4108) -- AfterEffects: publish multiple compositions [\#4092](https://github.com/ynput/OpenPype/pull/4092) -- AfterEffects: make new publisher default [\#4056](https://github.com/ynput/OpenPype/pull/4056) -- Photoshop: make new publisher default [\#4051](https://github.com/ynput/OpenPype/pull/4051) -- Feature/multiverse [\#4046](https://github.com/ynput/OpenPype/pull/4046) -- Tests: add support for deadline for automatic tests [\#3989](https://github.com/ynput/OpenPype/pull/3989) -- Add version to shortcut name [\#3906](https://github.com/ynput/OpenPype/pull/3906) -- TrayPublisher: Removed from experimental tools [\#3667](https://github.com/ynput/OpenPype/pull/3667) - -**๐Ÿ› Bug fixes** - -- change 3.7 to 3.9 in folder name [\#4354](https://github.com/ynput/OpenPype/pull/4354) -- PushToProject: Fix hierarchy of 
project change [\#4350](https://github.com/ynput/OpenPype/pull/4350) -- Fix photoshop workfile save-as [\#4347](https://github.com/ynput/OpenPype/pull/4347) -- Nuke Input process node sourcing improvements [\#4341](https://github.com/ynput/OpenPype/pull/4341) -- New publisher: Some validation plugin tweaks [\#4339](https://github.com/ynput/OpenPype/pull/4339) -- Harmony: fix unable to change workfile on Mac [\#4334](https://github.com/ynput/OpenPype/pull/4334) -- Global: fixing in-place source publishing for editorial [\#4333](https://github.com/ynput/OpenPype/pull/4333) -- General: Use class constants of QMessageBox [\#4332](https://github.com/ynput/OpenPype/pull/4332) -- TVPaint: Fix plugin for TVPaint 11.7 [\#4328](https://github.com/ynput/OpenPype/pull/4328) -- Exctract OTIO review has improved quality [\#4325](https://github.com/ynput/OpenPype/pull/4325) -- Ftrack: fix typos causing bugs in sync [\#4322](https://github.com/ynput/OpenPype/pull/4322) -- General: Python 2 compatibility of instance collector [\#4320](https://github.com/ynput/OpenPype/pull/4320) -- Slack: user groups speedup [\#4318](https://github.com/ynput/OpenPype/pull/4318) -- Maya: Bug - Multiverse extractor executed on plain animation family [\#4315](https://github.com/ynput/OpenPype/pull/4315) -- Fix run\_documentation.ps1 [\#4312](https://github.com/ynput/OpenPype/pull/4312) -- Nuke: new creators fixes [\#4308](https://github.com/ynput/OpenPype/pull/4308) -- General: missing comment on standalone and tray publisher [\#4303](https://github.com/ynput/OpenPype/pull/4303) -- AfterEffects: Fix for audio from mp4 layer [\#4296](https://github.com/ynput/OpenPype/pull/4296) -- General: Update gazu in poetry lock [\#4247](https://github.com/ynput/OpenPype/pull/4247) -- Bug: Fixing version detection and filtering in Igniter [\#3914](https://github.com/ynput/OpenPype/pull/3914) -- Bug: Create missing version dir [\#3903](https://github.com/ynput/OpenPype/pull/3903) - -**๐Ÿ”€ Refactored code** - -- Remove redundant export\_alembic method. 
[\#4293](https://github.com/ynput/OpenPype/pull/4293) -- Igniter: Use qtpy modules instead of Qt [\#4237](https://github.com/ynput/OpenPype/pull/4237) - -**Merged pull requests:** - -- Sort families by alphabetical order in the Create plugin [\#4346](https://github.com/ynput/OpenPype/pull/4346) -- Global: Validate unique subsets [\#4336](https://github.com/ynput/OpenPype/pull/4336) -- Maya: Collect instances preserve handles even if frameStart + frameEnd matches context [\#3437](https://github.com/ynput/OpenPype/pull/3437) - - -## [3.14.10](https://github.com/ynput/OpenPype/tree/3.14.10) - -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.9...3.14.10) - -**๐Ÿ†• New features** - -- Global | Nuke: Creator placeholders in workfile template builder [\#4266](https://github.com/ynput/OpenPype/pull/4266) -- Slack: Added dynamic message [\#4265](https://github.com/ynput/OpenPype/pull/4265) -- Blender: Workfile Loader [\#4234](https://github.com/ynput/OpenPype/pull/4234) -- Unreal: Publishing and Loading for UAssets [\#4198](https://github.com/ynput/OpenPype/pull/4198) -- Publish: register publishes without copying them [\#4157](https://github.com/ynput/OpenPype/pull/4157) - -**๐Ÿš€ Enhancements** - -- General: Added install method with docstring to HostBase [\#4298](https://github.com/ynput/OpenPype/pull/4298) -- Traypublisher: simple editorial multiple edl [\#4248](https://github.com/ynput/OpenPype/pull/4248) -- General: Extend 'IPluginPaths' to have more available methods [\#4214](https://github.com/ynput/OpenPype/pull/4214) -- Refactorization of folder coloring [\#4211](https://github.com/ynput/OpenPype/pull/4211) -- Flame - loading multilayer with controlled layer names [\#4204](https://github.com/ynput/OpenPype/pull/4204) - -**๐Ÿ› Bug fixes** - -- Unreal: fix missing `maintained_selection` call [\#4300](https://github.com/ynput/OpenPype/pull/4300) -- Ftrack: Fix receive of host ip on MacOs [\#4288](https://github.com/ynput/OpenPype/pull/4288) -- SiteSync: sftp connection failing when shouldnt be tested [\#4278](https://github.com/ynput/OpenPype/pull/4278) -- Deadline: fix default value for passing mongo url [\#4275](https://github.com/ynput/OpenPype/pull/4275) -- Scene Manager: Fix variable name [\#4268](https://github.com/ynput/OpenPype/pull/4268) -- Slack: notification fails because of missing published path [\#4264](https://github.com/ynput/OpenPype/pull/4264) -- hiero: creator gui with min max [\#4257](https://github.com/ynput/OpenPype/pull/4257) -- NiceCheckbox: Fix checker positioning in Python 2 [\#4253](https://github.com/ynput/OpenPype/pull/4253) -- Publisher: Fix 'CreatorType' not equal for Python 2 DCCs [\#4249](https://github.com/ynput/OpenPype/pull/4249) -- Deadline: fix dependencies [\#4242](https://github.com/ynput/OpenPype/pull/4242) -- Houdini: hotfix instance data access [\#4236](https://github.com/ynput/OpenPype/pull/4236) -- bugfix/image plane load error [\#4222](https://github.com/ynput/OpenPype/pull/4222) -- Hiero: thumbnail from multilayer exr [\#4209](https://github.com/ynput/OpenPype/pull/4209) - -**๐Ÿ”€ Refactored code** - -- Resolve: Use qtpy in Resolve [\#4254](https://github.com/ynput/OpenPype/pull/4254) -- Houdini: Use qtpy in Houdini [\#4252](https://github.com/ynput/OpenPype/pull/4252) -- Max: Use qtpy in Max [\#4251](https://github.com/ynput/OpenPype/pull/4251) -- Maya: Use qtpy in Maya [\#4250](https://github.com/ynput/OpenPype/pull/4250) -- Hiero: Use qtpy in Hiero [\#4240](https://github.com/ynput/OpenPype/pull/4240) -- Nuke: Use qtpy in 
Nuke [\#4239](https://github.com/ynput/OpenPype/pull/4239) -- Flame: Use qtpy in flame [\#4238](https://github.com/ynput/OpenPype/pull/4238) -- General: Legacy io not used in global plugins [\#4134](https://github.com/ynput/OpenPype/pull/4134) - -**Merged pull requests:** - -- Bump json5 from 1.0.1 to 1.0.2 in /website [\#4292](https://github.com/ynput/OpenPype/pull/4292) -- Maya: Fix validate frame range repair + fix create render with deadline disabled [\#4279](https://github.com/ynput/OpenPype/pull/4279) - - -## [3.14.9](https://github.com/pypeclub/OpenPype/tree/3.14.9) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.8...3.14.9) - -### ๐Ÿ“– Documentation - -- Documentation: Testing on Deadline [\#4185](https://github.com/pypeclub/OpenPype/pull/4185) -- Consistent Python version [\#4160](https://github.com/pypeclub/OpenPype/pull/4160) - -**๐Ÿ†• New features** - -- Feature/op 4397 gl tf extractor for maya [\#4192](https://github.com/pypeclub/OpenPype/pull/4192) -- Maya: Extractor for Unreal SkeletalMesh [\#4174](https://github.com/pypeclub/OpenPype/pull/4174) -- 3dsmax: integration [\#4168](https://github.com/pypeclub/OpenPype/pull/4168) -- Blender: Extract Alembic Animations [\#4128](https://github.com/pypeclub/OpenPype/pull/4128) -- Unreal: Load Alembic Animations [\#4127](https://github.com/pypeclub/OpenPype/pull/4127) - -**๐Ÿš€ Enhancements** - -- Houdini: Use new interface class name for publish host [\#4220](https://github.com/pypeclub/OpenPype/pull/4220) -- General: Default command for headless mode is interactive [\#4203](https://github.com/pypeclub/OpenPype/pull/4203) -- Maya: Enhanced ASS publishing [\#4196](https://github.com/pypeclub/OpenPype/pull/4196) -- Feature/op 3924 implement ass extractor [\#4188](https://github.com/pypeclub/OpenPype/pull/4188) -- File transactions: Source path is destination path [\#4184](https://github.com/pypeclub/OpenPype/pull/4184) -- Deadline: improve environment processing [\#4182](https://github.com/pypeclub/OpenPype/pull/4182) -- General: Comment per instance in Publisher [\#4178](https://github.com/pypeclub/OpenPype/pull/4178) -- Ensure Mongo database directory exists in Windows. [\#4166](https://github.com/pypeclub/OpenPype/pull/4166) -- Note about unrestricted execution on Windows. [\#4161](https://github.com/pypeclub/OpenPype/pull/4161) -- Maya: Enable thumbnail transparency on extraction. [\#4147](https://github.com/pypeclub/OpenPype/pull/4147) -- Maya: Disable viewport Pan/Zoom on playblast extraction. 
[\#4146](https://github.com/pypeclub/OpenPype/pull/4146) -- Maya: Optional viewport refresh on pointcache extraction [\#4144](https://github.com/pypeclub/OpenPype/pull/4144) -- CelAction: refactory integration to current openpype [\#4140](https://github.com/pypeclub/OpenPype/pull/4140) -- Maya: create and publish bounding box geometry [\#4131](https://github.com/pypeclub/OpenPype/pull/4131) -- Changed the UOpenPypePublishInstance to use the UDataAsset class [\#4124](https://github.com/pypeclub/OpenPype/pull/4124) -- General: Collection Audio speed up [\#4110](https://github.com/pypeclub/OpenPype/pull/4110) -- Maya: keep existing AOVs when creating render instance [\#4087](https://github.com/pypeclub/OpenPype/pull/4087) -- General: Oiio conversion multipart fix [\#4060](https://github.com/pypeclub/OpenPype/pull/4060) - -**๐Ÿ› Bug fixes** - -- Publisher: Signal type issues in Python 2 DCCs [\#4230](https://github.com/pypeclub/OpenPype/pull/4230) -- Blender: Fix Layout Family Versioning [\#4228](https://github.com/pypeclub/OpenPype/pull/4228) -- Blender: Fix Create Camera "Use selection" [\#4226](https://github.com/pypeclub/OpenPype/pull/4226) -- TrayPublisher - join needs list [\#4224](https://github.com/pypeclub/OpenPype/pull/4224) -- General: Event callbacks pass event to callbacks as expected [\#4210](https://github.com/pypeclub/OpenPype/pull/4210) -- Build:Revert .toml update of Gazu [\#4207](https://github.com/pypeclub/OpenPype/pull/4207) -- Nuke: fixed imageio node overrides subset filter [\#4202](https://github.com/pypeclub/OpenPype/pull/4202) -- Maya: pointcache [\#4201](https://github.com/pypeclub/OpenPype/pull/4201) -- Unreal: Support for Unreal Engine 5.1 [\#4199](https://github.com/pypeclub/OpenPype/pull/4199) -- General: Integrate thumbnail looks for thumbnail to multiple places [\#4181](https://github.com/pypeclub/OpenPype/pull/4181) -- Various minor bugfixes [\#4172](https://github.com/pypeclub/OpenPype/pull/4172) -- Nuke/Hiero: Remove tkinter library paths before launch [\#4171](https://github.com/pypeclub/OpenPype/pull/4171) -- Flame: vertical alignment of layers [\#4169](https://github.com/pypeclub/OpenPype/pull/4169) -- Nuke: correct detection of viewer and display [\#4165](https://github.com/pypeclub/OpenPype/pull/4165) -- Settings UI: Don't create QApplication if already exists [\#4156](https://github.com/pypeclub/OpenPype/pull/4156) -- General: Extract review handle start offset of sequences [\#4152](https://github.com/pypeclub/OpenPype/pull/4152) -- Maya: Maintain time connections on Alembic update. 
[\#4143](https://github.com/pypeclub/OpenPype/pull/4143) - -**๐Ÿ”€ Refactored code** - -- General: Use qtpy in modules and hosts UIs which are running in OpenPype process [\#4225](https://github.com/pypeclub/OpenPype/pull/4225) -- Tools: Use qtpy instead of Qt in standalone tools [\#4223](https://github.com/pypeclub/OpenPype/pull/4223) -- General: Use qtpy in settings UI [\#4215](https://github.com/pypeclub/OpenPype/pull/4215) - -**Merged pull requests:** - -- layout publish more than one container issue [\#4098](https://github.com/pypeclub/OpenPype/pull/4098) - -## [3.14.8](https://github.com/pypeclub/OpenPype/tree/3.14.8) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.7...3.14.8) - -**๐Ÿš€ Enhancements** - -- General: Refactored extract hierarchy plugin [\#4139](https://github.com/pypeclub/OpenPype/pull/4139) -- General: Find executable enhancement [\#4137](https://github.com/pypeclub/OpenPype/pull/4137) -- Ftrack: Reset session before instance processing [\#4129](https://github.com/pypeclub/OpenPype/pull/4129) -- Ftrack: Editorial asset sync issue [\#4126](https://github.com/pypeclub/OpenPype/pull/4126) -- Deadline: Build version resolving [\#4115](https://github.com/pypeclub/OpenPype/pull/4115) -- Houdini: New Publisher [\#3046](https://github.com/pypeclub/OpenPype/pull/3046) -- Fix: Standalone Publish Directories [\#4148](https://github.com/pypeclub/OpenPype/pull/4148) - -**๐Ÿ› Bug fixes** - -- Ftrack: Fix occational double parents issue [\#4153](https://github.com/pypeclub/OpenPype/pull/4153) -- General: Maketx executable issue [\#4136](https://github.com/pypeclub/OpenPype/pull/4136) -- Maya: Looks - add all connections [\#4135](https://github.com/pypeclub/OpenPype/pull/4135) -- General: Fix variable check in collect anatomy instance data [\#4117](https://github.com/pypeclub/OpenPype/pull/4117) - -## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.6...3.14.7) - -**๐Ÿ†• New features** - -- Hiero: loading effect family to timeline [\#4055](https://github.com/pypeclub/OpenPype/pull/4055) - -**๐Ÿš€ Enhancements** - -- Photoshop: bug with pop-up window on Instance Creator [\#4121](https://github.com/pypeclub/OpenPype/pull/4121) -- Publisher: Open on specific tab [\#4120](https://github.com/pypeclub/OpenPype/pull/4120) -- Publisher: Hide unknown publish values [\#4116](https://github.com/pypeclub/OpenPype/pull/4116) -- Ftrack: Event server status give more information about version locations [\#4112](https://github.com/pypeclub/OpenPype/pull/4112) -- General: Allow higher numbers in frames and clips [\#4101](https://github.com/pypeclub/OpenPype/pull/4101) -- Publisher: Settings for validate frame range [\#4097](https://github.com/pypeclub/OpenPype/pull/4097) -- Publisher: Ignore escape button [\#4090](https://github.com/pypeclub/OpenPype/pull/4090) -- Flame: Loading clip with native colorspace resolved from mapping [\#4079](https://github.com/pypeclub/OpenPype/pull/4079) -- General: Extract review single frame output [\#4064](https://github.com/pypeclub/OpenPype/pull/4064) -- Publisher: Prepared common function for instance data cache [\#4063](https://github.com/pypeclub/OpenPype/pull/4063) -- Publisher: Easy access to publish page from create page [\#4058](https://github.com/pypeclub/OpenPype/pull/4058) -- General/TVPaint: Attribute defs dialog [\#4052](https://github.com/pypeclub/OpenPype/pull/4052) -- Publisher: Better reset defer 
[\#4048](https://github.com/pypeclub/OpenPype/pull/4048) -- Publisher: Add thumbnail sources [\#4042](https://github.com/pypeclub/OpenPype/pull/4042) - -**๐Ÿ› Bug fixes** - -- General: Move default settings for template name [\#4119](https://github.com/pypeclub/OpenPype/pull/4119) -- Slack: notification fail in new tray publisher [\#4118](https://github.com/pypeclub/OpenPype/pull/4118) -- Nuke: loaded nodes set to first tab [\#4114](https://github.com/pypeclub/OpenPype/pull/4114) -- Nuke: load image first frame [\#4113](https://github.com/pypeclub/OpenPype/pull/4113) -- Files Widget: Ignore case sensitivity of extensions [\#4096](https://github.com/pypeclub/OpenPype/pull/4096) -- Webpublisher: extension is lowercased in Setting and in uploaded files [\#4095](https://github.com/pypeclub/OpenPype/pull/4095) -- Publish Report Viewer: Fix small bugs [\#4086](https://github.com/pypeclub/OpenPype/pull/4086) -- Igniter: fix regex to match semver better [\#4085](https://github.com/pypeclub/OpenPype/pull/4085) -- Maya: aov filtering [\#4083](https://github.com/pypeclub/OpenPype/pull/4083) -- Flame/Flare: Loading to multiple batches [\#4080](https://github.com/pypeclub/OpenPype/pull/4080) -- hiero: creator from settings with set maximum [\#4077](https://github.com/pypeclub/OpenPype/pull/4077) -- Nuke: resolve hashes in file name only for frame token [\#4074](https://github.com/pypeclub/OpenPype/pull/4074) -- Publisher: Fix cache of asset docs [\#4070](https://github.com/pypeclub/OpenPype/pull/4070) -- Webpublisher: cleanup wp extract thumbnail [\#4067](https://github.com/pypeclub/OpenPype/pull/4067) -- Settings UI: Locked setting can't bypass lock [\#4066](https://github.com/pypeclub/OpenPype/pull/4066) -- Loader: Fix comparison of repre name [\#4053](https://github.com/pypeclub/OpenPype/pull/4053) -- Deadline: Extract environment subprocess failure [\#4050](https://github.com/pypeclub/OpenPype/pull/4050) - -**๐Ÿ”€ Refactored code** - -- General: Collect entities plugin minor changes [\#4089](https://github.com/pypeclub/OpenPype/pull/4089) -- General: Direct interfaces import [\#4065](https://github.com/pypeclub/OpenPype/pull/4065) - -**Merged pull requests:** - -- Bump loader-utils from 1.4.1 to 1.4.2 in /website [\#4100](https://github.com/pypeclub/OpenPype/pull/4100) -- Online family for Tray Publisher [\#4093](https://github.com/pypeclub/OpenPype/pull/4093) -- Bump loader-utils from 1.4.0 to 1.4.1 in /website [\#4081](https://github.com/pypeclub/OpenPype/pull/4081) -- remove underscore from subset name [\#4059](https://github.com/pypeclub/OpenPype/pull/4059) -- Alembic Loader as Arnold Standin [\#4047](https://github.com/pypeclub/OpenPype/pull/4047) - -## [3.14.6](https://github.com/pypeclub/OpenPype/tree/3.14.6) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...3.14.6) - -### ๐Ÿ“– Documentation - -- Documentation: Minor updates to dev\_requirements.md [\#4025](https://github.com/pypeclub/OpenPype/pull/4025) - -**๐Ÿ†• New features** - -- Nuke: add 13.2 variant [\#4041](https://github.com/pypeclub/OpenPype/pull/4041) - -**๐Ÿš€ Enhancements** - -- Publish Report Viewer: Store reports locally on machine [\#4040](https://github.com/pypeclub/OpenPype/pull/4040) -- General: More specific error in burnins script [\#4026](https://github.com/pypeclub/OpenPype/pull/4026) -- General: Extract review does not crash with old settings overrides [\#4023](https://github.com/pypeclub/OpenPype/pull/4023) -- Publisher: Convertors for legacy instances 
[\#4020](https://github.com/pypeclub/OpenPype/pull/4020) -- workflows: adding milestone creator and assigner [\#4018](https://github.com/pypeclub/OpenPype/pull/4018) -- Publisher: Catch creator errors [\#4015](https://github.com/pypeclub/OpenPype/pull/4015) - -**๐Ÿ› Bug fixes** - -- Hiero - effect collection fixes [\#4038](https://github.com/pypeclub/OpenPype/pull/4038) -- Nuke - loader clip correct hash conversion in path [\#4037](https://github.com/pypeclub/OpenPype/pull/4037) -- Maya: Soft fail when applying capture preset [\#4034](https://github.com/pypeclub/OpenPype/pull/4034) -- Igniter: handle missing directory [\#4032](https://github.com/pypeclub/OpenPype/pull/4032) -- StandalonePublisher: Fix thumbnail publishing [\#4029](https://github.com/pypeclub/OpenPype/pull/4029) -- Experimental Tools: Fix publisher import [\#4027](https://github.com/pypeclub/OpenPype/pull/4027) -- Houdini: fix wrong path in ASS loader [\#4016](https://github.com/pypeclub/OpenPype/pull/4016) - -**๐Ÿ”€ Refactored code** - -- General: Import lib functions from lib [\#4017](https://github.com/pypeclub/OpenPype/pull/4017) - -## [3.14.5](https://github.com/pypeclub/OpenPype/tree/3.14.5) (2022-10-24) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.4...3.14.5) - -**๐Ÿš€ Enhancements** - -- Maya: add OBJ extractor to model family [\#4021](https://github.com/pypeclub/OpenPype/pull/4021) -- Publish report viewer tool [\#4010](https://github.com/pypeclub/OpenPype/pull/4010) -- Nuke | Global: adding custom tags representation filtering [\#4009](https://github.com/pypeclub/OpenPype/pull/4009) -- Publisher: Create context has shared data for collection phase [\#3995](https://github.com/pypeclub/OpenPype/pull/3995) -- Resolve: updating to v18 compatibility [\#3986](https://github.com/pypeclub/OpenPype/pull/3986) - -**๐Ÿ› Bug fixes** - -- TrayPublisher: Fix missing argument [\#4019](https://github.com/pypeclub/OpenPype/pull/4019) -- General: Fix python 2 compatibility of ffmpeg and oiio tools discovery [\#4011](https://github.com/pypeclub/OpenPype/pull/4011) - -**๐Ÿ”€ Refactored code** - -- Maya: Removed unused imports [\#4008](https://github.com/pypeclub/OpenPype/pull/4008) -- Unreal: Fix import of moved function [\#4007](https://github.com/pypeclub/OpenPype/pull/4007) -- Houdini: Change import of RepairAction [\#4005](https://github.com/pypeclub/OpenPype/pull/4005) -- Nuke/Hiero: Refactor openpype.api imports [\#4000](https://github.com/pypeclub/OpenPype/pull/4000) -- TVPaint: Defined with HostBase [\#3994](https://github.com/pypeclub/OpenPype/pull/3994) - -**Merged pull requests:** - -- Unreal: Remove redundant Creator stub [\#4012](https://github.com/pypeclub/OpenPype/pull/4012) -- Unreal: add `uproject` extension to Unreal project template [\#4004](https://github.com/pypeclub/OpenPype/pull/4004) -- Unreal: fix order of includes [\#4002](https://github.com/pypeclub/OpenPype/pull/4002) -- Fusion: Implement backwards compatibility \(+/- Fusion 17.2\) [\#3958](https://github.com/pypeclub/OpenPype/pull/3958) - -## [3.14.4](https://github.com/pypeclub/OpenPype/tree/3.14.4) (2022-10-19) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.3...3.14.4) - -**๐Ÿ†• New features** - -- Webpublisher: use max next published version number for all items in batch [\#3961](https://github.com/pypeclub/OpenPype/pull/3961) -- General: Control Thumbnail integration via explicit configuration profiles [\#3951](https://github.com/pypeclub/OpenPype/pull/3951) - -**๐Ÿš€ Enhancements** - -- Publisher: 
Multiselection in card view [\#3993](https://github.com/pypeclub/OpenPype/pull/3993) -- TrayPublisher: Original Basename cause crash too early [\#3990](https://github.com/pypeclub/OpenPype/pull/3990) -- Tray Publisher: add `originalBasename` data to simple creators [\#3988](https://github.com/pypeclub/OpenPype/pull/3988) -- General: Custom paths to ffmpeg and OpenImageIO tools [\#3982](https://github.com/pypeclub/OpenPype/pull/3982) -- Integrate: Preserve existing subset group if instance does not set it for new version [\#3976](https://github.com/pypeclub/OpenPype/pull/3976) -- Publisher: Prepare publisher controller for remote publishing [\#3972](https://github.com/pypeclub/OpenPype/pull/3972) -- Maya: new style dataclasses in maya deadline submitter plugin [\#3968](https://github.com/pypeclub/OpenPype/pull/3968) -- Maya: Define preffered Qt bindings for Qt.py and qtpy [\#3963](https://github.com/pypeclub/OpenPype/pull/3963) -- Settings: Move imageio from project anatomy to project settings \[pypeclub\] [\#3959](https://github.com/pypeclub/OpenPype/pull/3959) -- TrayPublisher: Extract thumbnail for other families [\#3952](https://github.com/pypeclub/OpenPype/pull/3952) -- Publisher: Pass instance to subset name method on update [\#3949](https://github.com/pypeclub/OpenPype/pull/3949) -- General: Set root environments before DCC launch [\#3947](https://github.com/pypeclub/OpenPype/pull/3947) -- Refactor: changed legacy way to update database for Hero version integrate [\#3941](https://github.com/pypeclub/OpenPype/pull/3941) -- Maya: Moved plugin from global to maya [\#3939](https://github.com/pypeclub/OpenPype/pull/3939) -- Publisher: Create dialog is part of main window [\#3936](https://github.com/pypeclub/OpenPype/pull/3936) -- Fusion: Implement Alembic and FBX mesh loader [\#3927](https://github.com/pypeclub/OpenPype/pull/3927) - -**๐Ÿ› Bug fixes** - -- TrayPublisher: Disable sequences in batch mov creator [\#3996](https://github.com/pypeclub/OpenPype/pull/3996) -- Fix - tags might be missing on representation [\#3985](https://github.com/pypeclub/OpenPype/pull/3985) -- Resolve: Fix usage of functions from lib [\#3983](https://github.com/pypeclub/OpenPype/pull/3983) -- Maya: remove invalid prefix token for non-multipart outputs [\#3981](https://github.com/pypeclub/OpenPype/pull/3981) -- Ftrack: Fix schema cache for Python 2 [\#3980](https://github.com/pypeclub/OpenPype/pull/3980) -- Maya: add object to attr.s declaration [\#3973](https://github.com/pypeclub/OpenPype/pull/3973) -- Maya: Deadline OutputFilePath hack regression for Renderman [\#3950](https://github.com/pypeclub/OpenPype/pull/3950) -- Houdini: Fix validate workfile paths for non-parm file references [\#3948](https://github.com/pypeclub/OpenPype/pull/3948) -- Photoshop: missed sync published version of workfile with workfile [\#3946](https://github.com/pypeclub/OpenPype/pull/3946) -- Maya: Set default value for RenderSetupIncludeLights option [\#3944](https://github.com/pypeclub/OpenPype/pull/3944) -- Maya: fix regression of Renderman Deadline hack [\#3943](https://github.com/pypeclub/OpenPype/pull/3943) -- Kitsu: 2 fixes, nb\_frames and Shot type error [\#3940](https://github.com/pypeclub/OpenPype/pull/3940) -- Tray: Change order of attribute changes [\#3938](https://github.com/pypeclub/OpenPype/pull/3938) -- AttributeDefs: Fix crashing multivalue of files widget [\#3937](https://github.com/pypeclub/OpenPype/pull/3937) -- General: Fix links query on hero version [\#3900](https://github.com/pypeclub/OpenPype/pull/3900) -- 
Publisher: Files Drag n Drop cleanup [\#3888](https://github.com/pypeclub/OpenPype/pull/3888) - -**๐Ÿ”€ Refactored code** - -- Flame: Import lib functions from lib [\#3992](https://github.com/pypeclub/OpenPype/pull/3992) -- General: Fix deprecated warning in legacy creator [\#3978](https://github.com/pypeclub/OpenPype/pull/3978) -- Blender: Remove openpype api imports [\#3977](https://github.com/pypeclub/OpenPype/pull/3977) -- General: Use direct import of resources [\#3964](https://github.com/pypeclub/OpenPype/pull/3964) -- General: Direct settings imports [\#3934](https://github.com/pypeclub/OpenPype/pull/3934) -- General: import 'Logger' from 'openpype.lib' [\#3926](https://github.com/pypeclub/OpenPype/pull/3926) -- General: Remove deprecated functions from lib [\#3907](https://github.com/pypeclub/OpenPype/pull/3907) - -**Merged pull requests:** - -- Maya + Yeti: Load Yeti Cache fix frame number recognition [\#3942](https://github.com/pypeclub/OpenPype/pull/3942) -- Fusion: Implement callbacks to Fusion's event system thread [\#3928](https://github.com/pypeclub/OpenPype/pull/3928) -- Photoshop: create single frame image in Ftrack as review [\#3908](https://github.com/pypeclub/OpenPype/pull/3908) - -## [3.14.3](https://github.com/pypeclub/OpenPype/tree/3.14.3) (2022-10-03) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.2...3.14.3) - -**๐Ÿš€ Enhancements** - -- Publisher: Enhancement proposals [\#3897](https://github.com/pypeclub/OpenPype/pull/3897) - -**๐Ÿ› Bug fixes** - -- Maya: Fix Render single camera validator [\#3929](https://github.com/pypeclub/OpenPype/pull/3929) -- Flame: loading multilayer exr to batch/reel is working [\#3901](https://github.com/pypeclub/OpenPype/pull/3901) -- Hiero: Fix inventory check on launch [\#3895](https://github.com/pypeclub/OpenPype/pull/3895) -- WebPublisher: Fix import after refactor [\#3891](https://github.com/pypeclub/OpenPype/pull/3891) - -**๐Ÿ”€ Refactored code** - -- Maya: Remove unused 'openpype.api' imports in plugins [\#3925](https://github.com/pypeclub/OpenPype/pull/3925) -- Resolve: Use new Extractor location [\#3918](https://github.com/pypeclub/OpenPype/pull/3918) -- Unreal: Use new Extractor location [\#3917](https://github.com/pypeclub/OpenPype/pull/3917) -- Flame: Use new Extractor location [\#3916](https://github.com/pypeclub/OpenPype/pull/3916) -- Houdini: Use new Extractor location [\#3894](https://github.com/pypeclub/OpenPype/pull/3894) -- Harmony: Use new Extractor location [\#3893](https://github.com/pypeclub/OpenPype/pull/3893) - -**Merged pull requests:** - -- Maya: Fix Scene Inventory possibly starting off-screen due to maya preferences [\#3923](https://github.com/pypeclub/OpenPype/pull/3923) - -## [3.14.2](https://github.com/pypeclub/OpenPype/tree/3.14.2) (2022-09-12) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.1...3.14.2) - -### ๐Ÿ“– Documentation - -- Documentation: Anatomy templates [\#3618](https://github.com/pypeclub/OpenPype/pull/3618) - -**๐Ÿ†• New features** - -- Nuke: Build workfile by template [\#3763](https://github.com/pypeclub/OpenPype/pull/3763) -- Houdini: Publishing workfiles [\#3697](https://github.com/pypeclub/OpenPype/pull/3697) -- Global: making collect audio plugin global [\#3679](https://github.com/pypeclub/OpenPype/pull/3679) - -**๐Ÿš€ Enhancements** - -- Flame: Adding Creator's retimed shot and handles switch [\#3826](https://github.com/pypeclub/OpenPype/pull/3826) -- Flame: OpenPype submenu to batch and media manager 
[\#3825](https://github.com/pypeclub/OpenPype/pull/3825) -- General: Better pixmap scaling [\#3809](https://github.com/pypeclub/OpenPype/pull/3809) -- Photoshop: attempt to speed up ExtractImage [\#3793](https://github.com/pypeclub/OpenPype/pull/3793) -- SyncServer: Added cli commands for sync server [\#3765](https://github.com/pypeclub/OpenPype/pull/3765) -- Kitsu: Drop 'entities root' setting. [\#3739](https://github.com/pypeclub/OpenPype/pull/3739) -- git: update gitignore [\#3722](https://github.com/pypeclub/OpenPype/pull/3722) -- Blender: Publisher collect workfile representation [\#3670](https://github.com/pypeclub/OpenPype/pull/3670) -- Maya: move set render settings menu entry [\#3669](https://github.com/pypeclub/OpenPype/pull/3669) -- Scene Inventory: Maya add actions to select from or to scene [\#3659](https://github.com/pypeclub/OpenPype/pull/3659) -- Scene Inventory: Add subsetGroup column [\#3658](https://github.com/pypeclub/OpenPype/pull/3658) - -**๐Ÿ› Bug fixes** - -- General: Fix Pattern access in client code [\#3828](https://github.com/pypeclub/OpenPype/pull/3828) -- Launcher: Skip opening last work file works for groups [\#3822](https://github.com/pypeclub/OpenPype/pull/3822) -- Maya: Publishing data key change [\#3811](https://github.com/pypeclub/OpenPype/pull/3811) -- Igniter: Fix status handling when version is already installed [\#3804](https://github.com/pypeclub/OpenPype/pull/3804) -- Resolve: Addon import is Python 2 compatible [\#3798](https://github.com/pypeclub/OpenPype/pull/3798) -- Hiero: retimed clip publishing is working [\#3792](https://github.com/pypeclub/OpenPype/pull/3792) -- nuke: validate write node is not failing due wrong type [\#3780](https://github.com/pypeclub/OpenPype/pull/3780) -- Fix - changed format of version string in pyproject.toml [\#3777](https://github.com/pypeclub/OpenPype/pull/3777) -- Ftrack status fix typo prgoress -\> progress [\#3761](https://github.com/pypeclub/OpenPype/pull/3761) -- Fix version resolution [\#3757](https://github.com/pypeclub/OpenPype/pull/3757) -- Maya: `containerise` dont skip empty values [\#3674](https://github.com/pypeclub/OpenPype/pull/3674) - -**๐Ÿ”€ Refactored code** - -- Photoshop: Use new Extractor location [\#3789](https://github.com/pypeclub/OpenPype/pull/3789) -- Blender: Use new Extractor location [\#3787](https://github.com/pypeclub/OpenPype/pull/3787) -- AfterEffects: Use new Extractor location [\#3784](https://github.com/pypeclub/OpenPype/pull/3784) -- General: Remove unused teshost [\#3773](https://github.com/pypeclub/OpenPype/pull/3773) -- General: Copied 'Extractor' plugin to publish pipeline [\#3771](https://github.com/pypeclub/OpenPype/pull/3771) -- General: Move queries of asset and representation links [\#3770](https://github.com/pypeclub/OpenPype/pull/3770) -- General: Move create project folders to pipeline [\#3768](https://github.com/pypeclub/OpenPype/pull/3768) -- General: Create project function moved to client code [\#3766](https://github.com/pypeclub/OpenPype/pull/3766) -- Maya: Refactor submit deadline to use AbstractSubmitDeadline [\#3759](https://github.com/pypeclub/OpenPype/pull/3759) -- General: Change publish template settings location [\#3755](https://github.com/pypeclub/OpenPype/pull/3755) -- General: Move hostdirname functionality into host [\#3749](https://github.com/pypeclub/OpenPype/pull/3749) -- General: Move publish utils to pipeline [\#3745](https://github.com/pypeclub/OpenPype/pull/3745) -- Houdini: Define houdini as addon 
[\#3735](https://github.com/pypeclub/OpenPype/pull/3735) -- Fusion: Defined fusion as addon [\#3733](https://github.com/pypeclub/OpenPype/pull/3733) -- Flame: Defined flame as addon [\#3732](https://github.com/pypeclub/OpenPype/pull/3732) -- Resolve: Define resolve as addon [\#3727](https://github.com/pypeclub/OpenPype/pull/3727) - -**Merged pull requests:** - -- Standalone Publisher: Ignore empty labels, then still use name like other asset models [\#3779](https://github.com/pypeclub/OpenPype/pull/3779) -- Kitsu - sync\_all\_project - add list ignore\_projects [\#3776](https://github.com/pypeclub/OpenPype/pull/3776) - -## [3.14.1](https://github.com/pypeclub/OpenPype/tree/3.14.1) (2022-08-30) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.0...3.14.1) - -### ๐Ÿ“– Documentation - -- Documentation: Few updates [\#3698](https://github.com/pypeclub/OpenPype/pull/3698) -- Documentation: Settings development [\#3660](https://github.com/pypeclub/OpenPype/pull/3660) - -**๐Ÿ†• New features** - -- Webpublisher:change create flatten image into tri state [\#3678](https://github.com/pypeclub/OpenPype/pull/3678) -- Blender: validators code correction with settings and defaults [\#3662](https://github.com/pypeclub/OpenPype/pull/3662) - -**๐Ÿš€ Enhancements** - -- General: Thumbnail can use project roots [\#3750](https://github.com/pypeclub/OpenPype/pull/3750) -- Settings: Remove settings lock on tray exit [\#3720](https://github.com/pypeclub/OpenPype/pull/3720) -- General: Added helper getters to modules manager [\#3712](https://github.com/pypeclub/OpenPype/pull/3712) -- Unreal: Define unreal as module and use host class [\#3701](https://github.com/pypeclub/OpenPype/pull/3701) -- Settings: Lock settings UI session [\#3700](https://github.com/pypeclub/OpenPype/pull/3700) -- General: Benevolent context label collector [\#3686](https://github.com/pypeclub/OpenPype/pull/3686) -- Ftrack: Store ftrack entities on hierarchy integration to instances [\#3677](https://github.com/pypeclub/OpenPype/pull/3677) -- Ftrack: More logs related to auto sync value change [\#3671](https://github.com/pypeclub/OpenPype/pull/3671) -- Blender: ops refresh manager after process events [\#3663](https://github.com/pypeclub/OpenPype/pull/3663) - -**๐Ÿ› Bug fixes** - -- Maya: Fix typo in getPanel argument `with_focus` -\> `withFocus` [\#3753](https://github.com/pypeclub/OpenPype/pull/3753) -- General: Smaller fixes of imports [\#3748](https://github.com/pypeclub/OpenPype/pull/3748) -- General: Logger tweaks [\#3741](https://github.com/pypeclub/OpenPype/pull/3741) -- Nuke: missing job dependency if multiple bake streams [\#3737](https://github.com/pypeclub/OpenPype/pull/3737) -- Nuke: color-space settings from anatomy is working [\#3721](https://github.com/pypeclub/OpenPype/pull/3721) -- Settings: Fix studio default anatomy save [\#3716](https://github.com/pypeclub/OpenPype/pull/3716) -- Maya: Use project name instead of project code [\#3709](https://github.com/pypeclub/OpenPype/pull/3709) -- Settings: Fix project overrides save [\#3708](https://github.com/pypeclub/OpenPype/pull/3708) -- Workfiles tool: Fix published workfile filtering [\#3704](https://github.com/pypeclub/OpenPype/pull/3704) -- PS, AE: Provide default variant value for workfile subset [\#3703](https://github.com/pypeclub/OpenPype/pull/3703) -- RoyalRender: handle host name that is not set [\#3695](https://github.com/pypeclub/OpenPype/pull/3695) -- Flame: retime is working on clip publishing 
[\#3684](https://github.com/pypeclub/OpenPype/pull/3684) -- Webpublisher: added check for empty context [\#3682](https://github.com/pypeclub/OpenPype/pull/3682) - -**๐Ÿ”€ Refactored code** - -- General: Move delivery logic to pipeline [\#3751](https://github.com/pypeclub/OpenPype/pull/3751) -- General: Host addons cleanup [\#3744](https://github.com/pypeclub/OpenPype/pull/3744) -- Webpublisher: Webpublisher is used as addon [\#3740](https://github.com/pypeclub/OpenPype/pull/3740) -- Photoshop: Defined photoshop as addon [\#3736](https://github.com/pypeclub/OpenPype/pull/3736) -- Harmony: Defined harmony as addon [\#3734](https://github.com/pypeclub/OpenPype/pull/3734) -- General: Module interfaces cleanup [\#3731](https://github.com/pypeclub/OpenPype/pull/3731) -- AfterEffects: Move AE functions from general lib [\#3730](https://github.com/pypeclub/OpenPype/pull/3730) -- Blender: Define blender as module [\#3729](https://github.com/pypeclub/OpenPype/pull/3729) -- AfterEffects: Define AfterEffects as module [\#3728](https://github.com/pypeclub/OpenPype/pull/3728) -- General: Replace PypeLogger with Logger [\#3725](https://github.com/pypeclub/OpenPype/pull/3725) -- Nuke: Define nuke as module [\#3724](https://github.com/pypeclub/OpenPype/pull/3724) -- General: Move subset name functionality [\#3723](https://github.com/pypeclub/OpenPype/pull/3723) -- General: Move creators plugin getter [\#3714](https://github.com/pypeclub/OpenPype/pull/3714) -- General: Move constants from lib to client [\#3713](https://github.com/pypeclub/OpenPype/pull/3713) -- Loader: Subset groups using client operations [\#3710](https://github.com/pypeclub/OpenPype/pull/3710) -- TVPaint: Defined as module [\#3707](https://github.com/pypeclub/OpenPype/pull/3707) -- StandalonePublisher: Define StandalonePublisher as module [\#3706](https://github.com/pypeclub/OpenPype/pull/3706) -- TrayPublisher: Define TrayPublisher as module [\#3705](https://github.com/pypeclub/OpenPype/pull/3705) -- General: Move context specific functions to context tools [\#3702](https://github.com/pypeclub/OpenPype/pull/3702) - -**Merged pull requests:** - -- Hiero: Define hiero as module [\#3717](https://github.com/pypeclub/OpenPype/pull/3717) -- Deadline: better logging for DL webservice failures [\#3694](https://github.com/pypeclub/OpenPype/pull/3694) -- Photoshop: resize saved images in ExtractReview for ffmpeg [\#3676](https://github.com/pypeclub/OpenPype/pull/3676) -- Nuke: Validation refactory to new publisher [\#3567](https://github.com/pypeclub/OpenPype/pull/3567) - -## [3.14.0](https://github.com/pypeclub/OpenPype/tree/3.14.0) (2022-08-18) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...3.14.0) - -**๐Ÿ†• New features** - -- Maya: Build workfile by template [\#3578](https://github.com/pypeclub/OpenPype/pull/3578) -- Maya: Implementation of JSON layout for Unreal workflow [\#3353](https://github.com/pypeclub/OpenPype/pull/3353) -- Maya: Build workfile by template [\#3315](https://github.com/pypeclub/OpenPype/pull/3315) - -**๐Ÿš€ Enhancements** - -- Ftrack: Addiotional component metadata [\#3685](https://github.com/pypeclub/OpenPype/pull/3685) -- Ftrack: Set task status on farm publishing [\#3680](https://github.com/pypeclub/OpenPype/pull/3680) -- Ftrack: Set task status on task creation in integrate hierarchy [\#3675](https://github.com/pypeclub/OpenPype/pull/3675) -- Maya: Disable rendering of all lights for render instances submitted through Deadline. 
[\#3661](https://github.com/pypeclub/OpenPype/pull/3661) -- General: Optimized OCIO configs [\#3650](https://github.com/pypeclub/OpenPype/pull/3650) - -**๐Ÿ› Bug fixes** - -- General: Switch from hero version to versioned works [\#3691](https://github.com/pypeclub/OpenPype/pull/3691) -- General: Fix finding of last version [\#3656](https://github.com/pypeclub/OpenPype/pull/3656) -- General: Extract Review can scale with pixel aspect ratio [\#3644](https://github.com/pypeclub/OpenPype/pull/3644) -- Maya: Refactor moved usage of CreateRender settings [\#3643](https://github.com/pypeclub/OpenPype/pull/3643) -- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) -- Nuke: color settings for render write node is working now [\#3632](https://github.com/pypeclub/OpenPype/pull/3632) -- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) - -**๐Ÿ”€ Refactored code** - -- General: Use client projects getter [\#3673](https://github.com/pypeclub/OpenPype/pull/3673) -- Resolve: Match folder structure to other hosts [\#3653](https://github.com/pypeclub/OpenPype/pull/3653) -- Maya: Hosts as modules [\#3647](https://github.com/pypeclub/OpenPype/pull/3647) -- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) -- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) -- General: Workfiles builder using query functions [\#3598](https://github.com/pypeclub/OpenPype/pull/3598) - -**Merged pull requests:** - -- Deadline: Global job pre load is not Pype 2 compatible [\#3666](https://github.com/pypeclub/OpenPype/pull/3666) -- Maya: Remove unused get current renderer logic [\#3645](https://github.com/pypeclub/OpenPype/pull/3645) -- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) -- fix the bug of failing to extract look when UDIMs format used in AiImage [\#3628](https://github.com/pypeclub/OpenPype/pull/3628) - -## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.2...3.13.0) - -**๐Ÿ†• New features** - -- Support for mutliple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) -- Traypublisher: simple editorial publishing [\#3492](https://github.com/pypeclub/OpenPype/pull/3492) - -**๐Ÿš€ Enhancements** - -- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) -- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) -- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612) -- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) -- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) -- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) -- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) -- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) -- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) -- Maya: Render Creator has configurable options. 
[\#3097](https://github.com/pypeclub/OpenPype/pull/3097) - -**๐Ÿ› Bug fixes** - -- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) -- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) -- Ftrack: Sync hierarchical attributes can handle new created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) -- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) -- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617) -- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) -- AfterEffects: refactored integrate doesnt work formulti frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) -- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) -- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) -- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. [\#3590](https://github.com/pypeclub/OpenPype/pull/3590) -- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) -- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) -- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) -- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) -- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) -- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) - -**๐Ÿ”€ Refactored code** - -- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) -- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) -- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) -- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) -- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) - -**Merged pull requests:** - -- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) -- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) -- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) - -## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...3.12.2) - -### ๐Ÿ“– Documentation - -- Update website with more studios [\#3554](https://github.com/pypeclub/OpenPype/pull/3554) -- Documentation: Update publishing dev docs [\#3549](https://github.com/pypeclub/OpenPype/pull/3549) - -**๐Ÿš€ Enhancements** - -- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) -- Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) -- General: Interactive console in cli 
[\#3526](https://github.com/pypeclub/OpenPype/pull/3526) -- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) -- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) -- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506) -- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505) -- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) -- General: Event system [\#3499](https://github.com/pypeclub/OpenPype/pull/3499) -- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) -- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) -- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) -- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469) -- Enhance powershell build scripts [\#1827](https://github.com/pypeclub/OpenPype/pull/1827) - -**๐Ÿ› Bug fixes** - -- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) -- Maya: Fix animated attributes \(ie. overscan\) on loaded cameras breaking review publishing. [\#3562](https://github.com/pypeclub/OpenPype/pull/3562) -- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) -- Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) -- General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) -- Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) -- Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) -- Workfiles tool: Show of tool and it's flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) -- General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) -- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) -- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) -- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) -- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) -- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) -- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) -- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) -- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) -- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) - -**๐Ÿ”€ Refactored code** - -- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) -- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) -- Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) -- General: Client docstrings cleanup 
-- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527)
-- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522)
-- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496)
-- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495)
-- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466)
-- Refactor Integrate Asset [\#2898](https://github.com/pypeclub/OpenPype/pull/2898)
-
-**Merged pull requests:**
-
-- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566)
-
-## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.0...3.12.1)
-
-### 📖 Documentation
-
-- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441)
-
-**🆕 New features**
-
-- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433)
-
-**🚀 Enhancements**
-
-- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494)
-- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487)
-- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484)
-- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476)
-- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475)
-- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465)
-- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445)
-- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426)
-- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425)
-- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400)
-- Maya: Add additional playblast options to review Extractor. [\#3384](https://github.com/pypeclub/OpenPype/pull/3384)
-- Maya: Ability to set resolution for playblasts from asset, and override through review instance. [\#3360](https://github.com/pypeclub/OpenPype/pull/3360)
-- Maya: Redshift Volume Loader Implement update, remove, switch + fix vdb sequence support [\#3197](https://github.com/pypeclub/OpenPype/pull/3197)
-- Maya: Implement `iter_visible_nodes_in_range` for extracting Alembics [\#3100](https://github.com/pypeclub/OpenPype/pull/3100)
-
-**🐛 Bug fixes**
-
-- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493)
-- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491)
-- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485)
-- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479)
-- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478)
-- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474)
-- Kitsu: bugfix with sync-service and publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473)
-- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470)
-- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468)
-- Resolve: removed a few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464)
-- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462)
-- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455)
-- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450)
-- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447)
-- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443)
-- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427)
-- Maya: Camera extra data - additional fix for \#3304 [\#3386](https://github.com/pypeclub/OpenPype/pull/3386)
-- Maya: Handle excluding `model` family from frame range validator. [\#3370](https://github.com/pypeclub/OpenPype/pull/3370)
-
-**🔀 Refactored code**
-
-- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461)
-- Maya: Re-use `maintained_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460)
-- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459)
-- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458)
-- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457)
-- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454)
-- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446)
-- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442)
-- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436)
-- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435)
-- Fusion: Use client query functions [\#3380](https://github.com/pypeclub/OpenPype/pull/3380)
-- Resolve: Use client query functions [\#3379](https://github.com/pypeclub/OpenPype/pull/3379)
-- General: Host implementation defined with class [\#3337](https://github.com/pypeclub/OpenPype/pull/3337)
-
-## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.1...3.12.0)
-
-### 📖 Documentation
-
-- Fix typo in documentation: pyenv on mac [\#3417](https://github.com/pypeclub/OpenPype/pull/3417)
-- Linux: update OIIO package [\#3401](https://github.com/pypeclub/OpenPype/pull/3401)
-
-**🆕 New features**
-
-- Shotgrid: Add production beta of shotgrid integration [\#2921](https://github.com/pypeclub/OpenPype/pull/2921)
-
-**🚀 Enhancements**
-
-- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422)
-- Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411)
-- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366)
-- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357)
-- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350)
-- Maya: Allow more data to be published along camera 🎥 [\#3304](https://github.com/pypeclub/OpenPype/pull/3304)
-- Add root keys and project keys to create starting folder [\#2755](https://github.com/pypeclub/OpenPype/pull/2755)
-
-**🐛 Bug fixes**
-
-- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420)
-- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418)
-- Nuke: Fix keyword argument in query function [\#3414](https://github.com/pypeclub/OpenPype/pull/3414)
-- Houdini: fix loading and updating vdb/bgeo sequences [\#3408](https://github.com/pypeclub/OpenPype/pull/3408)
-- Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407)
-- General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398)
-- Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392)
-- TVPaint: Make sure exit code is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382)
-- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381)
-- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377)
-- Harmony: added unc path to zifile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372)
-- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355)
-- Nuke: Load full model hierarchy by default [\#3328](https://github.com/pypeclub/OpenPype/pull/3328)
-- Nuke: multiple baking streams with correct slate [\#3245](https://github.com/pypeclub/OpenPype/pull/3245)
-- Maya: fix image prefix warning in validator [\#3128](https://github.com/pypeclub/OpenPype/pull/3128)
-
-**🔀 Refactored code**
-
-- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421)
-- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419)
-- Kitsu: renaming to plural func sync\_all\_projects [\#3397](https://github.com/pypeclub/OpenPype/pull/3397)
-- Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395)
-- Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393)
-- Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391)
-- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385)
-- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378)
-- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376)
-- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375)
-- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374)
-- TVPaint: Use client query functions [\#3340](https://github.com/pypeclub/OpenPype/pull/3340)
-- Ftrack: Use client query functions [\#3339](https://github.com/pypeclub/OpenPype/pull/3339)
-- Standalone Publisher: Use client query functions [\#3330](https://github.com/pypeclub/OpenPype/pull/3330)
-
-**Merged pull requests:**
-
-- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371)
-- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369)
-- Houdini: Implement Redshift Proxy Export [\#3196](https://github.com/pypeclub/OpenPype/pull/3196)
-
-## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.0...3.11.1)
-
-**🆕 New features**
-
-- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346)
-- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344)
-
-**🚀 Enhancements**
-
-- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367)
-- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354)
-- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352)
-- Ftrack: Open browser from tray [\#3320](https://github.com/pypeclub/OpenPype/pull/3320)
-- Enhancement: More control over thumbnail processing. [\#3259](https://github.com/pypeclub/OpenPype/pull/3259)
-
-**🐛 Bug fixes**
-
-- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368)
-- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364)
-- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363)
-- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361)
-- AE: fix validate\_scene\_settings and renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358)
-- deadline: fixing misidentification of reviewables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356)
-- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351)
-- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347)
-- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345)
-- Deadline: added OPENPYPE\_MONGO to filter [\#3336](https://github.com/pypeclub/OpenPype/pull/3336)
-- Nuke: fixing farm publishing if review is disabled [\#3306](https://github.com/pypeclub/OpenPype/pull/3306)
-- Maya: Fix Yeti errors on Create, Publish and Load [\#3198](https://github.com/pypeclub/OpenPype/pull/3198)
-
-**🔀 Refactored code**
-
-- Webpublisher: Use client query functions [\#3333](https://github.com/pypeclub/OpenPype/pull/3333)
-
-## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.10.0...3.11.0)
-
-### 📖 Documentation
-
-- Documentation: Add app key to template documentation [\#3299](https://github.com/pypeclub/OpenPype/pull/3299)
-- doc: adding royal render and multiverse to the web site [\#3285](https://github.com/pypeclub/OpenPype/pull/3285)
-- Module: Kitsu module [\#2650](https://github.com/pypeclub/OpenPype/pull/2650)
-
-**🆕 New features**
-
-- Multiverse: fixed composition write, full docs, cosmetics [\#3178](https://github.com/pypeclub/OpenPype/pull/3178)
-
-**🚀 Enhancements**
-
-- Settings: Settings can be extracted from UI [\#3323](https://github.com/pypeclub/OpenPype/pull/3323)
-- updated poetry installation source [\#3316](https://github.com/pypeclub/OpenPype/pull/3316)
-- Ftrack: Action to easily create daily review session [\#3310](https://github.com/pypeclub/OpenPype/pull/3310)
-- TVPaint: Extractor use mark in/out range to render [\#3309](https://github.com/pypeclub/OpenPype/pull/3309)
-- Ftrack: Delivery action can work on ReviewSessions [\#3307](https://github.com/pypeclub/OpenPype/pull/3307)
-- Maya: Look assigner UI improvements [\#3298](https://github.com/pypeclub/OpenPype/pull/3298)
-- Ftrack: Action to transfer values of hierarchical attributes [\#3284](https://github.com/pypeclub/OpenPype/pull/3284)
-- Maya: better handling of legacy review subset names [\#3269](https://github.com/pypeclub/OpenPype/pull/3269)
-- General: Updated windows oiio tool [\#3268](https://github.com/pypeclub/OpenPype/pull/3268)
-- Unreal: add support for skeletalMesh and staticMesh to loaders [\#3267](https://github.com/pypeclub/OpenPype/pull/3267)
-- Maya: reference loaders could store placeholder in referenced url [\#3264](https://github.com/pypeclub/OpenPype/pull/3264)
-- TVPaint: Init file for TVPaint worker also handles guideline images [\#3250](https://github.com/pypeclub/OpenPype/pull/3250)
-- Nuke: Change default icon path in settings [\#3247](https://github.com/pypeclub/OpenPype/pull/3247)
-- Maya: publishing of animation and pointcache on a farm [\#3225](https://github.com/pypeclub/OpenPype/pull/3225)
-- Maya: Look assigner UI improvements [\#3208](https://github.com/pypeclub/OpenPype/pull/3208)
-- Nuke: add pointcache and animation to loader [\#3186](https://github.com/pypeclub/OpenPype/pull/3186)
-- Nuke: Add a gizmo menu [\#3172](https://github.com/pypeclub/OpenPype/pull/3172)
-- Support for Unreal 5 [\#3122](https://github.com/pypeclub/OpenPype/pull/3122)
-
-**🐛 Bug fixes**
-
-- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342)
-- Houdini: Fix Houdini VDB manage update wrong file attribute name [\#3322](https://github.com/pypeclub/OpenPype/pull/3322)
-- Nuke: anatomy compatibility issue hacks [\#3321](https://github.com/pypeclub/OpenPype/pull/3321)
-- hiero: otio p3 compatibility issue - metadata on effect use update 3.11 [\#3314](https://github.com/pypeclub/OpenPype/pull/3314)
-- General: Vendorized modules for Python 2 and update poetry lock [\#3305](https://github.com/pypeclub/OpenPype/pull/3305)
-- Fix - added local targets to install host [\#3303](https://github.com/pypeclub/OpenPype/pull/3303)
-- Settings: Add missing default settings for nuke gizmo [\#3301](https://github.com/pypeclub/OpenPype/pull/3301)
-- Maya: Fix swapped width and height in reviews [\#3300](https://github.com/pypeclub/OpenPype/pull/3300)
-- Maya: point cache publish handles Maya instances [\#3297](https://github.com/pypeclub/OpenPype/pull/3297)
-- Global: extract review slate issues [\#3286](https://github.com/pypeclub/OpenPype/pull/3286)
-- Webpublisher: return only active projects in ProjectsEndpoint [\#3281](https://github.com/pypeclub/OpenPype/pull/3281)
-- Hiero: add support for task tags 3.10.x [\#3279](https://github.com/pypeclub/OpenPype/pull/3279)
-- General: Fix Oiio tool path resolving [\#3278](https://github.com/pypeclub/OpenPype/pull/3278)
-- Maya: Fix udim support for e.g. uppercase \<UDIM\> tag [\#3266](https://github.com/pypeclub/OpenPype/pull/3266)
-- Nuke: bake reformat was failing on string type [\#3261](https://github.com/pypeclub/OpenPype/pull/3261)
-- Maya: hotfix Pxr multitexture in looks [\#3260](https://github.com/pypeclub/OpenPype/pull/3260)
-- Unreal: Fix Camera Loading if Layout is missing [\#3255](https://github.com/pypeclub/OpenPype/pull/3255)
-- Unreal: Fixed Animation loading in UE5 [\#3240](https://github.com/pypeclub/OpenPype/pull/3240)
-- Unreal: Fixed Render creation in UE5 [\#3239](https://github.com/pypeclub/OpenPype/pull/3239)
-- Unreal: Fixed Camera loading in UE5 [\#3238](https://github.com/pypeclub/OpenPype/pull/3238)
-- Flame: debugging [\#3224](https://github.com/pypeclub/OpenPype/pull/3224)
-- add silent audio to slate [\#3162](https://github.com/pypeclub/OpenPype/pull/3162)
-- Add timecode to slate [\#2929](https://github.com/pypeclub/OpenPype/pull/2929)
-
-**🔀 Refactored code**
-
-- Blender: Use client query functions [\#3331](https://github.com/pypeclub/OpenPype/pull/3331)
-- General: Define query functions [\#3288](https://github.com/pypeclub/OpenPype/pull/3288)
-
-**Merged pull requests:**
-
-- Maya: add pointcache family to gpu cache loader [\#3318](https://github.com/pypeclub/OpenPype/pull/3318)
-- Maya look: skip empty file attributes [\#3274](https://github.com/pypeclub/OpenPype/pull/3274)
-
-## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.8...3.10.0)
-
-### 📖 Documentation
-
-- Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094)
-- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052)
-
-**🆕 New features**
-
-- General: OpenPype modules publish plugins are registered in host [\#3180](https://github.com/pypeclub/OpenPype/pull/3180)
-- General: Creator plugins from addons can be registered [\#3179](https://github.com/pypeclub/OpenPype/pull/3179)
-- Ftrack: Single image reviewable [\#3157](https://github.com/pypeclub/OpenPype/pull/3157)
-- Nuke: Expose write attributes to settings [\#3123](https://github.com/pypeclub/OpenPype/pull/3123)
-- Hiero: Initial frame publish support [\#3106](https://github.com/pypeclub/OpenPype/pull/3106)
-- Unreal: Render Publishing [\#2917](https://github.com/pypeclub/OpenPype/pull/2917)
-- AfterEffects: Implemented New Publisher [\#2838](https://github.com/pypeclub/OpenPype/pull/2838)
-- Unreal: Rendering implementation [\#2410](https://github.com/pypeclub/OpenPype/pull/2410)
-
-**🚀 Enhancements**
-
-- Maya: FBX camera export [\#3253](https://github.com/pypeclub/OpenPype/pull/3253)
-- General: updating common vendor `scriptmenu` to 1.5.2 [\#3246](https://github.com/pypeclub/OpenPype/pull/3246)
-- Project Manager: Allow to paste Tasks into multiple assets at the same time [\#3226](https://github.com/pypeclub/OpenPype/pull/3226)
-- Project manager: Sped up project load [\#3216](https://github.com/pypeclub/OpenPype/pull/3216)
-- Loader UI: Speed issues of loader with sync server [\#3199](https://github.com/pypeclub/OpenPype/pull/3199)
-- Looks: add basic support for Renderman [\#3190](https://github.com/pypeclub/OpenPype/pull/3190)
-- Maya: added clean\_import option to Import loader [\#3181](https://github.com/pypeclub/OpenPype/pull/3181)
-- Add the scripts menu definition to nuke [\#3168](https://github.com/pypeclub/OpenPype/pull/3168)
-- Maya: add maya 2023 to default applications [\#3167](https://github.com/pypeclub/OpenPype/pull/3167)
-- Compressed bgeo publishing in SAP and Houdini loader [\#3153](https://github.com/pypeclub/OpenPype/pull/3153)
-- General: Add 'dataclasses' to required python modules [\#3149](https://github.com/pypeclub/OpenPype/pull/3149)
-- Hooks: Tweak logging grammar [\#3147](https://github.com/pypeclub/OpenPype/pull/3147)
-- Nuke: settings for reformat node in CreateWriteRender node [\#3143](https://github.com/pypeclub/OpenPype/pull/3143)
-- Houdini: Add loader for alembic through Alembic Archive node [\#3140](https://github.com/pypeclub/OpenPype/pull/3140)
-- Publisher: UI Modifications and fixes [\#3139](https://github.com/pypeclub/OpenPype/pull/3139)
-- General: Simplified OP modules/addons import [\#3137](https://github.com/pypeclub/OpenPype/pull/3137)
-- Terminal: Tweak coloring of TrayModuleManager logging enabled states [\#3133](https://github.com/pypeclub/OpenPype/pull/3133)
-- General: Cleanup some Loader docstrings [\#3131](https://github.com/pypeclub/OpenPype/pull/3131)
-- Nuke: render instance with subset name filtered overrides [\#3117](https://github.com/pypeclub/OpenPype/pull/3117)
-- Unreal: Layout and Camera update and remove functions reimplemented and improvements [\#3116](https://github.com/pypeclub/OpenPype/pull/3116)
-- Settings: Remove environment groups from settings [\#3115](https://github.com/pypeclub/OpenPype/pull/3115)
-- TVPaint: Match renderlayer key with other hosts [\#3110](https://github.com/pypeclub/OpenPype/pull/3110)
-- Ftrack: AssetVersion status on publish [\#3108](https://github.com/pypeclub/OpenPype/pull/3108)
-- Tray publisher: Simple families from settings [\#3105](https://github.com/pypeclub/OpenPype/pull/3105)
-- Local Settings UI: Overlay messages on save and reset [\#3104](https://github.com/pypeclub/OpenPype/pull/3104)
-- General: Remove repos related logic [\#3087](https://github.com/pypeclub/OpenPype/pull/3087)
-- Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080)
-- Houdini: Fix FPS + outdated content pop-ups [\#3079](https://github.com/pypeclub/OpenPype/pull/3079)
-- General: Add global log verbose arguments [\#3070](https://github.com/pypeclub/OpenPype/pull/3070)
-- Flame: extract presets distribution [\#3063](https://github.com/pypeclub/OpenPype/pull/3063)
-- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055)
-- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983)
-- Maya: Implement Hardware Renderer 2.0 support for Render Products [\#2611](https://github.com/pypeclub/OpenPype/pull/2611)
-
-**🐛 Bug fixes**
-
-- nuke: use framerange issue [\#3254](https://github.com/pypeclub/OpenPype/pull/3254)
-- Ftrack: Chunk sizes for queries have a minimal condition [\#3244](https://github.com/pypeclub/OpenPype/pull/3244)
-- Maya: renderman displays need to be filtered [\#3242](https://github.com/pypeclub/OpenPype/pull/3242)
-- Ftrack: Validate that the user exists on ftrack [\#3237](https://github.com/pypeclub/OpenPype/pull/3237)
-- Maya: Fix support for multiple resolutions [\#3236](https://github.com/pypeclub/OpenPype/pull/3236)
-- TVPaint: Look for more groups than 12 [\#3228](https://github.com/pypeclub/OpenPype/pull/3228)
-- Hiero: debugging frame range and other 3.10 [\#3222](https://github.com/pypeclub/OpenPype/pull/3222)
-- Project Manager: Fix persistent editors on project change [\#3218](https://github.com/pypeclub/OpenPype/pull/3218)
-- Deadline: instance data overwrite fix [\#3214](https://github.com/pypeclub/OpenPype/pull/3214)
-- Ftrack: Push hierarchical attributes action works [\#3210](https://github.com/pypeclub/OpenPype/pull/3210)
-- Standalone Publisher: Always create new representation for thumbnail [\#3203](https://github.com/pypeclub/OpenPype/pull/3203)
-- Photoshop: skip collector when automatic testing [\#3202](https://github.com/pypeclub/OpenPype/pull/3202)
-- Nuke: render/workfile version sync doesn't work on farm [\#3185](https://github.com/pypeclub/OpenPype/pull/3185)
-- Ftrack: Review image only if there are no mp4 reviews [\#3183](https://github.com/pypeclub/OpenPype/pull/3183)
-- Ftrack: Locations deepcopy issue [\#3177](https://github.com/pypeclub/OpenPype/pull/3177)
-- General: Avoid creating multiple thumbnails [\#3176](https://github.com/pypeclub/OpenPype/pull/3176)
-- General/Hiero: better clip duration calculation [\#3169](https://github.com/pypeclub/OpenPype/pull/3169)
-- General: Oiio conversion for ffmpeg checks for invalid characters [\#3166](https://github.com/pypeclub/OpenPype/pull/3166)
-- Fix for attaching render to subset [\#3164](https://github.com/pypeclub/OpenPype/pull/3164)
-- Harmony: fixed missing task name in render instance [\#3163](https://github.com/pypeclub/OpenPype/pull/3163)
-- Ftrack: Action delete old versions formatting works [\#3152](https://github.com/pypeclub/OpenPype/pull/3152)
-- Deadline: fix the output directory [\#3144](https://github.com/pypeclub/OpenPype/pull/3144)
-- General: New Session schema [\#3141](https://github.com/pypeclub/OpenPype/pull/3141)
-- General: Missing version on headless mode crash properly [\#3136](https://github.com/pypeclub/OpenPype/pull/3136)
-- TVPaint: Composite layers in reversed order [\#3135](https://github.com/pypeclub/OpenPype/pull/3135)
-- Nuke: fixing default settings for workfile builder loaders [\#3120](https://github.com/pypeclub/OpenPype/pull/3120)
-- Nuke: fix anatomy imageio regex default [\#3119](https://github.com/pypeclub/OpenPype/pull/3119)
-- General: Python 3 compatibility in queries [\#3112](https://github.com/pypeclub/OpenPype/pull/3112)
-- General: TemplateResult can be copied [\#3099](https://github.com/pypeclub/OpenPype/pull/3099)
-- General: Collect loaded versions skips non-existing representations [\#3095](https://github.com/pypeclub/OpenPype/pull/3095)
-- RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091)
-- Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089)
-- Maya: Collect Render fix any render cameras check [\#3088](https://github.com/pypeclub/OpenPype/pull/3088)
-- Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083)
-- Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077)
-- General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076)
-- Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068)
-- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060)
-- Fix support for Renderman in Maya [\#3006](https://github.com/pypeclub/OpenPype/pull/3006)
-
-**🔀 Refactored code**
-
-- Avalon repo removed from Jobs workflow [\#3193](https://github.com/pypeclub/OpenPype/pull/3193)
-- General: Remove remaining imports from avalon [\#3130](https://github.com/pypeclub/OpenPype/pull/3130)
-- General: Move mongo db logic and remove avalon repository [\#3066](https://github.com/pypeclub/OpenPype/pull/3066)
-- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009)
-
-**Merged pull requests:**
-
-- Harmony: message length in 21.1 [\#3257](https://github.com/pypeclub/OpenPype/pull/3257)
-- Harmony: 21.1 fix [\#3249](https://github.com/pypeclub/OpenPype/pull/3249)
-- Maya: added jpg to filter for Image Plane Loader [\#3223](https://github.com/pypeclub/OpenPype/pull/3223)
-- Webpublisher: replace space by underscore in subset names [\#3160](https://github.com/pypeclub/OpenPype/pull/3160)
-- StandalonePublisher: removed Extract Background plugins [\#3093](https://github.com/pypeclub/OpenPype/pull/3093)
-- Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078)
-- Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065)
-- SiteSync: Download all workfile inputs [\#2966](https://github.com/pypeclub/OpenPype/pull/2966)
-- Photoshop: New Publisher [\#2933](https://github.com/pypeclub/OpenPype/pull/2933)
-- Bump pillow from 9.0.0 to 9.0.1 [\#2880](https://github.com/pypeclub/OpenPype/pull/2880)
-- AfterEffects: Allow configuration of default variant via Settings [\#2856](https://github.com/pypeclub/OpenPype/pull/2856)
-
-## [3.9.8](https://github.com/pypeclub/OpenPype/tree/3.9.8) (2022-05-19)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.7...3.9.8)
-
-## [3.9.7](https://github.com/pypeclub/OpenPype/tree/3.9.7) (2022-05-11)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.6...3.9.7)
-
-## [3.9.6](https://github.com/pypeclub/OpenPype/tree/3.9.6) (2022-05-03)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.5...3.9.6)
-
-## [3.9.5](https://github.com/pypeclub/OpenPype/tree/3.9.5) (2022-04-25)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...3.9.5)
-
-## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.3...3.9.4)
-
-### 📖 Documentation
-
-- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062)
-- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035)
-- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974)
-
-**🆕 New features**
-
-- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045)
-- Flame: Flare integration preparation [\#2928](https://github.com/pypeclub/OpenPype/pull/2928)
-
-**🚀 Enhancements**
-
-- TVPaint: Added init file for worker to trigger missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053)
-- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036)
-- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008)
-
-**🐛 Bug fixes**
-
-- GitHub: Updated push-protected action in github workflow [\#3064](https://github.com/pypeclub/OpenPype/pull/3064)
-- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061)
-- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059)
-- General: Extract Review handle invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050)
-- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049)
-- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048)
-- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044)
-- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043)
-- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042)
-- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041)
-- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040)
-- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018)
-- Maya: invalid review flag on rendered AOVs [\#2915](https://github.com/pypeclub/OpenPype/pull/2915)
-
-**Merged pull requests:**
-
-- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051)
-- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987)
-
-## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.2...3.9.3)
-
-### 📖 Documentation
-
-- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999)
-- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979)
-- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951)
-- Documentation: New publisher develop docs [\#2896](https://github.com/pypeclub/OpenPype/pull/2896)
-
-**🆕 New features**
-
-- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027)
-- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992)
-- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988)
-- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978)
-- Multiverse: Initial Support [\#2908](https://github.com/pypeclub/OpenPype/pull/2908)
-
-**🚀 Enhancements**
-
-- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011)
-- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025)
-- Console Interpreter: Changed how console splitter sizes are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016)
-- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015)
-- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005)
-- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001)
-- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000)
-- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995)
-- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985)
-- General: `METADATA_KEYS` constant as `frozenset` for optimal immutable lookup [\#2980](https://github.com/pypeclub/OpenPype/pull/2980)
-- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975)
-- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967)
-- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945)
-- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943)
-- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942)
-- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937)
-- Workfiles: Open published workfiles [\#2925](https://github.com/pypeclub/OpenPype/pull/2925)
-- General: Default modules loaded dynamically [\#2923](https://github.com/pypeclub/OpenPype/pull/2923)
-- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919)
-- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916)
-- Nuke: Add no-audio Tag [\#2911](https://github.com/pypeclub/OpenPype/pull/2911)
-- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906)
-- Nuke: improving readability [\#2903](https://github.com/pypeclub/OpenPype/pull/2903)
-- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901)
-
-**🐛 Bug fixes**
-
-- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029)
-- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033)
-- Settings UI: Version column can be extended so versions are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032)
-- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028)
-- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024)
-- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023)
-- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022)
-- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017)
-- Ftrack: multiple reviewable components [\#3012](https://github.com/pypeclub/OpenPype/pull/3012)
-- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010)
-- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004)
-- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002)
-- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998)
-- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996)
-- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991)
-- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990)
-- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989)
-- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986)
-- Settings UI: Fix version completer on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981)
-- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969)
-- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965)
-- General: OIIO conversion for ffmpeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958)
-- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956)
-- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950)
-- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949)
-- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948)
-- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947)
-- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944)
-- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941)
-- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939)
-- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936)
-- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934)
-- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932)
-- General: Don't print log record on OSError [\#2926](https://github.com/pypeclub/OpenPype/pull/2926)
-- Hiero: Fix import of 'register\_event\_callback' [\#2924](https://github.com/pypeclub/OpenPype/pull/2924)
-- Flame: centos related debugging [\#2922](https://github.com/pypeclub/OpenPype/pull/2922)
-- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905)
-- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875)
-
-**🔀 Refactored code**
-
-- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935)
-- General: Move Attribute Definitions from pipeline [\#2931](https://github.com/pypeclub/OpenPype/pull/2931)
-- General: Removed silo references and terminal splash [\#2927](https://github.com/pypeclub/OpenPype/pull/2927)
-- General: Move pipeline constants to OpenPype [\#2918](https://github.com/pypeclub/OpenPype/pull/2918)
-- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914)
-- General: Move remaining plugins from avalon [\#2912](https://github.com/pypeclub/OpenPype/pull/2912)
-
-**Merged pull requests:**
-
-- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030)
-- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973)
-- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954)
-- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953)
-- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952)
-
-## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...3.9.2)
-
-## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1)
-
-**🚀 Enhancements**
-
-- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907)
-- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897)
-- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892)
-- Nuke: ExtractReviewSlate can handle more codecs and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879)
-- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869)
-
-**🐛 Bug fixes**
-
-- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904)
-- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902)
-- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899)
-- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894)
-- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891)
-- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885)
-- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884)
-- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874)
-- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826)
-- Flame Babypublisher optimization [\#2806](https://github.com/pypeclub/OpenPype/pull/2806)
-- hotfix: OIIO tool path - add extension on windows [\#2618](https://github.com/pypeclub/OpenPype/pull/2618)
-
-**🔀 Refactored code**
-
-- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889)
-- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886)
-
-## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...3.9.0)
-
-**Deprecated:**
-
-- Houdini: Remove unused code [\#2779](https://github.com/pypeclub/OpenPype/pull/2779)
-- Loader: Remove default family states for hosts from code [\#2706](https://github.com/pypeclub/OpenPype/pull/2706)
-- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845)
-
-### 📖 Documentation
-
-- Documentation: fixed broken links [\#2799](https://github.com/pypeclub/OpenPype/pull/2799)
-- Documentation: broken link fix [\#2785](https://github.com/pypeclub/OpenPype/pull/2785)
-- Documentation: link fixes [\#2772](https://github.com/pypeclub/OpenPype/pull/2772)
-- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760)
-- Various testing updates [\#2726](https://github.com/pypeclub/OpenPype/pull/2726)
-- documentation: add example to `repack-version` command [\#2669](https://github.com/pypeclub/OpenPype/pull/2669)
-- Update docusaurus [\#2639](https://github.com/pypeclub/OpenPype/pull/2639)
-- Documentation: Fixed relative links [\#2621](https://github.com/pypeclub/OpenPype/pull/2621)
-- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878)
-
-**🆕 New features**
-
-- Flame: loading clips to reels [\#2622](https://github.com/pypeclub/OpenPype/pull/2622)
-- General: Store settings by OpenPype version [\#2570](https://github.com/pypeclub/OpenPype/pull/2570)
-
-**🚀 Enhancements**
-
-- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841)
-- General: Set context environments for non host applications [\#2803](https://github.com/pypeclub/OpenPype/pull/2803)
-- Houdini: Remove duplicate ValidateOutputNode plug-in [\#2780](https://github.com/pypeclub/OpenPype/pull/2780)
-- Tray publisher: New Tray Publisher host \(beta\) [\#2778](https://github.com/pypeclub/OpenPype/pull/2778)
-- Slack: Added regex for filtering on subset names [\#2775](https://github.com/pypeclub/OpenPype/pull/2775)
-- Houdini: Implement Reset Frame Range [\#2770](https://github.com/pypeclub/OpenPype/pull/2770)
-- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758)
-- Flame: use Shot Name on segment for asset name [\#2751](https://github.com/pypeclub/OpenPype/pull/2751)
-- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746)
-- Work Files: Preserve subversion comment of current filename by default [\#2734](https://github.com/pypeclub/OpenPype/pull/2734)
-- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733)
-- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732)
-- Project Manager: Disable add task, add asset and save button when not in a project [\#2727](https://github.com/pypeclub/OpenPype/pull/2727)
-- dropbox handle big file [\#2718](https://github.com/pypeclub/OpenPype/pull/2718)
-- Fusion Move PR: Minor tweaks to Fusion integration [\#2716](https://github.com/pypeclub/OpenPype/pull/2716)
-- RoyalRender: Minor enhancements [\#2700](https://github.com/pypeclub/OpenPype/pull/2700)
-- Nuke: prerender with review knob [\#2691](https://github.com/pypeclub/OpenPype/pull/2691)
-- Maya configurable unit validator [\#2680](https://github.com/pypeclub/OpenPype/pull/2680)
-- General: Add settings for CleanUpFarm and disable the plugin by default [\#2679](https://github.com/pypeclub/OpenPype/pull/2679)
-- Project Manager: Only allow scroll wheel edits when spinbox is active [\#2678](https://github.com/pypeclub/OpenPype/pull/2678)
-- Ftrack: Sync description to assets [\#2670](https://github.com/pypeclub/OpenPype/pull/2670)
-- Houdini: Moved to OpenPype [\#2658](https://github.com/pypeclub/OpenPype/pull/2658)
-- Maya: Move implementation to OpenPype [\#2649](https://github.com/pypeclub/OpenPype/pull/2649)
-- General: FFmpeg conversion also checks attribute string length [\#2635](https://github.com/pypeclub/OpenPype/pull/2635)
-- Houdini: Load Arnold .ass procedurals into Houdini [\#2606](https://github.com/pypeclub/OpenPype/pull/2606)
-- Deadline: Simplify GlobalJobPreLoad logic [\#2605](https://github.com/pypeclub/OpenPype/pull/2605)
-- Houdini: Implement Arnold .ass standin extraction from Houdini \(also support .ass.gz\) [\#2603](https://github.com/pypeclub/OpenPype/pull/2603)
-- New Publisher: New features and preparations for new standalone publisher [\#2556](https://github.com/pypeclub/OpenPype/pull/2556)
-- Fix Maya 2022 Python 3 compatibility [\#2445](https://github.com/pypeclub/OpenPype/pull/2445)
-- TVPaint: Use new publisher exceptions in validators [\#2435](https://github.com/pypeclub/OpenPype/pull/2435)
-- Harmony: Added new style validations for New Publisher [\#2434](https://github.com/pypeclub/OpenPype/pull/2434)
-- Aftereffects: New style validations for New publisher [\#2430](https://github.com/pypeclub/OpenPype/pull/2430)
-- Farm publishing: New cleanup plugin for Maya renders on farm [\#2390](https://github.com/pypeclub/OpenPype/pull/2390)
-- General: Subset name filtering in ExtractReview outputs [\#2872](https://github.com/pypeclub/OpenPype/pull/2872)
-- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867)
-- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863)
-- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859)
-- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837)
-- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836)
-- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822)
-- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817)
-- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812)
-- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811)
-- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805)
-- Houdini: Move Houdini Save Current File to beginning of ExtractorOrder [\#2747](https://github.com/pypeclub/OpenPype/pull/2747)
-- Global: adding studio name/code to anatomy template formatting data [\#2630](https://github.com/pypeclub/OpenPype/pull/2630)
-
-**🐛 Bug fixes**
-
-- Settings UI: Search case sensitivity [\#2810](https://github.com/pypeclub/OpenPype/pull/2810)
-- resolve: fixing fusion module loading [\#2802](https://github.com/pypeclub/OpenPype/pull/2802)
-- Ftrack: Unset task ids from asset versions before tasks are removed [\#2800](https://github.com/pypeclub/OpenPype/pull/2800)
-- Slack: fail gracefully if slack exception [\#2798](https://github.com/pypeclub/OpenPype/pull/2798)
-- Flame: Fix version string in default settings [\#2783](https://github.com/pypeclub/OpenPype/pull/2783)
-- After Effects: Fix typo in name `afftereffects` -\> `aftereffects` [\#2768](https://github.com/pypeclub/OpenPype/pull/2768)
-- Houdini: Fix open last workfile [\#2767](https://github.com/pypeclub/OpenPype/pull/2767)
-- Avoid renaming udim indexes [\#2765](https://github.com/pypeclub/OpenPype/pull/2765)
-- Maya: Fix `unique_namespace` when in a namespace that is empty [\#2759](https://github.com/pypeclub/OpenPype/pull/2759)
-- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757)
-- Harmony: Rendering in Deadline didn't work on machines other than the submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754)
-- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748)
-- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745)
-- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744)
-- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741)
-- Loader UI: Multiple asset selection and underline colors fixed [\#2731](https://github.com/pypeclub/OpenPype/pull/2731)
-- General: Fix loading of unused chars in xml format [\#2729](https://github.com/pypeclub/OpenPype/pull/2729)
-- TVPaint: Set objectName with members [\#2725](https://github.com/pypeclub/OpenPype/pull/2725)
-- General: Don't use 'objectName' from loaded references [\#2715](https://github.com/pypeclub/OpenPype/pull/2715)
-- Settings: Studio Project anatomy is queried using right keys [\#2711](https://github.com/pypeclub/OpenPype/pull/2711)
-- Local Settings: Additional applications don't break UI [\#2710](https://github.com/pypeclub/OpenPype/pull/2710)
-- Maya: Remove some unused code [\#2709](https://github.com/pypeclub/OpenPype/pull/2709)
-- Houdini: Fix refactor of Houdini host move for CreateArnoldAss [\#2704](https://github.com/pypeclub/OpenPype/pull/2704)
-- LookAssigner: Fix imports after moving code to OpenPype repository [\#2701](https://github.com/pypeclub/OpenPype/pull/2701)
-- Multiple hosts: unify menu style across hosts [\#2693](https://github.com/pypeclub/OpenPype/pull/2693)
-- Maya Redshift fixes [\#2692](https://github.com/pypeclub/OpenPype/pull/2692)
-- Maya: fix fps validation popup [\#2685](https://github.com/pypeclub/OpenPype/pull/2685)
-- Houdini: Explicitly collect correct frame name even in case of single frame render when `frameStart` is provided [\#2676](https://github.com/pypeclub/OpenPype/pull/2676)
-- hiero: fix effect collector name and order [\#2673](https://github.com/pypeclub/OpenPype/pull/2673)
-- Maya: Fix menu callbacks [\#2671](https://github.com/pypeclub/OpenPype/pull/2671)
-- hiero: removing obsolete unsupported plugin [\#2667](https://github.com/pypeclub/OpenPype/pull/2667)
-- Launcher: Fix access to 'data' attribute on actions [\#2659](https://github.com/pypeclub/OpenPype/pull/2659)
-- Maya `vrscene` loader fixes [\#2633](https://github.com/pypeclub/OpenPype/pull/2633)
-- Houdini: fix usd family in loader and integrators [\#2631](https://github.com/pypeclub/OpenPype/pull/2631)
-- Maya: Add only reference node to look family container like with other families [\#2508](https://github.com/pypeclub/OpenPype/pull/2508)
-- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877)
-- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868)
-- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866)
-- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864)
-- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860)
-- WebPublisher: Video file was published with one too many frames [\#2858](https://github.com/pypeclub/OpenPype/pull/2858)
-- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857)
-- General: Fix getattr callback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855)
-- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853)
-- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852)
-- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851)
-- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847)
-- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842)
-- Blender: Fixed parameters for FBX export of the camera [\#2840](https://github.com/pypeclub/OpenPype/pull/2840)
-- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832)
-- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828)
-- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827)
-- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825)
-- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824)
-- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821)
-- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820)
-- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819)
-- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818)
-- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816)
-
-**🔀 Refactored code**
-
-- Ftrack: Moved module one hierarchy level higher [\#2792](https://github.com/pypeclub/OpenPype/pull/2792)
-- SyncServer: Moved module one hierarchy level higher [\#2791](https://github.com/pypeclub/OpenPype/pull/2791)
-- Royal render: Move module one hierarchy level higher [\#2790](https://github.com/pypeclub/OpenPype/pull/2790)
-- Deadline: Move module one hierarchy level higher [\#2789](https://github.com/pypeclub/OpenPype/pull/2789)
-- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876)
-- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854)
-- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848)
-- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846)
-- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839)
-- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829)
-- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823)
-- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766)
-
-**Merged pull requests:**
-
-- Fusion: Moved implementation into OpenPype [\#2713](https://github.com/pypeclub/OpenPype/pull/2713)
-- TVPaint: Plugin build without dependencies [\#2705](https://github.com/pypeclub/OpenPype/pull/2705)
-- Webpublisher: Photoshop create a beauty png [\#2689](https://github.com/pypeclub/OpenPype/pull/2689)
-- Ftrack: Hierarchical attributes are queried properly [\#2682](https://github.com/pypeclub/OpenPype/pull/2682)
-- Maya: Add Validate Frame Range settings [\#2661](https://github.com/pypeclub/OpenPype/pull/2661)
-- Harmony: move to Openpype [\#2657](https://github.com/pypeclub/OpenPype/pull/2657)
-- Maya: cleanup duplicate rendersetup code [\#2642](https://github.com/pypeclub/OpenPype/pull/2642)
-- Deadline: Be able to pass Mongo url to job [\#2616](https://github.com/pypeclub/OpenPype/pull/2616)
-
-## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07)
-
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.1...3.8.2)
-
-### 📖 Documentation
-
-- Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617)
Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617) - -**🚀 Enhancements** - -- TVPaint: Image loaders also work on review family [\#2638](https://github.com/pypeclub/OpenPype/pull/2638) -- General: Project backup tools [\#2629](https://github.com/pypeclub/OpenPype/pull/2629) -- nuke: adding clear button to write nodes [\#2627](https://github.com/pypeclub/OpenPype/pull/2627) -- Ftrack: Family to Asset type mapping is in settings [\#2602](https://github.com/pypeclub/OpenPype/pull/2602) -- Nuke: load color space from representation data [\#2576](https://github.com/pypeclub/OpenPype/pull/2576) - -**🐛 Bug fixes** - -- Fix pulling of cx\_freeze 6.10 [\#2628](https://github.com/pypeclub/OpenPype/pull/2628) -- Global: fix broken otio review extractor [\#2590](https://github.com/pypeclub/OpenPype/pull/2590) - -**Merged pull requests:** - -- WebPublisher: fix instance duplicates [\#2641](https://github.com/pypeclub/OpenPype/pull/2641) -- Fix - safer pulling of task name for webpublishing from PS [\#2613](https://github.com/pypeclub/OpenPype/pull/2613) - -## [3.8.1](https://github.com/pypeclub/OpenPype/tree/3.8.1) (2022-02-01) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.0...3.8.1) - -**🚀 Enhancements** - -- Webpublisher: Thumbnail extractor [\#2600](https://github.com/pypeclub/OpenPype/pull/2600) -- Loader: Allow to toggle default family filters between "include" or "exclude" filtering [\#2541](https://github.com/pypeclub/OpenPype/pull/2541) -- Launcher: Added context menu to skip opening last workfile [\#2536](https://github.com/pypeclub/OpenPype/pull/2536) -- Unreal: JSON Layout Loading support [\#2066](https://github.com/pypeclub/OpenPype/pull/2066) - -**🐛 Bug fixes** - -- Release/3.8.0 [\#2619](https://github.com/pypeclub/OpenPype/pull/2619) -- Settings: Enum does not store empty string if has single item to select [\#2615](https://github.com/pypeclub/OpenPype/pull/2615) -- switch distutils to sysconfig for `get_platform()` [\#2594](https://github.com/pypeclub/OpenPype/pull/2594) -- Fix poetry index and speedcopy update [\#2589](https://github.com/pypeclub/OpenPype/pull/2589) -- Webpublisher: Fix - subset names from processed .psd used wrong value for task [\#2586](https://github.com/pypeclub/OpenPype/pull/2586) -- `vrscene` creator Deadline webservice URL handling [\#2580](https://github.com/pypeclub/OpenPype/pull/2580) -- global: track name was failing if duplicated root word in name [\#2568](https://github.com/pypeclub/OpenPype/pull/2568) -- Validate Maya Rig produces no cycle errors [\#2484](https://github.com/pypeclub/OpenPype/pull/2484) - -**Merged pull requests:** - -- Bump pillow from 8.4.0 to 9.0.0 [\#2595](https://github.com/pypeclub/OpenPype/pull/2595) -- Webpublisher: Skip version collect [\#2591](https://github.com/pypeclub/OpenPype/pull/2591) -- build\(deps\): bump pillow from 8.4.0 to 9.0.0 [\#2523](https://github.com/pypeclub/OpenPype/pull/2523) - -## [3.8.0](https://github.com/pypeclub/OpenPype/tree/3.8.0) (2022-01-24) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.7.0...3.8.0) - -### 📖 Documentation - -- Variable in docs renamed to proper name [\#2546](https://github.com/pypeclub/OpenPype/pull/2546) - -**🆕 New features** - -- Flame: extracting segments with trans-coding [\#2547](https://github.com/pypeclub/OpenPype/pull/2547) -- Maya : V-Ray Proxy - load all ABC files via proxy [\#2544](https://github.com/pypeclub/OpenPype/pull/2544) -- Maya to Unreal: Extended
static mesh workflow [\#2537](https://github.com/pypeclub/OpenPype/pull/2537) -- Flame: collecting publishable instances [\#2519](https://github.com/pypeclub/OpenPype/pull/2519) -- Flame: create publishable clips [\#2495](https://github.com/pypeclub/OpenPype/pull/2495) -- Flame: OpenTimelineIO Export Module [\#2398](https://github.com/pypeclub/OpenPype/pull/2398) - -**🚀 Enhancements** - -- Webpublisher: Moved error at the beginning of the log [\#2559](https://github.com/pypeclub/OpenPype/pull/2559) -- Ftrack: Use ApplicationManager to get DJV path [\#2558](https://github.com/pypeclub/OpenPype/pull/2558) -- Webpublisher: Added endpoint to reprocess batch through UI [\#2555](https://github.com/pypeclub/OpenPype/pull/2555) -- Settings: PathInput strip passed string [\#2550](https://github.com/pypeclub/OpenPype/pull/2550) -- Global: Extract Review anatomy fill data with output name [\#2548](https://github.com/pypeclub/OpenPype/pull/2548) -- Cosmetics: Clean up some cosmetics / typos [\#2542](https://github.com/pypeclub/OpenPype/pull/2542) -- General: Validate if current process OpenPype version is requested version [\#2529](https://github.com/pypeclub/OpenPype/pull/2529) -- General: Be able to use anatomy data in ffmpeg output arguments [\#2525](https://github.com/pypeclub/OpenPype/pull/2525) -- Expose toggle publish plug-in settings for Maya Look Shading Engine Naming [\#2521](https://github.com/pypeclub/OpenPype/pull/2521) -- Photoshop: Move implementation to OpenPype [\#2510](https://github.com/pypeclub/OpenPype/pull/2510) -- TimersManager: Move module one hierarchy higher [\#2501](https://github.com/pypeclub/OpenPype/pull/2501) -- Slack: notifications are sent with Openpype logo and bot name [\#2499](https://github.com/pypeclub/OpenPype/pull/2499) -- Slack: Add review to notification message [\#2498](https://github.com/pypeclub/OpenPype/pull/2498) -- Ftrack: Event handlers settings [\#2496](https://github.com/pypeclub/OpenPype/pull/2496) -- Tools: Fix style and modality of errors in loader and creator [\#2489](https://github.com/pypeclub/OpenPype/pull/2489) -- Maya: Collect 'fps' animation data only for "review" instances [\#2486](https://github.com/pypeclub/OpenPype/pull/2486) -- Project Manager: Remove project button cleanup [\#2482](https://github.com/pypeclub/OpenPype/pull/2482) -- Tools: Be able to change models of tasks and assets widgets [\#2475](https://github.com/pypeclub/OpenPype/pull/2475) -- Publish pype: Reduce publish process deferring [\#2464](https://github.com/pypeclub/OpenPype/pull/2464) -- Maya: Improve speed of Collect History logic [\#2460](https://github.com/pypeclub/OpenPype/pull/2460) -- Maya: Validate Rig Controllers - fix Error: in script editor [\#2459](https://github.com/pypeclub/OpenPype/pull/2459) -- Maya: Validate NGONs simplify and speed-up [\#2458](https://github.com/pypeclub/OpenPype/pull/2458) -- Maya: Optimize Validate Locked Normals speed for dense polymeshes [\#2457](https://github.com/pypeclub/OpenPype/pull/2457) -- Maya: Refactor missing \_get\_reference\_node method [\#2455](https://github.com/pypeclub/OpenPype/pull/2455) -- Houdini: Remove broken unique name counter [\#2450](https://github.com/pypeclub/OpenPype/pull/2450) -- Maya: Improve lib.polyConstraint performance when Select tool is not the active tool context [\#2447](https://github.com/pypeclub/OpenPype/pull/2447) -- General: Validate third party before build [\#2425](https://github.com/pypeclub/OpenPype/pull/2425) -- Maya : add option to not group reference in ReferenceLoader
[\#2383](https://github.com/pypeclub/OpenPype/pull/2383) - -**๐Ÿ› Bug fixes** - -- AfterEffects: Fix - removed obsolete import [\#2577](https://github.com/pypeclub/OpenPype/pull/2577) -- General: OpenPype version updates [\#2575](https://github.com/pypeclub/OpenPype/pull/2575) -- Ftrack: Delete action revision [\#2563](https://github.com/pypeclub/OpenPype/pull/2563) -- Webpublisher: ftrack shows incorrect user names [\#2560](https://github.com/pypeclub/OpenPype/pull/2560) -- General: Do not validate version if build does not support it [\#2557](https://github.com/pypeclub/OpenPype/pull/2557) -- Webpublisher: Fixed progress reporting [\#2553](https://github.com/pypeclub/OpenPype/pull/2553) -- Fix Maya AssProxyLoader version switch [\#2551](https://github.com/pypeclub/OpenPype/pull/2551) -- General: Fix install thread in igniter [\#2549](https://github.com/pypeclub/OpenPype/pull/2549) -- Houdini: vdbcache family preserve frame numbers on publish integration + enable validate version for Houdini [\#2535](https://github.com/pypeclub/OpenPype/pull/2535) -- Maya: Fix Load VDB to V-Ray [\#2533](https://github.com/pypeclub/OpenPype/pull/2533) -- Maya: ReferenceLoader fix not unique group name error for attach to root [\#2532](https://github.com/pypeclub/OpenPype/pull/2532) -- Maya: namespaced context go back to original namespace when started from inside a namespace [\#2531](https://github.com/pypeclub/OpenPype/pull/2531) -- Fix create zip tool - path argument [\#2522](https://github.com/pypeclub/OpenPype/pull/2522) -- Maya: Fix Extract Look with space in names [\#2518](https://github.com/pypeclub/OpenPype/pull/2518) -- Fix published frame content for sequence starting with 0 [\#2513](https://github.com/pypeclub/OpenPype/pull/2513) -- Maya: reset empty string attributes correctly to "" instead of "None" [\#2506](https://github.com/pypeclub/OpenPype/pull/2506) -- Improve FusionPreLaunch hook errors [\#2505](https://github.com/pypeclub/OpenPype/pull/2505) -- General: Settings work if OpenPypeVersion is available [\#2494](https://github.com/pypeclub/OpenPype/pull/2494) -- General: PYTHONPATH may break OpenPype dependencies [\#2493](https://github.com/pypeclub/OpenPype/pull/2493) -- General: Modules import function output fix [\#2492](https://github.com/pypeclub/OpenPype/pull/2492) -- AE: fix hiding of alert window below Publish [\#2491](https://github.com/pypeclub/OpenPype/pull/2491) -- Workfiles tool: Files widget show files on first show [\#2488](https://github.com/pypeclub/OpenPype/pull/2488) -- General: Custom template paths filter fix [\#2483](https://github.com/pypeclub/OpenPype/pull/2483) -- Loader: Remove always on top flag in tray [\#2480](https://github.com/pypeclub/OpenPype/pull/2480) -- General: Anatomy does not return root envs as unicode [\#2465](https://github.com/pypeclub/OpenPype/pull/2465) -- Maya: Validate Shape Zero do not keep fixed geometry vertices selected/active after repair [\#2456](https://github.com/pypeclub/OpenPype/pull/2456) - -**Merged pull requests:** - -- AfterEffects: Move implementation to OpenPype [\#2543](https://github.com/pypeclub/OpenPype/pull/2543) -- Maya: Remove Maya Look Assigner check on startup [\#2540](https://github.com/pypeclub/OpenPype/pull/2540) -- build\(deps\): bump shelljs from 0.8.4 to 0.8.5 in /website [\#2538](https://github.com/pypeclub/OpenPype/pull/2538) -- build\(deps\): bump follow-redirects from 1.14.4 to 1.14.7 in /website [\#2534](https://github.com/pypeclub/OpenPype/pull/2534) -- Nuke: Merge avalon's implementation into OpenPype 
[\#2514](https://github.com/pypeclub/OpenPype/pull/2514) -- Maya: Vray fix proxies look assignment [\#2392](https://github.com/pypeclub/OpenPype/pull/2392) -- Bump algoliasearch-helper from 3.4.4 to 3.6.2 in /website [\#2297](https://github.com/pypeclub/OpenPype/pull/2297) - -## [3.7.0](https://github.com/pypeclub/OpenPype/tree/3.7.0) (2022-01-04) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.4...3.7.0) - -**Deprecated:** - -- General: Default modules hierarchy n2 [\#2368](https://github.com/pypeclub/OpenPype/pull/2368) - -### ๐Ÿ“– Documentation - -- docs\[website\]: Add Ellipse Studio \(logo\) as an OpenPype contributor [\#2324](https://github.com/pypeclub/OpenPype/pull/2324) - -**๐Ÿ†• New features** - -- Settings UI use OpenPype styles [\#2296](https://github.com/pypeclub/OpenPype/pull/2296) -- Store typed version dependencies for workfiles [\#2192](https://github.com/pypeclub/OpenPype/pull/2192) -- OpenPypeV3: add key task type, task shortname and user to path templating construction [\#2157](https://github.com/pypeclub/OpenPype/pull/2157) -- Nuke: Alembic model workflow [\#2140](https://github.com/pypeclub/OpenPype/pull/2140) -- TVPaint: Load workfile from published. [\#1980](https://github.com/pypeclub/OpenPype/pull/1980) - -**๐Ÿš€ Enhancements** - -- General: Workdir extra folders [\#2462](https://github.com/pypeclub/OpenPype/pull/2462) -- Photoshop: New style validations for New publisher [\#2429](https://github.com/pypeclub/OpenPype/pull/2429) -- General: Environment variables groups [\#2424](https://github.com/pypeclub/OpenPype/pull/2424) -- Unreal: Dynamic menu created in Python [\#2422](https://github.com/pypeclub/OpenPype/pull/2422) -- Settings UI: Hyperlinks to settings [\#2420](https://github.com/pypeclub/OpenPype/pull/2420) -- Modules: JobQueue module moved one hierarchy level higher [\#2419](https://github.com/pypeclub/OpenPype/pull/2419) -- TimersManager: Start timer post launch hook [\#2418](https://github.com/pypeclub/OpenPype/pull/2418) -- General: Run applications as separate processes under linux [\#2408](https://github.com/pypeclub/OpenPype/pull/2408) -- Ftrack: Check existence of object type on recreation [\#2404](https://github.com/pypeclub/OpenPype/pull/2404) -- Enhancement: Global cleanup plugin that explicitly remove paths from context [\#2402](https://github.com/pypeclub/OpenPype/pull/2402) -- General: MongoDB ability to specify replica set groups [\#2401](https://github.com/pypeclub/OpenPype/pull/2401) -- Flame: moving `utility_scripts` to api folder also with `scripts` [\#2385](https://github.com/pypeclub/OpenPype/pull/2385) -- Centos 7 dependency compatibility [\#2384](https://github.com/pypeclub/OpenPype/pull/2384) -- Enhancement: Settings: Use project settings values from another project [\#2382](https://github.com/pypeclub/OpenPype/pull/2382) -- Blender 3: Support auto install for new blender version [\#2377](https://github.com/pypeclub/OpenPype/pull/2377) -- Maya add render image path to settings [\#2375](https://github.com/pypeclub/OpenPype/pull/2375) -- Settings: Webpublisher in hosts enum [\#2367](https://github.com/pypeclub/OpenPype/pull/2367) -- Hiero: python3 compatibility [\#2365](https://github.com/pypeclub/OpenPype/pull/2365) -- Burnins: Be able recognize mxf OPAtom format [\#2361](https://github.com/pypeclub/OpenPype/pull/2361) -- Maya: Add is\_static\_image\_plane and is\_in\_all\_views option in imagePlaneLoader [\#2356](https://github.com/pypeclub/OpenPype/pull/2356) -- Local settings: Copyable studio paths 
[\#2349](https://github.com/pypeclub/OpenPype/pull/2349) -- Assets Widget: Clear model on project change [\#2345](https://github.com/pypeclub/OpenPype/pull/2345) -- General: OpenPype default modules hierarchy [\#2338](https://github.com/pypeclub/OpenPype/pull/2338) -- TVPaint: Move implementation to OpenPype [\#2336](https://github.com/pypeclub/OpenPype/pull/2336) -- General: FFprobe error exception contain original error message [\#2328](https://github.com/pypeclub/OpenPype/pull/2328) -- Resolve: Add experimental button to menu [\#2325](https://github.com/pypeclub/OpenPype/pull/2325) -- Hiero: Add experimental tools action [\#2323](https://github.com/pypeclub/OpenPype/pull/2323) -- Input links: Cleanup and unification of differences [\#2322](https://github.com/pypeclub/OpenPype/pull/2322) -- General: Don't validate vendor bin with executing them [\#2317](https://github.com/pypeclub/OpenPype/pull/2317) -- General: Multilayer EXRs support [\#2315](https://github.com/pypeclub/OpenPype/pull/2315) -- General: Run process log stderr as info log level [\#2309](https://github.com/pypeclub/OpenPype/pull/2309) -- General: Reduce vendor imports [\#2305](https://github.com/pypeclub/OpenPype/pull/2305) -- Tools: Cleanup of unused classes [\#2304](https://github.com/pypeclub/OpenPype/pull/2304) -- Project Manager: Added ability to delete project [\#2298](https://github.com/pypeclub/OpenPype/pull/2298) -- Ftrack: Synchronize input links [\#2287](https://github.com/pypeclub/OpenPype/pull/2287) -- StandalonePublisher: Remove unused plugin ExtractHarmonyZip [\#2277](https://github.com/pypeclub/OpenPype/pull/2277) -- Ftrack: Support multiple reviews [\#2271](https://github.com/pypeclub/OpenPype/pull/2271) -- Ftrack: Remove unused clean component plugin [\#2269](https://github.com/pypeclub/OpenPype/pull/2269) -- Royal Render: Support for rr channels in separate dirs [\#2268](https://github.com/pypeclub/OpenPype/pull/2268) -- Houdini: Add experimental tools action [\#2267](https://github.com/pypeclub/OpenPype/pull/2267) -- Nuke: extract baked review videos presets [\#2248](https://github.com/pypeclub/OpenPype/pull/2248) -- TVPaint: Workers rendering [\#2209](https://github.com/pypeclub/OpenPype/pull/2209) -- OpenPypeV3: Add key parent asset to path templating construction [\#2186](https://github.com/pypeclub/OpenPype/pull/2186) - -**๐Ÿ› Bug fixes** - -- TVPaint: Create render layer dialog is in front [\#2471](https://github.com/pypeclub/OpenPype/pull/2471) -- Short Pyblish plugin path [\#2428](https://github.com/pypeclub/OpenPype/pull/2428) -- PS: Introduced settings for invalid characters to use in ValidateNaming plugin [\#2417](https://github.com/pypeclub/OpenPype/pull/2417) -- Settings UI: Breadcrumbs path does not create new entities [\#2416](https://github.com/pypeclub/OpenPype/pull/2416) -- AfterEffects: Variant 2022 is in defaults but missing in schemas [\#2412](https://github.com/pypeclub/OpenPype/pull/2412) -- Nuke: baking representations was not additive [\#2406](https://github.com/pypeclub/OpenPype/pull/2406) -- General: Fix access to environments from default settings [\#2403](https://github.com/pypeclub/OpenPype/pull/2403) -- Fix: Placeholder Input color set fix [\#2399](https://github.com/pypeclub/OpenPype/pull/2399) -- Settings: Fix state change of wrapper label [\#2396](https://github.com/pypeclub/OpenPype/pull/2396) -- Flame: fix ftrack publisher [\#2381](https://github.com/pypeclub/OpenPype/pull/2381) -- hiero: solve custom ocio path [\#2379](https://github.com/pypeclub/OpenPype/pull/2379) 
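A note on the subprocess-related entries above, such as "FFprobe error exception contain original error message" [\#2328](https://github.com/pypeclub/OpenPype/pull/2328), and the ffprobe argument-list fix [\#2281](https://github.com/pypeclub/OpenPype/pull/2281) just below: both revolve around the same pattern of invoking ffprobe with an argument list (so paths with spaces need no shell quoting) and surfacing the tool's own stderr when it fails. A minimal sketch of that pattern follows; it uses only the standard library, and `ffprobe_streams` is an illustrative helper name, not OpenPype's actual API:

```python
import json
import subprocess


def ffprobe_streams(path):
    """Return stream metadata for a media file via ffprobe.

    Illustrative sketch only. Assumes "ffprobe" is on PATH. The argv
    is a list, not a shell string, so paths containing spaces need no
    quoting and no shell is involved.
    """
    args = [
        "ffprobe",
        "-v", "error",
        "-print_format", "json",
        "-show_streams",
        path,
    ]
    proc = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out, err = proc.communicate()
    if proc.returncode != 0:
        # Surface ffprobe's own error text instead of raising a bare
        # "command failed" message.
        raise RuntimeError(
            "ffprobe failed for {}: {}".format(
                path, err.decode("utf-8", "replace").strip()
            )
        )
    return json.loads(out)["streams"]
```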
-- hiero: fix workio and flatten [\#2378](https://github.com/pypeclub/OpenPype/pull/2378) -- Nuke: fixing menu re-drawing during context change [\#2374](https://github.com/pypeclub/OpenPype/pull/2374) -- Webpublisher: Fix assignment of families of TVpaint instances [\#2373](https://github.com/pypeclub/OpenPype/pull/2373) -- Nuke: fixing node name based on switched asset name [\#2369](https://github.com/pypeclub/OpenPype/pull/2369) -- JobQueue: Fix loading of settings [\#2362](https://github.com/pypeclub/OpenPype/pull/2362) -- Tools: Placeholder color [\#2359](https://github.com/pypeclub/OpenPype/pull/2359) -- Launcher: Minimize button on MacOs [\#2355](https://github.com/pypeclub/OpenPype/pull/2355) -- StandalonePublisher: Fix import of constant [\#2354](https://github.com/pypeclub/OpenPype/pull/2354) -- Houdini: Fix HDA creation [\#2350](https://github.com/pypeclub/OpenPype/pull/2350) -- Adobe products show issue [\#2347](https://github.com/pypeclub/OpenPype/pull/2347) -- Maya Look Assigner: Fix Python 3 compatibility [\#2343](https://github.com/pypeclub/OpenPype/pull/2343) -- Remove wrongly used host for hook [\#2342](https://github.com/pypeclub/OpenPype/pull/2342) -- Tools: Use Qt context on tools show [\#2340](https://github.com/pypeclub/OpenPype/pull/2340) -- Flame: Fix default argument value in custom dictionary [\#2339](https://github.com/pypeclub/OpenPype/pull/2339) -- Timers Manager: Disable auto stop timer on linux platform [\#2334](https://github.com/pypeclub/OpenPype/pull/2334) -- nuke: bake preset single input exception [\#2331](https://github.com/pypeclub/OpenPype/pull/2331) -- Hiero: fixing multiple templates at a hierarchy parent [\#2330](https://github.com/pypeclub/OpenPype/pull/2330) -- Fix - provider icons are pulled from a folder [\#2326](https://github.com/pypeclub/OpenPype/pull/2326) -- InputLinks: Typo in "inputLinks" key [\#2314](https://github.com/pypeclub/OpenPype/pull/2314) -- Deadline timeout and logging [\#2312](https://github.com/pypeclub/OpenPype/pull/2312) -- nuke: do not multiply representation on class method [\#2311](https://github.com/pypeclub/OpenPype/pull/2311) -- Workfiles tool: Fix task formatting [\#2306](https://github.com/pypeclub/OpenPype/pull/2306) -- Delivery: Fix delivery paths created on windows [\#2302](https://github.com/pypeclub/OpenPype/pull/2302) -- Maya: Deadline - fix limit groups [\#2295](https://github.com/pypeclub/OpenPype/pull/2295) -- Royal Render: Fix plugin order and OpenPype auto-detection [\#2291](https://github.com/pypeclub/OpenPype/pull/2291) -- New Publisher: Fix mapping of indexes [\#2285](https://github.com/pypeclub/OpenPype/pull/2285) -- Alternate site for site sync doesn't work for sequences [\#2284](https://github.com/pypeclub/OpenPype/pull/2284) -- FFmpeg: Execute ffprobe using list of arguments instead of string command [\#2281](https://github.com/pypeclub/OpenPype/pull/2281) -- Nuke: Anatomy fill data use task as dictionary [\#2278](https://github.com/pypeclub/OpenPype/pull/2278) -- Bug: fix variable name \_asset\_id in workfiles application [\#2274](https://github.com/pypeclub/OpenPype/pull/2274) -- Version handling fixes [\#2272](https://github.com/pypeclub/OpenPype/pull/2272) - -**Merged pull requests:** - -- Maya: Replaced PATH usage with vendored oiio path for maketx utility [\#2405](https://github.com/pypeclub/OpenPype/pull/2405) -- \[Fix\]\[MAYA\] Handle message type attribute within CollectLook [\#2394](https://github.com/pypeclub/OpenPype/pull/2394) -- Add validator to check correct version of extension for
PS and AE [\#2387](https://github.com/pypeclub/OpenPype/pull/2387) -- Maya: configurable model top level validation [\#2321](https://github.com/pypeclub/OpenPype/pull/2321) -- Create test publish class for After Effects [\#2270](https://github.com/pypeclub/OpenPype/pull/2270) - -## [3.6.4](https://github.com/pypeclub/OpenPype/tree/3.6.4) (2021-11-23) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.3...3.6.4) - -**๐Ÿ› Bug fixes** - -- Nuke: inventory update removes all loaded read nodes [\#2294](https://github.com/pypeclub/OpenPype/pull/2294) - -## [3.6.3](https://github.com/pypeclub/OpenPype/tree/3.6.3) (2021-11-19) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.2...3.6.3) - -**๐Ÿ› Bug fixes** - -- Deadline: Fix publish targets [\#2280](https://github.com/pypeclub/OpenPype/pull/2280) - -## [3.6.2](https://github.com/pypeclub/OpenPype/tree/3.6.2) (2021-11-18) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.1...3.6.2) - -**๐Ÿš€ Enhancements** - -- Tools: Assets widget [\#2265](https://github.com/pypeclub/OpenPype/pull/2265) -- SceneInventory: Choose loader in asset switcher [\#2262](https://github.com/pypeclub/OpenPype/pull/2262) -- Style: New fonts in OpenPype style [\#2256](https://github.com/pypeclub/OpenPype/pull/2256) -- Tools: SceneInventory in OpenPype [\#2255](https://github.com/pypeclub/OpenPype/pull/2255) -- Tools: Tasks widget [\#2251](https://github.com/pypeclub/OpenPype/pull/2251) -- Tools: Creator in OpenPype [\#2244](https://github.com/pypeclub/OpenPype/pull/2244) -- Added endpoint for configured extensions [\#2221](https://github.com/pypeclub/OpenPype/pull/2221) - -**๐Ÿ› Bug fixes** - -- Tools: Parenting of tools in Nuke and Hiero [\#2266](https://github.com/pypeclub/OpenPype/pull/2266) -- limiting validator to specific editorial hosts [\#2264](https://github.com/pypeclub/OpenPype/pull/2264) -- Tools: Select Context dialog attribute fix [\#2261](https://github.com/pypeclub/OpenPype/pull/2261) -- Maya: Render publishing fails on linux [\#2260](https://github.com/pypeclub/OpenPype/pull/2260) -- LookAssigner: Fix tool reopen [\#2259](https://github.com/pypeclub/OpenPype/pull/2259) -- Standalone: editorial not publishing thumbnails on all subsets [\#2258](https://github.com/pypeclub/OpenPype/pull/2258) -- Burnins: Support mxf metadata [\#2247](https://github.com/pypeclub/OpenPype/pull/2247) -- Maya: Support for configurable AOV separator characters [\#2197](https://github.com/pypeclub/OpenPype/pull/2197) -- Maya: texture colorspace modes in looks [\#2195](https://github.com/pypeclub/OpenPype/pull/2195) - -## [3.6.1](https://github.com/pypeclub/OpenPype/tree/3.6.1) (2021-11-16) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.0...3.6.1) - -**๐Ÿ› Bug fixes** - -- Loader doesn't allow changing of version before loading [\#2254](https://github.com/pypeclub/OpenPype/pull/2254) - -## [3.6.0](https://github.com/pypeclub/OpenPype/tree/3.6.0) (2021-11-15) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.5.0...3.6.0) - -### ๐Ÿ“– Documentation - -- Add alternative sites for Site Sync [\#2206](https://github.com/pypeclub/OpenPype/pull/2206) -- Add command line way of running site sync server [\#2188](https://github.com/pypeclub/OpenPype/pull/2188) - -**๐Ÿ†• New features** - -- Add validate active site button to sync queue on a project [\#2176](https://github.com/pypeclub/OpenPype/pull/2176) -- Maya : Colorspace configuration [\#2170](https://github.com/pypeclub/OpenPype/pull/2170) -- 
Blender: Added support for audio [\#2168](https://github.com/pypeclub/OpenPype/pull/2168) -- Flame: a host basic integration [\#2165](https://github.com/pypeclub/OpenPype/pull/2165) -- Houdini: simple HDA workflow [\#2072](https://github.com/pypeclub/OpenPype/pull/2072) -- Basic Royal Render Integration โœจ [\#2061](https://github.com/pypeclub/OpenPype/pull/2061) -- Camera handling between Blender and Unreal [\#1988](https://github.com/pypeclub/OpenPype/pull/1988) -- switch PyQt5 for PySide2 [\#1744](https://github.com/pypeclub/OpenPype/pull/1744) - -**๐Ÿš€ Enhancements** - -- Tools: Subset manager in OpenPype [\#2243](https://github.com/pypeclub/OpenPype/pull/2243) -- General: Skip module directories without init file [\#2239](https://github.com/pypeclub/OpenPype/pull/2239) -- General: Static interfaces [\#2238](https://github.com/pypeclub/OpenPype/pull/2238) -- Style: Fix transparent image in style [\#2235](https://github.com/pypeclub/OpenPype/pull/2235) -- Add a "following workfile versioning" option on publish [\#2225](https://github.com/pypeclub/OpenPype/pull/2225) -- Modules: Module can add cli commands [\#2224](https://github.com/pypeclub/OpenPype/pull/2224) -- Webpublisher: Separate webpublisher logic [\#2222](https://github.com/pypeclub/OpenPype/pull/2222) -- Add both side availability on Site Sync sites to Loader [\#2220](https://github.com/pypeclub/OpenPype/pull/2220) -- Tools: Center loader and library loader on show [\#2219](https://github.com/pypeclub/OpenPype/pull/2219) -- Maya : Validate shape zero [\#2212](https://github.com/pypeclub/OpenPype/pull/2212) -- Maya : validate unique names [\#2211](https://github.com/pypeclub/OpenPype/pull/2211) -- Tools: OpenPype stylesheet in workfiles tool [\#2208](https://github.com/pypeclub/OpenPype/pull/2208) -- Ftrack: Replace Queue with deque in event handlers logic [\#2204](https://github.com/pypeclub/OpenPype/pull/2204) -- Tools: New select context dialog [\#2200](https://github.com/pypeclub/OpenPype/pull/2200) -- Maya : Validate mesh ngons [\#2199](https://github.com/pypeclub/OpenPype/pull/2199) -- Dirmap in Nuke [\#2198](https://github.com/pypeclub/OpenPype/pull/2198) -- Delivery: Check 'frame' key in template for sequence delivery [\#2196](https://github.com/pypeclub/OpenPype/pull/2196) -- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193) -- Usage of tools code [\#2185](https://github.com/pypeclub/OpenPype/pull/2185) -- Settings: Dictionary based on project roots [\#2184](https://github.com/pypeclub/OpenPype/pull/2184) -- Subset name: Be able to pass asset document to get subset name [\#2179](https://github.com/pypeclub/OpenPype/pull/2179) -- Tools: Experimental tools [\#2167](https://github.com/pypeclub/OpenPype/pull/2167) -- Loader: Refactor and use OpenPype stylesheets [\#2166](https://github.com/pypeclub/OpenPype/pull/2166) -- Add loader for linked smart objects in photoshop [\#2149](https://github.com/pypeclub/OpenPype/pull/2149) -- Burnins: DNxHD profiles handling [\#2142](https://github.com/pypeclub/OpenPype/pull/2142) -- Tools: Single access point for host tools [\#2139](https://github.com/pypeclub/OpenPype/pull/2139) - -**๐Ÿ› Bug fixes** - -- Ftrack: Sync project ftrack id cache issue [\#2250](https://github.com/pypeclub/OpenPype/pull/2250) -- Ftrack: Session creation and Prepare project [\#2245](https://github.com/pypeclub/OpenPype/pull/2245) -- Added queue for studio processing in PS [\#2237](https://github.com/pypeclub/OpenPype/pull/2237) -- Python 2: Unicode to 
string conversion [\#2236](https://github.com/pypeclub/OpenPype/pull/2236) -- Fix - enum for color coding in PS [\#2234](https://github.com/pypeclub/OpenPype/pull/2234) -- Pyblish Tool: Fix targets handling [\#2232](https://github.com/pypeclub/OpenPype/pull/2232) -- Ftrack: Base event fix of 'get\_project\_from\_entity' method [\#2214](https://github.com/pypeclub/OpenPype/pull/2214) -- Maya : multiple subsets review broken [\#2210](https://github.com/pypeclub/OpenPype/pull/2210) -- Fix - different command used for Linux and Mac OS [\#2207](https://github.com/pypeclub/OpenPype/pull/2207) -- Tools: Workfiles tool don't use avalon widgets [\#2205](https://github.com/pypeclub/OpenPype/pull/2205) -- Ftrack: Fill missing ftrack id on mongo project [\#2203](https://github.com/pypeclub/OpenPype/pull/2203) -- Project Manager: Fix copying of tasks [\#2191](https://github.com/pypeclub/OpenPype/pull/2191) -- StandalonePublisher: Source validator don't expect representations [\#2190](https://github.com/pypeclub/OpenPype/pull/2190) -- Blender: Fix trying to pack an image when the shader node has no texture [\#2183](https://github.com/pypeclub/OpenPype/pull/2183) -- Maya: review viewport settings [\#2177](https://github.com/pypeclub/OpenPype/pull/2177) -- MacOS: Launching of applications may cause Permissions error [\#2175](https://github.com/pypeclub/OpenPype/pull/2175) -- Maya: Aspect ratio [\#2174](https://github.com/pypeclub/OpenPype/pull/2174) -- Blender: Fix 'Deselect All' with object not in 'Object Mode' [\#2163](https://github.com/pypeclub/OpenPype/pull/2163) -- Tools: Stylesheets are applied after tool show [\#2161](https://github.com/pypeclub/OpenPype/pull/2161) -- Maya: Collect render - fix UNC path support ๐Ÿ› [\#2158](https://github.com/pypeclub/OpenPype/pull/2158) -- Maya: Fix hotbox broken by scriptsmenu [\#2151](https://github.com/pypeclub/OpenPype/pull/2151) -- Ftrack: Ignore save warnings exception in Prepare project action [\#2150](https://github.com/pypeclub/OpenPype/pull/2150) -- Loader thumbnails with smooth edges [\#2147](https://github.com/pypeclub/OpenPype/pull/2147) -- Added validator for source files for Standalone Publisher [\#2138](https://github.com/pypeclub/OpenPype/pull/2138) - -**Merged pull requests:** - -- Bump pillow from 8.2.0 to 8.3.2 [\#2162](https://github.com/pypeclub/OpenPype/pull/2162) -- Bump axios from 0.21.1 to 0.21.4 in /website [\#2059](https://github.com/pypeclub/OpenPype/pull/2059) - -## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.1...3.5.0) - -**Deprecated:** - -- Maya: Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106) - -**๐Ÿ†• New features** - -- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131) -- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124) -- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114) -- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091) -- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073) -- Maya: Validate setdress top group [\#2068](https://github.com/pypeclub/OpenPype/pull/2068) -- Maya: Enable publishing render attrib sets \(e.g. 
V-Ray Displacement\) with model [\#1955](https://github.com/pypeclub/OpenPype/pull/1955) - -**๐Ÿš€ Enhancements** - -- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137) -- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132) -- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128) -- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104) -- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093) -- Added choosing different dirmap mapping if workfile synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088) -- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084) -- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080) -- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079) -- Global: add global validators to settings [\#2078](https://github.com/pypeclub/OpenPype/pull/2078) -- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070) -- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069) -- Nuke: Adding `still` image family workflow [\#2064](https://github.com/pypeclub/OpenPype/pull/2064) -- Maya: validate authorized loaded plugins [\#2062](https://github.com/pypeclub/OpenPype/pull/2062) -- Tools: add support for pyenv on windows [\#2051](https://github.com/pypeclub/OpenPype/pull/2051) -- SyncServer: Dropbox Provider [\#1979](https://github.com/pypeclub/OpenPype/pull/1979) -- Burnin: Get data from context with defined keys. [\#1897](https://github.com/pypeclub/OpenPype/pull/1897) -- Timers manager: Get task time [\#1896](https://github.com/pypeclub/OpenPype/pull/1896) -- TVPaint: Option to stop timer on application exit. [\#1887](https://github.com/pypeclub/OpenPype/pull/1887) - -**๐Ÿ› Bug fixes** - -- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130) -- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129) -- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120) -- Hiero: publishing effect first time makes wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115) -- Add startup script for Houdini Core. 
[\#2110](https://github.com/pypeclub/OpenPype/pull/2110) -- TVPaint: Behavior name of loop also accept repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109) -- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103) -- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101) -- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100) -- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097) -- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096) -- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095) -- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087) -- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085) -- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083) -- Deadline: Collect deadline server does not check existence of deadline key [\#2082](https://github.com/pypeclub/OpenPype/pull/2082) -- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081) -- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077) -- Maya: Fix multi-camera renders [\#2065](https://github.com/pypeclub/OpenPype/pull/2065) -- Fix Sync Queue when project disabled [\#2063](https://github.com/pypeclub/OpenPype/pull/2063) - -**Merged pull requests:** - -- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086) - -## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.0...3.4.1) - -**🆕 New features** - -- Settings: Flag project as deactivated and hide from tools' view [\#2008](https://github.com/pypeclub/OpenPype/pull/2008) - -**🚀 Enhancements** - -- General: Startup validations [\#2054](https://github.com/pypeclub/OpenPype/pull/2054) -- Nuke: proxy mode validator [\#2052](https://github.com/pypeclub/OpenPype/pull/2052) -- Ftrack: Removed ftrack interface [\#2049](https://github.com/pypeclub/OpenPype/pull/2049) -- Settings UI: Deferred set value on entity [\#2044](https://github.com/pypeclub/OpenPype/pull/2044) -- Loader: Families filtering [\#2043](https://github.com/pypeclub/OpenPype/pull/2043) -- Settings UI: Project view enhancements [\#2042](https://github.com/pypeclub/OpenPype/pull/2042) -- Settings for Nuke IncrementScriptVersion [\#2039](https://github.com/pypeclub/OpenPype/pull/2039) -- Loader & Library loader: Use tools from OpenPype [\#2038](https://github.com/pypeclub/OpenPype/pull/2038) -- Adding predefined project folders creation in PM [\#2030](https://github.com/pypeclub/OpenPype/pull/2030) -- WebserverModule: Removed interface of webserver module [\#2028](https://github.com/pypeclub/OpenPype/pull/2028) -- TimersManager: Removed interface of timers manager [\#2024](https://github.com/pypeclub/OpenPype/pull/2024) -- Feature Maya import asset from scene inventory [\#2018](https://github.com/pypeclub/OpenPype/pull/2018) - -**🐛 Bug fixes** - -- Timers manager: Typo fix [\#2058](https://github.com/pypeclub/OpenPype/pull/2058) -- Hiero: Editorial fixes [\#2057](https://github.com/pypeclub/OpenPype/pull/2057) -- Differentiate jpg
sequences from thumbnail [\#2056](https://github.com/pypeclub/OpenPype/pull/2056) -- FFmpeg: Split command to list does not work [\#2046](https://github.com/pypeclub/OpenPype/pull/2046) -- Removed shell flag in subprocess call [\#2045](https://github.com/pypeclub/OpenPype/pull/2045) - -**Merged pull requests:** - -- Bump prismjs from 1.24.0 to 1.25.0 in /website [\#2050](https://github.com/pypeclub/OpenPype/pull/2050) - -## [3.4.0](https://github.com/pypeclub/OpenPype/tree/3.4.0) (2021-09-17) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...3.4.0) - -### 📖 Documentation - -- Documentation: Ftrack launch arguments update [\#2014](https://github.com/pypeclub/OpenPype/pull/2014) -- Nuke Quick Start / Tutorial [\#1952](https://github.com/pypeclub/OpenPype/pull/1952) -- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821) - -**🆕 New features** - -- Nuke: Compatibility with Nuke 13 [\#2003](https://github.com/pypeclub/OpenPype/pull/2003) -- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947) -- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876) -- Blender: Improved assets handling [\#1615](https://github.com/pypeclub/OpenPype/pull/1615) - -**🚀 Enhancements** - -- Added possibility to configure of synchronization of workfile version… [\#2041](https://github.com/pypeclub/OpenPype/pull/2041) -- General: Task types in profiles [\#2036](https://github.com/pypeclub/OpenPype/pull/2036) -- Console interpreter: Handle invalid sizes on initialization [\#2022](https://github.com/pypeclub/OpenPype/pull/2022) -- Ftrack: Show OpenPype versions in event server status [\#2019](https://github.com/pypeclub/OpenPype/pull/2019) -- General: Staging icon [\#2017](https://github.com/pypeclub/OpenPype/pull/2017) -- Ftrack: Sync to avalon actions have jobs [\#2015](https://github.com/pypeclub/OpenPype/pull/2015) -- Modules: Connect method is not required [\#2009](https://github.com/pypeclub/OpenPype/pull/2009) -- Settings UI: Number with configurable steps [\#2001](https://github.com/pypeclub/OpenPype/pull/2001) -- Moving project folder structure creation out of ftrack module \#1989 [\#1996](https://github.com/pypeclub/OpenPype/pull/1996) -- Configurable items for providers without Settings [\#1987](https://github.com/pypeclub/OpenPype/pull/1987) -- Global: Example addons [\#1986](https://github.com/pypeclub/OpenPype/pull/1986) -- Standalone Publisher: Extract harmony zip handle workfile template [\#1982](https://github.com/pypeclub/OpenPype/pull/1982) -- Settings UI: Number sliders [\#1978](https://github.com/pypeclub/OpenPype/pull/1978) -- Workfiles: Support more workfile templates [\#1966](https://github.com/pypeclub/OpenPype/pull/1966) -- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964) -- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963) -- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962) -- Global: Settings defined by Addons/Modules [\#1959](https://github.com/pypeclub/OpenPype/pull/1959) -- CI: change release numbering triggers [\#1954](https://github.com/pypeclub/OpenPype/pull/1954) -- Global: Avalon Host name collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949) -- Global: Define hosts in CollectSceneVersion
[\#1948](https://github.com/pypeclub/OpenPype/pull/1948) -- Add face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942) -- OpenPype: Add version validation and `--headless` mode and update progress 🔄 [\#1939](https://github.com/pypeclub/OpenPype/pull/1939) -- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915) -- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910) -- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888) -- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872) - -**🐛 Bug fixes** - -- Workfiles tool: Task selection [\#2040](https://github.com/pypeclub/OpenPype/pull/2040) -- Ftrack: Delete old versions missing settings key [\#2037](https://github.com/pypeclub/OpenPype/pull/2037) -- Nuke: typo on a button [\#2034](https://github.com/pypeclub/OpenPype/pull/2034) -- Hiero: Fix "none" named tags [\#2033](https://github.com/pypeclub/OpenPype/pull/2033) -- FFmpeg: Subprocess arguments as list [\#2032](https://github.com/pypeclub/OpenPype/pull/2032) -- General: Fix Python 2 breaking line [\#2016](https://github.com/pypeclub/OpenPype/pull/2016) -- Bugfix/webpublisher task type [\#2006](https://github.com/pypeclub/OpenPype/pull/2006) -- Nuke thumbnails generated from middle of the sequence [\#1992](https://github.com/pypeclub/OpenPype/pull/1992) -- Nuke: last version from path gets correct version [\#1990](https://github.com/pypeclub/OpenPype/pull/1990) -- nuke, resolve, hiero: precollector order less than 0.5 [\#1984](https://github.com/pypeclub/OpenPype/pull/1984) -- Last workfile with multiple work templates [\#1981](https://github.com/pypeclub/OpenPype/pull/1981) -- Collectors order [\#1977](https://github.com/pypeclub/OpenPype/pull/1977) -- Stop timer was within validator order range.
[\#1975](https://github.com/pypeclub/OpenPype/pull/1975) -- Ftrack: arrow submodule has https url source [\#1974](https://github.com/pypeclub/OpenPype/pull/1974) -- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972) -- Deadline: Houdini plugins in different hierarchy [\#1970](https://github.com/pypeclub/OpenPype/pull/1970) -- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967) -- Global: ExtractJpeg can handle filepaths with spaces [\#1961](https://github.com/pypeclub/OpenPype/pull/1961) -- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960) - -**Merged pull requests:** - -- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958) -- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933) - -## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.0...3.3.1) - -**๐Ÿ› Bug fixes** - -- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946) -- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945) -- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941) -- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928) - -## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...3.3.0) - -### ๐Ÿ“– Documentation - -- Standalone Publish of textures family [\#1834](https://github.com/pypeclub/OpenPype/pull/1834) - -**๐Ÿ†• New features** - -- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932) -- Maya: Scene patching ๐Ÿฉนon submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923) -- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901) - -**๐Ÿš€ Enhancements** - -- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940) -- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927) -- Check for missing โœจ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925) -- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920) -- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919) -- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911) -- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900) -- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899) -- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898) -- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892) -- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891) -- Expose stop timer through rest api. [\#1886](https://github.com/pypeclub/OpenPype/pull/1886) -- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885) -- Allow Multiple Notes to run on tasks. 
[\#1882](https://github.com/pypeclub/OpenPype/pull/1882) -- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869) -- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868) -- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867) -- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865) -- Anatomy schema validation [\#1864](https://github.com/pypeclub/OpenPype/pull/1864) -- Ftrack prepare project structure [\#1861](https://github.com/pypeclub/OpenPype/pull/1861) -- Maya: support for configurable `dirmap` 🗺️ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859) -- Independent general environments [\#1853](https://github.com/pypeclub/OpenPype/pull/1853) -- TVPaint Start Frame [\#1844](https://github.com/pypeclub/OpenPype/pull/1844) -- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843) -- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838) -- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829) -- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823) -- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819) -- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815) -- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797) -- Maya: Shader name validation [\#1762](https://github.com/pypeclub/OpenPype/pull/1762) - -**🐛 Bug fixes** - -- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935) -- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930) -- Maya: Validate Model Name - repair accident deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929) -- Nuke: submit to farm failed due `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926) -- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922) -- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917) -- Nuke: update video file crashing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916) -- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914) -- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906) -- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904) -- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903) -- Burnins: Use input's bitrate in h264 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902) -- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893) -- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890) -- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889) -- Normalize path returned from Workfiles.
[\#1880](https://github.com/pypeclub/OpenPype/pull/1880) -- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862) -- imageio: fix grouping [\#1856](https://github.com/pypeclub/OpenPype/pull/1856) -- Maya: don't add reference members as connections to the container set 📦 [\#1855](https://github.com/pypeclub/OpenPype/pull/1855) -- publisher: missing version in subset prop [\#1849](https://github.com/pypeclub/OpenPype/pull/1849) -- Ftrack type error fix in sync to avalon event handler [\#1845](https://github.com/pypeclub/OpenPype/pull/1845) -- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841) -- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836) -- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813) -- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809) -- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806) -- Houdini collector formatting keys fix [\#1802](https://github.com/pypeclub/OpenPype/pull/1802) -- Settings error dialog on show [\#1798](https://github.com/pypeclub/OpenPype/pull/1798) -- Application launch stdout/stderr in GUI build [\#1684](https://github.com/pypeclub/OpenPype/pull/1684) -- Nuke: re-use instance nodes output path [\#1577](https://github.com/pypeclub/OpenPype/pull/1577) - -**Merged pull requests:** - -- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937) -- Add support for multiple Deadline ☠️➖ servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905) -- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space 🚀 [\#1863](https://github.com/pypeclub/OpenPype/pull/1863) -- Maya: expected files -\> render products ⚙️ overhaul [\#1812](https://github.com/pypeclub/OpenPype/pull/1812) -- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811) - -## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13) - -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.4...3.2.0) - -### 📖 Documentation - -- Fix: staging and `--use-version` option [\#1786](https://github.com/pypeclub/OpenPype/pull/1786) -- Subset template and TVPaint subset template docs [\#1717](https://github.com/pypeclub/OpenPype/pull/1717) -- Overscan color extract review [\#1701](https://github.com/pypeclub/OpenPype/pull/1701) - -**🚀 Enhancements** - -- Nuke: ftrack family plugin settings preset [\#1805](https://github.com/pypeclub/OpenPype/pull/1805) -- Standalone publisher last project [\#1799](https://github.com/pypeclub/OpenPype/pull/1799) -- Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795) -- Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777) -- Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776) -- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769) -- Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766) -- Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763) -- Application executables with environment variables [\#1757](https://github.com/pypeclub/OpenPype/pull/1757) -- Deadline: Nuke submission additional attributes
[\#1756](https://github.com/pypeclub/OpenPype/pull/1756) -- Settings schema without prefill [\#1753](https://github.com/pypeclub/OpenPype/pull/1753) -- Settings Hosts enum [\#1739](https://github.com/pypeclub/OpenPype/pull/1739) -- Validate containers settings [\#1736](https://github.com/pypeclub/OpenPype/pull/1736) -- PS - added loader from sequence [\#1726](https://github.com/pypeclub/OpenPype/pull/1726) -- Autoupdate launcher [\#1725](https://github.com/pypeclub/OpenPype/pull/1725) -- Toggle Ftrack upload in StandalonePublisher [\#1708](https://github.com/pypeclub/OpenPype/pull/1708) -- Nuke: Prerender Frame Range by default [\#1699](https://github.com/pypeclub/OpenPype/pull/1699) -- Smoother edges of color triangle [\#1695](https://github.com/pypeclub/OpenPype/pull/1695) - -**๐Ÿ› Bug fixes** - -- nuke: fixing wrong name of family folder when `used existing frames` [\#1803](https://github.com/pypeclub/OpenPype/pull/1803) -- Collect ftrack family bugs [\#1801](https://github.com/pypeclub/OpenPype/pull/1801) -- Invitee email can be None which break the Ftrack commit. [\#1788](https://github.com/pypeclub/OpenPype/pull/1788) -- Otio unrelated error on import [\#1782](https://github.com/pypeclub/OpenPype/pull/1782) -- FFprobe streams order [\#1775](https://github.com/pypeclub/OpenPype/pull/1775) -- Fix - single file files are str only, cast it to list to count properly [\#1772](https://github.com/pypeclub/OpenPype/pull/1772) -- Environments in app executable for MacOS [\#1768](https://github.com/pypeclub/OpenPype/pull/1768) -- Project specific environments [\#1767](https://github.com/pypeclub/OpenPype/pull/1767) -- Settings UI with refresh button [\#1764](https://github.com/pypeclub/OpenPype/pull/1764) -- Standalone publisher thumbnail extractor fix [\#1761](https://github.com/pypeclub/OpenPype/pull/1761) -- Anatomy others templates don't cause crash [\#1758](https://github.com/pypeclub/OpenPype/pull/1758) -- Backend acre module commit update [\#1745](https://github.com/pypeclub/OpenPype/pull/1745) -- hiero: precollect instances failing when audio selected [\#1743](https://github.com/pypeclub/OpenPype/pull/1743) -- Hiero: creator instance error [\#1742](https://github.com/pypeclub/OpenPype/pull/1742) -- Nuke: fixing render creator for no selection format failing [\#1741](https://github.com/pypeclub/OpenPype/pull/1741) -- StandalonePublisher: failing collector for editorial [\#1738](https://github.com/pypeclub/OpenPype/pull/1738) -- Local settings UI crash on missing defaults [\#1737](https://github.com/pypeclub/OpenPype/pull/1737) -- TVPaint white background on thumbnail [\#1735](https://github.com/pypeclub/OpenPype/pull/1735) -- Ftrack missing custom attribute message [\#1734](https://github.com/pypeclub/OpenPype/pull/1734) -- Launcher project changes [\#1733](https://github.com/pypeclub/OpenPype/pull/1733) -- Ftrack sync status [\#1732](https://github.com/pypeclub/OpenPype/pull/1732) -- TVPaint use layer name for default variant [\#1724](https://github.com/pypeclub/OpenPype/pull/1724) -- Default subset template for TVPaint review and workfile families [\#1716](https://github.com/pypeclub/OpenPype/pull/1716) -- Maya: Extract review hotfix [\#1714](https://github.com/pypeclub/OpenPype/pull/1714) -- Settings: Imageio improving granularity [\#1711](https://github.com/pypeclub/OpenPype/pull/1711) -- Application without executables [\#1679](https://github.com/pypeclub/OpenPype/pull/1679) -- Unreal: launching on Linux [\#1672](https://github.com/pypeclub/OpenPype/pull/1672) - -**Merged pull 

- Bump prismjs from 1.23.0 to 1.24.0 in /website [\#1773](https://github.com/pypeclub/OpenPype/pull/1773)
- TVPaint ftrack family [\#1755](https://github.com/pypeclub/OpenPype/pull/1755)

## [2.18.4](https://github.com/pypeclub/OpenPype/tree/2.18.4) (2021-06-24)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.3...2.18.4)

## [2.18.3](https://github.com/pypeclub/OpenPype/tree/2.18.3) (2021-06-23)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.2...2.18.3)

## [2.18.2](https://github.com/pypeclub/OpenPype/tree/2.18.2) (2021-06-16)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.1.0...2.18.2)

## [3.1.0](https://github.com/pypeclub/OpenPype/tree/3.1.0) (2021-06-15)

[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.0.0...3.1.0)

### 📖 Documentation

- Feature Slack integration [\#1657](https://github.com/pypeclub/OpenPype/pull/1657)

**🚀 Enhancements**

- Log Viewer with OpenPype style [\#1703](https://github.com/pypeclub/OpenPype/pull/1703)
- Scrolling in OpenPype info widget [\#1702](https://github.com/pypeclub/OpenPype/pull/1702)
- OpenPype style in modules [\#1694](https://github.com/pypeclub/OpenPype/pull/1694)
- Sort applications and tools alphabetically in Settings UI [\#1689](https://github.com/pypeclub/OpenPype/pull/1689)
- \#683 - Validate Frame Range in Standalone Publisher [\#1683](https://github.com/pypeclub/OpenPype/pull/1683)
- Hiero: old container versions identify with red color [\#1682](https://github.com/pypeclub/OpenPype/pull/1682)
- Project Manager: Default name column width [\#1669](https://github.com/pypeclub/OpenPype/pull/1669)
- Remove outline in stylesheet [\#1667](https://github.com/pypeclub/OpenPype/pull/1667)
- TVPaint: Creator take layer name as default value for subset variant [\#1663](https://github.com/pypeclub/OpenPype/pull/1663)
- TVPaint custom subset template [\#1662](https://github.com/pypeclub/OpenPype/pull/1662)
- Editorial: conform assets validator [\#1659](https://github.com/pypeclub/OpenPype/pull/1659)
- Nuke - Publish simplification [\#1653](https://github.com/pypeclub/OpenPype/pull/1653)
- \#1333 - added tooltip hints to Pyblish buttons [\#1649](https://github.com/pypeclub/OpenPype/pull/1649)

**🐛 Bug fixes**

- Nuke: broken publishing rendered frames [\#1707](https://github.com/pypeclub/OpenPype/pull/1707)
- Standalone publisher Thumbnail export args [\#1705](https://github.com/pypeclub/OpenPype/pull/1705)
- Bad zip can break OpenPype start [\#1691](https://github.com/pypeclub/OpenPype/pull/1691)
- Hiero: published whole edit mov [\#1687](https://github.com/pypeclub/OpenPype/pull/1687)
- Ftrack subprocess handle of stdout/stderr [\#1675](https://github.com/pypeclub/OpenPype/pull/1675)
- Settings list race condition and mutable dict list conversion [\#1671](https://github.com/pypeclub/OpenPype/pull/1671)
- Mac launch arguments fix [\#1660](https://github.com/pypeclub/OpenPype/pull/1660)
- Fix missing dbm python module [\#1652](https://github.com/pypeclub/OpenPype/pull/1652)
- Transparent branches in view on Mac [\#1648](https://github.com/pypeclub/OpenPype/pull/1648)
- Add asset on task item [\#1646](https://github.com/pypeclub/OpenPype/pull/1646)
- Project manager save and queue [\#1645](https://github.com/pypeclub/OpenPype/pull/1645)
- New project anatomy values [\#1644](https://github.com/pypeclub/OpenPype/pull/1644)
- Farm publishing: check if published items do exist [\#1573](https://github.com/pypeclub/OpenPype/pull/1573)

**Merged pull requests:**

- Bump normalize-url from 4.5.0 to 4.5.1 in /website [\#1686](https://github.com/pypeclub/OpenPype/pull/1686)

## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0)

[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.1...3.0.0)

### Configuration
- Studio Settings GUI: no more JSON configuration files.
- OpenPype Modules can be turned on and off.
- Easy to add Application versions.
- Per Project Environment and plugin management.
- Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family.
- Configurable publish plugins.
- Options to make any validator or extractor optional or disabled.
- Color Management is now unified under anatomy settings.
- Subset naming and grouping is fully configurable.
- All project attributes can now be set directly in OpenPype settings.
- Studio Settings can be locked to prevent unwanted artist changes.
- You can now add per project and per task type templates for workfile initialization in most hosts.
- Too many other individual configurable options to list in this changelog :)

### Local Settings
- Local Settings GUI where users can change certain options on an individual basis.
  - Application executables.
  - Project roots.
  - Project site sync settings.

### Build, Installation and Deployments
- No requirements on the artist machine.
- Fully distributed workflow possible.
- Self-contained installation.
- Available on all three major platforms.
- Automatic artist OpenPype updates.
- Studio OpenPype repository for updates distribution.
- Robust build system.
- Safe studio update versioning with staging and production options.
- MacOS build generates .app and .dmg installer.
- Windows build with installer creation script.

### Misc
- System and diagnostic info tool in the tray.
- Launching an application from the Launcher indicates activity.
- All project roots are now named. Single root projects are now achieved by having a single named root in the project anatomy.
- Every project root is cast into an environment variable as well, so it can be used in a DCC instead of an absolute path (depends on DCC support for env vars).
- Basic support for task types, on top of task names.
- Timer now changes automatically when the context is switched inside a running application.
- "Master" versions have been renamed to "Hero".
- Extract Burnins now supports file sequences and color settings.
- Extract Review supports overscan cropping, better letterboxes and background colour fill.
- Delivery tool for copying and renaming any published assets in bulk.
- Harmony, Photoshop and After Effects now connect directly with the OpenPype tray instead of spawning their own terminal.

### Project Manager GUI
- Create Projects.
- Create Shots and Assets.
- Create Tasks and assign task types.
- Fill required asset attributes.
- Validations for duplicated or unsupported names.
- Archive Assets.
- Move Assets within the hierarchy.

### Site Sync (beta)
- Synchronization of published files between workstations and central storage.
- Ability to add arbitrary storage providers to the Site Sync system.
- Default setup includes Disk and Google Drive providers as examples.
- Access to availability information from Loader and Scene Manager.
- Sync queue GUI with filtering, error and status reporting.
- Site sync can be configured on a per-project basis.
- Bulk upload and download from the loader.

### Ftrack
- Actions have customisable roles.
- Settings on all actions are updated live and don't need an OpenPype restart.
- Ftrack module can now be turned off completely.
- It is enough to specify the ftrack server name and the URL will be formed correctly. So instead of mystudio.ftrackapp.com, it's possible to use simply: "mystudio".

### Editorial
- Fully OTIO based editorial publishing.
- Completely re-done Hiero publishing to be a lot simpler and faster.
- Consistent conforming from Resolve, Hiero and Standalone Publisher.

### Backend
- OpenPype and Avalon now always share the same database (in 2.x it was possible to split them).
- Major codebase refactoring to allow for better CI, versioning and control of individual integrations.
- OTIO is bundled with build.
- OIIO is bundled with build.
- FFMPEG is bundled with build.
- Rest API and host WebSocket servers have been unified into a single local webserver.
- Maya look assigner has been integrated into the main codebase.
- Publish GUI has been integrated into the main codebase.
- Studio and Project settings overrides are now stored in Mongo.
- Too many other backend fixes and tweaks to list :), you can see the full changelog on GitHub for those.
- OpenPype uses Poetry to manage its virtual environment when running from code.
- All applications can be marked as Python 2 or 3 compatible to make the switch a bit easier.

### Pull Requests since 3.0.0-rc.6

**Implemented enhancements:**

- settings: task types enum entity [\#1605](https://github.com/pypeclub/OpenPype/issues/1605)
- Settings: ignore keys in referenced schema [\#1600](https://github.com/pypeclub/OpenPype/issues/1600)
- Maya: support for frame steps and frame lists [\#1585](https://github.com/pypeclub/OpenPype/issues/1585)
- TVPaint: Publish workfile. [\#1548](https://github.com/pypeclub/OpenPype/issues/1548)
- Loader: Current Asset Button [\#1448](https://github.com/pypeclub/OpenPype/issues/1448)
- Hiero: publish with retiming [\#1377](https://github.com/pypeclub/OpenPype/issues/1377)
- Ask user to restart after changing global environments in settings [\#910](https://github.com/pypeclub/OpenPype/issues/910)
- add option to define path to workfile template [\#895](https://github.com/pypeclub/OpenPype/issues/895)
- Harmony: move server console to system tray [\#676](https://github.com/pypeclub/OpenPype/issues/676)
- Standalone style [\#1630](https://github.com/pypeclub/OpenPype/pull/1630) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Faster hierarchical values push [\#1627](https://github.com/pypeclub/OpenPype/pull/1627) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Launcher tool style [\#1624](https://github.com/pypeclub/OpenPype/pull/1624) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Loader and Library loader enhancements [\#1623](https://github.com/pypeclub/OpenPype/pull/1623) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Tray style [\#1622](https://github.com/pypeclub/OpenPype/pull/1622) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Maya schemas cleanup [\#1610](https://github.com/pypeclub/OpenPype/pull/1610) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Settings: ignore keys in referenced schema [\#1608](https://github.com/pypeclub/OpenPype/pull/1608) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- settings: task types enum entity [\#1606](https://github.com/pypeclub/OpenPype/pull/1606) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Openpype style [\#1604](https://github.com/pypeclub/OpenPype/pull/1604) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- TVPaint: Publish workfile. [\#1597](https://github.com/pypeclub/OpenPype/pull/1597) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Nuke: add option to define path to workfile template [\#1571](https://github.com/pypeclub/OpenPype/pull/1571) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Crop overscan in Extract Review [\#1569](https://github.com/pypeclub/OpenPype/pull/1569) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Unreal and Blender: Material Workflow [\#1562](https://github.com/pypeclub/OpenPype/pull/1562) ([simonebarbieri](https://github.com/simonebarbieri))
- Harmony: move server console to system tray [\#1560](https://github.com/pypeclub/OpenPype/pull/1560) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Ask user to restart after changing global environments in settings [\#1550](https://github.com/pypeclub/OpenPype/pull/1550) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Hiero: publish with retiming [\#1545](https://github.com/pypeclub/OpenPype/pull/1545) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))

**Fixed bugs:**

- Library loader load asset documents on OpenPype start [\#1603](https://github.com/pypeclub/OpenPype/issues/1603)
- Resolve: unable to load the same footage twice [\#1317](https://github.com/pypeclub/OpenPype/issues/1317)
- Resolve: unable to load footage [\#1316](https://github.com/pypeclub/OpenPype/issues/1316)
- Add required Python 2 modules [\#1291](https://github.com/pypeclub/OpenPype/issues/1291)
- GUI scaling with hi-res displays [\#705](https://github.com/pypeclub/OpenPype/issues/705)
- Maya: non unicode string in publish validation [\#673](https://github.com/pypeclub/OpenPype/issues/673)
- Nuke: Rendered Frame validation is triggered by multiple collections [\#156](https://github.com/pypeclub/OpenPype/issues/156)
- avalon-core debugging failing [\#80](https://github.com/pypeclub/OpenPype/issues/80)
- Only check arnold shading group if arnold is used [\#72](https://github.com/pypeclub/OpenPype/issues/72)
- Sync server Qt layout fix [\#1621](https://github.com/pypeclub/OpenPype/pull/1621) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Console Listener on Python 2 fix [\#1620](https://github.com/pypeclub/OpenPype/pull/1620) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Bug: Initialize blessed term only in console mode [\#1619](https://github.com/pypeclub/OpenPype/pull/1619) ([antirotor](https://github.com/antirotor))
- Settings template skip paths support wrappers [\#1618](https://github.com/pypeclub/OpenPype/pull/1618) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Maya capture 'isolate\_view' fix + minor corrections [\#1617](https://github.com/pypeclub/OpenPype/pull/1617) ([2-REC](https://github.com/2-REC))
- MacOS Fix launch of standalone publisher [\#1616](https://github.com/pypeclub/OpenPype/pull/1616) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- 'Delivery action' report fix + typos [\#1612](https://github.com/pypeclub/OpenPype/pull/1612) ([2-REC](https://github.com/2-REC))
- List append fix in mutable dict settings [\#1599](https://github.com/pypeclub/OpenPype/pull/1599) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Documentation: Maya: fix review [\#1598](https://github.com/pypeclub/OpenPype/pull/1598) ([antirotor](https://github.com/antirotor))
- Bugfix: Set certifi CA bundle for all platforms [\#1596](https://github.com/pypeclub/OpenPype/pull/1596) ([antirotor](https://github.com/antirotor))

**Merged pull requests:**

- Bump dns-packet from 1.3.1 to 1.3.4 in /website [\#1611](https://github.com/pypeclub/OpenPype/pull/1611) ([dependabot[bot]](https://github.com/apps/dependabot))
- Maya: Render workflow fixes [\#1607](https://github.com/pypeclub/OpenPype/pull/1607) ([antirotor](https://github.com/antirotor))
- Maya: support for frame steps and frame lists [\#1586](https://github.com/pypeclub/OpenPype/pull/1586) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- 3.0.0 - curated changelog [\#1284](https://github.com/pypeclub/OpenPype/pull/1284) ([mkolar](https://github.com/mkolar))

## [2.18.1](https://github.com/pypeclub/openpype/tree/2.18.1) (2021-06-03)

[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...2.18.1)

**Enhancements:**

- Faster hierarchical values push [\#1626](https://github.com/pypeclub/OpenPype/pull/1626)
- Feature Delivery in library loader [\#1549](https://github.com/pypeclub/OpenPype/pull/1549)
- Hiero: Initial frame publish support. [\#1172](https://github.com/pypeclub/OpenPype/pull/1172)

**Fixed bugs:**

- Maya capture 'isolate\_view' fix + minor corrections [\#1614](https://github.com/pypeclub/OpenPype/pull/1614)
- 'Delivery action' report fix + typos [\#1613](https://github.com/pypeclub/OpenPype/pull/1613)
- Delivery in LibraryLoader - fixed sequence issue [\#1590](https://github.com/pypeclub/OpenPype/pull/1590)
- FFmpeg filters in quote marks [\#1588](https://github.com/pypeclub/OpenPype/pull/1588)
- Ftrack delete action causes circular error [\#1581](https://github.com/pypeclub/OpenPype/pull/1581)
- Fix Maya playblast. [\#1566](https://github.com/pypeclub/OpenPype/pull/1566)
- More failsafes prevent errored runs. [\#1554](https://github.com/pypeclub/OpenPype/pull/1554)
- Celaction publishing [\#1539](https://github.com/pypeclub/OpenPype/pull/1539)
- celaction: app not starting [\#1533](https://github.com/pypeclub/OpenPype/pull/1533)

**Merged pull requests:**

- Maya: Render workflow fixes - 2.0 backport [\#1609](https://github.com/pypeclub/OpenPype/pull/1609)
- Maya Hardware support [\#1553](https://github.com/pypeclub/OpenPype/pull/1553)

## [CI/3.0.0-rc.6](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.6) (2021-05-27)

[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.5...CI/3.0.0-rc.6)

**Implemented enhancements:**

- Hiero: publish color and transformation soft-effects [\#1376](https://github.com/pypeclub/OpenPype/issues/1376)
- Get rid of `AVALON\_HIERARCHY` and `hiearchy` key on asset [\#432](https://github.com/pypeclub/OpenPype/issues/432)
- Sync to avalon do not store hierarchy key [\#1582](https://github.com/pypeclub/OpenPype/pull/1582) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Tools: launcher scripts for project manager [\#1557](https://github.com/pypeclub/OpenPype/pull/1557) ([antirotor](https://github.com/antirotor))
- Simple tvpaint publish [\#1555](https://github.com/pypeclub/OpenPype/pull/1555) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Feature Delivery in library loader [\#1546](https://github.com/pypeclub/OpenPype/pull/1546) ([kalisp](https://github.com/kalisp))
- Documentation: Dev and system build documentation [\#1543](https://github.com/pypeclub/OpenPype/pull/1543) ([antirotor](https://github.com/antirotor))
- Color entity [\#1542](https://github.com/pypeclub/OpenPype/pull/1542) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Extract review bg color [\#1534](https://github.com/pypeclub/OpenPype/pull/1534) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- TVPaint loader settings [\#1530](https://github.com/pypeclub/OpenPype/pull/1530) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Blender can initialize different user script paths [\#1528](https://github.com/pypeclub/OpenPype/pull/1528) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Blender and Unreal: Improved Animation Workflow [\#1514](https://github.com/pypeclub/OpenPype/pull/1514) ([simonebarbieri](https://github.com/simonebarbieri))
- Hiero: publish color and transformation soft-effects [\#1511](https://github.com/pypeclub/OpenPype/pull/1511) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))

**Fixed bugs:**

- OpenPype specific version issues [\#1583](https://github.com/pypeclub/OpenPype/issues/1583)
- Ftrack login server can't work without stderr [\#1576](https://github.com/pypeclub/OpenPype/issues/1576)
- Mac application launch [\#1575](https://github.com/pypeclub/OpenPype/issues/1575)
- Settings are not propagated to Nuke write nodes [\#1538](https://github.com/pypeclub/OpenPype/issues/1538)
- Subset names settings not applied for publishing [\#1537](https://github.com/pypeclub/OpenPype/issues/1537)
- Nuke: callback at start not setting colorspace [\#1412](https://github.com/pypeclub/OpenPype/issues/1412)
- Pype 3: Missing icon for Settings [\#1272](https://github.com/pypeclub/OpenPype/issues/1272)
- Blender: cannot initialize Avalon if BLENDER\_USER\_SCRIPTS is already used [\#1050](https://github.com/pypeclub/OpenPype/issues/1050)
- Ftrack delete action causes circular error [\#206](https://github.com/pypeclub/OpenPype/issues/206)
- Build: stop cleaning of pyc files in build directory [\#1592](https://github.com/pypeclub/OpenPype/pull/1592) ([antirotor](https://github.com/antirotor))
- Ftrack login server can't work without stderr [\#1591](https://github.com/pypeclub/OpenPype/pull/1591) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- FFmpeg filters in quote marks [\#1589](https://github.com/pypeclub/OpenPype/pull/1589) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- OpenPype specific version issues [\#1584](https://github.com/pypeclub/OpenPype/pull/1584) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Mac application launch [\#1580](https://github.com/pypeclub/OpenPype/pull/1580) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Ftrack delete action causes circular error [\#1579](https://github.com/pypeclub/OpenPype/pull/1579) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Hiero: publishing issues [\#1578](https://github.com/pypeclub/OpenPype/pull/1578) ([jezscha](https://github.com/jezscha))
- Nuke: callback at start not setting colorspace [\#1561](https://github.com/pypeclub/OpenPype/pull/1561) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Bugfix PS subset and quick review [\#1541](https://github.com/pypeclub/OpenPype/pull/1541) ([kalisp](https://github.com/kalisp))
- Settings are not propagated to Nuke write nodes [\#1540](https://github.com/pypeclub/OpenPype/pull/1540) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- OpenPype: Powershell scripts polishing [\#1536](https://github.com/pypeclub/OpenPype/pull/1536) ([antirotor](https://github.com/antirotor))
- Host name collecting fix [\#1535](https://github.com/pypeclub/OpenPype/pull/1535) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Handle duplicated task names in project manager [\#1531](https://github.com/pypeclub/OpenPype/pull/1531) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Validate is file attribute in settings schema [\#1529](https://github.com/pypeclub/OpenPype/pull/1529) ([iLLiCiTiT](https://github.com/iLLiCiTiT))

**Merged pull requests:**

- Bump postcss from 8.2.8 to 8.3.0 in /website [\#1593](https://github.com/pypeclub/OpenPype/pull/1593) ([dependabot[bot]](https://github.com/apps/dependabot))
- User installation documentation [\#1532](https://github.com/pypeclub/OpenPype/pull/1532) ([64qam](https://github.com/64qam))

## [CI/3.0.0-rc.5](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.5) (2021-05-19)

[Full Changelog](https://github.com/pypeclub/openpype/compare/2.18.0...CI/3.0.0-rc.5)

**Implemented enhancements:**

- OpenPype: Build - Add progress bars [\#1524](https://github.com/pypeclub/OpenPype/pull/1524) ([antirotor](https://github.com/antirotor))
- Default environments per host implementation [\#1522](https://github.com/pypeclub/OpenPype/pull/1522) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- OpenPype: use `semver` module for version resolution [\#1513](https://github.com/pypeclub/OpenPype/pull/1513) ([antirotor](https://github.com/antirotor))
- Feature Aftereffects setting cleanup documentation [\#1510](https://github.com/pypeclub/OpenPype/pull/1510) ([kalisp](https://github.com/kalisp))
- Feature Sync server settings enhancement [\#1501](https://github.com/pypeclub/OpenPype/pull/1501) ([kalisp](https://github.com/kalisp))
- Project manager [\#1396](https://github.com/pypeclub/OpenPype/pull/1396) ([iLLiCiTiT](https://github.com/iLLiCiTiT))

**Fixed bugs:**

- Unified schema definition [\#874](https://github.com/pypeclub/OpenPype/issues/874)
- Maya: fix look assignment [\#1526](https://github.com/pypeclub/OpenPype/pull/1526) ([antirotor](https://github.com/antirotor))
- Bugfix Sync server local site issues [\#1523](https://github.com/pypeclub/OpenPype/pull/1523) ([kalisp](https://github.com/kalisp))
- Store as list dictionary check initial value with right type [\#1520](https://github.com/pypeclub/OpenPype/pull/1520) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Maya: wrong collection of playblasted frames [\#1515](https://github.com/pypeclub/OpenPype/pull/1515) ([mkolar](https://github.com/mkolar))
- Convert pyblish logs to string at the moment of logging [\#1512](https://github.com/pypeclub/OpenPype/pull/1512) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- 3.0 | nuke: fixing start\_at with option gui [\#1509](https://github.com/pypeclub/OpenPype/pull/1509) ([jezscha](https://github.com/jezscha))
- Tests: fix pype -\> openpype to make tests work again [\#1508](https://github.com/pypeclub/OpenPype/pull/1508) ([antirotor](https://github.com/antirotor))

**Merged pull requests:**

- OpenPype: disable submodule update with `--no-submodule-update` [\#1525](https://github.com/pypeclub/OpenPype/pull/1525) ([antirotor](https://github.com/antirotor))
- Ftrack without autosync in Pype 3 [\#1519](https://github.com/pypeclub/OpenPype/pull/1519) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Feature Harmony setting cleanup documentation [\#1506](https://github.com/pypeclub/OpenPype/pull/1506) ([kalisp](https://github.com/kalisp))
- Sync Server beginning of documentation [\#1471](https://github.com/pypeclub/OpenPype/pull/1471) ([kalisp](https://github.com/kalisp))
- Blender: publish layout json [\#1348](https://github.com/pypeclub/OpenPype/pull/1348) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))

## [2.18.0](https://github.com/pypeclub/openpype/tree/2.18.0) (2021-05-18)

[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.4...2.18.0)

**Implemented enhancements:**

- Default environments per host implementation [\#1405](https://github.com/pypeclub/OpenPype/issues/1405)
- Blender: publish layout json [\#1346](https://github.com/pypeclub/OpenPype/issues/1346)
- Ftrack without autosync in Pype 3 [\#1128](https://github.com/pypeclub/OpenPype/issues/1128)
- Launcher: started action indicator [\#1102](https://github.com/pypeclub/OpenPype/issues/1102)
- Launch arguments of applications [\#1094](https://github.com/pypeclub/OpenPype/issues/1094)
- Publish: instance info [\#724](https://github.com/pypeclub/OpenPype/issues/724)
- Review: ability to control review length [\#482](https://github.com/pypeclub/OpenPype/issues/482)
- Colorized recognition of creator result [\#394](https://github.com/pypeclub/OpenPype/issues/394)
- event assign user to started task [\#49](https://github.com/pypeclub/OpenPype/issues/49)
- rebuild containers from reference in maya [\#55](https://github.com/pypeclub/OpenPype/issues/55)
- nuke Load metadata [\#66](https://github.com/pypeclub/OpenPype/issues/66)
- Maya: Safer handling of expected render output names [\#1496](https://github.com/pypeclub/OpenPype/pull/1496) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- TVPaint: Increment workfile version on successful publish. [\#1489](https://github.com/pypeclub/OpenPype/pull/1489) ([tokejepsen](https://github.com/tokejepsen))
- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1484](https://github.com/pypeclub/OpenPype/pull/1484) ([tokejepsen](https://github.com/tokejepsen))
- Maya: Use of multiple deadline servers [\#1483](https://github.com/pypeclub/OpenPype/pull/1483) ([antirotor](https://github.com/antirotor))

**Fixed bugs:**

- Igniter version resolution doesn't consider its own version [\#1505](https://github.com/pypeclub/OpenPype/issues/1505)
- Maya: Safer handling of expected render output names [\#1159](https://github.com/pypeclub/OpenPype/issues/1159)
- Harmony: Invalid render output from non-conventionally named instance [\#871](https://github.com/pypeclub/OpenPype/issues/871)
- Existing subsets hints in creator [\#1503](https://github.com/pypeclub/OpenPype/pull/1503) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- nuke: space in node name breaking process [\#1494](https://github.com/pypeclub/OpenPype/pull/1494) ([jezscha](https://github.com/jezscha))
- Maya: wrong collection of playblasted frames [\#1517](https://github.com/pypeclub/OpenPype/pull/1517) ([mkolar](https://github.com/mkolar))
- Existing subsets hints in creator [\#1502](https://github.com/pypeclub/OpenPype/pull/1502) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Use instance frame start instead of timeline. [\#1486](https://github.com/pypeclub/OpenPype/pull/1486) ([tokejepsen](https://github.com/tokejepsen))
- Maya: Redshift - set proper start frame on proxy [\#1480](https://github.com/pypeclub/OpenPype/pull/1480) ([antirotor](https://github.com/antirotor))

**Closed issues:**

- Nuke: wrong "start at" value on render load [\#1352](https://github.com/pypeclub/OpenPype/issues/1352)
- DV Resolve - loading/updating - image video [\#915](https://github.com/pypeclub/OpenPype/issues/915)

**Merged pull requests:**

- nuke: fixing start\_at with option gui [\#1507](https://github.com/pypeclub/OpenPype/pull/1507) ([jezscha](https://github.com/jezscha))

## [CI/3.0.0-rc.4](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.4) (2021-05-12)

[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.3...CI/3.0.0-rc.4)

**Implemented enhancements:**

- Resolve: documentation [\#1490](https://github.com/pypeclub/OpenPype/issues/1490)
- Hiero: audio to review [\#1378](https://github.com/pypeclub/OpenPype/issues/1378)
- nks color clips after publish [\#44](https://github.com/pypeclub/OpenPype/issues/44)
- Store data from modifiable dict as list [\#1504](https://github.com/pypeclub/OpenPype/pull/1504) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Use SubsetLoader and multiple contexts for delete\_old\_versions [\#1497](https://github.com/pypeclub/OpenPype/pull/1497) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Hiero: publish audio and add to review [\#1493](https://github.com/pypeclub/OpenPype/pull/1493) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Resolve: documentation [\#1491](https://github.com/pypeclub/OpenPype/pull/1491) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Change integratenew template profiles setting [\#1487](https://github.com/pypeclub/OpenPype/pull/1487) ([kalisp](https://github.com/kalisp))
- Settings tool cleanup [\#1477](https://github.com/pypeclub/OpenPype/pull/1477) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Sorted Applications and Tools in Custom attribute [\#1476](https://github.com/pypeclub/OpenPype/pull/1476) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- PS - group all published instances [\#1416](https://github.com/pypeclub/OpenPype/pull/1416) ([kalisp](https://github.com/kalisp))
- OpenPype: Support for Docker [\#1289](https://github.com/pypeclub/OpenPype/pull/1289) ([antirotor](https://github.com/antirotor))

**Fixed bugs:**

- Harmony: palettes publishing [\#1439](https://github.com/pypeclub/OpenPype/issues/1439)
- Photoshop: validation for already created images [\#1435](https://github.com/pypeclub/OpenPype/issues/1435)
- Nuke Extracts Thumbnail from frame out of shot range [\#963](https://github.com/pypeclub/OpenPype/issues/963)
- Instance in same Context repairing [\#390](https://github.com/pypeclub/OpenPype/issues/390)
- User Inactivity - Start timers sets wrong time [\#91](https://github.com/pypeclub/OpenPype/issues/91)
- Use instance frame start instead of timeline [\#1499](https://github.com/pypeclub/OpenPype/pull/1499) ([mkolar](https://github.com/mkolar))
- Various smaller fixes [\#1498](https://github.com/pypeclub/OpenPype/pull/1498) ([mkolar](https://github.com/mkolar))
- nuke: space in node name breaking process [\#1495](https://github.com/pypeclub/OpenPype/pull/1495) ([jezscha](https://github.com/jezscha))
- Codec determination in extract burnin [\#1492](https://github.com/pypeclub/OpenPype/pull/1492) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Undefined constant in subprocess module [\#1485](https://github.com/pypeclub/OpenPype/pull/1485) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- List entity catch add/remove item changes properly [\#1482](https://github.com/pypeclub/OpenPype/pull/1482) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Resolve: additional fixes of publishing workflow [\#1481](https://github.com/pypeclub/OpenPype/pull/1481) ([jezscha](https://github.com/jezscha))
- Photoshop: validation for already created images [\#1436](https://github.com/pypeclub/OpenPype/pull/1436) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))

**Merged pull requests:**

- Maya: Support for looks on VRay Proxies [\#1443](https://github.com/pypeclub/OpenPype/pull/1443) ([antirotor](https://github.com/antirotor))

## [2.17.3](https://github.com/pypeclub/openpype/tree/2.17.3) (2021-05-06)

[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.3...2.17.3)

**Fixed bugs:**

- Nuke: workfile version synced to db version always [\#1479](https://github.com/pypeclub/OpenPype/pull/1479) ([jezscha](https://github.com/jezscha))

## [CI/3.0.0-rc.3](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.3) (2021-05-05)

[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.2...CI/3.0.0-rc.3)

**Implemented enhancements:**

- Path entity with placeholder [\#1473](https://github.com/pypeclub/OpenPype/pull/1473) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Burnin custom font filepath [\#1472](https://github.com/pypeclub/OpenPype/pull/1472) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Poetry: Move to OpenPype [\#1449](https://github.com/pypeclub/OpenPype/pull/1449) ([antirotor](https://github.com/antirotor))

**Fixed bugs:**

- Mac SSL path needs to be relative to pype\_root [\#1469](https://github.com/pypeclub/OpenPype/issues/1469)
- Resolve: fix loading clips to timeline [\#1421](https://github.com/pypeclub/OpenPype/issues/1421)
- Wrong handling of slashes when loading on mac [\#1411](https://github.com/pypeclub/OpenPype/issues/1411)
- Nuke openpype3 [\#1342](https://github.com/pypeclub/OpenPype/issues/1342)
- Houdini launcher [\#1171](https://github.com/pypeclub/OpenPype/issues/1171)
- Fix SyncServer get\_enabled\_projects should handle global state [\#1475](https://github.com/pypeclub/OpenPype/pull/1475) ([kalisp](https://github.com/kalisp))
- Igniter buttons enable/disable fix [\#1474](https://github.com/pypeclub/OpenPype/pull/1474) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Mac SSL path needs to be relative to pype\_root [\#1470](https://github.com/pypeclub/OpenPype/pull/1470) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Resolve: 17 compatibility issues and load image sequences [\#1422](https://github.com/pypeclub/OpenPype/pull/1422) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))

## [CI/3.0.0-rc.2](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.2) (2021-05-04)

[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.2...CI/3.0.0-rc.2)

**Implemented enhancements:**

- Extract burnins with sequences [\#1467](https://github.com/pypeclub/OpenPype/pull/1467) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Extract burnins with color setting [\#1466](https://github.com/pypeclub/OpenPype/pull/1466) ([iLLiCiTiT](https://github.com/iLLiCiTiT))

**Fixed bugs:**

- Fix groups check in Python 2 [\#1468](https://github.com/pypeclub/OpenPype/pull/1468) ([iLLiCiTiT](https://github.com/iLLiCiTiT))

## [2.17.2](https://github.com/pypeclub/openpype/tree/2.17.2) (2021-05-04)

[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.1...2.17.2)

**Implemented enhancements:**

- Forward/Backward compatible apps and tools with OpenPype 3 [\#1463](https://github.com/pypeclub/OpenPype/pull/1463) ([iLLiCiTiT](https://github.com/iLLiCiTiT))

## [CI/3.0.0-rc.1](https://github.com/pypeclub/openpype/tree/CI/3.0.0-rc.1) (2021-05-04)

[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.1...CI/3.0.0-rc.1)

**Implemented enhancements:**

- Only show studio settings to admins [\#1406](https://github.com/pypeclub/OpenPype/issues/1406)
- Ftrack specific settings save warning messages [\#1458](https://github.com/pypeclub/OpenPype/pull/1458) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Faster settings actions [\#1446](https://github.com/pypeclub/OpenPype/pull/1446) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Feature/sync server priority [\#1444](https://github.com/pypeclub/OpenPype/pull/1444) ([kalisp](https://github.com/kalisp))
- Faster settings UI loading [\#1442](https://github.com/pypeclub/OpenPype/pull/1442) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Igniter re-write [\#1441](https://github.com/pypeclub/OpenPype/pull/1441) ([mkolar](https://github.com/mkolar))
- Wrap openpype build into installers [\#1419](https://github.com/pypeclub/OpenPype/pull/1419) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Extract review first documentation [\#1404](https://github.com/pypeclub/OpenPype/pull/1404) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Blender PySide2 install guide [\#1403](https://github.com/pypeclub/OpenPype/pull/1403) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Nuke: deadline submission with gpu [\#1394](https://github.com/pypeclub/OpenPype/pull/1394) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Igniter: Reverse item filter for OpenPype version [\#1349](https://github.com/pypeclub/OpenPype/pull/1349) ([antirotor](https://github.com/antirotor))

**Fixed bugs:**

- OpenPype Mongo URL definition [\#1450](https://github.com/pypeclub/OpenPype/issues/1450)
- Various typos and smaller fixes [\#1464](https://github.com/pypeclub/OpenPype/pull/1464) ([mkolar](https://github.com/mkolar))
- Validation of dynamic items in settings [\#1462](https://github.com/pypeclub/OpenPype/pull/1462) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- List can handle new items correctly [\#1459](https://github.com/pypeclub/OpenPype/pull/1459) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Settings actions process fix [\#1457](https://github.com/pypeclub/OpenPype/pull/1457) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Add to overrides actions fix [\#1456](https://github.com/pypeclub/OpenPype/pull/1456) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- OpenPype Mongo URL definition [\#1455](https://github.com/pypeclub/OpenPype/pull/1455) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Global settings save/load out of system settings [\#1447](https://github.com/pypeclub/OpenPype/pull/1447) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Keep metadata on remove overrides [\#1445](https://github.com/pypeclub/OpenPype/pull/1445) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Nuke: fixing undo for loaded mov and sequence [\#1432](https://github.com/pypeclub/OpenPype/pull/1432) ([jezscha](https://github.com/jezscha))
- ExtractReview skip empty strings from settings [\#1431](https://github.com/pypeclub/OpenPype/pull/1431) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Bugfix Sync server tweaks [\#1430](https://github.com/pypeclub/OpenPype/pull/1430) ([kalisp](https://github.com/kalisp))
- Hiero: missing thumbnail in review [\#1429](https://github.com/pypeclub/OpenPype/pull/1429) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Bugfix Maya in deadline for OpenPype [\#1428](https://github.com/pypeclub/OpenPype/pull/1428) ([kalisp](https://github.com/kalisp))
- AE - validation for duration was 1 frame shorter [\#1427](https://github.com/pypeclub/OpenPype/pull/1427) ([kalisp](https://github.com/kalisp))
- Houdini menu filename [\#1418](https://github.com/pypeclub/OpenPype/pull/1418) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Fix Avalon plugins attribute overrides [\#1413](https://github.com/pypeclub/OpenPype/pull/1413) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Nuke: submit to Deadline fails [\#1409](https://github.com/pypeclub/OpenPype/pull/1409) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- Validate MongoDB Url on start [\#1407](https://github.com/pypeclub/OpenPype/pull/1407) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Nuke: fix set colorspace with new settings [\#1386](https://github.com/pypeclub/OpenPype/pull/1386) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- MacOS build and install issues [\#1380](https://github.com/pypeclub/OpenPype/pull/1380) ([mkolar](https://github.com/mkolar))

**Closed issues:**

- test [\#1452](https://github.com/pypeclub/OpenPype/issues/1452)

**Merged pull requests:**

- TVPaint frame range definition [\#1425](https://github.com/pypeclub/OpenPype/pull/1425) ([iLLiCiTiT](https://github.com/iLLiCiTiT))
- Only show studio settings to admins [\#1420](https://github.com/pypeclub/OpenPype/pull/1420) ([create-issue-branch[bot]](https://github.com/apps/create-issue-branch))
- TVPaint documentation [\#1305](https://github.com/pypeclub/OpenPype/pull/1305) ([64qam](https://github.com/64qam))

## [2.17.1](https://github.com/pypeclub/openpype/tree/2.17.1) (2021-04-30)

[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.0...2.17.1)

**Enhancements:**

- Nuke: deadline submission with gpu [\#1414](https://github.com/pypeclub/OpenPype/pull/1414)
- TVPaint frame range definition [\#1424](https://github.com/pypeclub/OpenPype/pull/1424)
- PS - group all published instances [\#1415](https://github.com/pypeclub/OpenPype/pull/1415)
- Add task name to context pop up. [\#1383](https://github.com/pypeclub/OpenPype/pull/1383)
- Enhance review letterbox feature. [\#1371](https://github.com/pypeclub/OpenPype/pull/1371)

**Fixed bugs:**

- Houdini menu filename [\#1417](https://github.com/pypeclub/OpenPype/pull/1417)
- AE - validation for duration was 1 frame shorter [\#1426](https://github.com/pypeclub/OpenPype/pull/1426)

**Merged pull requests:**

- Maya: Vray - problem getting all file nodes for look publishing [\#1399](https://github.com/pypeclub/OpenPype/pull/1399)
- Maya: Support for Redshift proxies [\#1360](https://github.com/pypeclub/OpenPype/pull/1360)

## [2.17.0](https://github.com/pypeclub/openpype/tree/2.17.0) (2021-04-20)

[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-beta.2...2.17.0)

**Enhancements:**

- Forward compatible ftrack group [\#1243](https://github.com/pypeclub/OpenPype/pull/1243)
- Settings in mongo as dict [\#1221](https://github.com/pypeclub/OpenPype/pull/1221)
- Maya: Make tx option configurable with presets [\#1328](https://github.com/pypeclub/OpenPype/pull/1328)
- TVPaint asset name validation [\#1302](https://github.com/pypeclub/OpenPype/pull/1302)
- TV Paint: Set initial project settings. [\#1299](https://github.com/pypeclub/OpenPype/pull/1299)
- TV Paint: Validate mark in and out. [\#1298](https://github.com/pypeclub/OpenPype/pull/1298)
- Validate project settings [\#1297](https://github.com/pypeclub/OpenPype/pull/1297)
- After Effects: added SubsetManager [\#1234](https://github.com/pypeclub/OpenPype/pull/1234)
- Show error message in pyblish UI [\#1206](https://github.com/pypeclub/OpenPype/pull/1206)

**Fixed bugs:**

- Hiero: fixing source frame from correct object [\#1362](https://github.com/pypeclub/OpenPype/pull/1362)
- Nuke: fix colourspace, prerenders and nuke panes opening [\#1308](https://github.com/pypeclub/OpenPype/pull/1308)
- AE remove orphaned instance from workfile - fix self.stub [\#1282](https://github.com/pypeclub/OpenPype/pull/1282)
- Nuke: deadline submission with search replaced env values from preset [\#1194](https://github.com/pypeclub/OpenPype/pull/1194)
- Ftrack custom attributes in bulks [\#1312](https://github.com/pypeclub/OpenPype/pull/1312)
- Ftrack optional pypclub role [\#1303](https://github.com/pypeclub/OpenPype/pull/1303)
- After Effects: remove orphaned instances [\#1275](https://github.com/pypeclub/OpenPype/pull/1275)
- Avalon schema names [\#1242](https://github.com/pypeclub/OpenPype/pull/1242)
- Handle duplication of Task name [\#1226](https://github.com/pypeclub/OpenPype/pull/1226)
- Modified path of plugin loads for Harmony and TVPaint [\#1217](https://github.com/pypeclub/OpenPype/pull/1217)
- Regex checks in profiles filtering [\#1214](https://github.com/pypeclub/OpenPype/pull/1214)
- Bulk mov strict task [\#1204](https://github.com/pypeclub/OpenPype/pull/1204)
- Update custom ftrack session attributes [\#1202](https://github.com/pypeclub/OpenPype/pull/1202)
- Nuke: write node colorspace ignore `default\(\)` label [\#1199](https://github.com/pypeclub/OpenPype/pull/1199)
- Nuke: reverse search to make it more versatile [\#1178](https://github.com/pypeclub/OpenPype/pull/1178)

## [2.16.0](https://github.com/pypeclub/pype/tree/2.16.0) (2021-03-22)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.3...2.16.0)

**Enhancements:**

- Nuke: deadline submit limit group filter [\#1167](https://github.com/pypeclub/pype/pull/1167)
- Maya: support for Deadline Group and Limit Groups - backport 2.x [\#1156](https://github.com/pypeclub/pype/pull/1156)
- Maya: fixes for Redshift support [\#1152](https://github.com/pypeclub/pype/pull/1152)
- Nuke: adding preset for a Read node name to all img and mov Loaders [\#1146](https://github.com/pypeclub/pype/pull/1146)
- nuke deadline submit with environ var from presets overrides [\#1142](https://github.com/pypeclub/pype/pull/1142)
- Change timers after task change [\#1138](https://github.com/pypeclub/pype/pull/1138)
- Nuke: shortcuts for Pype menu [\#1127](https://github.com/pypeclub/pype/pull/1127)
- Nuke: workfile template [\#1124](https://github.com/pypeclub/pype/pull/1124)
- Sites local settings by site name [\#1117](https://github.com/pypeclub/pype/pull/1117)
- Reset loader's asset selection on context change [\#1106](https://github.com/pypeclub/pype/pull/1106)
- Bulk mov render publishing [\#1101](https://github.com/pypeclub/pype/pull/1101)
- Photoshop: mark publishable instances [\#1093](https://github.com/pypeclub/pype/pull/1093)
- Added ability to define BG color for extract review [\#1088](https://github.com/pypeclub/pype/pull/1088)
- TVPaint extractor enhancement [\#1080](https://github.com/pypeclub/pype/pull/1080)
- Photoshop: added support for .psb in workfiles [\#1078](https://github.com/pypeclub/pype/pull/1078)
- Optionally add task to subset name [\#1072](https://github.com/pypeclub/pype/pull/1072)
- Only extend clip range when collecting. [\#1008](https://github.com/pypeclub/pype/pull/1008)
- Collect audio for farm reviews. [\#1073](https://github.com/pypeclub/pype/pull/1073)

**Fixed bugs:**

- Fix path spaces in jpeg extractor [\#1174](https://github.com/pypeclub/pype/pull/1174)
- Maya: Bugfix: superclass for CreateCameraRig [\#1166](https://github.com/pypeclub/pype/pull/1166)
- Maya: Submit to Deadline - fix typo in condition [\#1163](https://github.com/pypeclub/pype/pull/1163)
- Avoid dot in repre extension [\#1125](https://github.com/pypeclub/pype/pull/1125)
- Fix versions variable usage in standalone publisher [\#1090](https://github.com/pypeclub/pype/pull/1090)
- Collect instance data fix subset query [\#1082](https://github.com/pypeclub/pype/pull/1082)
- Fix getting the camera name. [\#1067](https://github.com/pypeclub/pype/pull/1067)
- Nuke: Ensure "NUKE\_TEMP\_DIR" is not part of the Deadline job environment. [\#1064](https://github.com/pypeclub/pype/pull/1064)

## [2.15.3](https://github.com/pypeclub/pype/tree/2.15.3) (2021-02-26)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.2...2.15.3)

**Enhancements:**

- Maya: speedup renderable camera collection [\#1053](https://github.com/pypeclub/pype/pull/1053)
- Harmony - add regex search to filter allowed task names for collectin… [\#1047](https://github.com/pypeclub/pype/pull/1047)

**Fixed bugs:**

- Ftrack integrate hierarchy fix [\#1085](https://github.com/pypeclub/pype/pull/1085)
- Explicit subset filter in anatomy instance data [\#1059](https://github.com/pypeclub/pype/pull/1059)
- TVPaint frame offset [\#1057](https://github.com/pypeclub/pype/pull/1057)
- Auto fix unicode strings [\#1046](https://github.com/pypeclub/pype/pull/1046)

## [2.15.2](https://github.com/pypeclub/pype/tree/2.15.2) (2021-02-19)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.1...2.15.2)

**Enhancements:**

- Maya: Vray scene publishing [\#1013](https://github.com/pypeclub/pype/pull/1013)

**Fixed bugs:**

- Fix entity move under project [\#1040](https://github.com/pypeclub/pype/pull/1040)
- smaller nuke fixes from production [\#1036](https://github.com/pypeclub/pype/pull/1036)
- TVPaint thumbnail extract fix [\#1031](https://github.com/pypeclub/pype/pull/1031)

## [2.15.1](https://github.com/pypeclub/pype/tree/2.15.1) (2021-02-12)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.0...2.15.1)

**Enhancements:**

- Delete version as loader action [\#1011](https://github.com/pypeclub/pype/pull/1011)
- Delete old versions [\#445](https://github.com/pypeclub/pype/pull/445)

**Fixed bugs:**

- PS - remove obsolete functions from pywin32 [\#1006](https://github.com/pypeclub/pype/pull/1006)
- Clone description of review session objects. [\#922](https://github.com/pypeclub/pype/pull/922)

## [2.15.0](https://github.com/pypeclub/pype/tree/2.15.0) (2021-02-09)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.6...2.15.0)

**Enhancements:**

- Resolve - loading and updating clips [\#932](https://github.com/pypeclub/pype/pull/932)
- Release/2.15.0 [\#926](https://github.com/pypeclub/pype/pull/926)
- Photoshop: add option for template.psd and prelaunch hook [\#894](https://github.com/pypeclub/pype/pull/894)
- Nuke: deadline presets [\#993](https://github.com/pypeclub/pype/pull/993)
- Maya: Alembic only set attributes that exist. [\#986](https://github.com/pypeclub/pype/pull/986)
- Harmony: render local and handle fixes [\#981](https://github.com/pypeclub/pype/pull/981)
- PSD Bulk export of ANIM group [\#965](https://github.com/pypeclub/pype/pull/965)
- AE - added prelaunch hook for opening last or workfile from template [\#944](https://github.com/pypeclub/pype/pull/944)
- PS - safer handling of loading of workfile [\#941](https://github.com/pypeclub/pype/pull/941)
- Maya: Handling Arnold referenced AOVs [\#938](https://github.com/pypeclub/pype/pull/938)
- TVPaint: switch layer IDs for layer names during identification [\#903](https://github.com/pypeclub/pype/pull/903)
- TVPaint audio/sound loader [\#893](https://github.com/pypeclub/pype/pull/893)
- Clone review session with children. [\#891](https://github.com/pypeclub/pype/pull/891)
- Simple compositing data packager for freelancers [\#884](https://github.com/pypeclub/pype/pull/884)
- Harmony deadline submission [\#881](https://github.com/pypeclub/pype/pull/881)
- Maya: Optionally hide image planes from reviews. [\#840](https://github.com/pypeclub/pype/pull/840)
- Maya: handle referenced AOVs for Vray [\#824](https://github.com/pypeclub/pype/pull/824)
- DWAA/DWAB support on windows [\#795](https://github.com/pypeclub/pype/pull/795)
- Unreal: animation, layout and setdress updates [\#695](https://github.com/pypeclub/pype/pull/695)

**Fixed bugs:**

- Maya: Looks - disable hardlinks [\#995](https://github.com/pypeclub/pype/pull/995)
- Fix Ftrack custom attribute update [\#982](https://github.com/pypeclub/pype/pull/982)
- Prores ks in burnin script [\#960](https://github.com/pypeclub/pype/pull/960)
- terminal.py crash on import [\#839](https://github.com/pypeclub/pype/pull/839)
- Extract review handle bizarre pixel aspect ratio [\#990](https://github.com/pypeclub/pype/pull/990)
- Nuke: add nuke related env var to submission [\#988](https://github.com/pypeclub/pype/pull/988)
- Nuke: missing preset's variable [\#984](https://github.com/pypeclub/pype/pull/984)
- Get creator by name fix [\#979](https://github.com/pypeclub/pype/pull/979)
- Fix update of project's tasks on Ftrack sync [\#972](https://github.com/pypeclub/pype/pull/972)
- nuke: wrong frame offset in mov loader [\#971](https://github.com/pypeclub/pype/pull/971)
- Create project structure action fix multiroot [\#967](https://github.com/pypeclub/pype/pull/967)
- PS: remove pywin installation from hook [\#964](https://github.com/pypeclub/pype/pull/964)
- Prores ks in burnin script [\#959](https://github.com/pypeclub/pype/pull/959)
- Subset family is now stored in subset document [\#956](https://github.com/pypeclub/pype/pull/956)
- DJV new version arguments [\#954](https://github.com/pypeclub/pype/pull/954)
- TV Paint: Fix single frame Sequence [\#953](https://github.com/pypeclub/pype/pull/953)
- nuke: missing `file` knob update [\#933](https://github.com/pypeclub/pype/pull/933)
- Photoshop: Create from single layer was failing [\#920](https://github.com/pypeclub/pype/pull/920)
- Nuke: baking mov with correct colorspace inherited from write [\#909](https://github.com/pypeclub/pype/pull/909)
- Launcher fix actions discover [\#896](https://github.com/pypeclub/pype/pull/896)
- Get the correct file path for the updated mov. [\#889](https://github.com/pypeclub/pype/pull/889)
- Maya: Deadline submitter - shared data access violation [\#831](https://github.com/pypeclub/pype/pull/831)
- Maya: Take into account vray master AOV switch [\#822](https://github.com/pypeclub/pype/pull/822)

**Merged pull requests:**

- Refactor blender to 3.0 format [\#934](https://github.com/pypeclub/pype/pull/934)

## [2.14.6](https://github.com/pypeclub/pype/tree/2.14.6) (2021-01-15)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.5...2.14.6)

**Fixed bugs:**

- Nuke: improving of hashing path [\#885](https://github.com/pypeclub/pype/pull/885)

**Merged pull requests:**

- Hiero: cut videos with correct seconds [\#892](https://github.com/pypeclub/pype/pull/892)
- Faster sync to avalon preparation [\#869](https://github.com/pypeclub/pype/pull/869)

## [2.14.5](https://github.com/pypeclub/pype/tree/2.14.5) (2021-01-06)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.4...2.14.5)

**Merged pull requests:**

- Pype logger refactor [\#866](https://github.com/pypeclub/pype/pull/866)

## [2.14.4](https://github.com/pypeclub/pype/tree/2.14.4) (2020-12-18)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.3...2.14.4)

**Merged pull requests:**

- Fix - AE - added explicit cast to int [\#837](https://github.com/pypeclub/pype/pull/837)

## [2.14.3](https://github.com/pypeclub/pype/tree/2.14.3) (2020-12-16)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.2...2.14.3)

**Fixed bugs:**

- TVPaint repair invalid metadata [\#809](https://github.com/pypeclub/pype/pull/809)
- Feature/push hier value to nonhier action [\#807](https://github.com/pypeclub/pype/pull/807)
- Harmony: fix palette and image sequence loader [\#806](https://github.com/pypeclub/pype/pull/806)

**Merged pull requests:**

- respecting space in path [\#823](https://github.com/pypeclub/pype/pull/823)

## [2.14.2](https://github.com/pypeclub/pype/tree/2.14.2) (2020-12-04)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.1...2.14.2)

**Enhancements:**

- Collapsible wrapper in settings [\#767](https://github.com/pypeclub/pype/pull/767)

**Fixed bugs:**

- Harmony: template extraction and palettes thumbnails on mac [\#768](https://github.com/pypeclub/pype/pull/768)
- TVPaint store context to workfile metadata \(764\) [\#766](https://github.com/pypeclub/pype/pull/766)
- Extract review audio cut fix [\#763](https://github.com/pypeclub/pype/pull/763)

**Merged pull requests:**

- AE: fix publish after background load [\#781](https://github.com/pypeclub/pype/pull/781)
- TVPaint store members key [\#769](https://github.com/pypeclub/pype/pull/769)

## [2.14.1](https://github.com/pypeclub/pype/tree/2.14.1) (2020-11-27)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.0...2.14.1)

**Enhancements:**

- Settings required keys in modifiable dict [\#770](https://github.com/pypeclub/pype/pull/770)
- Extract review may not add audio to output [\#761](https://github.com/pypeclub/pype/pull/761)

**Fixed bugs:**

- After Effects: frame range, file format and render source scene fixes [\#760](https://github.com/pypeclub/pype/pull/760)
- Hiero: trimming review with clip event number [\#754](https://github.com/pypeclub/pype/pull/754)
- TVPaint: fix updating of loaded subsets [\#752](https://github.com/pypeclub/pype/pull/752)
- Maya: Vray handling of default aov [\#748](https://github.com/pypeclub/pype/pull/748)
- Maya: multiple renderable cameras in layer didn't work [\#744](https://github.com/pypeclub/pype/pull/744)
- Ftrack integrate custom attributes fix [\#742](https://github.com/pypeclub/pype/pull/742)

## [2.14.0](https://github.com/pypeclub/pype/tree/2.14.0) (2020-11-23)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.7...2.14.0)

**Enhancements:**

- Render publish plugins abstraction [\#687](https://github.com/pypeclub/pype/pull/687)
- Shot asset build trigger status [\#736](https://github.com/pypeclub/pype/pull/736)
- Maya: add camera rig publishing option [\#721](https://github.com/pypeclub/pype/pull/721)
- Sort instances by label in pyblish gui [\#719](https://github.com/pypeclub/pype/pull/719)
- Synchronize ftrack hierarchical and shot attributes [\#716](https://github.com/pypeclub/pype/pull/716)
- 686 standalonepublisher editorial from image sequences [\#699](https://github.com/pypeclub/pype/pull/699)
- Ask user to select non-default camera from scene or create a new. [\#678](https://github.com/pypeclub/pype/pull/678)
- TVPaint: image loader with options [\#675](https://github.com/pypeclub/pype/pull/675)
- Maya: Camera name can be added to burnins. [\#674](https://github.com/pypeclub/pype/pull/674)
- After Effects: base integration with loaders [\#667](https://github.com/pypeclub/pype/pull/667)
- Harmony: Javascript refactoring and overall stability improvements [\#666](https://github.com/pypeclub/pype/pull/666)

**Fixed bugs:**

- Bugfix Hiero Review / Plate representation publish [\#743](https://github.com/pypeclub/pype/pull/743)
- Asset fetch second fix [\#726](https://github.com/pypeclub/pype/pull/726)
- TVPaint extract review fix [\#740](https://github.com/pypeclub/pype/pull/740)
- After Effects: Reviews were not being sent to ftrack [\#738](https://github.com/pypeclub/pype/pull/738)
- Maya: vray proxy was not loading [\#722](https://github.com/pypeclub/pype/pull/722)
- Maya: Vray expected file fixes [\#682](https://github.com/pypeclub/pype/pull/682)
- Missing audio on farm submission. [\#639](https://github.com/pypeclub/pype/pull/639)
[\#639](https://github.com/pypeclub/pype/pull/639) - -**Deprecated:** - -- Removed artist view from pyblish gui [\#717](https://github.com/pypeclub/pype/pull/717) -- Maya: disable legacy override check for cameras [\#715](https://github.com/pypeclub/pype/pull/715) - -**Merged pull requests:** - -- Application manager [\#728](https://github.com/pypeclub/pype/pull/728) -- Feature \#664 3.0 lib refactor [\#706](https://github.com/pypeclub/pype/pull/706) -- Lib from illicit part 2 [\#700](https://github.com/pypeclub/pype/pull/700) -- 3.0 lib refactor - path tools [\#697](https://github.com/pypeclub/pype/pull/697) - -## [2.13.7](https://github.com/pypeclub/pype/tree/2.13.7) (2020-11-19) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.6...2.13.7) - -**Fixed bugs:** - -- Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729) - -## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6) - -**Fixed bugs:** - -- Maya workfile version wasn't syncing with renders properly [\#711](https://github.com/pypeclub/pype/pull/711) -- Maya: Fix for publishing multiple cameras with review from the same scene [\#710](https://github.com/pypeclub/pype/pull/710) - -## [2.13.5](https://github.com/pypeclub/pype/tree/2.13.5) (2020-11-12) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.4...2.13.5) - -**Enhancements:** - -- 3.0 lib refactor [\#664](https://github.com/pypeclub/pype/issues/664) - -**Fixed bugs:** - -- Wrong thumbnail file was picked when publishing sequence in standalone publisher [\#703](https://github.com/pypeclub/pype/pull/703) -- Fix: Burnin data pass and FFmpeg tool check [\#701](https://github.com/pypeclub/pype/pull/701) - -## [2.13.4](https://github.com/pypeclub/pype/tree/2.13.4) (2020-11-09) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.3...2.13.4) - -**Enhancements:** - -- AfterEffects integration with Websocket [\#663](https://github.com/pypeclub/pype/issues/663) - -**Fixed bugs:** - -- Photoshop unhiding hidden layers [\#688](https://github.com/pypeclub/pype/issues/688) -- \#688 - Fix publishing hidden layers [\#692](https://github.com/pypeclub/pype/pull/692) - -**Closed issues:** - -- Nuke Favorite directories "shot dir" "project dir" - not working [\#684](https://github.com/pypeclub/pype/issues/684) - -**Merged pull requests:** - -- Nuke Favorite directories "shot dir" "project dir" - not working \#684 [\#685](https://github.com/pypeclub/pype/pull/685) - -## [2.13.3](https://github.com/pypeclub/pype/tree/2.13.3) (2020-11-03) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.2...2.13.3) - -**Enhancements:** - -- TV paint base integration [\#612](https://github.com/pypeclub/pype/issues/612) - -**Fixed bugs:** - -- Fix ffmpeg executable path with spaces [\#680](https://github.com/pypeclub/pype/pull/680) -- Hotfix: Added default version number [\#679](https://github.com/pypeclub/pype/pull/679) - -## [2.13.2](https://github.com/pypeclub/pype/tree/2.13.2) (2020-10-28) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.1...2.13.2) - -**Fixed bugs:** - -- Nuke: wrong conditions when fixing legacy write nodes [\#665](https://github.com/pypeclub/pype/pull/665) - -## [2.13.1](https://github.com/pypeclub/pype/tree/2.13.1) (2020-10-23) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.0...2.13.1) - -**Enhancements:** - -- move maya look assigner
to pype menu [\#292](https://github.com/pypeclub/pype/issues/292) - -**Fixed bugs:** - -- Layer name is not propagating to metadata in Photoshop [\#654](https://github.com/pypeclub/pype/issues/654) -- Loader in Photoshop fails with "can't set attribute" [\#650](https://github.com/pypeclub/pype/issues/650) -- Nuke Load mp4 wrong frame range [\#661](https://github.com/pypeclub/pype/issues/661) -- Hiero: Review video file adding one frame to the end [\#659](https://github.com/pypeclub/pype/issues/659) - -## [2.13.0](https://github.com/pypeclub/pype/tree/2.13.0) (2020-10-18) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.5...2.13.0) - -**Enhancements:** - -- Deadline Output Folder [\#636](https://github.com/pypeclub/pype/issues/636) -- Nuke Camera Loader [\#565](https://github.com/pypeclub/pype/issues/565) -- Deadline publish job shows publishing output folder [\#649](https://github.com/pypeclub/pype/pull/649) -- Get latest version in lib [\#642](https://github.com/pypeclub/pype/pull/642) -- Improved publishing of multiple representations from SP [\#638](https://github.com/pypeclub/pype/pull/638) -- Launch TvPaint shot work file from within Ftrack [\#631](https://github.com/pypeclub/pype/pull/631) -- Add mp4 support for RV action. [\#628](https://github.com/pypeclub/pype/pull/628) -- Maya: allow renders to have version synced with workfile [\#618](https://github.com/pypeclub/pype/pull/618) -- Renaming nukestudio host folder to hiero [\#617](https://github.com/pypeclub/pype/pull/617) -- Harmony: More efficient publishing [\#615](https://github.com/pypeclub/pype/pull/615) -- Ftrack server action improvement [\#608](https://github.com/pypeclub/pype/pull/608) -- Deadline user defaults to pype username if present [\#607](https://github.com/pypeclub/pype/pull/607) -- Standalone publisher now has icon [\#606](https://github.com/pypeclub/pype/pull/606) -- Nuke render write targeting knob improvement [\#603](https://github.com/pypeclub/pype/pull/603) -- Animated pyblish gui [\#602](https://github.com/pypeclub/pype/pull/602) -- Maya: Deadline - make use of asset dependencies optional [\#591](https://github.com/pypeclub/pype/pull/591) -- Nuke: Publishing, loading and updating alembic cameras [\#575](https://github.com/pypeclub/pype/pull/575) -- Maya: add look assigner to pype menu even if scriptsmenu is not available [\#573](https://github.com/pypeclub/pype/pull/573) -- Store task types in the database [\#572](https://github.com/pypeclub/pype/pull/572) -- Maya: Tiled EXRs to scanline EXRs render option [\#512](https://github.com/pypeclub/pype/pull/512) -- Fusion basic integration [\#452](https://github.com/pypeclub/pype/pull/452) - -**Fixed bugs:** - -- Burnin script did not propagate ffmpeg output [\#640](https://github.com/pypeclub/pype/issues/640) -- Pyblish-pype spacer in terminal wasn't transparent [\#646](https://github.com/pypeclub/pype/pull/646) -- Lib subprocess without logger [\#645](https://github.com/pypeclub/pype/pull/645) -- Nuke: prevent crash if we only have single frame in sequence [\#644](https://github.com/pypeclub/pype/pull/644) -- Burnin script logs better output [\#641](https://github.com/pypeclub/pype/pull/641) -- Missing audio on farm submission. [\#639](https://github.com/pypeclub/pype/pull/639) -- review from imagesequence error [\#633](https://github.com/pypeclub/pype/pull/633) -- Hiero: wrong order of fps clip instance data collecting [\#627](https://github.com/pypeclub/pype/pull/627) -- Add source for review instances.
[\#625](https://github.com/pypeclub/pype/pull/625) -- Task processing in event sync [\#623](https://github.com/pypeclub/pype/pull/623) -- sync to avalon doesn't remove renamed task [\#619](https://github.com/pypeclub/pype/pull/619) -- Intent publish setting wasn't working with default value [\#562](https://github.com/pypeclub/pype/pull/562) -- Maya: Updating a look where the shader name changed, leaves the geo without a shader [\#514](https://github.com/pypeclub/pype/pull/514) - -**Merged pull requests:** - -- Avalon module without Qt [\#581](https://github.com/pypeclub/pype/pull/581) -- Ftrack module without Qt [\#577](https://github.com/pypeclub/pype/pull/577) - -## [2.12.5](https://github.com/pypeclub/pype/tree/2.12.5) (2020-10-14) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.4...2.12.5) - -**Enhancements:** - -- Launch TvPaint shot work file from within Ftrack [\#629](https://github.com/pypeclub/pype/issues/629) - -**Merged pull requests:** - -- Harmony: Disable application launch logic [\#637](https://github.com/pypeclub/pype/pull/637) - -## [2.12.4](https://github.com/pypeclub/pype/tree/2.12.4) (2020-10-08) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.3...2.12.4) - -**Enhancements:** - -- convert nukestudio to hiero host [\#616](https://github.com/pypeclub/pype/issues/616) -- Fusion basic integration [\#451](https://github.com/pypeclub/pype/issues/451) - -**Fixed bugs:** - -- Sync to avalon doesn't remove renamed task [\#605](https://github.com/pypeclub/pype/issues/605) -- NukeStudio: FPS collecting into clip instances [\#624](https://github.com/pypeclub/pype/pull/624) - -**Merged pull requests:** - -- NukeStudio: small fixes [\#622](https://github.com/pypeclub/pype/pull/622) -- NukeStudio: broken order of plugins [\#620](https://github.com/pypeclub/pype/pull/620) - -## [2.12.3](https://github.com/pypeclub/pype/tree/2.12.3) (2020-10-06) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.2...2.12.3) - -**Enhancements:** - -- Nuke Publish Camera [\#567](https://github.com/pypeclub/pype/issues/567) -- Harmony: open xstage file regardless of its name [\#526](https://github.com/pypeclub/pype/issues/526) -- Stop integration of unwanted data [\#387](https://github.com/pypeclub/pype/issues/387) -- Move avalon-launcher functionality to pype [\#229](https://github.com/pypeclub/pype/issues/229) -- avalon workfiles api [\#214](https://github.com/pypeclub/pype/issues/214) -- Store task types [\#180](https://github.com/pypeclub/pype/issues/180) -- Avalon Mongo Connection split [\#136](https://github.com/pypeclub/pype/issues/136) -- nk camera workflow [\#71](https://github.com/pypeclub/pype/issues/71) -- Hiero integration added [\#590](https://github.com/pypeclub/pype/pull/590) -- Anatomy instance data collection is substantially faster for many instances [\#560](https://github.com/pypeclub/pype/pull/560) - -**Fixed bugs:** - -- test issue [\#596](https://github.com/pypeclub/pype/issues/596) -- Harmony: empty scene contamination [\#583](https://github.com/pypeclub/pype/issues/583) -- Edit publishing in SP doesn't respect shot selection for publishing [\#542](https://github.com/pypeclub/pype/issues/542) -- Pathlib breaks compatibility with python2 hosts [\#281](https://github.com/pypeclub/pype/issues/281) -- Updating a look where the shader name changed leaves the geo without a shader [\#237](https://github.com/pypeclub/pype/issues/237) -- Better error handling [\#84](https://github.com/pypeclub/pype/issues/84) -- Harmony: function signature
[\#609](https://github.com/pypeclub/pype/pull/609) -- Nuke: gizmo publishing error [\#594](https://github.com/pypeclub/pype/pull/594) -- Harmony: fix clashing namespace of called js functions [\#584](https://github.com/pypeclub/pype/pull/584) -- Maya: fix maya scene type preset exception [\#569](https://github.com/pypeclub/pype/pull/569) - -**Closed issues:** - -- Nuke Gizmo publishing [\#597](https://github.com/pypeclub/pype/issues/597) -- nuke gizmo publishing error [\#592](https://github.com/pypeclub/pype/issues/592) -- Publish EDL [\#579](https://github.com/pypeclub/pype/issues/579) -- Publish render from SP [\#576](https://github.com/pypeclub/pype/issues/576) -- rename ftrack custom attribute group to `pype` [\#184](https://github.com/pypeclub/pype/issues/184) - -**Merged pull requests:** - -- Audio file existence check [\#614](https://github.com/pypeclub/pype/pull/614) -- NKS small fixes [\#587](https://github.com/pypeclub/pype/pull/587) -- Standalone publisher editorial plugins interfering [\#580](https://github.com/pypeclub/pype/pull/580) - -## [2.12.2](https://github.com/pypeclub/pype/tree/2.12.2) (2020-09-25) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.1...2.12.2) - -**Enhancements:** - -- pype config GUI [\#241](https://github.com/pypeclub/pype/issues/241) - -**Fixed bugs:** - -- Harmony: Saving heavy scenes will crash [\#507](https://github.com/pypeclub/pype/issues/507) -- Extract review a representation name with `\*\_burnin` [\#388](https://github.com/pypeclub/pype/issues/388) -- Hierarchy data was not considering active instances [\#551](https://github.com/pypeclub/pype/pull/551) - -## [2.12.1](https://github.com/pypeclub/pype/tree/2.12.1) (2020-09-15) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.0...2.12.1) - -**Fixed bugs:** - -- Pype: changelog.md is outdated [\#503](https://github.com/pypeclub/pype/issues/503) -- dependency security alert! [\#484](https://github.com/pypeclub/pype/issues/484) -- Maya: RenderSetup is missing update [\#106](https://github.com/pypeclub/pype/issues/106) -- extract effects creates new instance [\#78](https://github.com/pypeclub/pype/issues/78) - -## [2.12.0](https://github.com/pypeclub/pype/tree/2.12.0) (2020-09-10) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.8...2.12.0) - -**Enhancements:** - -- Less mongo connections [\#509](https://github.com/pypeclub/pype/pull/509) -- Nuke: adding image loader [\#499](https://github.com/pypeclub/pype/pull/499) -- Move launcher window to top if launcher action is clicked [\#450](https://github.com/pypeclub/pype/pull/450) -- Maya: better tile rendering support in Pype [\#446](https://github.com/pypeclub/pype/pull/446) -- Implementation of non QML launcher [\#443](https://github.com/pypeclub/pype/pull/443) -- Optional skip review on renders. [\#441](https://github.com/pypeclub/pype/pull/441) -- Ftrack: Option to push status from task to latest version [\#440](https://github.com/pypeclub/pype/pull/440) -- Properly containerize image plane loads. [\#434](https://github.com/pypeclub/pype/pull/434) -- Option to keep the review files. [\#426](https://github.com/pypeclub/pype/pull/426) -- Isolate view on instance members.
[\#425](https://github.com/pypeclub/pype/pull/425) -- Maya: Publishing of tile renderings on Deadline [\#398](https://github.com/pypeclub/pype/pull/398) -- Feature/little bit better logging gui [\#383](https://github.com/pypeclub/pype/pull/383) - -**Fixed bugs:** - -- Maya: Fix tile order for Draft Tile Assembler [\#511](https://github.com/pypeclub/pype/pull/511) -- Remove extra dash [\#501](https://github.com/pypeclub/pype/pull/501) -- Fix: strip dot from repre names in single frame renders [\#498](https://github.com/pypeclub/pype/pull/498) -- Better handling of destination during integrating [\#485](https://github.com/pypeclub/pype/pull/485) -- Fix: allow thumbnail creation for single frame renders [\#460](https://github.com/pypeclub/pype/pull/460) -- added missing argument to launch\_application in ftrack app handler [\#453](https://github.com/pypeclub/pype/pull/453) -- Burnins: Copy bit rate of input video to match quality. [\#448](https://github.com/pypeclub/pype/pull/448) -- Standalone publisher is now independent from tray [\#442](https://github.com/pypeclub/pype/pull/442) -- Bugfix/empty enumerator attributes [\#436](https://github.com/pypeclub/pype/pull/436) -- Fixed wrong order of "other" category collapsing in publisher [\#435](https://github.com/pypeclub/pype/pull/435) -- Multiple reviews were being overwritten to one. [\#424](https://github.com/pypeclub/pype/pull/424) -- Cleanup plugin fail on instances without staging dir [\#420](https://github.com/pypeclub/pype/pull/420) -- deprecated -intra parameter in ffmpeg to new `-g` [\#417](https://github.com/pypeclub/pype/pull/417) -- Delivery action can now work with entered path [\#397](https://github.com/pypeclub/pype/pull/397) - -**Merged pull requests:** - -- Review on instance.data [\#473](https://github.com/pypeclub/pype/pull/473) - -## [2.11.8](https://github.com/pypeclub/pype/tree/2.11.8) (2020-08-27) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.7...2.11.8) - -**Enhancements:** - -- DWAA support for Maya [\#382](https://github.com/pypeclub/pype/issues/382) -- Isolate View on Playblast [\#367](https://github.com/pypeclub/pype/issues/367) -- Maya: Tile rendering [\#297](https://github.com/pypeclub/pype/issues/297) -- single pype instance running [\#47](https://github.com/pypeclub/pype/issues/47) -- PYPE-649: projects don't guarantee backwards compatible environment [\#8](https://github.com/pypeclub/pype/issues/8) -- PYPE-663: separate venv for each deployed version [\#7](https://github.com/pypeclub/pype/issues/7) - -**Fixed bugs:** - -- pyblish pype - other group is collapsed before plugins are done [\#431](https://github.com/pypeclub/pype/issues/431) -- Alpha white edges in harmony on PNGs [\#412](https://github.com/pypeclub/pype/issues/412) -- harmony image loader picks wrong representations [\#404](https://github.com/pypeclub/pype/issues/404) -- Clockify crash when response contains symbol not allowed by UTF-8 [\#81](https://github.com/pypeclub/pype/issues/81) - -## [2.11.7](https://github.com/pypeclub/pype/tree/2.11.7) (2020-08-21) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.6...2.11.7) - -**Fixed bugs:** - -- Clean Up Baked Movie [\#369](https://github.com/pypeclub/pype/issues/369) -- celaction last workfile [\#459](https://github.com/pypeclub/pype/pull/459) - -## [2.11.6](https://github.com/pypeclub/pype/tree/2.11.6) (2020-08-18) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.5...2.11.6) - -**Enhancements:** - -- publisher app
[\#56](https://github.com/pypeclub/pype/issues/56) - -## [2.11.5](https://github.com/pypeclub/pype/tree/2.11.5) (2020-08-13) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.4...2.11.5) - -**Enhancements:** - -- Switch from master to equivalent [\#220](https://github.com/pypeclub/pype/issues/220) -- Standalone publisher now only groups sequence if the extension is known [\#439](https://github.com/pypeclub/pype/pull/439) - -**Fixed bugs:** - -- Logs have been disabled for editorial by default to speed up publishing [\#433](https://github.com/pypeclub/pype/pull/433) -- additional fixes for celaction [\#430](https://github.com/pypeclub/pype/pull/430) -- Harmony: invalid variable scope in validate scene settings [\#428](https://github.com/pypeclub/pype/pull/428) -- new representation name for audio was not accepted [\#427](https://github.com/pypeclub/pype/pull/427) - -## [2.11.4](https://github.com/pypeclub/pype/tree/2.11.4) (2020-08-10) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.3...2.11.4) - -**Enhancements:** - -- WebSocket server [\#135](https://github.com/pypeclub/pype/issues/135) -- standalonepublisher: editorial family features expansion \[master branch\] [\#411](https://github.com/pypeclub/pype/pull/411) - -## [2.11.3](https://github.com/pypeclub/pype/tree/2.11.3) (2020-08-04) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.2...2.11.3) - -**Fixed bugs:** - -- Harmony: publishing performance issues [\#408](https://github.com/pypeclub/pype/pull/408) - -## [2.11.2](https://github.com/pypeclub/pype/tree/2.11.2) (2020-07-31) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.1...2.11.2) - -**Fixed bugs:** - -- Ftrack to Avalon bug [\#406](https://github.com/pypeclub/pype/issues/406) - -## [2.11.1](https://github.com/pypeclub/pype/tree/2.11.1) (2020-07-29) - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.0...2.11.1) - -**Merged pull requests:** - -- Celaction: metadata json folder fixes on path [\#393](https://github.com/pypeclub/pype/pull/393) -- CelAction - version up method taken from pype.lib [\#391](https://github.com/pypeclub/pype/pull/391) - - -## 2.11.0 ## - -_**release date:** 27 July 2020_ - -**new:** -- _(blender)_ namespace support [\#341](https://github.com/pypeclub/pype/pull/341) -- _(blender)_ start end frames [\#330](https://github.com/pypeclub/pype/pull/330) -- _(blender)_ camera asset [\#322](https://github.com/pypeclub/pype/pull/322) -- _(pype)_ toggle instances per family in pyblish GUI [\#320](https://github.com/pypeclub/pype/pull/320) -- _(pype)_ current release version is now shown in the tray menu [#379](https://github.com/pypeclub/pype/pull/379) - - -**improved:** -- _(resolve)_ tagging for publish [\#239](https://github.com/pypeclub/pype/issues/239) -- _(pype)_ Support publishing a subset of shots with standalone editorial [\#336](https://github.com/pypeclub/pype/pull/336) -- _(harmony)_ Basic support for palettes [\#324](https://github.com/pypeclub/pype/pull/324) -- _(photoshop)_ Flag outdated containers on startup and publish.
[\#309](https://github.com/pypeclub/pype/pull/309) -- _(harmony)_ Flag Outdated containers [\#302](https://github.com/pypeclub/pype/pull/302) -- _(photoshop)_ Publish review [\#298](https://github.com/pypeclub/pype/pull/298) -- _(pype)_ Optional Last workfile launch [\#365](https://github.com/pypeclub/pype/pull/365) - - -**fixed:** -- _(premiere)_ workflow fixes [\#346](https://github.com/pypeclub/pype/pull/346) -- _(pype)_ pype-setup does not work with space in path [\#327](https://github.com/pypeclub/pype/issues/327) -- _(ftrack)_ Ftrack delete action causes circular error [\#206](https://github.com/pypeclub/pype/issues/206) -- _(nuke)_ Priority was forced to 50 [\#345](https://github.com/pypeclub/pype/pull/345) -- _(nuke)_ Fix ValidateNukeWriteKnobs [\#340](https://github.com/pypeclub/pype/pull/340) -- _(maya)_ If camera attributes are connected, we can ignore them. [\#339](https://github.com/pypeclub/pype/pull/339) -- _(pype)_ stop appending of tools environment to existing env [\#337](https://github.com/pypeclub/pype/pull/337) -- _(ftrack)_ Ftrack timeout needs to look at AVALON\_TIMEOUT [\#325](https://github.com/pypeclub/pype/pull/325) -- _(harmony)_ Only zip files are supported. [\#310](https://github.com/pypeclub/pype/pull/310) -- _(pype)_ hotfix/Fix event server mongo uri [\#305](https://github.com/pypeclub/pype/pull/305) -- _(photoshop)_ Subset was not named or validated correctly. [\#304](https://github.com/pypeclub/pype/pull/304) - - - - -## 2.10.0 ## - -_**release date:** 17 June 2020_ - -**new:** -- _(harmony)_ **Toon Boom Harmony** has been greatly extended to support rigging, scene build, animation and rendering workflows. [#270](https://github.com/pypeclub/pype/issues/270) [#271](https://github.com/pypeclub/pype/issues/271) [#190](https://github.com/pypeclub/pype/issues/190) [#191](https://github.com/pypeclub/pype/issues/191) [#172](https://github.com/pypeclub/pype/issues/172) [#168](https://github.com/pypeclub/pype/issues/168) -- _(pype)_ Added support for rudimentary **edl publishing** into individual shots. [#265](https://github.com/pypeclub/pype/issues/265) -- _(celaction)_ Simple **Celaction** integration has been added with support for workfiles and rendering. [#255](https://github.com/pypeclub/pype/issues/255) -- _(maya)_ Support for multiple job types when submitting to the farm. We can now render Maya or Standalone render jobs for Vray and Arnold (limited support for arnold) [#204](https://github.com/pypeclub/pype/issues/204) -- _(photoshop)_ Added initial support for Photoshop [#232](https://github.com/pypeclub/pype/issues/232) - -**improved:** -- _(blender)_ Updated support for rigs and added support for Layout family [#233](https://github.com/pypeclub/pype/issues/233) [#226](https://github.com/pypeclub/pype/issues/226) -- _(premiere)_ It is now possible to choose different storage root for workfiles of different task types. [#255](https://github.com/pypeclub/pype/issues/255) -- _(maya)_ Support for unmerged AOVs in Redshift multipart EXRs [#197](https://github.com/pypeclub/pype/issues/197) -- _(pype)_ Pype repository has been refactored in preparation for 3.0 release [#169](https://github.com/pypeclub/pype/issues/169) -- _(deadline)_ All file dependencies are now passed to deadline from maya to prevent premature start of rendering if caches or textures haven't been copied over yet. [#195](https://github.com/pypeclub/pype/issues/195) -- _(nuke)_ Script validation can now be made optional.
[#194](https://github.com/pypeclub/pype/issues/194) -- _(pype)_ Publishing can now be stopped at any time. [#194](https://github.com/pypeclub/pype/issues/194) - -**fix:** -- _(pype)_ Pyblish-lite has been integrated into pype repository, plus various publishing GUI fixes. [#274](https://github.com/pypeclub/pype/issues/274) [#275](https://github.com/pypeclub/pype/issues/275) [#268](https://github.com/pypeclub/pype/issues/268) [#227](https://github.com/pypeclub/pype/issues/227) [#238](https://github.com/pypeclub/pype/issues/238) -- _(maya)_ Alembic extractor was getting wrong frame range type in certain scenarios [#254](https://github.com/pypeclub/pype/issues/254) -- _(maya)_ Attaching a render to subset in maya was not passing validation in certain scenarios [#256](https://github.com/pypeclub/pype/issues/256) -- _(ftrack)_ Various small fixes to ftrack sync [#263](https://github.com/pypeclub/pype/issues/263) [#259](https://github.com/pypeclub/pype/issues/259) -- _(maya)_ Look extraction is now able to skip invalid connections in shaders [#207](https://github.com/pypeclub/pype/issues/207) - - - - -## 2.9.0 ## - -_**release date:** 25 May 2020_ - -**new:** -- _(pype)_ Support for **Multiroot projects**. You can now store project data on multiple physical or virtual storages and target individual publishes to these locations. For instance render can be stored on a faster storage than the rest of the project. [#145](https://github.com/pypeclub/pype/issues/145), [#38](https://github.com/pypeclub/pype/issues/38) -- _(harmony)_ Basic implementation of **Toon Boom Harmony** has been added. [#142](https://github.com/pypeclub/pype/issues/142) -- _(pype)_ OSX support is in public beta now. There are issues to be expected, but the main implementation should be functional. [#141](https://github.com/pypeclub/pype/issues/141) - - -**improved:** - -- _(pype)_ **Review extractor** has been completely rebuilt. It now supports granular filtering so you can create **multiple outputs** for different tasks, families or hosts. [#103](https://github.com/pypeclub/pype/issues/103), [#166](https://github.com/pypeclub/pype/issues/166), [#165](https://github.com/pypeclub/pype/issues/165) -- _(pype)_ **Burnin** generation has been extended to **support same multi-output filtering** as review extractor [#103](https://github.com/pypeclub/pype/issues/103) -- _(pype)_ Publishing file templates can now be specified in config for each individual family [#114](https://github.com/pypeclub/pype/issues/114) -- _(pype)_ Studio specific plugins can now be appended to pype standard publishing plugins. [#112](https://github.com/pypeclub/pype/issues/112) -- _(nukestudio)_ Reviewable clips no longer need to be previously cut, exported and re-imported to timeline. **Pype can now dynamically cut reviewable quicktimes** from continuous offline footage during publishing. [#23](https://github.com/pypeclub/pype/issues/23) -- _(deadline)_ Deadline can now correctly differentiate between staging and production pype. [#154](https://github.com/pypeclub/pype/issues/154) -- _(deadline)_ `PYPE_PYTHON_EXE` env variable can now be used to direct publishing to an explicit python installation. [#120](https://github.com/pypeclub/pype/issues/120) -- _(nuke)_ Nuke now checks for a new version of loaded data on file open. [#140](https://github.com/pypeclub/pype/issues/140) -- _(nuke)_ frame range and limit checkboxes are now exposed on write node.
[#119](https://github.com/pypeclub/pype/issues/119) - - - -**fix:** - -- _(nukestudio)_ Project Location was using backslashes which was breaking nukestudio native exporting in certain configurations [#82](https://github.com/pypeclub/pype/issues/82) -- _(nukestudio)_ Duplicate hierarchy tags were prone to throwing publishing errors [#130](https://github.com/pypeclub/pype/issues/130), [#144](https://github.com/pypeclub/pype/issues/144) -- _(ftrack)_ multiple stability improvements [#157](https://github.com/pypeclub/pype/issues/157), [#159](https://github.com/pypeclub/pype/issues/159), [#128](https://github.com/pypeclub/pype/issues/128), [#118](https://github.com/pypeclub/pype/issues/118), [#127](https://github.com/pypeclub/pype/issues/127) -- _(deadline)_ multipart EXRs were stopping review publishing on the farm. They are still not supported for automatic review generation, but the publish will go through correctly without the quicktime. [#155](https://github.com/pypeclub/pype/issues/155) -- _(deadline)_ If deadline is non-responsive it will no longer freeze the host when publishing [#149](https://github.com/pypeclub/pype/issues/149) -- _(deadline)_ Sometimes deadline was trying to launch render before all the source data was copied over. [#137](https://github.com/pypeclub/pype/issues/137) -- _(nuke)_ Filepath knob wasn't updated properly. [#131](https://github.com/pypeclub/pype/issues/131) -- _(maya)_ When extracting animation, the "Write Color Set" options on the instance were not respected. [#108](https://github.com/pypeclub/pype/issues/108) -- _(maya)_ Attribute overrides for AOV only worked for the legacy render layers. Now it works for new render setup as well [#132](https://github.com/pypeclub/pype/issues/132) -- _(maya)_ Stability and usability improvements in yeti workflow [#104](https://github.com/pypeclub/pype/issues/104) - - - - -## 2.8.0 ## - -_**release date:** 20 April 2020_ - -**new:** - -- _(pype)_ Option to generate slates from json templates. [PYPE-628] [#26](https://github.com/pypeclub/pype/issues/26) -- _(pype)_ It is now possible to automate loading of published subsets into any scene. Documentation will follow :). [PYPE-611] [#24](https://github.com/pypeclub/pype/issues/24) - -**fix:** - -- _(maya)_ Some Redshift render tokens could break publishing. [PYPE-778] [#33](https://github.com/pypeclub/pype/issues/33) -- _(maya)_ Publish was not preserving maya file extension. [#39](https://github.com/pypeclub/pype/issues/39) -- _(maya)_ Rig output validator was failing on nodes without shapes. [#40](https://github.com/pypeclub/pype/issues/40) -- _(maya)_ Yeti caches can now be properly versioned up in the scene inventory. [#40](https://github.com/pypeclub/pype/issues/40) -- _(nuke)_ Build first workfiles was not accepting jpeg sequences. [#34](https://github.com/pypeclub/pype/issues/34) -- _(deadline)_ Trying to generate ffmpeg review from multipart EXRs no longer crashes publishing. [PYPE-781] -- _(deadline)_ Render publishing is more stable in multiplatform environments. [PYPE-775] - - - - -## 2.7.0 ## - -_**release date:** 30 March 2020_ - -**new:** - -- _(maya)_ Artist can now choose to load multiple references of the same subset at once [PYPE-646, PYPS-81] -- _(nuke)_ Option to use named OCIO colorspaces for review colour baking. [PYPS-82] -- _(pype)_ Pype can now work with `master` versions for publishing and loading.
These are non-versioned publishes that are overwritten with the latest version during publish. These are now supported in all the GUIs, but their publishing is deactivated by default. [PYPE-653] -- _(blender)_ Added support for basic blender workflow. We currently support `rig`, `model` and `animation` families. [PYPE-768] -- _(pype)_ Source timecode can now be used in burn-ins. [PYPE-777] -- _(pype)_ Review outputs profiles can now specify delivery resolution different than project setting [PYPE-759] -- _(nuke)_ Bookmark to current context is now added automatically to all nuke browser windows. [PYPE-712] - -**change:** - -- _(maya)_ It is now possible to publish camera without baking. Keep in mind that unbaked cameras can't be guaranteed to work in other hosts. [PYPE-595] -- _(maya)_ All the renders from maya are now grouped in the loader by their Layer name. [PYPE-482] -- _(nuke/hiero)_ Any publishes from nuke and hiero can now be versioned independently of the workfile. [PYPE-728] - - -**fix:** - -- _(nuke)_ Mixed slashes caused issues in ocio config path. -- _(pype)_ Intent field in pyblish GUI was passing label instead of value to ftrack. [PYPE-733] -- _(nuke)_ Publishing of pre-renders was inconsistent. [PYPE-766] -- _(maya)_ Handles and frame ranges were inconsistent in various places during publishing. -- _(nuke)_ Nuke was crashing if it ran into certain missing knobs. For example DPX output missing `autocrop` [PYPE-774] -- _(deadline)_ Project overrides were not working properly with farm render publishing. -- _(hiero)_ Problems with single frame plates publishing. -- _(maya)_ Redshift RenderPass tokens were breaking render publishing. [PYPE-778] -- _(nuke)_ Build first workfile was not accepting jpeg sequences. -- _(maya)_ Multipart (Multilayer) EXRs were breaking review publishing due to FFMPEG incompatibility [PYPE-781] - - - -## 2.6.0 ## - -_**release date:** 9 March 2020_ - -**change:** -- _(maya)_ render publishing has been simplified and made more robust. Render setup layers are now automatically added to publishing subsets and the `render globals` family has been replaced with simple `render` [PYPE-570] -- _(avalon)_ change context and workfiles apps have been merged into one that allows both actions to be performed at the same time. [PYPE-747] -- _(pype)_ thumbnails are now automatically propagated to the asset from the last published subset in the loader -- _(ftrack)_ publishing comment and intent are now being published to ftrack note as well as description. [PYPE-727] -- _(pype)_ when overriding an existing version, the old representations are now overridden instead of the new ones just being appended. (to allow this behaviour, the version validator needs to be disabled. [PYPE-690]) -- _(pype)_ burnin preset has been significantly simplified. It now doesn't require passing a function to each field, but only needs the actual text template. To use this, all the current burnin PRESETS MUST BE UPDATED for all the projects. -- _(ftrack)_ credentials are now stored on a per server basis, so it's possible to switch between ftrack servers without having to log in and out. [PYPE-723] - - -**new:** -- _(pype)_ production and development deployments now have different colour of the tray icon. Orange for Dev and Green for production [PYPE-718] -- _(maya)_ renders can now be attached to a publishable subset rather than creating their own subset.
For example it is possible to create a reviewable `look` or `model` render and have it correctly attached as a representation of the subsets [PYPE-451] -- _(maya)_ after saving current scene into a new context (as a new shot for instance), all the scene publishing subsets data gets re-generated automatically to match the new context [PYPE-532] -- _(pype)_ we now support project specific publish, load and create plugins [PYPE-740] -- _(ftrack)_ new action that allows archiving/deleting old published versions. User can choose how many of the latest versions to keep when the action is run. [PYPE-748, PYPE-715] -- _(ftrack)_ it is now possible to monitor and restart ftrack event server using ftrack action. [PYPE-658] -- _(pype)_ validator that prevents accidental overwrites of previously published versions. [PYPE-680] -- _(avalon)_ avalon core updated to version 5.6.0 -- _(maya)_ added validator to make sure that relative paths are used when publishing arnold standins. -- _(nukestudio)_ it is now possible to extract and publish audio family from clip in nuke studio [PYPE-682] - -**fix**: -- _(maya)_ maya set framerange button was ignoring handles [PYPE-719] -- _(ftrack)_ sync to avalon was sometimes crashing when run on an empty project -- _(nukestudio)_ publishing same shots after they've been previously archived/deleted would result in a crash. [PYPE-737] -- _(nuke)_ slate workflow was breaking in certain scenarios. [PYPE-730] -- _(pype)_ rendering publish workflow has been significantly improved to prevent errors resulting from implicit render collection. [PYPE-665, PYPE-746] -- _(pype)_ launching application on a non-synced project resulted in an obscure error [PYPE-528] -- _(pype)_ missing keys in burnins no longer result in an error. [PYPE-706] -- _(ftrack)_ create folder structure action was sometimes failing for project managers due to wrong permissions. -- _(Nukestudio)_ using `source` in the start frame tag could result in wrong frame range calculation -- _(ftrack)_ sync to avalon action and event have been improved by catching more edge cases and processing them properly. - - - -## 2.5.0 ## - -_**release date:** 11 Feb 2020_ - -**change:** -- _(pype)_ added many logs for easier debugging -- _(pype)_ review presets can now be separated between 2d and 3d renders [PYPE-693] -- _(pype)_ anatomy module has been greatly improved to allow for more dynamic publishing and faster debugging [PYPE-685] -- _(pype)_ avalon schemas have been moved from `pype-config` to `pype` repository, for simplification. [PYPE-670] -- _(ftrack)_ updated to latest ftrack API -- _(ftrack)_ publishing comments now appear in ftrack also as a note on version with customisable category [PYPE-645] -- _(ftrack)_ delete asset/subset action had been improved. It is now able to remove multiple entities and descendants of the selected entities [PYPE-361, PYPS-72] -- _(workfiles)_ added date field to workfiles app [PYPE-603] -- _(maya)_ old deprecated loaders have been removed in favour of a single unified reference loader (old scenes will upgrade automatically to the new loader upon opening) [PYPE-633, PYPE-697] -- _(avalon)_ core updated to 5.5.15 [PYPE-671] -- _(nuke)_ library loader is now available in nuke [PYPE-698] - - -**new:** -- _(pype)_ added pype render wrapper to allow rendering on mixed platform farms. [PYPE-634] -- _(pype)_ added `pype launch` command. It lets admins run applications with dynamically built environment based on the given context.
[PYPE-634] -- _(pype)_ added support for extracting review sequences with burnins [PYPE-657] -- _(publish)_ users can now set intent next to a comment when publishing. This will then be reflected on an attribute in ftrack. [PYPE-632] -- _(burnin)_ timecode can now be added to burnin -- _(burnin)_ datetime keys can now be added to burnin and anatomy [PYPE-651] -- _(burnin)_ anatomy templates can now be used in burnins. [PYPE-626] -- _(nuke)_ new validator for render resolution -- _(nuke)_ support for attaching slate to nuke renders [PYPE-630] -- _(nuke)_ png sequences were added to loaders -- _(maya)_ added maya 2020 compatibility [PYPE-677] -- _(maya)_ ability to publish and load .ASS standin sequences [PYPS-54] -- _(pype)_ thumbnails can now be published and are visible in the loader. `AVALON_THUMBNAIL_ROOT` environment variable needs to be set for this to work [PYPE-573, PYPE-132] -- _(blender)_ base implementation of blender was added with publishing and loading of .blend files [PYPE-612] -- _(ftrack)_ new action for preparing deliveries [PYPE-639] - - -**fix**: -- _(burnin)_ more robust way of finding ffmpeg for burnins. -- _(pype)_ improved UNC paths remapping when sending to farm. -- _(pype)_ float frames sometimes made their way to representation context in database, breaking loaders [PYPE-668] -- _(pype)_ `pype install --force` was failing sometimes [PYPE-600] -- _(pype)_ padding in published files got calculated wrongly sometimes. It is now always read from project anatomy instead. [PYPE-667] -- _(publish)_ comment publishing was failing in certain situations -- _(ftrack)_ multiple edge case scenario fixes in auto sync and sync-to-avalon action -- _(ftrack)_ sync to avalon now works on empty projects -- _(ftrack)_ thumbnail update event was failing when deleting entities [PYPE-561] -- _(nuke)_ loader applies proper colorspaces from Presets -- _(nuke)_ publishing handles didn't always work correctly [PYPE-686] -- _(maya)_ assembly publishing and loading weren't working correctly - - - - - -## 2.4.0 ## - -_**release date:** 9 Dec 2019_ - -**change:** -- _(ftrack)_ version to status ftrack event can now be configured from Presets - - based on preset `presets/ftrack/ftrack_config.json["status_version_to_task"]` -- _(ftrack)_ sync to avalon event has been completely re-written. It now supports most of the project management situations on ftrack including moving, renaming and deleting entities, updating attributes and working with tasks. -- _(ftrack)_ sync to avalon action has also been re-written. It is now much faster (up to 100 times depending on a project structure), has much better logging and reporting on encountered problems, and is able to handle much more complex situations. -- _(ftrack)_ sync to avalon is triggered by checking the `auto-sync` toggle on ftrack [PYPE-504] -- _(pype)_ various new features in the REST api -- _(pype)_ new visual identity used across pype -- _(pype)_ started moving all requirements to pip installation rather than vendorising them in pype repository. Due to a few yet unreleased packages, this means that pype can temporarily be only installed in the offline mode. - -**new:** -- _(nuke)_ support for publishing gizmos and loading them as viewer processes -- _(nuke)_ support for publishing nuke nodes from backdrops and loading them back -- _(pype)_ burnins can now work with start and end frames as keys - - use keys `{frame_start}`, `{frame_end}` and `{current_frame}` in burnin preset to use them.
[PYPS-44,PYPS-73, PYPE-602] -- _(pype)_ option to filter logs by user and level in logging GUI -- _(pype)_ image family added to standalone publisher [PYPE-574] -- _(pype)_ matchmove family added to standalone publisher [PYPE-574] -- _(nuke)_ validator for comparing arbitrary knobs with values from presets -- _(maya)_ option to force maya to copy textures in the new look publish rather than hardlinking them -- _(pype)_ comments from pyblish GUI are now being added to ftrack version -- _(maya)_ validator for checking outdated containers in the scene -- _(maya)_ option to publish and load arnold standin sequence [PYPE-579, PYPS-54] - -**fix**: -- _(pype)_ burnins were not respecting codec of the input video -- _(nuke)_ lots of various nuke and nuke studio fixes across the board [PYPS-45] -- _(pype)_ workfiles app is not launching with the start of the app by default [PYPE-569] -- _(ftrack)_ ftrack integration during publishing was failing under certain situations [PYPS-66] -- _(pype)_ minor fixes in REST api -- _(ftrack)_ status change event was crashing when the target status was missing [PYPS-68] -- _(ftrack)_ actions will try to reconnect if they fail for some reason -- _(maya)_ problems with fps mapping when using float FPS values -- _(deadline)_ overall improvements to deadline publishing -- _(setup)_ environment variables are now remapped on the fly based on the platform pype is running on. This fixes many issues in mixed platform environments. - - - -## 2.3.6 ## - -_**release date:** 27 Nov 2019_ - -**hotfix**: -- _(ftrack)_ was hiding important debug log -- _(nuke)_ crashes during workfile publishing -- _(ftrack)_ event server crashes because of signal problems -- _(muster)_ problems with muster render submissions -- _(ftrack)_ thumbnail update event syntax errors - - -## 2.3.0 ## -_release date: 6 Oct 2019_ - -**new**: -- _(maya)_ support for yeti rigs and yeti caches -- _(maya)_ validator for comparing arbitrary attributes against ftrack -- _(pype)_ burnins can now show current date and time -- _(muster)_ pools can now be set in render globals in maya -- _(pype)_ Rest API has been implemented in beta stage -- _(nuke)_ LUT loader has been added -- _(pype)_ rudimentary user module has been added as preparation for user management -- _(pype)_ a simple logging GUI has been added to pype tray -- _(nuke)_ nuke can now bake input process into mov -- _(maya)_ imported models now have selection handle displayed by default -- _(avalon)_ it is now possible to load multiple assets at once using loader -- _(maya)_ added ability to automatically connect yeti rig to a mesh upon loading - -**changed**: -- _(ftrack)_ event server now runs two parallel processes and is able to keep a queue of events to process.
-_(nuke)_ task name is now added to all rendered subsets -- _(pype)_ adding more families to standalone publisher -- _(pype)_ standalone publisher now uses pyblish-lite -- _(pype)_ standalone publisher can now create review quicktimes -- _(ftrack)_ queries to ftrack were sped up -- _(ftrack)_ multiple ftrack actions have been deprecated -- _(avalon)_ avalon upstream has been updated to 5.5.0 -- _(nukestudio)_ published transforms can now be animated - -**fix**: -- _(maya)_ fps popup button didn't work in some cases -- _(maya)_ geometry instances and references in maya were losing shader assignments -- _(muster)_ muster rendering templates were not working correctly -- _(maya)_ arnold tx texture conversion wasn't respecting colorspace set by the artist -- _(pype)_ problems with avalon db sync -- _(maya)_ ftrack was rounding FPS making it inconsistent -- _(pype)_ wrong icon names in Creator -- _(maya)_ scene inventory wasn't showing anything if representation was removed from database after it had been loaded to the scene -- _(nukestudio)_ multiple bugs squashed -- _(loader)_ loader was taking a long time to show all the loading actions when first launched in maya - -## 2.2.0 ## -_release date: 8 Sept 2019_ - -**new**: -- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts -- _(nuke)_ option to choose deadline chunk size on write nodes -- _(nukestudio)_ added option to publish soft effects (subTrackItems) from NukeStudio as subsets including LUT files. These can then be loaded in nuke or NukeStudio -- _(nuke)_ option to build nuke script from previously published latest versions of plate and render subsets. -- _(nuke)_ nuke writes now have deadline tab. -- _(ftrack)_ Prepare Project action can now be used for creating the base folder structure on disk and in ftrack, setting up all the initial project attributes and it automatically prepares `pype_project_config` folder for the given project. -- _(clockify)_ Added support for time tracking in clockify. This is currently in addition to ftrack time logs, but does not completely replace them. -- _(pype)_ any attributes in Creator and Loader plugins can now be customised using pype preset system - -**changed**: -- nukestudio now uses workio API for workfiles -- _(maya)_ "FIX FPS" prompt in maya now appears in the middle of the screen -- _(muster)_ can now be configured with custom templates -- _(pype)_ global publishing plugins can now be configured using presets as well as host specific ones - - -**fix**: -- wrong version retrieval from path in certain scenarios -- nuke reset resolution wasn't working in certain scenarios - -## 2.1.0 ## -_release date: 6 Aug 2019_ - -A large cleanup release. Most of the changes are under the hood. - -**new**: -- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts -- _(pype)_ Added configurable option to add burnins to any generated quicktimes -- _(ftrack)_ Action that identifies what machines pype is running on. -- _(system)_ unify subprocess calls -- _(maya)_ add audio to review quicktimes -- _(nuke)_ add crop before write node to prevent overscan problems in ffmpeg -- **Nuke Studio** publishing and workfiles support -- **Muster** render manager support -- _(nuke)_ Framerange, FPS and Resolution are set automatically at startup -- _(maya)_ Ability to load published sequences as image planes -- _(system)_ Ftrack event that sets asset folder permissions based on task assignees in ftrack.
-_(maya)_ Pyblish plugin that allows validation of maya attributes -- _(system)_ added better startup logging to tray debug, including basic connection information -- _(avalon)_ option to group published subsets into groups in the loader -- _(avalon)_ loader family filters are working now - -**changed**: -- change multiple key attributes to unify their behaviour across the pipeline - - `frameRate` to `fps` - - `startFrame` to `frameStart` - - `endFrame` to `frameEnd` - - `fstart` to `frameStart` - - `fend` to `frameEnd` - - `handle_start` to `handleStart` - - `handle_end` to `handleEnd` - - `resolution_width` to `resolutionWidth` - - `resolution_height` to `resolutionHeight` - - `pixel_aspect` to `pixelAspect` - -- _(nuke)_ write nodes are now created inside a group with only some attributes editable by the artist -- rendered frames are now deleted from the temporary location after their publishing is finished. -- _(ftrack)_ RV action can now be launched from any entity -- after publishing only the refresh button is now available in pyblish UI -- added context instance to pyblish-lite so that the artist knows if a context plugin fails -- _(avalon)_ allow opening selected files using the enter key -- _(avalon)_ core updated to v5.2.9 with our forked changes on top - -**fix**: -- faster hierarchy retrieval from db -- _(nuke)_ A lot of stability enhancements -- _(nuke studio)_ A lot of stability enhancements -- _(nuke)_ now only renders a single write node on farm -- _(ftrack)_ pype would crash when launching a project level task -- work directory was sometimes not being created correctly -- major pype.lib cleanup. Removal of unused functions, merging of those that were doing the same thing and general house cleaning. -- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/README.md b/README.md index a79b9f2582..e2aa98bb16 100644 --- a/README.md +++ b/README.md @@ -1,359 +1,20 @@ - -[![All Contributors](https://img.shields.io/badge/all_contributors-28-orange.svg?style=flat-square)](#contributors-) - -OpenPype +AYON Core addon ======== -[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2022-lightgrey?labelColor=303846) +AYON core provides the base building blocks for all other AYON addons and integrations and is responsible for discovery and initialization of other addons. -## Important Notice! +Some of its key functions include: +- It is used as the main command line handler in the [ayon-launcher](https://github.com/ynput/ayon-launcher) application. +- Provides publishing plugins that are available to all AYON integrations. +- Defines the base classes for new pipeline integrations +- Provides global hooks +- Provides universally available loaders and launcher actions +- Defines the pipeline API used by other integrations +- Provides all graphical tools for artists +- Defines AYON QT styling +- A bunch more things -OpenPype as a standalone product has reached the end of its life and this repository is now used as the pipeline core code for [AYON](https://ynput.io/ayon/).
You can read more details about the end of life process here https://community.ynput.io/t/openpype-end-of-life-timeline/877 +Together with [ayon-launcher](https://github.com/ynput/ayon-launcher), they form the base of the AYON pipeline, and ayon-core is one of the few compulsory addons needed for AYON to be useful in a meaningful way. - - -Introduction ------------- - -Open-source pipeline for visual effects and animation built on top of the [Avalon](https://getavalon.github.io/) framework, expanding it with extra features and integrations. OpenPype connects your DCCs, asset database, project management and time tracking into a single system. It has a tight integration with [ftrack](https://www.ftrack.com/en/), but can also run independently or be integrated into a different project management solution. - -OpenPype provides a robust platform for your studio, without the worry of vendor lock-in. You will always have full access to the source-code and your project database will run locally or in the cloud of your choice. - - -To get all the information about the project, go to [OpenPype.io](http://openpype.io) - -Requirements ------------- - -We aim to closely follow the [**VFX Reference Platform**](https://vfxplatform.com/). - -OpenPype is written in Python 3 with specific elements still running in Python2 until all DCCs are fully updated. To see the list of those that are not quite there yet, go to [VFX Python3 tracker](https://vfxpy.com/) - -The main things you will need to run and build OpenPype are: - -- **Terminal** in your OS - - PowerShell 5.0+ (Windows) - - Bash (Linux) -- [**Python 3.9.6**](#python) or higher -- [**MongoDB**](#database) (needed only for local development) - - -It can be built and run on all common platforms. We develop and test on the following: - -- **Windows** 10 -- **Linux** - - **Ubuntu** 20.04 LTS - - **Centos** 7 -- **Mac OSX** - - **10.15** Catalina - - **11.1** Big Sur (using Rosetta2) - -For more details on requirements visit [requirements documentation](https://openpype.io/docs/dev_requirements) - -Building OpenPype ----------------- - -To build OpenPype you currently need [Python 3.9](https://www.python.org/downloads/) as we are following -[vfx platform](https://vfxplatform.com). Because some Linux distros come with a newer Python version -already, you need to install the **3.9** version and make use of it. You can perhaps use [pyenv](https://github.com/pyenv/pyenv) for this on Linux. -**Note**: We do not support 3.9.0 because of [this bug](https://github.com/python/cpython/pull/22670). Please use higher versions of 3.9.x. - -### Windows - -You will need [Python >= 3.9.1](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). -More tools might be needed for installing dependencies (for example for **OpenTimelineIO**) - mostly -development tools like [CMake](https://cmake.org/) and [Visual Studio](https://visualstudio.microsoft.com/cs/downloads/) - -#### Clone repository: -```sh -git clone --recurse-submodules git@github.com:ynput/OpenPype.git -``` - -#### To build OpenPype: - -1) Run `.\tools\create_env.ps1` to create virtual environment in `.\venv`. -2) Run `.\tools\fetch_thirdparty_libs.ps1` to download third-party dependencies like ffmpeg and oiio. Those will be included in the build. -3) Run `.\tools\build.ps1` to build OpenPype executables in `.\build\`.
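Taken together, the three steps above form a short PowerShell session. The following is a sketch, not part of the original instructions; it assumes the repository was cloned with the command shown earlier and that Python 3.9 and git are already on `PATH`:

```sh
# Run from the OpenPype repository root in PowerShell.
.\tools\create_env.ps1             # 1) create the virtual environment in .\venv
.\tools\fetch_thirdparty_libs.ps1  # 2) download ffmpeg, oiio and other third-party dependencies
.\tools\build.ps1                  # 3) build the executables into .\build\
```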
- -To create distributable OpenPype versions, run `./tools/create_zip.ps1` - that will -create a zip file named `openpype-vx.x.x.zip`, with the version parsed from the current OpenPype repository, and -copy it to the user data dir, or you can specify `--path /path/to/zip` to force it there. - -You can then point **Igniter** - OpenPype setup tool - to the directory containing this zip and -it will install it on the current computer. - -OpenPype is built using [CX_Freeze](https://cx-freeze.readthedocs.io/en/latest) to freeze itself and all dependencies. - -### macOS - -You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need other tools to build -some OpenPype dependencies like [CMake](https://cmake.org/) and **XCode Command Line Tools** (or some other build system). - -An easy way of installing everything necessary is to use [Homebrew](https://brew.sh): - -1) Install **Homebrew**: - ```sh - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - ``` - -2) Install **cmake**: - ```sh - brew install cmake - ``` - -3) Install [pyenv](https://github.com/pyenv/pyenv): - ```sh - brew install pyenv - echo 'eval "$(pyenv init -)"' >> ~/.zshrc - pyenv init - exec "$SHELL" - PATH=$(pyenv root)/shims:$PATH - ``` - -4) Pull in required Python version 3.9.x: - ```sh - # install Python build dependencies - brew install openssl readline sqlite3 xz zlib - - # replace with up-to-date 3.9.x version - pyenv install 3.9.6 - ``` - -5) Set local Python version: - ```sh - # switch to OpenPype source directory - pyenv local 3.9.6 - ``` - -#### To build OpenPype: - -1) Run `./tools/create_env.sh` to create virtual environment in `./venv` -2) Run `./tools/fetch_thirdparty_libs.sh` to download third-party dependencies like ffmpeg and oiio. Those will be included in the build. -3) Run `./tools/build.sh` to build OpenPype executables in `./build/` - -### Linux - -#### Docker -The easiest way to build OpenPype on Linux is using [Docker](https://www.docker.com/). Just run: - -```sh -sudo ./tools/docker_build.sh -``` - -This will use Debian as the base image by default. If you need to make a Centos 7 compatible build, please run: - -```sh -sudo ./tools/docker_build.sh centos7 -``` - -If all is successful, you'll find the built OpenPype in the `./build/` folder. - -The Docker build can also be started from a Windows machine; just use `./tools/docker_build.ps1` instead of the shell script. - -This can even be used for building the Linux build (with the argument `centos7` or `debian`) - -#### Manual build -You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that don't have it preinstalled. - -To build Python related stuff, you need Python header files installed (`python3-dev` on Ubuntu for example). - -You'll also need other tools to build -some OpenPype dependencies like [CMake](https://cmake.org/). Python 3 should be part of all modern distributions. You can use your package manager to install **git** and **cmake**.
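Before following the distro-specific notes below, it can help to confirm the basic toolchain is present. This is a quick sketch using generic shell commands, not OpenPype tooling:

```sh
# Verify the manual-build prerequisites described above.
python3 --version   # should report 3.9.x (3.9.1 or newer; 3.9.0 is not supported)
git --version
cmake --version
```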
-Details for Ubuntu
-Install git, cmake and curl:
-
-```sh
-sudo apt install build-essential checkinstall
-sudo apt install git cmake curl
-```
-#### Note:
-If you run into an error about `xcb` when running OpenPype,
-you'll also need additional libraries for Qt5:
-
-```sh
-sudo apt install qt5-default
-```
-or, if you are on Ubuntu > 20.04, there is no `qt5-default` package, so you need to install its contents individually:
-
-```sh
-sudo apt-get install qtbase5-dev qtchooser qt5-qmake qtbase5-dev-tools
-```
-
- -
-Details for Centos
-Install git, cmake and curl:
-
-```sh
-sudo yum install git cmake curl
-```
-
-#### Note:
-If you run into an error about `xcb` when running OpenPype,
-you'll also need additional libraries for Qt5:
-
-```sh
-sudo yum install qt5-qtbase-devel
-```
-
- -
-Use pyenv to install Python version for OpenPype build - -You will need **bzip2**, **readline**, **sqlite3** and other libraries. - -For more details about Python build environments see: - -https://github.com/pyenv/pyenv/wiki#suggested-build-environment - -**For Ubuntu:** -```sh -sudo apt-get update; sudo apt-get install --no-install-recommends make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev -``` - -**For Centos:** -```sh -yum install gcc zlib-devel bzip2 bzip2-devel readline-devel sqlite sqlite-devel openssl-devel tk-devel libffi-devel -``` - -**install pyenv** -```sh -curl https://pyenv.run | bash - -# you can add those to ~/.bashrc -export PATH="$HOME/.pyenv/bin:$PATH" -eval "$(pyenv init -)" -eval "$(pyenv virtualenv-init -)" - -# reload shell -exec $SHELL - -# install Python 3.9.x -pyenv install -v 3.9.6 - -# change path to OpenPype 3 -cd /path/to/openpype-3 - -# set local python version -pyenv local 3.9.6 - -``` -
-
-#### To build OpenPype:
-
-1) Run `./tools/create_env.sh` to create a virtual environment in `./venv`
-2) Run `./tools/build.sh` to build OpenPype executables in `./build/`
-
-
-Running OpenPype
-----------------
-
-OpenPype can be executed either from live sources (this repository) or from
-*"frozen code"* - executables that can be built using the steps described above.
-
-If OpenPype is executed from live sources, it will use the OpenPype version included in them. If
-it is executed from frozen code, it will try to find the latest OpenPype version installed locally
-on the current computer and, if none is found, it will ask for its location. At that location,
-OpenPype can be either in directories or zip files. OpenPype will try to find the latest version and
-install it to the user data directory (on Windows to `%LOCALAPPDATA%\pypeclub\openpype`, on Linux
-`~/.local/share/openpype` and on macOS in `~/Library/Application Support/openpype`).
-
-### From sources
-OpenPype can be run directly from sources by activating the virtual environment:
-
-```sh
-poetry run python start.py tray
-```
-
-This will use the current OpenPype version with sources. You can override this with `--use-version=x.x.x`,
-and OpenPype will then try to find the specified version installed locally (present in the user data directory).
-
-### From frozen code
-
-You need to build OpenPype first. This will produce two executables - `openpype_gui(.exe)` and `openpype_console(.exe)`.
-The first one acts as a GUI application and will not create a console (useful in production environments).
-The second one creates a console and writes output there - useful for headless applications and
-debugging purposes. If you need an installable OpenPype version, just run `./tools/create_zip(.ps1|.sh)` without
-arguments and it will create a zip file that OpenPype can use.
-
-
-Building documentation
-----------------------
-
-To build the API documentation, run `./tools/make_docs(.ps1|.sh)`. It will create html documentation
-from current sources in `./docs/build`.
-
-**Note that it needs an existing virtual environment.**
-
-Running tests
--------------
-
-To run tests, execute `./tools/run_tests(.ps1|.sh)`.
-
-**Note that it needs an existing virtual environment.**
-
-
-Developer tools
----------------
-
-In case you wish to add your own tools to the `./tools` folder without git tracking, it is possible by adding them with the `dev_*` prefix (example: `dev_clear_pyc(.ps1|.sh)`).
-
-
-
-## Contributors ✨
-
-Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
-
- Milan Kolar: 💻 📖 🚇 💼 🖋 🔍 🚧 📆 👀 🧑‍🏫 💬
- Jakub Ježek: 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬
- Ondřej Samohel: 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬
- Jakub Trllo: 💻 📖 🚇 👀 🚧 💬
- Petr Kalis: 💻 📖 🚇 👀 🚧 💬
- 64qam: 💻 👀 📖 🚇 📆 🚧 🖋 📓
- Roy Nieterau: 💻 📖 👀 🧑‍🏫 💬
- Toke Jepsen: 💻 📖 👀 🧑‍🏫 💬
- Jiri Sindelar: 💻 👀 📖 🖋 ✅ 📓
- Simone Barbieri: 💻 📖
- karimmozilla: 💻
- Allan I. A.: 💻
- murphy: 💻 👀 📓 📖 📆
- Wijnand Koreman: 💻
- Bo Zhou: 💻
- Clément Hector: 💻 👀
- David Lai: 💻 👀
- Derek: 💻 📖
- Gábor Marinov: 💻 📖
- icyvapor: 💻 📖
- Jérôme LORRAIN: 💻
- David Morris-Oliveros: 💻
- BenoitConnan: 💻
- Malthaldar: 💻
- Sven Neve: 💻
- zafrs: 💻
- Félix David: 💻 📖
- Alexey Bogomolov: 💻
- - - - - - -This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! +AYON-core is a successor to OpenPype repository (minus all the addons) and still in the process of cleaning up of all references. Please bear with us during this transitional phase. diff --git a/app_launcher.py b/app_launcher.py deleted file mode 100644 index 6dc1518370..0000000000 --- a/app_launcher.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Launch process that is not child process of python or OpenPype. - -This is written for linux distributions where process tree may affect what -is when closed or blocked to be closed. -""" - -import os -import sys -import subprocess -import json - - -def main(input_json_path): - """Read launch arguments from json file and launch the process. - - Expected that json contains "args" key with string or list of strings. - - Arguments are converted to string using `list2cmdline`. At the end is added - `&` which will cause that launched process is detached and running as - "background" process. - - ## Notes - @iLLiCiT: This should be possible to do with 'disown' or double forking but - I didn't find a way how to do it properly. Disown didn't work as - expected for me and double forking killed parent process which is - unexpected too. - """ - with open(input_json_path, "r") as stream: - data = json.load(stream) - - # Change environment variables - env = data.get("env") or {} - for key, value in env.items(): - os.environ[key] = value - - # Prepare launch arguments - args = data["args"] - if isinstance(args, list): - args = subprocess.list2cmdline(args) - - # Run the command as background process - shell_cmd = args + " &" - os.system(shell_cmd) - sys.exit(0) - - -if __name__ == "__main__": - # Expect that last argument is path to a json with launch args information - main(sys.argv[-1]) diff --git a/client/ayon_core/__init__.py b/client/ayon_core/__init__.py new file mode 100644 index 0000000000..c9c0dfc614 --- /dev/null +++ b/client/ayon_core/__init__.py @@ -0,0 +1,9 @@ +import os + + +AYON_CORE_ROOT = os.path.dirname(os.path.abspath(__file__)) + +# TODO remove after '1.x.x' +PACKAGE_DIR = AYON_CORE_ROOT +PLUGINS_DIR = os.path.join(AYON_CORE_ROOT, "plugins") +AYON_SERVER_ENABLED = True diff --git a/client/ayon_core/__main__.py b/client/ayon_core/__main__.py new file mode 100644 index 0000000000..a84239a7b4 --- /dev/null +++ b/client/ayon_core/__main__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +"""Main entry point for AYON command.""" +from ayon_core import cli + + +if __name__ == "__main__": + cli.main() diff --git a/client/ayon_core/addon/README.md b/client/ayon_core/addon/README.md new file mode 100644 index 0000000000..b793b0ffb4 --- /dev/null +++ b/client/ayon_core/addon/README.md @@ -0,0 +1,92 @@ +# AYON addons +AYON addons should contain separated logic of specific kind of implementation, such as ftrack connection and its usage code, Deadline farm rendering or may contain only special plugins. Addons work the same way currently, there is no difference between module and addon functionality. 
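To make the concepts described below concrete, here is a minimal sketch of an addon (illustrative only: `ExampleAddon`, its settings keys and the plugin path are hypothetical, while `AYONAddon`, `IPluginPaths` and `click_wrap` are the real names exported from `ayon_core.addon`):

```python
from ayon_core.addon import AYONAddon, IPluginPaths, click_wrap


class ExampleAddon(AYONAddon, IPluginPaths):
    """Hypothetical addon showing the abstract parts described below."""

    # Required abstract attribute: addon name
    name = "example"

    def initialize(self, settings):
        # Lightweight setup only - no GUI, may run headless.
        # The "example" settings key is a made-up illustration.
        self.enabled = settings.get("example", {}).get("enabled", True)

    def get_plugin_paths(self):
        # Required by the IPluginPaths interface
        return {"publish": ["/path/to/publish/plugins"]}

    def cli(self, addon_click_group):
        # Convert the click_wrap group to a click object and register it
        addon_click_group.add_command(cli_main.to_click_obj())


@click_wrap.group("example", help="Example addon commands")
def cli_main():
    pass


@cli_main.command(help="Example command")
@click_wrap.option("--arg1", help="Example argument", default="default1")
def mycommand(arg1):
    print(arg1)
```

The cli part mirrors the examples documented in `ayon_core/addon/click_wrap.py` further below.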
+
+## Addons concept
+- addons are dynamically imported based on the current AYON bundle
+
+## Base class `AYONAddon`
+- abstract class that serves as the base for each addon
+- the implementation should contain the addon's api without GUI parts
+- may implement the `get_global_environments` method, which should return a dictionary of environments that are globally applicable, where the value is the same for the whole studio no matter which workstation it is launched on (except os specific paths)
+- abstract parts:
+    - `name` attribute - name of an addon
+    - `initialize` method - method for the addon's own initialization (should not override `__init__`)
+    - `connect_with_addons` method - where the addon may look for its interface implementations or check for other addons
+- `__init__` should not be overridden, and `initialize` should not do time-consuming work, only prepare base data about the addon
+    - also keep in mind that addons may be initialized in headless mode
+- connection with other addons is made with the help of interfaces
+- `cli` method - adds cli commands specific to the addon
+    - command line arguments are handled using the `click_wrap` python module located in `ayon_core.addon`
+    - the `cli` method should expect a single argument, which is the click group on which any group-specific methods can be called (e.g. `add_command` to add another click group as a child; see `ExampleAddon`)
+    - it is possible to trigger cli commands using `./ayon addon *args`
+
+# Interfaces
+- an interface is a class that defines abstract methods to implement and may contain pre-implemented helper methods
+- an addon that inherits from an interface must implement those abstract methods, otherwise it won't be initialized
+- this makes it easy to find out which addon object inherits from which interfaces, with a 100% guarantee that the required methods are implemented
+- default interfaces are defined in `interfaces.py`
+
+## IPluginPaths
+- for an addon that wants to add directory path(s) to avalon or publish plugins
+- the addon must implement `get_plugin_paths`, which must return a dictionary with the possible keys `"publish"`, `"load"`, `"create"` or `"actions"`
+    - each key may contain a list or a string with a path to a directory with plugins
+
+## ITrayModule
+- for an addon that has more logic when used in a tray
+    - it is possible that an addon can be used only in the tray
+- abstract methods
+    - `tray_init` - initialization triggered after `initialize` when used in `TrayModulesManager` and before `connect_with_addons`
+    - `tray_menu` - adds actions to the tray widget's menu that represent the addon
+    - `tray_start` - start of the addon's logic in the tray
+        - the addon is initialized and connected with other addons
+    - `tray_exit` - the addon's cleanup, like stopping and joining threads etc.
+    - the order of calling depends on the manager implementation; the order above is how it works with `TrayModulesManager`
+    - it is recommended to import and use GUI implementations only in these methods
+- has attribute `tray_initialized` (bool), which is set to False by default and is set to True by `TrayModulesManager` after `tray_init`
+    - if an addon has logic only in the tray, or for both cases, it should check the `tray_initialized` attribute to decide how to handle each situation
+
+### ITrayService
+- inherits from `ITrayModule` and implements the `tray_menu` method for you
+    - adds an action to the "Services" submenu in the tray widget menu, with icon and label
+- abstract attribute `label`
+    - label shown in the menu
+- the interface has pre-implemented methods to change the icon color
+    - `set_service_running` - green icon
+    - `set_service_failed` - red icon
+    - `set_service_idle` - orange icon
+    - these states must be set by the addon itself; `set_service_running` is the default state on initialization
+
+### ITrayAction
+- inherits from `ITrayModule` and implements the `tray_menu` method for you
+    - adds an action to the tray widget menu with a label
+- abstract attribute `label`
+    - label shown in the menu
+- abstract method `on_action_trigger`
+    - what should happen when the action is triggered
+- NOTE: It is a good idea to move the logic of `on_action_trigger` into an api method and trigger that method from the callback. This gives the ability to trigger the method outside the tray
+
+
+### AddonsManager
+- collects addon classes and tries to initialize them
+- important attributes
+    - `addons` - list of available addons
+    - `addons_by_id` - dictionary of addons mapped by their ids
+    - `addons_by_name` - dictionary of addons mapped by their names
+    - all these attributes contain all found addons, even if they are not enabled
+- helper methods
+    - `collect_global_environments` to collect all global environments from enabled addons by calling `get_global_environments` on each of them
+    - `collect_plugin_paths` collects plugin paths from all enabled addons
+        - the output is always a dictionary with all keys present and lists as values
+    ```
+    {
+        "publish": [],
+        "create": [],
+        "load": [],
+        "actions": [],
+        "inventory": []
+    }
+    ```
+
+### TrayAddonsManager
+- inherits from `AddonsManager`
+- has a specific implementation for the Pype Tray tool and handles `ITrayModule` methods
diff --git a/client/ayon_core/addon/__init__.py b/client/ayon_core/addon/__init__.py
new file mode 100644
index 0000000000..fe8865c730
--- /dev/null
+++ b/client/ayon_core/addon/__init__.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+from . 
import click_wrap +from .interfaces import ( + IPluginPaths, + ITrayAddon, + ITrayAction, + ITrayService, + IHostAddon, +) + +from .base import ( + AYONAddon, + AddonsManager, + TrayAddonsManager, + load_addons, +) + + +__all__ = ( + "click_wrap", + + "IPluginPaths", + "ITrayAddon", + "ITrayAction", + "ITrayService", + "IHostAddon", + + "AYONAddon", + "AddonsManager", + "TrayAddonsManager", + "load_addons", +) diff --git a/client/ayon_core/addon/base.py b/client/ayon_core/addon/base.py new file mode 100644 index 0000000000..a3920c4acb --- /dev/null +++ b/client/ayon_core/addon/base.py @@ -0,0 +1,1417 @@ +# -*- coding: utf-8 -*- +"""Base class for AYON addons.""" +import copy +import os +import sys +import time +import inspect +import logging +import threading +import collections + +from uuid import uuid4 +from abc import ABCMeta, abstractmethod + +import six +import appdirs + +from ayon_core.lib import Logger +from ayon_core.client import get_ayon_server_api_connection +from ayon_core.settings import get_system_settings +from ayon_core.settings.ayon_settings import ( + is_dev_mode_enabled, + get_ayon_settings, +) + +from .interfaces import ( + IPluginPaths, + IHostAddon, + ITrayAddon, + ITrayService +) + +# Files that will be always ignored on addons import +IGNORED_FILENAMES = ( + "__pycache__", +) +# Files ignored on addons import from "./ayon_core/modules" +IGNORED_DEFAULT_FILENAMES = ( + "__init__.py", + "base.py", + "interfaces.py", + "click_wrap.py", + "example_addons", + "default_modules", +) +IGNORED_HOSTS_IN_AYON = { + "flame", + "harmony", +} +IGNORED_MODULES_IN_AYON = set() + + +# Inherit from `object` for Python 2 hosts +class _ModuleClass(object): + """Fake module class for storing AYON addons. + + Object of this class can be stored to `sys.modules` and used for storing + dynamically imported modules. + """ + + def __init__(self, name): + # Call setattr on super class + super(_ModuleClass, self).__setattr__("name", name) + super(_ModuleClass, self).__setattr__("__name__", name) + + # Where modules and interfaces are stored + super(_ModuleClass, self).__setattr__("__attributes__", dict()) + super(_ModuleClass, self).__setattr__("__defaults__", set()) + + super(_ModuleClass, self).__setattr__("_log", None) + + def __getattr__(self, attr_name): + if attr_name not in self.__attributes__: + if attr_name in ("__path__", "__file__"): + return None + raise AttributeError("'{}' has not attribute '{}'".format( + self.name, attr_name + )) + return self.__attributes__[attr_name] + + def __iter__(self): + for module in self.values(): + yield module + + def __setattr__(self, attr_name, value): + if attr_name in self.__attributes__: + self.log.warning( + "Duplicated name \"{}\" in {}. Overriding.".format( + attr_name, self.name + ) + ) + self.__attributes__[attr_name] = value + + def __setitem__(self, key, value): + self.__setattr__(key, value) + + def __getitem__(self, key): + return getattr(self, key) + + @property + def log(self): + if self._log is None: + super(_ModuleClass, self).__setattr__( + "_log", Logger.get_logger(self.name) + ) + return self._log + + def get(self, key, default=None): + return self.__attributes__.get(key, default) + + def keys(self): + return self.__attributes__.keys() + + def values(self): + return self.__attributes__.values() + + def items(self): + return self.__attributes__.items() + + +class _LoadCache: + addons_lock = threading.Lock() + addons_loaded = False + + +def load_addons(force=False): + """Load AYON addons as python modules. 
+ + Modules does not load only classes (like in Interfaces) because there must + be ability to use inner code of addon and be able to import it from one + defined place. + + With this it is possible to import addon's content from predefined module. + + Args: + force (bool): Force to load addons even if are already loaded. + This won't update already loaded and used (cached) modules. + """ + + if _LoadCache.addons_loaded and not force: + return + + if not _LoadCache.addons_lock.locked(): + with _LoadCache.addons_lock: + _load_addons() + _LoadCache.addons_loaded = True + else: + # If lock is locked wait until is finished + while _LoadCache.addons_lock.locked(): + time.sleep(0.1) + + +def _get_ayon_bundle_data(): + con = get_ayon_server_api_connection() + bundles = con.get_bundles()["bundles"] + + bundle_name = os.getenv("AYON_BUNDLE_NAME") + + return next( + ( + bundle + for bundle in bundles + if bundle["name"] == bundle_name + ), + None + ) + + +def _get_ayon_addons_information(bundle_info): + """Receive information about addons to use from server. + + Todos: + Actually ask server for the information. + Allow project name as optional argument to be able to query information + about used addons for specific project. + + Returns: + List[Dict[str, Any]]: List of addon information to use. + """ + + output = [] + bundle_addons = bundle_info["addons"] + con = get_ayon_server_api_connection() + addons = con.get_addons_info()["addons"] + for addon in addons: + name = addon["name"] + versions = addon.get("versions") + addon_version = bundle_addons.get(name) + if addon_version is None or not versions: + continue + version = versions.get(addon_version) + if version: + version = copy.deepcopy(version) + version["name"] = name + version["version"] = addon_version + output.append(version) + return output + + +def _load_ayon_addons(openpype_modules, modules_key, log): + """Load AYON addons based on information from server. + + This function should not trigger downloading of any addons but only use + what is already available on the machine (at least in first stages of + development). + + Args: + openpype_modules (_ModuleClass): Module object where modules are + stored. + modules_key (str): Key under which will be modules imported in + `sys.modules`. + log (logging.Logger): Logger object. + + Returns: + List[str]: List of v3 addons to skip to load because v4 alternative is + imported. + """ + + addons_to_skip_in_core = [] + + bundle_info = _get_ayon_bundle_data() + addons_info = _get_ayon_addons_information(bundle_info) + if not addons_info: + return addons_to_skip_in_core + + addons_dir = os.environ.get("AYON_ADDONS_DIR") + if not addons_dir: + addons_dir = os.path.join( + appdirs.user_data_dir("AYON", "Ynput"), + "addons" + ) + + dev_mode_enabled = is_dev_mode_enabled() + dev_addons_info = {} + if dev_mode_enabled: + # Get dev addons info only when dev mode is enabled + dev_addons_info = bundle_info.get("addonDevelopment", dev_addons_info) + + addons_dir_exists = os.path.exists(addons_dir) + if not addons_dir_exists: + log.warning("Addons directory does not exists. 
Path \"{}\"".format( + addons_dir + )) + + for addon_info in addons_info: + addon_name = addon_info["name"] + addon_version = addon_info["version"] + + # core addon does not have any addon object + if addon_name in ("openpype", "core"): + continue + + dev_addon_info = dev_addons_info.get(addon_name, {}) + use_dev_path = dev_addon_info.get("enabled", False) + + addon_dir = None + if use_dev_path: + addon_dir = dev_addon_info["path"] + if not addon_dir or not os.path.exists(addon_dir): + log.warning(( + "Dev addon {} {} path does not exists. Path \"{}\"" + ).format(addon_name, addon_version, addon_dir)) + continue + + elif addons_dir_exists: + folder_name = "{}_{}".format(addon_name, addon_version) + addon_dir = os.path.join(addons_dir, folder_name) + if not os.path.exists(addon_dir): + log.debug(( + "No localized client code found for addon {} {}." + ).format(addon_name, addon_version)) + continue + + if not addon_dir: + continue + + sys.path.insert(0, addon_dir) + imported_modules = [] + for name in os.listdir(addon_dir): + # Ignore of files is implemented to be able to run code from code + # where usually is more files than just the addon + # Ignore start and setup scripts + if name in ("setup.py", "start.py", "__pycache__"): + continue + + path = os.path.join(addon_dir, name) + basename, ext = os.path.splitext(name) + # Ignore folders/files with dot in name + # - dot names cannot be imported in Python + if "." in basename: + continue + is_dir = os.path.isdir(path) + is_py_file = ext.lower() == ".py" + if not is_py_file and not is_dir: + continue + + try: + mod = __import__(basename, fromlist=("",)) + for attr_name in dir(mod): + attr = getattr(mod, attr_name) + if ( + inspect.isclass(attr) + and issubclass(attr, AYONAddon) + ): + imported_modules.append(mod) + break + + except BaseException: + log.warning( + "Failed to import \"{}\"".format(basename), + exc_info=True + ) + + if not imported_modules: + log.warning("Addon {} {} has no content to import".format( + addon_name, addon_version + )) + continue + + if len(imported_modules) > 1: + log.warning(( + "Skipping addon '{}'." + " Multiple modules were found ({}) in dir {}." 
+ ).format( + addon_name, + ", ".join([m.__name__ for m in imported_modules]), + addon_dir, + )) + continue + + mod = imported_modules[0] + addon_alias = getattr(mod, "V3_ALIAS", None) + if not addon_alias: + addon_alias = addon_name + addons_to_skip_in_core.append(addon_alias) + new_import_str = "{}.{}".format(modules_key, addon_alias) + + sys.modules[new_import_str] = mod + setattr(openpype_modules, addon_alias, mod) + + return addons_to_skip_in_core + + +def _load_addons_in_core( + ignore_addon_names, openpype_modules, modules_key, log +): + # Add current directory at first place + # - has small differences in import logic + current_dir = os.path.abspath(os.path.dirname(__file__)) + hosts_dir = os.path.join(os.path.dirname(current_dir), "hosts") + modules_dir = os.path.join(os.path.dirname(current_dir), "modules") + + ignored_host_names = set(IGNORED_HOSTS_IN_AYON) + ignored_module_dir_filenames = ( + set(IGNORED_DEFAULT_FILENAMES) + | IGNORED_MODULES_IN_AYON + ) + + for dirpath in {hosts_dir, modules_dir}: + if not os.path.exists(dirpath): + log.warning(( + "Could not find path when loading AYON addons \"{}\"" + ).format(dirpath)) + continue + + is_in_modules_dir = dirpath == modules_dir + if is_in_modules_dir: + ignored_filenames = ignored_module_dir_filenames + else: + ignored_filenames = ignored_host_names + + for filename in os.listdir(dirpath): + # Ignore filenames + if filename in IGNORED_FILENAMES or filename in ignored_filenames: + continue + + fullpath = os.path.join(dirpath, filename) + basename, ext = os.path.splitext(filename) + + if basename in ignore_addon_names: + continue + + # Validations + if os.path.isdir(fullpath): + # Check existence of init file + init_path = os.path.join(fullpath, "__init__.py") + if not os.path.exists(init_path): + log.debug(( + "Addon directory does not contain __init__.py" + " file {}" + ).format(fullpath)) + continue + + elif ext not in (".py", ): + continue + + # TODO add more logic how to define if folder is addon or not + # - check manifest and content of manifest + try: + # Don't import dynamically current directory modules + new_import_str = "{}.{}".format(modules_key, basename) + if is_in_modules_dir: + import_str = "ayon_core.modules.{}".format(basename) + default_module = __import__(import_str, fromlist=("", )) + sys.modules[new_import_str] = default_module + setattr(openpype_modules, basename, default_module) + + else: + import_str = "ayon_core.hosts.{}".format(basename) + # Until all hosts are converted to be able use them as + # modules is this error check needed + try: + default_module = __import__( + import_str, fromlist=("", ) + ) + sys.modules[new_import_str] = default_module + setattr(openpype_modules, basename, default_module) + + except Exception: + log.warning( + "Failed to import host folder {}".format(basename), + exc_info=True + ) + + except Exception: + if is_in_modules_dir: + msg = "Failed to import in-core addon '{}'.".format( + basename + ) + else: + msg = "Failed to import addon '{}'.".format(fullpath) + log.error(msg, exc_info=True) + + +def _load_addons(): + # Support to use 'openpype' imports + sys.modules["openpype"] = sys.modules["ayon_core"] + + # Key under which will be modules imported in `sys.modules` + modules_key = "openpype_modules" + + # Change `sys.modules` + sys.modules[modules_key] = openpype_modules = _ModuleClass(modules_key) + + log = Logger.get_logger("AddonsLoader") + + ignore_addon_names = _load_ayon_addons( + openpype_modules, modules_key, log + ) + _load_addons_in_core( + ignore_addon_names, 
openpype_modules, modules_key, log + ) + + +_MARKING_ATTR = "_marking" +def mark_func(func): + """Mark function to be used in report. + + Args: + func (Callable): Function to mark. + + Returns: + Callable: Marked function. + """ + + setattr(func, _MARKING_ATTR, True) + return func + + +def is_func_marked(func): + return getattr(func, _MARKING_ATTR, False) + + +@six.add_metaclass(ABCMeta) +class AYONAddon(object): + """Base class of AYON addon. + + Attributes: + id (UUID): Addon object id. + enabled (bool): Is addon enabled. + name (str): Addon name. + + Args: + manager (AddonsManager): Manager object who discovered addon. + settings (dict[str, Any]): AYON settings. + """ + + enabled = True + _id = None + + def __init__(self, manager, settings): + self.manager = manager + + self.log = Logger.get_logger(self.name) + + self.initialize(settings) + + @property + def id(self): + """Random id of addon object. + + Returns: + str: Object id. + """ + + if self._id is None: + self._id = uuid4() + return self._id + + @property + @abstractmethod + def name(self): + """Addon name. + + Returns: + str: Addon name. + """ + + pass + + def initialize(self, settings): + """Initialization of addon attributes. + + It is not recommended to override __init__ that's why specific method + was implemented. + + Args: + settings (dict[str, Any]): Settings. + """ + + pass + + @mark_func + def connect_with_addons(self, enabled_addons): + """Connect with other enabled addons. + + Args: + enabled_addons (list[AYONAddon]): Addons that are enabled. + """ + + pass + + def get_global_environments(self): + """Get global environments values of addon. + + Environment variables that can be get only from system settings. + + Returns: + dict[str, str]: Environment variables. + """ + + return {} + + def modify_application_launch_arguments(self, application, env): + """Give option to modify launch environments before application launch. + + Implementation is optional. To change environments modify passed + dictionary of environments. + + Args: + application (Application): Application that is launched. + env (dict[str, str]): Current environment variables. + """ + + pass + + def on_host_install(self, host, host_name, project_name): + """Host was installed which gives option to handle in-host logic. + + It is a good option to register in-host event callbacks which are + specific for the addon. The addon is kept in memory for rest of + the process. + + Arguments may change in future. E.g. 'host_name' should be possible + to receive from 'host' object. + + Args: + host (Union[ModuleType, HostBase]): Access to installed/registered + host object. + host_name (str): Name of host. + project_name (str): Project name which is main part of host + context. + """ + + pass + + def cli(self, addon_click_group): + """Add commands to click group. + + The best practise is to create click group for whole addon which is + used to separate commands. + + Example: + class MyPlugin(AYONAddon): + ... + def cli(self, addon_click_group): + addon_click_group.add_command(cli_main) + + + @click.group(, help="") + def cli_main(): + pass + + @cli_main.command() + def mycommand(): + print("my_command") + + Args: + addon_click_group (click.Group): Group to which can be added + commands. + """ + + pass + + +class OpenPypeModule(AYONAddon): + """Base class of OpenPype module. + + Deprecated: + Use `AYONAddon` instead. + + Args: + manager (AddonsManager): Manager object who discovered addon. + settings (dict[str, Any]): Module settings (OpenPype settings). 
+ """ + + # Disable by default + enabled = False + + +class OpenPypeAddOn(OpenPypeModule): + # Enable Addon by default + enabled = True + + +class AddonsManager: + """Manager of addons that helps to load and prepare them to work. + + Args: + settings (Optional[dict[str, Any]]): AYON studio settings. + initialize (Optional[bool]): Initialize addons on init. + True by default. + """ + + # Helper attributes for report + _report_total_key = "Total" + _log = None + + def __init__(self, settings=None, initialize=True): + self._settings = settings + self._system_settings = None + + self._addons = [] + self._addons_by_id = {} + self._addons_by_name = {} + # For report of time consumption + self._report = {} + + if initialize: + self.initialize_addons() + self.connect_addons() + + def __getitem__(self, addon_name): + return self._addons_by_name[addon_name] + + @property + def log(self): + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + def get(self, addon_name, default=None): + """Access addon by name. + + Args: + addon_name (str): Name of addon which should be returned. + default (Any): Default output if addon is not available. + + Returns: + Union[AYONAddon, Any]: Addon found by name or `default`. + """ + + return self._addons_by_name.get(addon_name, default) + + @property + def addons(self): + return list(self._addons) + + @property + def addons_by_id(self): + return dict(self._addons_by_id) + + @property + def addons_by_name(self): + return dict(self._addons_by_name) + + def get_enabled_addon(self, addon_name, default=None): + """Fast access to enabled addon. + + If addon is available but is not enabled default value is returned. + + Args: + addon_name (str): Name of addon which should be returned. + default (Any): Default output if addon is not available or is + not enabled. + + Returns: + Union[AYONAddon, None]: Enabled addon found by name or None. + """ + + addon = self.get(addon_name) + if addon is not None and addon.enabled: + return addon + return default + + def get_enabled_addons(self): + """Enabled addons initialized by the manager. + + Returns: + list[AYONAddon]: Initialized and enabled addons. 
+ """ + + return [ + addon + for addon in self._addons + if addon.enabled + ] + + def initialize_addons(self): + """Import and initialize addons.""" + # Make sure modules are loaded + load_addons() + + import openpype_modules + + self.log.debug("*** AYON addons initialization.") + + # Prepare settings for addons + settings = self._settings + if settings is None: + settings = get_ayon_settings() + + # OpenPype settings + system_settings = self._system_settings + if system_settings is None: + system_settings = get_system_settings() + + modules_settings = system_settings["modules"] + + report = {} + time_start = time.time() + prev_start_time = time_start + + addon_classes = [] + for module in openpype_modules: + # Go through globals in `pype.modules` + for name in dir(module): + modules_item = getattr(module, name, None) + # Filter globals that are not classes which inherit from + # AYONAddon + if ( + not inspect.isclass(modules_item) + or modules_item is AYONAddon + or modules_item is OpenPypeModule + or modules_item is OpenPypeAddOn + or not issubclass(modules_item, AYONAddon) + ): + continue + + # Check if class is abstract (Developing purpose) + if inspect.isabstract(modules_item): + # Find abstract attributes by convention on `abc` module + not_implemented = [] + for attr_name in dir(modules_item): + attr = getattr(modules_item, attr_name, None) + abs_method = getattr( + attr, "__isabstractmethod__", None + ) + if attr and abs_method: + not_implemented.append(attr_name) + + # Log missing implementations + self.log.warning(( + "Skipping abstract Class: {}." + " Missing implementations: {}" + ).format(name, ", ".join(not_implemented))) + continue + + addon_classes.append(modules_item) + + for addon_cls in addon_classes: + name = addon_cls.__name__ + if issubclass(addon_cls, OpenPypeModule): + # TODO change to warning + self.log.debug(( + "Addon '{}' is inherited from 'OpenPypeModule'." + " Please use 'AYONAddon'." + ).format(name)) + + try: + # Try initialize module + if issubclass(addon_cls, OpenPypeModule): + addon = addon_cls(self, modules_settings) + else: + addon = addon_cls(self, settings) + # Store initialized object + self._addons.append(addon) + self._addons_by_id[addon.id] = addon + self._addons_by_name[addon.name] = addon + enabled_str = "X" + if not addon.enabled: + enabled_str = " " + self.log.debug("[{}] {}".format(enabled_str, name)) + + now = time.time() + report[addon.__class__.__name__] = now - prev_start_time + prev_start_time = now + + except Exception: + self.log.warning( + "Initialization of addon '{}' failed.".format(name), + exc_info=True + ) + + if self._report is not None: + report[self._report_total_key] = time.time() - time_start + self._report["Initialization"] = report + + def connect_addons(self): + """Trigger connection with other enabled addons. + + Addons should handle their interfaces in `connect_with_addons`. + """ + report = {} + time_start = time.time() + prev_start_time = time_start + enabled_modules = self.get_enabled_addons() + self.log.debug("Has {} enabled modules.".format(len(enabled_modules))) + for module in enabled_modules: + try: + if not is_func_marked(module.connect_with_addons): + module.connect_with_addons(enabled_modules) + + elif hasattr(module, "connect_with_modules"): + self.log.warning(( + "DEPRECATION WARNING: Addon '{}' still uses" + " 'connect_with_modules' method. Please switch to use" + " 'connect_with_addons' method." 
+ ).format(module.name)) + module.connect_with_modules(enabled_modules) + + except Exception: + self.log.error( + "BUG: Module failed on connection with other modules.", + exc_info=True + ) + + now = time.time() + report[module.__class__.__name__] = now - prev_start_time + prev_start_time = now + + if self._report is not None: + report[self._report_total_key] = time.time() - time_start + self._report["Connect modules"] = report + + def collect_global_environments(self): + """Helper to collect global environment variabled from modules. + + Returns: + dict: Global environment variables from enabled modules. + + Raises: + AssertionError: Global environment variables must be unique for + all modules. + """ + module_envs = {} + for module in self.get_enabled_addons(): + # Collect global module's global environments + _envs = module.get_global_environments() + for key, value in _envs.items(): + if key in module_envs: + # TODO better error message + raise AssertionError( + "Duplicated environment key {}".format(key) + ) + module_envs[key] = value + return module_envs + + def collect_plugin_paths(self): + """Helper to collect all plugins from modules inherited IPluginPaths. + + Unknown keys are logged out. + + Returns: + dict: Output is dictionary with keys "publish", "create", "load", + "actions" and "inventory" each containing list of paths. + """ + # Output structure + output = { + "publish": [], + "create": [], + "load": [], + "actions": [], + "inventory": [] + } + unknown_keys_by_addon = {} + for addon in self.get_enabled_addons(): + # Skip module that do not inherit from `IPluginPaths` + if not isinstance(addon, IPluginPaths): + continue + plugin_paths = addon.get_plugin_paths() + for key, value in plugin_paths.items(): + # Filter unknown keys + if key not in output: + if addon.name not in unknown_keys_by_addon: + unknown_keys_by_addon[addon.name] = [] + unknown_keys_by_addon[addon.name].append(key) + continue + + # Skip if value is empty + if not value: + continue + + # Convert to list if value is not list + if not isinstance(value, (list, tuple, set)): + value = [value] + output[key].extend(value) + + # Report unknown keys (Developing purposes) + if unknown_keys_by_addon: + expected_keys = ", ".join([ + "\"{}\"".format(key) for key in output.keys() + ]) + msg_template = "Addon: \"{}\" - got key {}" + msg_items = [] + for addon_name, keys in unknown_keys_by_addon.items(): + joined_keys = ", ".join([ + "\"{}\"".format(key) for key in keys + ]) + msg_items.append(msg_template.format(addon_name, joined_keys)) + self.log.warning(( + "Expected keys from `get_plugin_paths` are {}. {}" + ).format(expected_keys, " | ".join(msg_items))) + return output + + def _collect_plugin_paths(self, method_name, *args, **kwargs): + output = [] + for addon in self.get_enabled_addons(): + # Skip addon that do not inherit from `IPluginPaths` + if not isinstance(addon, IPluginPaths): + continue + + method = getattr(addon, method_name) + try: + paths = method(*args, **kwargs) + except Exception: + self.log.warning( + ( + "Failed to get plugin paths from addon" + " '{}' using '{}'." + ).format(addon.__class__.__name__, method_name), + exc_info=True + ) + continue + + if paths: + # Convert to list if value is not list + if not isinstance(paths, (list, tuple, set)): + paths = [paths] + output.extend(paths) + return output + + def collect_create_plugin_paths(self, host_name): + """Helper to collect creator plugin paths from addons. + + Args: + host_name (str): For which host are creators meant. 
+ + Returns: + list: List of creator plugin paths. + """ + + return self._collect_plugin_paths( + "get_create_plugin_paths", + host_name + ) + + collect_creator_plugin_paths = collect_create_plugin_paths + + def collect_load_plugin_paths(self, host_name): + """Helper to collect load plugin paths from addons. + + Args: + host_name (str): For which host are load plugins meant. + + Returns: + list: List of load plugin paths. + """ + + return self._collect_plugin_paths( + "get_load_plugin_paths", + host_name + ) + + def collect_publish_plugin_paths(self, host_name): + """Helper to collect load plugin paths from addons. + + Args: + host_name (str): For which host are load plugins meant. + + Returns: + list: List of pyblish plugin paths. + """ + + return self._collect_plugin_paths( + "get_publish_plugin_paths", + host_name + ) + + def collect_inventory_action_paths(self, host_name): + """Helper to collect load plugin paths from addons. + + Args: + host_name (str): For which host are load plugins meant. + + Returns: + list: List of pyblish plugin paths. + """ + + return self._collect_plugin_paths( + "get_inventory_action_paths", + host_name + ) + + def get_host_addon(self, host_name): + """Find host addon by host name. + + Args: + host_name (str): Host name for which is found host addon. + + Returns: + Union[AYONAddon, None]: Found host addon by name or `None`. + """ + + for addon in self.get_enabled_addons(): + if ( + isinstance(addon, IHostAddon) + and addon.host_name == host_name + ): + return addon + return None + + def get_host_names(self): + """List of available host names based on host addons. + + Returns: + Iterable[str]: All available host names based on enabled addons + inheriting 'IHostAddon'. + """ + + return { + addon.host_name + for addon in self.get_enabled_addons() + if isinstance(addon, IHostAddon) + } + + def print_report(self): + """Print out report of time spent on addons initialization parts. + + Reporting is not automated must be implemented for each initialization + part separatelly. Reports must be stored to `_report` attribute. + Print is skipped if `_report` is empty. + + Attribute `_report` is dictionary where key is "label" describing + the processed part and value is dictionary where key is addon's + class name and value is time delta of it's processing. + + It is good idea to add total time delta on processed part under key + which is defined in attribute `_report_total_key`. By default has value + `"Total"` but use the attribute please. + + ```javascript + { + "Initialization": { + "FtrackAddon": 0.003, + ... + "Total": 1.003, + }, + ... 
+ } + ``` + """ + + if not self._report: + return + + available_col_names = set() + for addon_names in self._report.values(): + available_col_names |= set(addon_names.keys()) + + # Prepare ordered dictionary for columns + cols = collections.OrderedDict() + # Add addon names to first columnt + cols["Addon name"] = list(sorted( + addon.__class__.__name__ + for addon in self.addons + if addon.__class__.__name__ in available_col_names + )) + # Add total key (as last addon) + cols["Addon name"].append(self._report_total_key) + + # Add columns from report + for label in self._report.keys(): + cols[label] = [] + + total_addon_times = {} + for addon_name in cols["Addon name"]: + total_addon_times[addon_name] = 0 + + for label, reported in self._report.items(): + for addon_name in cols["Addon name"]: + col_time = reported.get(addon_name) + if col_time is None: + cols[label].append("N/A") + continue + cols[label].append("{:.3f}".format(col_time)) + total_addon_times[addon_name] += col_time + + # Add to also total column that should sum the row + cols[self._report_total_key] = [] + for addon_name in cols["Addon name"]: + cols[self._report_total_key].append( + "{:.3f}".format(total_addon_times[addon_name]) + ) + + # Prepare column widths and total row count + # - column width is by + col_widths = {} + total_rows = None + for key, values in cols.items(): + if total_rows is None: + total_rows = 1 + len(values) + max_width = len(key) + for value in values: + value_length = len(value) + if value_length > max_width: + max_width = value_length + col_widths[key] = max_width + + rows = [] + for _idx in range(total_rows): + rows.append([]) + + for key, values in cols.items(): + width = col_widths[key] + idx = 0 + rows[idx].append(key.ljust(width)) + for value in values: + idx += 1 + rows[idx].append(value.ljust(width)) + + filler_parts = [] + for width in col_widths.values(): + filler_parts.append(width * "-") + filler = "+".join(filler_parts) + + formatted_rows = [filler] + last_row_idx = len(rows) - 1 + for idx, row in enumerate(rows): + # Add filler before last row + if idx == last_row_idx: + formatted_rows.append(filler) + + formatted_rows.append("|".join(row)) + + # Add filler after first row + if idx == 0: + formatted_rows.append(filler) + + # Join rows with newline char and add new line at the end + output = "\n".join(formatted_rows) + "\n" + print(output) + + # DEPRECATED - Module compatibility + @property + def modules(self): + self.log.warning( + "DEPRECATION WARNING: Used deprecated property" + " 'modules' please use 'addons' instead." + ) + return self.addons + + @property + def modules_by_id(self): + self.log.warning( + "DEPRECATION WARNING: Used deprecated property" + " 'modules_by_id' please use 'addons_by_id' instead." + ) + return self.addons_by_id + + @property + def modules_by_name(self): + self.log.warning( + "DEPRECATION WARNING: Used deprecated property" + " 'modules_by_name' please use 'addons_by_name' instead." + ) + return self.addons_by_name + + def get_enabled_module(self, *args, **kwargs): + self.log.warning( + "DEPRECATION WARNING: Used deprecated method" + " 'get_enabled_module' please use 'get_enabled_addon' instead." + ) + return self.get_enabled_addon(*args, **kwargs) + + def initialize_modules(self): + self.log.warning( + "DEPRECATION WARNING: Used deprecated method" + " 'initialize_modules' please use 'initialize_addons' instead." 
+ ) + self.initialize_addons() + + def get_enabled_modules(self): + self.log.warning( + "DEPRECATION WARNING: Used deprecated method" + " 'get_enabled_modules' please use 'get_enabled_addons' instead." + ) + return self.get_enabled_addons() + + def get_host_module(self, host_name): + self.log.warning( + "DEPRECATION WARNING: Used deprecated method" + " 'get_host_module' please use 'get_host_addon' instead." + ) + return self.get_host_addon(host_name) + + +class TrayAddonsManager(AddonsManager): + # Define order of addons in menu + # TODO find better way how to define order + addons_menu_order = ( + "user", + "ftrack", + "kitsu", + "launcher_tool", + "avalon", + "clockify", + "traypublish_tool", + "log_viewer", + ) + + def __init__(self, settings=None): + super(TrayAddonsManager, self).__init__(settings, initialize=False) + + self.tray_manager = None + + self.doubleclick_callbacks = {} + self.doubleclick_callback = None + + def add_doubleclick_callback(self, addon, callback): + """Register doubleclick callbacks on tray icon. + + Currently there is no way how to determine which is launched. Name of + callback can be defined with `doubleclick_callback` attribute. + + Missing feature how to define default callback. + + Args: + addon (AYONAddon): Addon object. + callback (FunctionType): Function callback. + """ + + callback_name = "_".join([addon.name, callback.__name__]) + if callback_name not in self.doubleclick_callbacks: + self.doubleclick_callbacks[callback_name] = callback + if self.doubleclick_callback is None: + self.doubleclick_callback = callback_name + return + + self.log.warning(( + "Callback with name \"{}\" is already registered." + ).format(callback_name)) + + def initialize(self, tray_manager, tray_menu): + self.tray_manager = tray_manager + self.initialize_addons() + self.tray_init() + self.connect_addons() + self.tray_menu(tray_menu) + + def get_enabled_tray_addons(self): + """Enabled tray addons. + + Returns: + list[AYONAddon]: Enabled addons that inherit from tray interface. 
+ """ + + return [ + addon + for addon in self.get_enabled_addons() + if isinstance(addon, ITrayAddon) + ] + + def restart_tray(self): + if self.tray_manager: + self.tray_manager.restart() + + def tray_init(self): + report = {} + time_start = time.time() + prev_start_time = time_start + for addon in self.get_enabled_tray_addons(): + try: + addon._tray_manager = self.tray_manager + addon.tray_init() + addon.tray_initialized = True + except Exception: + self.log.warning( + "Addon \"{}\" crashed on `tray_init`.".format( + addon.name + ), + exc_info=True + ) + + now = time.time() + report[addon.__class__.__name__] = now - prev_start_time + prev_start_time = now + + if self._report is not None: + report[self._report_total_key] = time.time() - time_start + self._report["Tray init"] = report + + def tray_menu(self, tray_menu): + ordered_addons = [] + enabled_by_name = { + addon.name: addon + for addon in self.get_enabled_tray_addons() + } + + for name in self.addons_menu_order: + addon_by_name = enabled_by_name.pop(name, None) + if addon_by_name: + ordered_addons.append(addon_by_name) + ordered_addons.extend(enabled_by_name.values()) + + report = {} + time_start = time.time() + prev_start_time = time_start + for addon in ordered_addons: + if not addon.tray_initialized: + continue + + try: + addon.tray_menu(tray_menu) + except Exception: + # Unset initialized mark + addon.tray_initialized = False + self.log.warning( + "Addon \"{}\" crashed on `tray_menu`.".format( + addon.name + ), + exc_info=True + ) + now = time.time() + report[addon.__class__.__name__] = now - prev_start_time + prev_start_time = now + + if self._report is not None: + report[self._report_total_key] = time.time() - time_start + self._report["Tray menu"] = report + + def start_addons(self): + report = {} + time_start = time.time() + prev_start_time = time_start + for addon in self.get_enabled_tray_addons(): + if not addon.tray_initialized: + if isinstance(addon, ITrayService): + addon.set_service_failed_icon() + continue + + try: + addon.tray_start() + except Exception: + self.log.warning( + "Addon \"{}\" crashed on `tray_start`.".format( + addon.name + ), + exc_info=True + ) + now = time.time() + report[addon.__class__.__name__] = now - prev_start_time + prev_start_time = now + + if self._report is not None: + report[self._report_total_key] = time.time() - time_start + self._report["Addons start"] = report + + def on_exit(self): + for addon in self.get_enabled_tray_addons(): + if addon.tray_initialized: + try: + addon.tray_exit() + except Exception: + self.log.warning( + "Addon \"{}\" crashed on `tray_exit`.".format( + addon.name + ), + exc_info=True + ) + + # DEPRECATED + def get_enabled_tray_modules(self): + return self.get_enabled_tray_addons() + + def start_modules(self): + self.start_addons() diff --git a/client/ayon_core/addon/click_wrap.py b/client/ayon_core/addon/click_wrap.py new file mode 100644 index 0000000000..d49188312d --- /dev/null +++ b/client/ayon_core/addon/click_wrap.py @@ -0,0 +1,365 @@ +"""Simplified wrapper for 'click' python module. + +Module 'click' is used as main cli handler in AYON. Addons can +register their own subcommands with options. This wrapper allows to define +commands and options as with 'click', but without any dependency. + +Why not to use 'click' directly? Version of 'click' used in AYON +is not compatible with 'click' version used in some DCCs (e.g. Houdini 20+). +And updating 'click' would break other DCCs. + +How to use it? 
If you already have cli commands defined in addon, just replace +'click' with 'click_wrap' and it should work and modify your addon's cli +method to convert 'click_wrap' object to 'click' object. + +Before +```python +import click +from ayon_core.modules import AYONAddon + + +class ExampleAddon(AYONAddon): + name = "example" + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group(ExampleAddon.name, help="Example addon") +def cli_main(): + pass + + +@cli_main.command(help="Example command") +@click.option("--arg1", help="Example argument 1", default="default1") +@click.option("--arg2", help="Example argument 2", is_flag=True) +def mycommand(arg1, arg2): + print(arg1, arg2) +``` + +Now +``` +from ayon_core import click_wrap +from ayon_core.modules import AYONAddon + + +class ExampleAddon(AYONAddon): + name = "example" + + def cli(self, click_group): + click_group.add_command(cli_main.to_click_obj()) + + +@click_wrap.group(ExampleAddon.name, help="Example addon") +def cli_main(): + pass + + +@cli_main.command(help="Example command") +@click_wrap.option("--arg1", help="Example argument 1", default="default1") +@click_wrap.option("--arg2", help="Example argument 2", is_flag=True) +def mycommand(arg1, arg2): + print(arg1, arg2) +``` + + +Added small enhancements: +- most of the methods can be used as chained calls +- functions/methods 'command' and 'group' can be used in a way that + first argument is callback function and the rest are arguments + for click + +Example: + ```python + from ayon_core import click_wrap + from ayon_core.modules import AYONAddon + + + class ExampleAddon(AYONAddon): + name = "example" + + def cli(self, click_group): + # Define main command (name 'example') + main = click_wrap.group( + self._cli_main, name=self.name, help="Example addon" + ) + # Add subcommand (name 'mycommand') + ( + main.command( + self._cli_command, name="mycommand", help="Example command" + ) + .option( + "--arg1", help="Example argument 1", default="default1" + ) + .option( + "--arg2", help="Example argument 2", is_flag=True, + ) + ) + # Convert main command to click object and add it to parent group + click_group.add_command(main.to_click_obj()) + + def _cli_main(self): + pass + + def _cli_command(self, arg1, arg2): + print(arg1, arg2) + ``` + + ```shell + ayon addon example mycommand --arg1 value1 --arg2 + ``` +""" + +import collections + +FUNC_ATTR_NAME = "__ayon_cli_options__" + + +class Command(object): + def __init__(self, func, *args, **kwargs): + # Command function + self._func = func + # Command definition arguments + self._args = args + # Command definition kwargs + self._kwargs = kwargs + # Both 'options' and 'arguments' are stored to the same variable + # - keep order of options and arguments + self._options = getattr(func, FUNC_ATTR_NAME, []) + + def to_click_obj(self): + """Converts this object to click object. + + Returns: + click.Command: Click command object. + """ + return convert_to_click(self) + + # --- Methods for 'convert_to_click' function --- + def get_args(self): + """ + Returns: + tuple: Command definition arguments. + """ + return self._args + + def get_kwargs(self): + """ + Returns: + dict[str, Any]: Command definition kwargs. + """ + return self._kwargs + + def get_func(self): + """ + Returns: + Function: Function to invoke on command trigger. + """ + return self._func + + def iter_options(self): + """ + Yields: + tuple[str, tuple, dict]: Option type name with args and kwargs. 
+ """ + for item in self._options: + yield item + # ----------------------------------------------- + + def add_option(self, *args, **kwargs): + return self.add_option_by_type("option", *args, **kwargs) + + def add_argument(self, *args, **kwargs): + return self.add_option_by_type("argument", *args, **kwargs) + + option = add_option + argument = add_argument + + def add_option_by_type(self, option_name, *args, **kwargs): + self._options.append((option_name, args, kwargs)) + return self + + +class Group(Command): + def __init__(self, func, *args, **kwargs): + super(Group, self).__init__(func, *args, **kwargs) + # Store sub-groupd and sub-commands to the same variable + self._commands = [] + + # --- Methods for 'convert_to_click' function --- + def iter_commands(self): + for command in self._commands: + yield command + # ----------------------------------------------- + + def add_command(self, command): + """Add prepared command object as child. + + Args: + command (Command): Prepared command object. + """ + if command not in self._commands: + self._commands.append(command) + + def add_group(self, group): + """Add prepared group object as child. + + Args: + group (Group): Prepared group object. + """ + if group not in self._commands: + self._commands.append(group) + + def command(self, *args, **kwargs): + """Add child command. + + Returns: + Union[Command, Function]: New command object, or wrapper function. + """ + return self._add_new(Command, *args, **kwargs) + + def group(self, *args, **kwargs): + """Add child group. + + Returns: + Union[Group, Function]: New group object, or wrapper function. + """ + return self._add_new(Group, *args, **kwargs) + + def _add_new(self, target_cls, *args, **kwargs): + func = None + if args and callable(args[0]): + args = list(args) + func = args.pop(0) + args = tuple(args) + + def decorator(_func): + out = target_cls(_func, *args, **kwargs) + self._commands.append(out) + return out + + if func is not None: + return decorator(func) + return decorator + + +def convert_to_click(obj_to_convert): + """Convert wrapped object to click object. + + Args: + obj_to_convert (Command): Object to convert to click object. + + Returns: + click.Command: Click command object. 
+ """ + import click + + commands_queue = collections.deque() + commands_queue.append((obj_to_convert, None)) + top_obj = None + while commands_queue: + item = commands_queue.popleft() + command_obj, parent_obj = item + if not isinstance(command_obj, Command): + raise TypeError( + "Invalid type '{}' expected 'Command'".format( + type(command_obj) + ) + ) + + if isinstance(command_obj, Group): + click_obj = ( + click.group( + *command_obj.get_args(), + **command_obj.get_kwargs() + )(command_obj.get_func()) + ) + + else: + click_obj = ( + click.command( + *command_obj.get_args(), + **command_obj.get_kwargs() + )(command_obj.get_func()) + ) + + for item in command_obj.iter_options(): + option_name, args, kwargs = item + if option_name == "option": + click.option(*args, **kwargs)(click_obj) + elif option_name == "argument": + click.argument(*args, **kwargs)(click_obj) + else: + raise ValueError( + "Invalid option name '{}'".format(option_name) + ) + + if top_obj is None: + top_obj = click_obj + + if parent_obj is not None: + parent_obj.add_command(click_obj) + + if isinstance(command_obj, Group): + for command in command_obj.iter_commands(): + commands_queue.append((command, click_obj)) + + return top_obj + + +def group(*args, **kwargs): + func = None + if args and callable(args[0]): + args = list(args) + func = args.pop(0) + args = tuple(args) + + def decorator(_func): + return Group(_func, *args, **kwargs) + + if func is not None: + return decorator(func) + return decorator + + +def command(*args, **kwargs): + func = None + if args and callable(args[0]): + args = list(args) + func = args.pop(0) + args = tuple(args) + + def decorator(_func): + return Command(_func, *args, **kwargs) + + if func is not None: + return decorator(func) + return decorator + + +def argument(*args, **kwargs): + def decorator(func): + return _add_option_to_func( + func, "argument", *args, **kwargs + ) + return decorator + + +def option(*args, **kwargs): + def decorator(func): + return _add_option_to_func( + func, "option", *args, **kwargs + ) + return decorator + + +def _add_option_to_func(func, option_name, *args, **kwargs): + if isinstance(func, Command): + func.add_option_by_type(option_name, *args, **kwargs) + return func + + if not hasattr(func, FUNC_ATTR_NAME): + setattr(func, FUNC_ATTR_NAME, []) + cli_options = getattr(func, FUNC_ATTR_NAME) + cli_options.append((option_name, args, kwargs)) + return func diff --git a/client/ayon_core/addon/interfaces.py b/client/ayon_core/addon/interfaces.py new file mode 100644 index 0000000000..86e0c6e060 --- /dev/null +++ b/client/ayon_core/addon/interfaces.py @@ -0,0 +1,385 @@ +from abc import ABCMeta, abstractmethod + +import six + +from ayon_core import resources + + +class _AYONInterfaceMeta(ABCMeta): + """AYONInterface meta class to print proper string.""" + + def __str__(self): + return "<'AYONInterface.{}'>".format(self.__name__) + + def __repr__(self): + return str(self) + + +@six.add_metaclass(_AYONInterfaceMeta) +class AYONInterface: + """Base class of Interface that can be used as Mixin with abstract parts. + + This is way how AYON addon can define that contains specific predefined + functionality. + + Child classes of AYONInterface may be used as mixin in different + AYON addons which means they have to have implemented methods defined + in the interface. By default, interface does not have any abstract parts. + """ + + pass + + +class IPluginPaths(AYONInterface): + """Addon has plugin paths to return. 
+
+    Expected result is a dictionary with keys "publish", "create", "load",
+    "actions" or "inventory" and values as a list or a string.
+    {
+        "publish": ["path/to/publish_plugins"]
+    }
+    """
+
+    @abstractmethod
+    def get_plugin_paths(self):
+        pass
+
+    def _get_plugin_paths_by_type(self, plugin_type):
+        paths = self.get_plugin_paths()
+        if not paths or plugin_type not in paths:
+            return []
+
+        paths = paths[plugin_type]
+        if not paths:
+            return []
+
+        if not isinstance(paths, (list, tuple, set)):
+            paths = [paths]
+        return paths
+
+    def get_create_plugin_paths(self, host_name):
+        """Receive create plugin paths.
+
+        Give addons the ability to add create plugin paths based on host name.
+
+        Notes:
+            Default implementation uses 'get_plugin_paths' and always returns
+            all create plugin paths.
+
+        Args:
+            host_name (str): For which host are the plugins meant.
+        """
+
+        return self._get_plugin_paths_by_type("create")
+
+    def get_load_plugin_paths(self, host_name):
+        """Receive load plugin paths.
+
+        Give addons the ability to add load plugin paths based on host name.
+
+        Notes:
+            Default implementation uses 'get_plugin_paths' and always returns
+            all load plugin paths.
+
+        Args:
+            host_name (str): For which host are the plugins meant.
+        """
+
+        return self._get_plugin_paths_by_type("load")
+
+    def get_publish_plugin_paths(self, host_name):
+        """Receive publish plugin paths.
+
+        Give addons the ability to add publish plugin paths based on host
+        name.
+
+        Notes:
+            Default implementation uses 'get_plugin_paths' and always returns
+            all publish plugin paths.
+
+        Args:
+            host_name (str): For which host are the plugins meant.
+        """
+
+        return self._get_plugin_paths_by_type("publish")
+
+    def get_inventory_action_paths(self, host_name):
+        """Receive inventory action paths.
+
+        Give addons the ability to add inventory action plugin paths.
+
+        Notes:
+            Default implementation uses 'get_plugin_paths' and always returns
+            all inventory action paths.
+
+        Args:
+            host_name (str): For which host are the plugins meant.
+        """
+
+        return self._get_plugin_paths_by_type("inventory")
+
+
+class ITrayAddon(AYONInterface):
+    """Addon has special procedures when used in Tray tool.
+
+    IMPORTANT:
+    The addon still must be usable when it is not used in tray, even if
+    it would do nothing.
+    """
+
+    tray_initialized = False
+    _tray_manager = None
+
+    @abstractmethod
+    def tray_init(self):
+        """Initialization part of tray implementation.
+
+        Triggered between `initialization` and `connect_with_addons`.
+
+        This is where GUIs should be loaded or tray specific parts should be
+        prepared.
+        """
+
+        pass
+
+    @abstractmethod
+    def tray_menu(self, tray_menu):
+        """Add addon's action to tray menu."""
+
+        pass
+
+    @abstractmethod
+    def tray_start(self):
+        """Start procedure in tray tool."""
+
+        pass
+
+    @abstractmethod
+    def tray_exit(self):
+        """Cleanup method which is executed on tray shutdown.
+
+        This is the place where all threads should be shut down.
+        """
+
+        pass
+
+    def execute_in_main_thread(self, callback):
+        """Push a callback to the queue or process it on the main thread.
+
+        Some callbacks need to be processed on the main thread (menu actions
+        must be added on the main thread or they won't get triggered etc.)
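+
+        Example (illustrative callback)::
+
+            def _refresh_menu():
+                ...  # anything that must run on the Qt main thread
+
+            self.execute_in_main_thread(_refresh_menu)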
+ """ + + if not self.tray_initialized: + # TODO Called without initialized tray, still main thread needed + try: + callback() + + except Exception: + self.log.warning( + "Failed to execute {} in main thread".format(callback), + exc_info=True) + + return + self.manager.tray_manager.execute_in_main_thread(callback) + + def show_tray_message(self, title, message, icon=None, msecs=None): + """Show tray message. + + Args: + title (str): Title of message. + message (str): Content of message. + icon (QSystemTrayIcon.MessageIcon): Message's icon. Default is + Information icon, may differ by Qt version. + msecs (int): Duration of message visibility in milliseconds. + Default is 10000 msecs, may differ by Qt version. + """ + + if self._tray_manager: + self._tray_manager.show_tray_message(title, message, icon, msecs) + + def add_doubleclick_callback(self, callback): + if hasattr(self.manager, "add_doubleclick_callback"): + self.manager.add_doubleclick_callback(self, callback) + + +class ITrayAction(ITrayAddon): + """Implementation of Tray action. + + Add action to tray menu which will trigger `on_action_trigger`. + It is expected to be used for showing tools. + + Methods `tray_start`, `tray_exit` and `connect_with_addons` are overridden + as it's not expected that action will use them. But it is possible if + necessary. + """ + + admin_action = False + _admin_submenu = None + _action_item = None + + @property + @abstractmethod + def label(self): + """Service label showed in menu.""" + pass + + @abstractmethod + def on_action_trigger(self): + """What happens on actions click.""" + pass + + def tray_menu(self, tray_menu): + from qtpy import QtWidgets + + if self.admin_action: + menu = self.admin_submenu(tray_menu) + action = QtWidgets.QAction(self.label, menu) + menu.addAction(action) + if not menu.menuAction().isVisible(): + menu.menuAction().setVisible(True) + + else: + action = QtWidgets.QAction(self.label, tray_menu) + tray_menu.addAction(action) + + action.triggered.connect(self.on_action_trigger) + self._action_item = action + + def tray_start(self): + return + + def tray_exit(self): + return + + @staticmethod + def admin_submenu(tray_menu): + if ITrayAction._admin_submenu is None: + from qtpy import QtWidgets + + admin_submenu = QtWidgets.QMenu("Admin", tray_menu) + admin_submenu.menuAction().setVisible(False) + ITrayAction._admin_submenu = admin_submenu + return ITrayAction._admin_submenu + + +class ITrayService(ITrayAddon): + # Module's property + menu_action = None + + # Class properties + _services_submenu = None + _icon_failed = None + _icon_running = None + _icon_idle = None + + @property + @abstractmethod + def label(self): + """Service label showed in menu.""" + pass + + # TODO be able to get any sort of information to show/print + # @abstractmethod + # def get_service_info(self): + # pass + + @staticmethod + def services_submenu(tray_menu): + if ITrayService._services_submenu is None: + from qtpy import QtWidgets + + services_submenu = QtWidgets.QMenu("Services", tray_menu) + services_submenu.menuAction().setVisible(False) + ITrayService._services_submenu = services_submenu + return ITrayService._services_submenu + + @staticmethod + def add_service_action(action): + ITrayService._services_submenu.addAction(action) + if not ITrayService._services_submenu.menuAction().isVisible(): + ITrayService._services_submenu.menuAction().setVisible(True) + + @staticmethod + def _load_service_icons(): + from qtpy import QtGui + + ITrayService._failed_icon = QtGui.QIcon( + 
+            resources.get_resource("icons", "circle_red.png")
+        )
+        ITrayService._icon_running = QtGui.QIcon(
+            resources.get_resource("icons", "circle_green.png")
+        )
+        ITrayService._icon_idle = QtGui.QIcon(
+            resources.get_resource("icons", "circle_orange.png")
+        )
+
+    @staticmethod
+    def get_icon_running():
+        if ITrayService._icon_running is None:
+            ITrayService._load_service_icons()
+        return ITrayService._icon_running
+
+    @staticmethod
+    def get_icon_idle():
+        if ITrayService._icon_idle is None:
+            ITrayService._load_service_icons()
+        return ITrayService._icon_idle
+
+    @staticmethod
+    def get_icon_failed():
+        if ITrayService._icon_failed is None:
+            ITrayService._load_service_icons()
+        return ITrayService._icon_failed
+
+    def tray_menu(self, tray_menu):
+        from qtpy import QtWidgets
+
+        action = QtWidgets.QAction(
+            self.label,
+            self.services_submenu(tray_menu)
+        )
+        self.menu_action = action
+
+        self.add_service_action(action)
+
+        self.set_service_running_icon()
+
+    def set_service_running_icon(self):
+        """Change icon of a QAction to green circle."""
+
+        if self.menu_action:
+            self.menu_action.setIcon(self.get_icon_running())
+
+    def set_service_failed_icon(self):
+        """Change icon of a QAction to red circle."""
+
+        if self.menu_action:
+            self.menu_action.setIcon(self.get_icon_failed())
+
+    def set_service_idle_icon(self):
+        """Change icon of a QAction to orange circle."""
+
+        if self.menu_action:
+            self.menu_action.setIcon(self.get_icon_idle())
+
+
+class IHostAddon(AYONInterface):
+    """Addon which also contains a host implementation."""
+
+    @property
+    @abstractmethod
+    def host_name(self):
+        """Name of host which addon represents."""
+
+        pass
+
+    def get_workfile_extensions(self):
+        """Define workfile extensions for host.
+
+        Not all hosts support workfiles, thus this is an optional
+        implementation.
+
+        Returns:
+            List[str]: Extensions used for workfiles, including the dot.
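+
+        Example (hypothetical Maya-like host)::
+
+            def get_workfile_extensions(self):
+                return [".ma", ".mb"]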
+ """ + + return [] diff --git a/client/ayon_core/cli.py b/client/ayon_core/cli.py new file mode 100644 index 0000000000..786b8fb3b9 --- /dev/null +++ b/client/ayon_core/cli.py @@ -0,0 +1,332 @@ +# -*- coding: utf-8 -*- +"""Package for handling pype command line arguments.""" +import os +import sys +import code +import traceback + +import click +import acre + +from ayon_core import AYON_CORE_ROOT +from ayon_core.addon import AddonsManager +from ayon_core.settings import get_general_environments + +from .cli_commands import Commands + + +class AliasedGroup(click.Group): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._aliases = {} + + def set_alias(self, src_name, dst_name): + self._aliases[dst_name] = src_name + + def get_command(self, ctx, cmd_name): + if cmd_name in self._aliases: + cmd_name = self._aliases[cmd_name] + return super().get_command(ctx, cmd_name) + + +@click.group(cls=AliasedGroup, invoke_without_command=True) +@click.pass_context +@click.option("--use-version", + expose_value=False, help="use specified version") +@click.option("--use-staging", is_flag=True, + expose_value=False, help="use staging variants") +@click.option("--list-versions", is_flag=True, expose_value=False, + help="list all detected versions.") +@click.option("--validate-version", expose_value=False, + help="validate given version integrity") +@click.option("--debug", is_flag=True, expose_value=False, + help="Enable debug") +@click.option("--verbose", expose_value=False, + help=("Change AYON log level (debug - critical or 0-50)")) +@click.option("--automatic-tests", is_flag=True, expose_value=False, + help=("Run in automatic tests mode")) +def main_cli(ctx): + """Pype is main command serving as entry point to pipeline system. + + It wraps different commands together. + """ + + if ctx.invoked_subcommand is None: + # Print help if headless mode is used + if os.getenv("AYON_HEADLESS_MODE") == "1": + print(ctx.get_help()) + sys.exit(0) + else: + ctx.invoke(tray) + + +@main_cli.command() +def tray(): + """Launch pype tray. + + Default action of pype command is to launch tray widget to control basic + aspects of pype. See documentation for more information. + """ + Commands.launch_tray() + + +@Commands.add_addons +@main_cli.group(help="Run command line arguments of AYON addons") +@click.pass_context +def addon(ctx): + """Addon specific commands created dynamically. + + These commands are generated dynamically by currently loaded addons. + """ + pass + + +# Add 'addon' as alias for module +main_cli.set_alias("addon", "module") + + +@main_cli.command() +@click.argument("output_json_path") +@click.option("--project", help="Project name", default=None) +@click.option("--asset", help="Asset name", default=None) +@click.option("--task", help="Task name", default=None) +@click.option("--app", help="Application name", default=None) +@click.option( + "--envgroup", help="Environment group (e.g. \"farm\")", default=None +) +def extractenvironments(output_json_path, project, asset, task, app, envgroup): + """Extract environment variables for entered context to a json file. + + Entered output filepath will be created if does not exists. + + All context options must be passed otherwise only pype's global + environments will be extracted. 
+
+    Context options are "project", "asset", "task", "app"
+    """
+    Commands.extractenvironments(
+        output_json_path, project, asset, task, app, envgroup
+    )
+
+
+@main_cli.command()
+@click.argument("paths", nargs=-1)
+@click.option("-t", "--targets", help="Targets module", default=None,
+              multiple=True)
+@click.option("-g", "--gui", is_flag=True,
+              help="Show Publish UI", default=False)
+def publish(paths, targets, gui):
+    """Start CLI publishing.
+
+    Publish collects json files from paths provided as an argument.
+    More than one path is allowed.
+    """
+
+    Commands.publish(list(paths), targets, gui)
+
+
+@main_cli.command(context_settings={"ignore_unknown_options": True})
+def publish_report_viewer():
+    from ayon_core.tools.publisher.publish_report_viewer import main
+
+    sys.exit(main())
+
+
+@main_cli.command()
+@click.argument("output_path")
+@click.option("--project", help="Define project context")
+@click.option("--asset", help="Define asset in project (project must be set)")
+@click.option(
+    "--strict",
+    is_flag=True,
+    help="Full context must be set otherwise dialog can't be closed."
+)
+def contextselection(
+    output_path,
+    project,
+    asset,
+    strict
+):
+    """Show Qt dialog to select context.
+
+    Context is project name, asset name and task name. The result is stored
+    into a json file which path is passed in the first argument.
+    """
+    Commands.contextselection(
+        output_path,
+        project,
+        asset,
+        strict
+    )
+
+
+@main_cli.command(
+    context_settings=dict(
+        ignore_unknown_options=True,
+        allow_extra_args=True))
+@click.argument("script", required=True, type=click.Path(exists=True))
+def run(script):
+    """Run python script in AYON context."""
+    import runpy
+
+    if not script:
+        print("Error: missing path to script file.")
+    else:
+        args = sys.argv
+        args.remove("run")
+        args.remove(script)
+        sys.argv = args
+        args_string = " ".join(args[1:])
+        print(f"... running: {script} {args_string}")
+        runpy.run_path(script, run_name="__main__")
+
+
+@main_cli.command()
+@click.argument("folder", nargs=-1)
+@click.option("-m",
+              "--mark",
+              help="Run tests marked by",
+              default=None)
+@click.option("-p",
+              "--pyargs",
+              help="Run tests from package",
+              default=None)
+@click.option("-t",
+              "--test_data_folder",
+              help="Unzipped directory path of test file",
+              default=None)
+@click.option("-s",
+              "--persist",
+              help="Persist test DB and published files after test end",
+              default=None)
+@click.option("-a",
+              "--app_variant",
+              help="Provide specific app variant for test, empty for latest",
+              default=None)
+@click.option("--app_group",
+              help="Provide specific app group for test, empty for default",
+              default=None)
+@click.option("--timeout",
+              help="Provide specific timeout value for test case",
+              default=None)
+@click.option("-so",
+              "--setup_only",
+              help="Only create dbs, do not run tests",
+              default=None)
+@click.option("--mongo_url",
+              help="MongoDB for testing.",
+              default=None)
+@click.option("--dump_databases",
+              help="Dump all databases to data folder.",
+              default=None)
+def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
+             timeout, setup_only, mongo_url, app_group, dump_databases):
+    """Run all automatic tests after proper initialization via start.py"""
+    Commands.run_tests(folder, mark, pyargs, test_data_folder,
+                       persist, app_variant, timeout, setup_only,
+                       mongo_url, app_group, dump_databases)
+
+
+@main_cli.command()
+def interactive():
+    """Interactive (Python like) console.
+
+    Helpful not only for development, it allows working directly with the
+    python interpreter.
+
+    Warning:
+        Executable 'ayon.exe' on Windows won't work.
+    """
+    version = os.environ["AYON_VERSION"]
+    banner = (
+        f"AYON launcher {version}\nPython {sys.version} on {sys.platform}"
+    )
+    code.interact(banner)
+
+
+@main_cli.command()
+@click.option("--build", help="Print only build version",
+              is_flag=True, default=False)
+def version(build):
+    """Print AYON launcher version.
+
+    Deprecated:
+        This function has questionable usage.
+    """
+    print(os.environ["AYON_VERSION"])
+
+
+def _set_global_environments() -> None:
+    """Set global AYON environments."""
+    general_env = get_general_environments()
+
+    # First resolve general environment because merge doesn't expect
+    #   values to be list.
+    # TODO: switch to OpenPype environment functions
+    merged_env = acre.merge(
+        acre.compute(acre.parse(general_env), cleanup=False),
+        dict(os.environ)
+    )
+    env = acre.compute(
+        merged_env,
+        cleanup=False
+    )
+    os.environ.clear()
+    os.environ.update(env)
+
+    # Hardcoded default values
+    os.environ["PYBLISH_GUI"] = "pyblish_pype"
+    # Change scale factor only if it is not set
+    if "QT_AUTO_SCREEN_SCALE_FACTOR" not in os.environ:
+        os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
+
+
+def _set_addons_environments():
+    """Set global environments for AYON addons.
+
+    This requires 'ayon_core' to be available in `sys.path`.
+    """
+
+    addons_manager = AddonsManager()
+
+    # Merge environments with current environments and update values
+    if module_envs := addons_manager.collect_global_environments():
+        parsed_envs = acre.parse(module_envs)
+        env = acre.merge(parsed_envs, dict(os.environ))
+        os.environ.clear()
+        os.environ.update(env)
+
+
+def main(*args, **kwargs):
+    python_path = os.getenv("PYTHONPATH", "")
+    split_paths = python_path.split(os.pathsep)
+
+    additional_paths = [
+        # Add ayon_core tools
+        os.path.join(AYON_CORE_ROOT, "tools"),
+        # Add common ayon_core vendor
+        # (common for multiple Python interpreter versions)
+        os.path.join(AYON_CORE_ROOT, "vendor", "python", "common")
+    ]
+    for path in additional_paths:
+        if path not in split_paths:
+            split_paths.insert(0, path)
+        if path not in sys.path:
+            sys.path.insert(0, path)
+    os.environ["PYTHONPATH"] = os.pathsep.join(split_paths)
+
+    print(">>> loading environments ...")
+    print("  - global AYON ...")
+    _set_global_environments()
+    print("  - for addons ...")
+    _set_addons_environments()
+
+    try:
+        main_cli(obj={}, prog_name="ayon")
+    except Exception:  # noqa
+        exc_info = sys.exc_info()
+        print("!!! AYON crashed:")
+        traceback.print_exception(*exc_info)
+        sys.exit(1)
diff --git a/client/ayon_core/cli_commands.py b/client/ayon_core/cli_commands.py
new file mode 100644
index 0000000000..36f7f0b836
--- /dev/null
+++ b/client/ayon_core/cli_commands.py
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+"""Implementation of AYON commands."""
+import os
+import sys
+import json
+
+
+class Commands:
+    """Class implementing commands used by AYON.
+
+    Most of its methods are called by :mod:`cli` module.
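+
+    Example (equivalent to running ``ayon tray``)::
+
+        Commands.launch_tray()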
+ """ + @staticmethod + def launch_tray(): + from ayon_core.lib import Logger + from ayon_core.tools import tray + + Logger.set_process_name("Tray") + + tray.main() + + @staticmethod + def add_addons(click_func): + """Modules/Addons can add their cli commands dynamically.""" + + from ayon_core.lib import Logger + from ayon_core.addon import AddonsManager + + manager = AddonsManager() + log = Logger.get_logger("CLI-AddModules") + for addon in manager.addons: + try: + addon.cli(click_func) + + except Exception: + log.warning( + "Failed to add cli command for module \"{}\"".format( + addon.name + ) + ) + return click_func + + @staticmethod + def publish(paths, targets=None, gui=False): + """Start headless publishing. + + Publish use json from passed paths argument. + + Args: + paths (list): Paths to jsons. + targets (string): What module should be targeted + (to choose validator for example) + gui (bool): Show publish UI. + + Raises: + RuntimeError: When there is no path to process. + """ + + from ayon_core.lib import Logger + from ayon_core.lib.applications import ( + get_app_environments_for_context, + LaunchTypes, + ) + from ayon_core.addon import AddonsManager + from ayon_core.pipeline import ( + install_ayon_plugins, + get_global_context, + ) + from ayon_core.tools.utils.host_tools import show_publish + from ayon_core.tools.utils.lib import qt_app_context + + # Register target and host + import pyblish.api + import pyblish.util + + log = Logger.get_logger("CLI-publish") + + install_ayon_plugins() + + manager = AddonsManager() + + publish_paths = manager.collect_plugin_paths()["publish"] + + for path in publish_paths: + pyblish.api.register_plugin_path(path) + + if not any(paths): + raise RuntimeError("No publish paths specified") + + app_full_name = os.getenv("AVALON_APP_NAME") + if app_full_name: + context = get_global_context() + env = get_app_environments_for_context( + context["project_name"], + context["asset_name"], + context["task_name"], + app_full_name, + launch_type=LaunchTypes.farm_publish, + ) + os.environ.update(env) + + pyblish.api.register_host("shell") + + if targets: + for target in targets: + print(f"setting target: {target}") + pyblish.api.register_target(target) + else: + pyblish.api.register_target("farm") + + os.environ["AYON_PUBLISH_DATA"] = os.pathsep.join(paths) + os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib + + log.info("Running publish ...") + + plugins = pyblish.api.discover() + print("Using plugins:") + for plugin in plugins: + print(plugin) + + if gui: + with qt_app_context(): + show_publish() + else: + # Error exit as soon as any error occurs. + error_format = ("Failed {plugin.__name__}: " + "{error} -- {error.traceback}") + + for result in pyblish.util.publish_iter(): + if result["error"]: + log.error(error_format.format(**result)) + # uninstall() + sys.exit(1) + + log.info("Publish finished.") + + @staticmethod + def extractenvironments(output_json_path, project, asset, task, app, + env_group): + """Produces json file with environment based on project and app. + + Called by Deadline plugin to propagate environment into render jobs. 
+ """ + + from ayon_core.lib.applications import ( + get_app_environments_for_context, + LaunchTypes, + ) + + if all((project, asset, task, app)): + env = get_app_environments_for_context( + project, + asset, + task, + app, + env_group=env_group, + launch_type=LaunchTypes.farm_render + ) + else: + env = os.environ.copy() + + output_dir = os.path.dirname(output_json_path) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + with open(output_json_path, "w") as file_stream: + json.dump(env, file_stream, indent=4) + + @staticmethod + def contextselection(output_path, project_name, asset_name, strict): + from ayon_core.tools.context_dialog import main + + main(output_path, project_name, asset_name, strict) + + @staticmethod + def run_tests(folder, mark, pyargs, + test_data_folder, persist, app_variant, timeout, setup_only, + mongo_url, app_group, dump_databases): + """ + Runs tests from 'folder' + + Args: + folder (str): relative path to folder with tests + mark (str): label to run tests marked by it (slow etc) + pyargs (str): package path to test + test_data_folder (str): url to unzipped folder of test data + persist (bool): True if keep test db and published after test + end + app_variant (str): variant (eg 2020 for AE), empty if use + latest installed version + timeout (int): explicit timeout for single test + setup_only (bool): if only preparation steps should be + triggered, no tests (useful for debugging/development) + mongo_url (str): url to Openpype Mongo database + """ + print("run_tests") + if folder: + folder = " ".join(list(folder)) + else: + folder = "../tests" + + # disable warnings and show captured stdout even if success + args = [ + "--disable-pytest-warnings", + "--capture=sys", + "--print", + "-W ignore::DeprecationWarning", + "-rP", + folder + ] + + if mark: + args.extend(["-m", mark]) + + if pyargs: + args.extend(["--pyargs", pyargs]) + + if test_data_folder: + args.extend(["--test_data_folder", test_data_folder]) + + if persist: + args.extend(["--persist", persist]) + + if app_group: + args.extend(["--app_group", app_group]) + + if app_variant: + args.extend(["--app_variant", app_variant]) + + if timeout: + args.extend(["--timeout", timeout]) + + if setup_only: + args.extend(["--setup_only", setup_only]) + + if mongo_url: + args.extend(["--mongo_url", mongo_url]) + + if dump_databases: + msg = "dump_databases format is not recognized: {}".format( + dump_databases + ) + assert dump_databases in ["bson", "json"], msg + args.extend(["--dump_databases", dump_databases]) + + print("run_tests args: {}".format(args)) + import pytest + pytest.main(args) diff --git a/client/ayon_core/client/__init__.py b/client/ayon_core/client/__init__.py new file mode 100644 index 0000000000..00f4d9863f --- /dev/null +++ b/client/ayon_core/client/__init__.py @@ -0,0 +1,110 @@ +from .utils import get_ayon_server_api_connection + +from .entities import ( + get_projects, + get_project, + get_whole_project, + + get_asset_by_id, + get_asset_by_name, + get_assets, + get_archived_assets, + get_asset_ids_with_subsets, + + get_subset_by_id, + get_subset_by_name, + get_subsets, + get_subset_families, + + get_version_by_id, + get_version_by_name, + get_versions, + get_hero_version_by_id, + get_hero_version_by_subset_id, + get_hero_versions, + get_last_versions, + get_last_version_by_subset_id, + get_last_version_by_subset_name, + get_output_link_versions, + + version_is_latest, + + get_representation_by_id, + get_representation_by_name, + get_representations, + get_representation_parents, + 
get_representations_parents, + get_archived_representations, + + get_thumbnail, + get_thumbnails, + get_thumbnail_id_from_source, + + get_workfile_info, + + get_asset_name_identifier, +) + +from .entity_links import ( + get_linked_asset_ids, + get_linked_assets, + get_linked_representation_id, +) + +from .operations import ( + create_project, +) + + +__all__ = ( + "get_ayon_server_api_connection", + + "get_projects", + "get_project", + "get_whole_project", + + "get_asset_by_id", + "get_asset_by_name", + "get_assets", + "get_archived_assets", + "get_asset_ids_with_subsets", + + "get_subset_by_id", + "get_subset_by_name", + "get_subsets", + "get_subset_families", + + "get_version_by_id", + "get_version_by_name", + "get_versions", + "get_hero_version_by_id", + "get_hero_version_by_subset_id", + "get_hero_versions", + "get_last_versions", + "get_last_version_by_subset_id", + "get_last_version_by_subset_name", + "get_output_link_versions", + + "version_is_latest", + + "get_representation_by_id", + "get_representation_by_name", + "get_representations", + "get_representation_parents", + "get_representations_parents", + "get_archived_representations", + + "get_thumbnail", + "get_thumbnails", + "get_thumbnail_id_from_source", + + "get_workfile_info", + + "get_linked_asset_ids", + "get_linked_assets", + "get_linked_representation_id", + + "create_project", + + "get_asset_name_identifier", +) diff --git a/client/ayon_core/client/constants.py b/client/ayon_core/client/constants.py new file mode 100644 index 0000000000..379c0d665f --- /dev/null +++ b/client/ayon_core/client/constants.py @@ -0,0 +1,28 @@ +# --- Folders --- +DEFAULT_FOLDER_FIELDS = { + "id", + "name", + "path", + "parentId", + "active", + "parents", + "thumbnailId" +} + +REPRESENTATION_FILES_FIELDS = { + "files.name", + "files.hash", + "files.id", + "files.path", + "files.size", +} + +CURRENT_PROJECT_SCHEMA = "openpype:project-3.0" +CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0" +CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0" +CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0" +CURRENT_VERSION_SCHEMA = "openpype:version-3.0" +CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0" +CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0" +CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0" +CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0" diff --git a/openpype/client/server/conversion_utils.py b/client/ayon_core/client/conversion_utils.py similarity index 99% rename from openpype/client/server/conversion_utils.py rename to client/ayon_core/client/conversion_utils.py index e8d3c4cbe4..192eb194db 100644 --- a/openpype/client/server/conversion_utils.py +++ b/client/ayon_core/client/conversion_utils.py @@ -5,8 +5,8 @@ import six -from openpype.client.operations_base import REMOVED_VALUE -from openpype.client.mongo.operations import ( +from ayon_core.client.operations_base import REMOVED_VALUE +from .constants import ( CURRENT_PROJECT_SCHEMA, CURRENT_ASSET_DOC_SCHEMA, CURRENT_SUBSET_SCHEMA, @@ -14,8 +14,8 @@ CURRENT_HERO_VERSION_SCHEMA, CURRENT_REPRESENTATION_SCHEMA, CURRENT_WORKFILE_INFO_SCHEMA, + REPRESENTATION_FILES_FIELDS, ) -from .constants import REPRESENTATION_FILES_FIELDS from .utils import create_entity_id, prepare_entity_changes # --- Project entity --- diff --git a/client/ayon_core/client/entities.py b/client/ayon_core/client/entities.py new file mode 100644 index 0000000000..5ef2571421 --- /dev/null +++ b/client/ayon_core/client/entities.py @@ -0,0 +1,741 @@ +import collections + +from .constants import 
CURRENT_THUMBNAIL_SCHEMA
+from .utils import get_ayon_server_api_connection
+from .openpype_comp import get_folders_with_tasks
+from .conversion_utils import (
+    project_fields_v3_to_v4,
+    convert_v4_project_to_v3,
+
+    folder_fields_v3_to_v4,
+    convert_v4_folder_to_v3,
+
+    subset_fields_v3_to_v4,
+    convert_v4_subset_to_v3,
+
+    version_fields_v3_to_v4,
+    convert_v4_version_to_v3,
+
+    representation_fields_v3_to_v4,
+    convert_v4_representation_to_v3,
+
+    workfile_info_fields_v3_to_v4,
+    convert_v4_workfile_info_to_v3,
+)
+
+
+def get_asset_name_identifier(asset_doc):
+    """Get asset name identifier by asset document.
+
+    This function is added because of the AYON implementation, where the name
+    identifier is not just a name but a full path.
+
+    Asset document must have "name" key, and "data.parents" when in AYON mode.
+
+    Args:
+        asset_doc (dict[str, Any]): Asset document.
+
+    Returns:
+        str: Full folder path used as asset name identifier.
+    """
+
+    parents = list(asset_doc["data"]["parents"])
+    parents.append(asset_doc["name"])
+    return "/" + "/".join(parents)
+
+
+def get_projects(active=True, inactive=False, library=None, fields=None):
+    # Skip if both are disabled
+    if not active and not inactive:
+        return
+
+    if active and inactive:
+        active = None
+    elif active:
+        active = True
+    elif inactive:
+        active = False
+
+    con = get_ayon_server_api_connection()
+    fields = project_fields_v3_to_v4(fields, con)
+    for project in con.get_projects(active, library, fields=fields):
+        yield convert_v4_project_to_v3(project)
+
+
+def get_project(project_name, active=True, inactive=False, fields=None):
+    # 'active' and 'inactive' are kept only for backwards compatibility
+    con = get_ayon_server_api_connection()
+    fields = project_fields_v3_to_v4(fields, con)
+    return convert_v4_project_to_v3(
+        con.get_project(project_name, fields=fields)
+    )
+
+
+def get_whole_project(*args, **kwargs):
+    raise NotImplementedError("'get_whole_project' not implemented")
+
+
+def _get_subsets(
+    project_name,
+    subset_ids=None,
+    subset_names=None,
+    folder_ids=None,
+    names_by_folder_ids=None,
+    archived=False,
+    fields=None
+):
+    # Convert fields and add minimum required fields
+    con = get_ayon_server_api_connection()
+    fields = subset_fields_v3_to_v4(fields, con)
+    if fields is not None:
+        for key in (
+            "id",
+            "active"
+        ):
+            fields.add(key)
+
+    active = True
+    if archived:
+        active = None
+
+    for subset in con.get_products(
+        project_name,
+        product_ids=subset_ids,
+        product_names=subset_names,
+        folder_ids=folder_ids,
+        names_by_folder_ids=names_by_folder_ids,
+        active=active,
+        fields=fields,
+    ):
+        yield convert_v4_subset_to_v3(subset)
+
+
+def _get_versions(
+    project_name,
+    version_ids=None,
+    subset_ids=None,
+    versions=None,
+    hero=True,
+    standard=True,
+    latest=None,
+    active=None,
+    fields=None
+):
+    con = get_ayon_server_api_connection()
+
+    fields = version_fields_v3_to_v4(fields, con)
+
+    # Make sure 'productId' and 'version' are available when hero versions
+    #   are queried
+    if fields and hero:
+        fields = set(fields)
+        fields |= {"productId", "version"}
+
+    queried_versions = con.get_versions(
+        project_name,
+        version_ids=version_ids,
+        product_ids=subset_ids,
+        versions=versions,
+        hero=hero,
+        standard=standard,
+        latest=latest,
+        active=active,
+        fields=fields
+    )
+
+    version_entities = []
+    hero_versions = []
+    for version in queried_versions:
+        if version["version"] < 0:
+            hero_versions.append(version)
+        else:
+            version_entities.append(convert_v4_version_to_v3(version))
+
+    if hero_versions:
+        subset_ids = set()
+        versions_nums = set()
+        for hero_version in hero_versions:
+            versions_nums.add(abs(hero_version["version"]))
+
subset_ids.add(hero_version["productId"]) + + hero_eq_versions = con.get_versions( + project_name, + product_ids=subset_ids, + versions=versions_nums, + hero=False, + fields=["id", "version", "productId"] + ) + hero_eq_by_subset_id = collections.defaultdict(list) + for version in hero_eq_versions: + hero_eq_by_subset_id[version["productId"]].append(version) + + for hero_version in hero_versions: + abs_version = abs(hero_version["version"]) + subset_id = hero_version["productId"] + version_id = None + for version in hero_eq_by_subset_id.get(subset_id, []): + if version["version"] == abs_version: + version_id = version["id"] + break + conv_hero = convert_v4_version_to_v3(hero_version) + conv_hero["version_id"] = version_id + version_entities.append(conv_hero) + + return version_entities + + +def get_asset_by_id(project_name, asset_id, fields=None): + assets = get_assets( + project_name, asset_ids=[asset_id], fields=fields + ) + for asset in assets: + return asset + return None + + +def get_asset_by_name(project_name, asset_name, fields=None): + assets = get_assets( + project_name, asset_names=[asset_name], fields=fields + ) + for asset in assets: + return asset + return None + + +def _folders_query(project_name, con, fields, **kwargs): + if fields is None or "tasks" in fields: + folders = get_folders_with_tasks( + con, project_name, fields=fields, **kwargs + ) + + else: + folders = con.get_folders(project_name, fields=fields, **kwargs) + + for folder in folders: + yield folder + + +def get_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + archived=False, + fields=None +): + if not project_name: + return + + active = True + if archived: + active = None + + con = get_ayon_server_api_connection() + fields = folder_fields_v3_to_v4(fields, con) + kwargs = dict( + folder_ids=asset_ids, + parent_ids=parent_ids, + active=active, + ) + if not asset_names: + for folder in _folders_query(project_name, con, fields, **kwargs): + yield convert_v4_folder_to_v3(folder, project_name) + return + + new_asset_names = set() + folder_paths = set() + for name in asset_names: + if "/" in name: + folder_paths.add(name) + else: + new_asset_names.add(name) + + yielded_ids = set() + if folder_paths: + for folder in _folders_query( + project_name, con, fields, folder_paths=folder_paths, **kwargs + ): + yielded_ids.add(folder["id"]) + yield convert_v4_folder_to_v3(folder, project_name) + + if not new_asset_names: + return + + for folder in _folders_query( + project_name, con, fields, folder_names=new_asset_names, **kwargs + ): + if folder["id"] not in yielded_ids: + yielded_ids.add(folder["id"]) + yield convert_v4_folder_to_v3(folder, project_name) + + +def get_archived_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + fields=None +): + return get_assets( + project_name, + asset_ids, + asset_names, + parent_ids, + True, + fields + ) + + +def get_asset_ids_with_subsets(project_name, asset_ids=None): + con = get_ayon_server_api_connection() + return con.get_folder_ids_with_products(project_name, asset_ids) + + +def get_subset_by_id(project_name, subset_id, fields=None): + subsets = get_subsets( + project_name, subset_ids=[subset_id], fields=fields + ) + for subset in subsets: + return subset + return None + + +def get_subset_by_name(project_name, subset_name, asset_id, fields=None): + subsets = get_subsets( + project_name, + subset_names=[subset_name], + asset_ids=[asset_id], + fields=fields + ) + for subset in subsets: + return subset + return None + + +def 
get_subsets( + project_name, + subset_ids=None, + subset_names=None, + asset_ids=None, + names_by_asset_ids=None, + archived=False, + fields=None +): + return _get_subsets( + project_name, + subset_ids, + subset_names, + asset_ids, + names_by_asset_ids, + archived, + fields=fields + ) + + +def get_subset_families(project_name, subset_ids=None): + con = get_ayon_server_api_connection() + return con.get_product_type_names(project_name, subset_ids) + + +def get_version_by_id(project_name, version_id, fields=None): + versions = get_versions( + project_name, + version_ids=[version_id], + fields=fields, + hero=True + ) + for version in versions: + return version + return None + + +def get_version_by_name(project_name, version, subset_id, fields=None): + versions = get_versions( + project_name, + subset_ids=[subset_id], + versions=[version], + fields=fields + ) + for version in versions: + return version + return None + + +def get_versions( + project_name, + version_ids=None, + subset_ids=None, + versions=None, + hero=False, + fields=None +): + return _get_versions( + project_name, + version_ids, + subset_ids, + versions, + hero=hero, + standard=True, + fields=fields + ) + + +def get_hero_version_by_id(project_name, version_id, fields=None): + versions = get_hero_versions( + project_name, + version_ids=[version_id], + fields=fields + ) + for version in versions: + return version + return None + + +def get_hero_version_by_subset_id( + project_name, subset_id, fields=None +): + versions = get_hero_versions( + project_name, + subset_ids=[subset_id], + fields=fields + ) + for version in versions: + return version + return None + + +def get_hero_versions( + project_name, subset_ids=None, version_ids=None, fields=None +): + return _get_versions( + project_name, + version_ids=version_ids, + subset_ids=subset_ids, + hero=True, + standard=False, + fields=fields + ) + + +def get_last_versions(project_name, subset_ids, active=None, fields=None): + if fields: + fields = set(fields) + fields.add("parent") + + versions = _get_versions( + project_name, + subset_ids=subset_ids, + latest=True, + hero=False, + active=active, + fields=fields + ) + return { + version["parent"]: version + for version in versions + } + + +def get_last_version_by_subset_id(project_name, subset_id, fields=None): + versions = _get_versions( + project_name, + subset_ids=[subset_id], + latest=True, + hero=False, + fields=fields + ) + if not versions: + return None + return versions[0] + + +def get_last_version_by_subset_name( + project_name, + subset_name, + asset_id=None, + asset_name=None, + fields=None +): + if not asset_id and not asset_name: + return None + + if not asset_id: + asset = get_asset_by_name( + project_name, asset_name, fields=["_id"] + ) + if not asset: + return None + asset_id = asset["_id"] + + subset = get_subset_by_name( + project_name, subset_name, asset_id, fields=["_id"] + ) + if not subset: + return None + return get_last_version_by_subset_id( + project_name, subset["_id"], fields=fields + ) + + +def get_output_link_versions(project_name, version_id, fields=None): + if not version_id: + return [] + + con = get_ayon_server_api_connection() + version_links = con.get_version_links( + project_name, version_id, link_direction="out") + + version_ids = { + link["entityId"] + for link in version_links + if link["entityType"] == "version" + } + if not version_ids: + return [] + + return get_versions(project_name, version_ids=version_ids, fields=fields) + + +def version_is_latest(project_name, version_id): + con = 
get_ayon_server_api_connection()
+    return con.version_is_latest(project_name, version_id)
+
+
+def get_representation_by_id(project_name, representation_id, fields=None):
+    representations = get_representations(
+        project_name,
+        representation_ids=[representation_id],
+        fields=fields
+    )
+    for representation in representations:
+        return representation
+    return None
+
+
+def get_representation_by_name(
+    project_name, representation_name, version_id, fields=None
+):
+    representations = get_representations(
+        project_name,
+        representation_names=[representation_name],
+        version_ids=[version_id],
+        fields=fields
+    )
+    for representation in representations:
+        return representation
+    return None
+
+
+def get_representations(
+    project_name,
+    representation_ids=None,
+    representation_names=None,
+    version_ids=None,
+    context_filters=None,
+    names_by_version_ids=None,
+    archived=False,
+    standard=True,
+    fields=None
+):
+    if context_filters is not None:
+        # TODO should we add the support?
+        # - there was an ability to filter using regex
+        raise ValueError("OP v4 can't filter by representation context.")
+
+    if not archived and not standard:
+        return
+
+    if archived and not standard:
+        active = False
+    elif not archived and standard:
+        active = True
+    else:
+        active = None
+
+    con = get_ayon_server_api_connection()
+    fields = representation_fields_v3_to_v4(fields, con)
+    if fields and active is not None:
+        fields.add("active")
+
+    representations = con.get_representations(
+        project_name,
+        representation_ids=representation_ids,
+        representation_names=representation_names,
+        version_ids=version_ids,
+        names_by_version_ids=names_by_version_ids,
+        active=active,
+        fields=fields
+    )
+    for representation in representations:
+        yield convert_v4_representation_to_v3(representation)
+
+
+def get_representation_parents(project_name, representation):
+    if not representation:
+        return None
+
+    repre_id = representation["_id"]
+    parents_by_repre_id = get_representations_parents(
+        project_name, [representation]
+    )
+    return parents_by_repre_id[repre_id]
+
+
+def get_representations_parents(project_name, representations):
+    repre_ids = {
+        repre["_id"]
+        for repre in representations
+    }
+    con = get_ayon_server_api_connection()
+    parents_by_repre_id = con.get_representations_parents(
+        project_name, repre_ids
+    )
+    folder_ids = set()
+    for parents in parents_by_repre_id.values():
+        folder_ids.add(parents[2]["id"])
+
+    tasks_by_folder_id = {}
+
+    new_parents = {}
+    for repre_id, parents in parents_by_repre_id.items():
+        version, subset, folder, project = parents
+        folder_tasks = tasks_by_folder_id.get(folder["id"]) or {}
+        folder["tasks"] = folder_tasks
+        new_parents[repre_id] = (
+            convert_v4_version_to_v3(version),
+            convert_v4_subset_to_v3(subset),
+            convert_v4_folder_to_v3(folder, project_name),
+            project
+        )
+    return new_parents
+
+
+def get_archived_representations(
+    project_name,
+    representation_ids=None,
+    representation_names=None,
+    version_ids=None,
+    context_filters=None,
+    names_by_version_ids=None,
+    fields=None
+):
+    return get_representations(
+        project_name,
+        representation_ids=representation_ids,
+        representation_names=representation_names,
+        version_ids=version_ids,
+        context_filters=context_filters,
+        names_by_version_ids=names_by_version_ids,
+        archived=True,
+        standard=False,
+        fields=fields
+    )
+
+
+def get_thumbnail(
+    project_name, thumbnail_id, entity_type, entity_id, fields=None
+):
+    """Receive thumbnail entity data.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity.
+        entity_type (str): Type of entity for which the thumbnail should be
+            received.
+        entity_id (str): Id of entity for which the thumbnail should be
+            received.
+        fields (Iterable[str]): Fields that should be returned. All fields
+            are returned if 'None' is passed.
+
+    Returns:
+        None: If thumbnail with specified id was not found.
+        Dict: Thumbnail entity data which can be reduced to specified
+            'fields'.
+    """
+
+    if not thumbnail_id or not entity_type or not entity_id:
+        return None
+
+    if entity_type == "asset":
+        entity_type = "folder"
+
+    elif entity_type == "hero_version":
+        entity_type = "version"
+
+    return {
+        "_id": thumbnail_id,
+        "type": "thumbnail",
+        "schema": CURRENT_THUMBNAIL_SCHEMA,
+        "data": {
+            "entity_type": entity_type,
+            "entity_id": entity_id
+        }
+    }
+
+
+def get_thumbnails(project_name, thumbnail_contexts, fields=None):
+    """Get thumbnail entities.
+
+    Warning:
+        This function is not OpenPype compatible. There is no usage of this
+        function in the codebase, so there is nothing to convert. The
+        previous implementation cannot be AYON compatible without entity
+        types.
+    """
+
+    # Use a list, thumbnail items are dictionaries which are not hashable
+    thumbnail_items = []
+    for thumbnail_context in thumbnail_contexts:
+        thumbnail_id, entity_type, entity_id = thumbnail_context
+        thumbnail_item = get_thumbnail(
+            project_name, thumbnail_id, entity_type, entity_id
+        )
+        if thumbnail_item:
+            thumbnail_items.append(thumbnail_item)
+    return thumbnail_items
+
+
+def get_thumbnail_id_from_source(project_name, src_type, src_id):
+    """Receive thumbnail id from source entity.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        src_type (str): Type of source entity ('asset', 'version').
+        src_id (Union[str, ObjectId]): Id of source entity.
+
+    Returns:
+        ObjectId: Thumbnail id assigned to entity.
+        None: If source entity does not have any thumbnail id assigned.
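+
+    Example (illustrative)::
+
+        thumbnail_id = get_thumbnail_id_from_source(
+            "demo_project", "version", version_doc["_id"]
+        )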
+ """ + + if not src_type or not src_id: + return None + + if src_type == "version": + version = get_version_by_id( + project_name, src_id, fields=["data.thumbnail_id"] + ) or {} + return version.get("data", {}).get("thumbnail_id") + + if src_type == "asset": + asset = get_asset_by_id( + project_name, src_id, fields=["data.thumbnail_id"] + ) or {} + return asset.get("data", {}).get("thumbnail_id") + + return None + + +def get_workfile_info( + project_name, asset_id, task_name, filename, fields=None +): + if not asset_id or not task_name or not filename: + return None + + con = get_ayon_server_api_connection() + task = con.get_task_by_name( + project_name, asset_id, task_name, fields=["id", "name", "folderId"] + ) + if not task: + return None + + fields = workfile_info_fields_v3_to_v4(fields) + + for workfile_info in con.get_workfiles_info( + project_name, task_ids=[task["id"]], fields=fields + ): + if workfile_info["name"] == filename: + return convert_v4_workfile_info_to_v3(workfile_info, task) + return None diff --git a/openpype/client/server/entity_links.py b/client/ayon_core/client/entity_links.py similarity index 100% rename from openpype/client/server/entity_links.py rename to client/ayon_core/client/entity_links.py diff --git a/openpype/client/notes.md b/client/ayon_core/client/notes.md similarity index 100% rename from openpype/client/notes.md rename to client/ayon_core/client/notes.md diff --git a/openpype/client/server/openpype_comp.py b/client/ayon_core/client/openpype_comp.py similarity index 100% rename from openpype/client/server/openpype_comp.py rename to client/ayon_core/client/openpype_comp.py diff --git a/client/ayon_core/client/operations.py b/client/ayon_core/client/operations.py new file mode 100644 index 0000000000..71b3ca226a --- /dev/null +++ b/client/ayon_core/client/operations.py @@ -0,0 +1,880 @@ +import copy +import json +import collections +import uuid +import datetime + +from ayon_api.server_api import ( + PROJECT_NAME_ALLOWED_SYMBOLS, + PROJECT_NAME_REGEX, +) + +from .constants import ( + CURRENT_PROJECT_SCHEMA, + CURRENT_PROJECT_CONFIG_SCHEMA, + CURRENT_ASSET_DOC_SCHEMA, + CURRENT_SUBSET_SCHEMA, + CURRENT_VERSION_SCHEMA, + CURRENT_HERO_VERSION_SCHEMA, + CURRENT_REPRESENTATION_SCHEMA, + CURRENT_WORKFILE_INFO_SCHEMA, + CURRENT_THUMBNAIL_SCHEMA, +) +from .operations_base import ( + REMOVED_VALUE, + CreateOperation, + UpdateOperation, + DeleteOperation, + BaseOperationsSession +) +from .conversion_utils import ( + convert_create_asset_to_v4, + convert_create_task_to_v4, + convert_create_subset_to_v4, + convert_create_version_to_v4, + convert_create_hero_version_to_v4, + convert_create_representation_to_v4, + convert_create_workfile_info_to_v4, + + convert_update_folder_to_v4, + convert_update_subset_to_v4, + convert_update_version_to_v4, + convert_update_hero_version_to_v4, + convert_update_representation_to_v4, + convert_update_workfile_info_to_v4, +) +from .utils import create_entity_id, get_ayon_server_api_connection + + +def _create_or_convert_to_id(entity_id=None): + if entity_id is None: + return create_entity_id() + + # Validate if can be converted to uuid + uuid.UUID(entity_id) + return entity_id + + +def new_project_document( + project_name, project_code, config, data=None, entity_id=None +): + """Create skeleton data of project document. + + Args: + project_name (str): Name of project. Used as identifier of a project. + project_code (str): Shorter version of projet without spaces and + special characters (in most of cases). 
+            Should also be considered a unique name across projects.
+        config (Dict[str, Any]): Project config consisting of roots,
+            templates, applications and other project Anatomy related data.
+        data (Dict[str, Any]): Project data with information about its
+            attributes (e.g. 'fps' etc.) or integration specific keys.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of project document.
+    """
+
+    if data is None:
+        data = {}
+
+    data["code"] = project_code
+
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "name": project_name,
+        "type": CURRENT_PROJECT_SCHEMA,
+        "entity_data": data,
+        "config": config
+    }
+
+
+def new_asset_document(
+    name, project_id, parent_id, parents, data=None, entity_id=None
+):
+    """Create skeleton data of asset document.
+
+    Args:
+        name (str): Is considered as unique identifier of asset in project.
+        project_id (Union[str, ObjectId]): Id of project document.
+        parent_id (Union[str, ObjectId]): Id of parent asset.
+        parents (List[str]): List of parent assets names.
+        data (Dict[str, Any]): Asset document data. Empty dictionary is used
+            if not passed. Value of 'parent_id' is used to fill
+            'visualParent'.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of asset document.
+    """
+
+    if data is None:
+        data = {}
+    if parent_id is not None:
+        parent_id = _create_or_convert_to_id(parent_id)
+    data["visualParent"] = parent_id
+    data["parents"] = parents
+
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "type": "asset",
+        "name": name,
+        # This will be ignored
+        "parent": project_id,
+        "data": data,
+        "schema": CURRENT_ASSET_DOC_SCHEMA
+    }
+
+
+def new_subset_document(name, family, asset_id, data=None, entity_id=None):
+    """Create skeleton data of subset document.
+
+    Args:
+        name (str): Is considered as unique identifier of subset under asset.
+        family (str): Subset's family.
+        asset_id (Union[str, ObjectId]): Id of parent asset.
+        data (Dict[str, Any]): Subset document data. Empty dictionary is used
+            if not passed. Value of 'family' is used to fill 'family'.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of subset document.
+    """
+
+    if data is None:
+        data = {}
+    data["family"] = family
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "schema": CURRENT_SUBSET_SCHEMA,
+        "type": "subset",
+        "name": name,
+        "data": data,
+        "parent": _create_or_convert_to_id(asset_id)
+    }
+
+
+def new_version_doc(version, subset_id, data=None, entity_id=None):
+    """Create skeleton data of version document.
+
+    Args:
+        version (int): Is considered as unique identifier of version
+            under subset.
+        subset_id (Union[str, ObjectId]): Id of parent subset.
+        data (Dict[str, Any]): Version document data.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of version document.
+    """
+
+    if data is None:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "schema": CURRENT_VERSION_SCHEMA,
+        "type": "version",
+        "name": int(version),
+        "parent": _create_or_convert_to_id(subset_id),
+        "data": data
+    }
+
+
+def new_hero_version_doc(subset_id, data, version=None, entity_id=None):
+    """Create skeleton data of hero version document.
+
+    Args:
+        subset_id (Union[str, ObjectId]): Id of parent subset.
+        data (Dict[str, Any]): Version document data.
+        version (int): Version of source version.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of hero version document.
+    """
+
+    if version is None:
+        version = -1
+    elif version > 0:
+        version = -version
+
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "schema": CURRENT_HERO_VERSION_SCHEMA,
+        "type": "hero_version",
+        "version": version,
+        "parent": _create_or_convert_to_id(subset_id),
+        "data": data
+    }
+
+
+def new_representation_doc(
+    name, version_id, context, data=None, entity_id=None
+):
+    """Create skeleton data of representation document.
+
+    Args:
+        name (str): Representation name considered as unique identifier
+            of representation under version.
+        version_id (Union[str, ObjectId]): Id of parent version.
+        context (Dict[str, Any]): Representation context used to fill
+            templates or to query.
+        data (Dict[str, Any]): Representation document data.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of representation document.
+    """
+
+    if data is None:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "schema": CURRENT_REPRESENTATION_SCHEMA,
+        "type": "representation",
+        "parent": _create_or_convert_to_id(version_id),
+        "name": name,
+        "data": data,
+
+        # Imprint shortcut to context for performance reasons.
+        "context": context
+    }
+
+
+def new_thumbnail_doc(data=None, entity_id=None):
+    """Create skeleton data of thumbnail document.
+
+    Args:
+        data (Dict[str, Any]): Thumbnail document data.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of thumbnail document.
+    """
+
+    if data is None:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "type": "thumbnail",
+        "schema": CURRENT_THUMBNAIL_SCHEMA,
+        "data": data
+    }
+
+
+def new_workfile_info_doc(
+    filename, asset_id, task_name, files, data=None, entity_id=None
+):
+    """Create skeleton data of workfile info document.
+
+    Workfile document is at this moment used primarily for artist notes.
+
+    Args:
+        filename (str): Filename of workfile.
+        asset_id (Union[str, ObjectId]): Id of asset under which workfile
+            lives.
+        task_name (str): Task under which was workfile created.
+        files (List[str]): List of rootless filepaths related to workfile.
+        data (Dict[str, Any]): Additional metadata.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of workfile info document.
+    """
+
+    if not data:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_id(entity_id),
+        "type": "workfile",
+        "parent": _create_or_convert_to_id(asset_id),
+        "task_name": task_name,
+        "filename": filename,
+        "data": data,
+        "files": files
+    }
+
+
+def _prepare_update_data(old_doc, new_doc, replace):
+    changes = {}
+    for key, value in new_doc.items():
+        if key not in old_doc or value != old_doc[key]:
+            changes[key] = value
+
+    if replace:
+        for key in old_doc.keys():
+            if key not in new_doc:
+                changes[key] = REMOVED_VALUE
+    return changes
+
+
+def prepare_subset_update_data(old_doc, new_doc, replace=True):
+    """Compare two subset documents and prepare update data.
+
+    Based on compared values, creates update data for
+    'ServerUpdateOperation'.
+
+    Empty output means that the documents are identical.
+
+    Returns:
+        Dict[str, Any]: Changes between old and new document.
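+
+    Example (sketch, assuming an operations session object with
+    'update_entity' and 'commit' methods)::
+
+        changes = prepare_subset_update_data(old_doc, new_doc)
+        if changes:
+            session.update_entity(
+                project_name, "subset", old_doc["_id"], changes
+            )
+            session.commit()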
+ """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_version_update_data(old_doc, new_doc, replace=True): + """Compare two version documents and prepare update data. + + Based on compared values will create update data for + 'MongoUpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_hero_version_update_data(old_doc, new_doc, replace=True): + """Compare two hero version documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + changes = _prepare_update_data(old_doc, new_doc, replace) + changes.pop("version_id", None) + return changes + + +def prepare_representation_update_data(old_doc, new_doc, replace=True): + """Compare two representation documents and prepare update data. + + Based on compared values will create update data for + 'MongoUpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + changes = _prepare_update_data(old_doc, new_doc, replace) + context = changes.get("data", {}).get("context") + # Make sure that both 'family' and 'subset' are in changes if + # one of them changed (they'll both become 'product'). + if ( + context + and ("family" in context or "subset" in context) + ): + context["family"] = new_doc["data"]["context"]["family"] + context["subset"] = new_doc["data"]["context"]["subset"] + + return changes + + +def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): + """Compare two workfile info documents and prepare update data. + + Based on compared values will create update data for + 'MongoUpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +class FailedOperations(Exception): + pass + + +def entity_data_json_default(value): + if isinstance(value, datetime.datetime): + return int(value.timestamp()) + + raise TypeError( + "Object of type {} is not JSON serializable".format(str(type(value))) + ) + + +def failed_json_default(value): + return "< Failed value {} > {}".format(type(value), str(value)) + + +class ServerCreateOperation(CreateOperation): + """Operation to create an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + data (Dict[str, Any]): Data of entity that will be created. 
+ """ + + def __init__(self, project_name, entity_type, data, session): + self._session = session + + if not data: + data = {} + data = copy.deepcopy(data) + if entity_type == "project": + raise ValueError("Project cannot be created using operations") + + tasks = None + if entity_type in "asset": + # TODO handle tasks + entity_type = "folder" + if "data" in data: + tasks = data["data"].get("tasks") + + project = self._session.get_project(project_name) + new_data = convert_create_asset_to_v4(data, project, self.con) + + elif entity_type == "task": + project = self._session.get_project(project_name) + new_data = convert_create_task_to_v4(data, project, self.con) + + elif entity_type == "subset": + new_data = convert_create_subset_to_v4(data, self.con) + entity_type = "product" + + elif entity_type == "version": + new_data = convert_create_version_to_v4(data, self.con) + + elif entity_type == "hero_version": + new_data = convert_create_hero_version_to_v4( + data, project_name, self.con + ) + entity_type = "version" + + elif entity_type in ("representation", "archived_representation"): + new_data = convert_create_representation_to_v4(data, self.con) + entity_type = "representation" + + elif entity_type == "workfile": + new_data = convert_create_workfile_info_to_v4( + data, project_name, self.con + ) + + else: + raise ValueError( + "Unhandled entity type \"{}\"".format(entity_type) + ) + + # Simple check if data can be dumped into json + # - should raise error on 'ObjectId' object + try: + new_data = json.loads( + json.dumps(new_data, default=entity_data_json_default) + ) + + except: + raise ValueError("Couldn't json parse body: {}".format( + json.dumps(new_data, default=failed_json_default) + )) + + super(ServerCreateOperation, self).__init__( + project_name, entity_type, new_data + ) + + if "id" not in self._data: + self._data["id"] = create_entity_id() + + if tasks: + copied_tasks = copy.deepcopy(tasks) + for task_name, task in copied_tasks.items(): + task["name"] = task_name + task["folderId"] = self._data["id"] + self.session.create_entity( + project_name, "task", task, nested_id=self.id + ) + + @property + def con(self): + return self.session.con + + @property + def session(self): + return self._session + + @property + def entity_id(self): + return self._data["id"] + + def to_server_operation(self): + return { + "id": self.id, + "type": "create", + "entityType": self.entity_type, + "entityId": self.entity_id, + "data": self._data + } + + +class ServerUpdateOperation(UpdateOperation): + """Operation to update an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Identifier of an entity. + update_data (Dict[str, Any]): Key -> value changes that will be set in + database. If value is set to 'REMOVED_VALUE' the key will be + removed. Only first level of dictionary is checked (on purpose). 
+ """ + + def __init__( + self, project_name, entity_type, entity_id, update_data, session + ): + self._session = session + + update_data = copy.deepcopy(update_data) + if entity_type == "project": + raise ValueError("Project cannot be created using operations") + + if entity_type in ("asset", "archived_asset"): + new_update_data = convert_update_folder_to_v4( + project_name, entity_id, update_data, self.con + ) + entity_type = "folder" + + elif entity_type == "subset": + new_update_data = convert_update_subset_to_v4( + project_name, entity_id, update_data, self.con + ) + entity_type = "product" + + elif entity_type == "version": + new_update_data = convert_update_version_to_v4( + project_name, entity_id, update_data, self.con + ) + + elif entity_type == "hero_version": + new_update_data = convert_update_hero_version_to_v4( + project_name, entity_id, update_data, self.con + ) + entity_type = "version" + + elif entity_type in ("representation", "archived_representation"): + new_update_data = convert_update_representation_to_v4( + project_name, entity_id, update_data, self.con + ) + entity_type = "representation" + + elif entity_type == "workfile": + new_update_data = convert_update_workfile_info_to_v4( + project_name, entity_id, update_data, self.con + ) + + else: + raise ValueError( + "Unhandled entity type \"{}\"".format(entity_type) + ) + + try: + new_update_data = json.loads( + json.dumps(new_update_data, default=entity_data_json_default) + ) + + except: + raise ValueError("Couldn't json parse body: {}".format( + json.dumps(new_update_data, default=failed_json_default) + )) + + super(ServerUpdateOperation, self).__init__( + project_name, entity_type, entity_id, new_update_data + ) + + @property + def con(self): + return self.session.con + + @property + def session(self): + return self._session + + def to_server_operation(self): + if not self._update_data: + return None + + update_data = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + value = None + update_data[key] = value + + return { + "id": self.id, + "type": "update", + "entityType": self.entity_type, + "entityId": self.entity_id, + "data": update_data + } + + +class ServerDeleteOperation(DeleteOperation): + """Operation to delete an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Entity id that will be removed. 
+ """ + + def __init__(self, project_name, entity_type, entity_id, session): + self._session = session + + if entity_type == "asset": + entity_type = "folder" + + elif entity_type == "hero_version": + entity_type = "version" + + elif entity_type == "subset": + entity_type = "product" + + super(ServerDeleteOperation, self).__init__( + project_name, entity_type, entity_id + ) + + @property + def con(self): + return self.session.con + + @property + def session(self): + return self._session + + def to_server_operation(self): + return { + "id": self.id, + "type": self.operation_name, + "entityId": self.entity_id, + "entityType": self.entity_type, + } + + +class OperationsSession(BaseOperationsSession): + def __init__(self, con=None, *args, **kwargs): + super(OperationsSession, self).__init__(*args, **kwargs) + if con is None: + con = get_ayon_server_api_connection() + self._con = con + self._project_cache = {} + self._nested_operations = collections.defaultdict(list) + + @property + def con(self): + return self._con + + def get_project(self, project_name): + if project_name not in self._project_cache: + self._project_cache[project_name] = self.con.get_project( + project_name) + return copy.deepcopy(self._project_cache[project_name]) + + def commit(self): + """Commit session operations.""" + + operations, self._operations = self._operations, [] + if not operations: + return + + operations_by_project = collections.defaultdict(list) + for operation in operations: + operations_by_project[operation.project_name].append(operation) + + body_by_id = {} + results = [] + for project_name, operations in operations_by_project.items(): + operations_body = [] + for operation in operations: + body = operation.to_server_operation() + if body is not None: + try: + json.dumps(body) + except: + raise ValueError("Couldn't json parse body: {}".format( + json.dumps( + body, indent=4, default=failed_json_default + ) + )) + + body_by_id[operation.id] = body + operations_body.append(body) + + if operations_body: + result = self._con.post( + "projects/{}/operations".format(project_name), + operations=operations_body, + canFail=False + ) + results.append(result.data) + + for result in results: + if result.get("success"): + continue + + if "operations" not in result: + raise FailedOperations( + "Operation failed. Content: {}".format(str(result)) + ) + + for op_result in result["operations"]: + if not op_result["success"]: + operation_id = op_result["id"] + raise FailedOperations(( + "Operation \"{}\" failed with data:\n{}\nError: {}." + ).format( + operation_id, + json.dumps(body_by_id[operation_id], indent=4), + op_result.get("error", "unknown"), + )) + + def create_entity(self, project_name, entity_type, data, nested_id=None): + """Fast access to 'ServerCreateOperation'. + + Args: + project_name (str): On which project the creation happens. + entity_type (str): Which entity type will be created. + data (Dicst[str, Any]): Entity data. + nested_id (str): Id of other operation from which is triggered + operation -> Operations can trigger suboperations but they + must be added to operations list after it's parent is added. + + Returns: + ServerCreateOperation: Object of update operation. 
+ """ + + operation = ServerCreateOperation( + project_name, entity_type, data, self + ) + + if nested_id: + self._nested_operations[nested_id].append(operation) + else: + self.add(operation) + if operation.id in self._nested_operations: + self.extend(self._nested_operations.pop(operation.id)) + + return operation + + def update_entity( + self, project_name, entity_type, entity_id, update_data, nested_id=None + ): + """Fast access to 'ServerUpdateOperation'. + + Returns: + ServerUpdateOperation: Object of update operation. + """ + + operation = ServerUpdateOperation( + project_name, entity_type, entity_id, update_data, self + ) + if nested_id: + self._nested_operations[nested_id].append(operation) + else: + self.add(operation) + if operation.id in self._nested_operations: + self.extend(self._nested_operations.pop(operation.id)) + return operation + + def delete_entity( + self, project_name, entity_type, entity_id, nested_id=None + ): + """Fast access to 'ServerDeleteOperation'. + + Returns: + ServerDeleteOperation: Object of delete operation. + """ + + operation = ServerDeleteOperation( + project_name, entity_type, entity_id, self + ) + if nested_id: + self._nested_operations[nested_id].append(operation) + else: + self.add(operation) + if operation.id in self._nested_operations: + self.extend(self._nested_operations.pop(operation.id)) + return operation + + +def create_project( + project_name, + project_code, + library_project=False, + preset_name=None, + con=None +): + """Create project using OpenPype settings. + + This project creation function is not validating project document on + creation. It is because project document is created blindly with only + minimum required information about project which is it's name, code, type + and schema. + + Entered project name must be unique and project must not exist yet. + + Note: + This function is here to be OP v4 ready but in v3 has more logic + to do. That's why inner imports are in the body. + + Args: + project_name (str): New project name. Should be unique. + project_code (str): Project's code should be unique too. + library_project (bool): Project is library project. + preset_name (str): Name of anatomy preset. Default is used if not + passed. + con (ServerAPI): Connection to server with logged user. + + Raises: + ValueError: When project name already exists in MongoDB. + + Returns: + dict: Created project document. 
+ """ + + if con is None: + con = get_ayon_server_api_connection() + + return con.create_project( + project_name, + project_code, + library_project, + preset_name + ) + + +def delete_project(project_name, con=None): + if con is None: + con = get_ayon_server_api_connection() + + return con.delete_project(project_name) + + +def create_thumbnail(project_name, src_filepath, thumbnail_id=None, con=None): + if con is None: + con = get_ayon_server_api_connection() + return con.create_thumbnail(project_name, src_filepath, thumbnail_id) diff --git a/openpype/client/operations_base.py b/client/ayon_core/client/operations_base.py similarity index 100% rename from openpype/client/operations_base.py rename to client/ayon_core/client/operations_base.py diff --git a/openpype/client/server/thumbnails.py b/client/ayon_core/client/thumbnails.py similarity index 100% rename from openpype/client/server/thumbnails.py rename to client/ayon_core/client/thumbnails.py diff --git a/client/ayon_core/client/utils.py b/client/ayon_core/client/utils.py new file mode 100644 index 0000000000..26da6e34e1 --- /dev/null +++ b/client/ayon_core/client/utils.py @@ -0,0 +1,134 @@ +import os +import uuid + +import ayon_api + +from ayon_core.client.operations_base import REMOVED_VALUE + + +class _GlobalCache: + initialized = False + + +def get_ayon_server_api_connection(): + if _GlobalCache.initialized: + con = ayon_api.get_server_api_connection() + else: + from ayon_core.lib.local_settings import get_local_site_id + + _GlobalCache.initialized = True + site_id = get_local_site_id() + version = os.getenv("AYON_VERSION") + if ayon_api.is_connection_created(): + con = ayon_api.get_server_api_connection() + con.set_site_id(site_id) + con.set_client_version(version) + else: + con = ayon_api.create_connection(site_id, version) + return con + + +def create_entity_id(): + return uuid.uuid1().hex + + +def prepare_attribute_changes(old_entity, new_entity, replace=False): + """Prepare changes of attributes on entities. + + Compare 'attrib' of old and new entity data to prepare only changed + values that should be sent to server for update. + + Example: + >>> # Limited entity data to 'attrib' + >>> old_entity = { + ... "attrib": {"attr_1": 1, "attr_2": "MyString", "attr_3": True} + ... } + >>> new_entity = { + ... "attrib": {"attr_1": 2, "attr_3": True, "attr_4": 3} + ... } + >>> # Changes if replacement should not happen + >>> expected_changes = { + ... "attr_1": 2, + ... "attr_4": 3 + ... } + >>> changes = prepare_attribute_changes(old_entity, new_entity) + >>> changes == expected_changes + True + + >>> # Changes if replacement should happen + >>> expected_changes_replace = { + ... "attr_1": 2, + ... "attr_2": REMOVED_VALUE, + ... "attr_4": 3 + ... } + >>> changes_replace = prepare_attribute_changes( + ... old_entity, new_entity, True) + >>> changes_replace == expected_changes_replace + True + + Args: + old_entity (dict[str, Any]): Data of entity queried from server. + new_entity (dict[str, Any]): Entity data with applied changes. + replace (bool): New entity should fully replace all old entity values. + + Returns: + Dict[str, Any]: Values from new entity only if value has changed. 
+ """ + + attrib_changes = {} + new_attrib = new_entity.get("attrib") + old_attrib = old_entity.get("attrib") + if new_attrib is None: + if not replace: + return attrib_changes + new_attrib = {} + + if old_attrib is None: + return new_attrib + + for attr, new_attr_value in new_attrib.items(): + old_attr_value = old_attrib.get(attr) + if old_attr_value != new_attr_value: + attrib_changes[attr] = new_attr_value + + if replace: + for attr in old_attrib: + if attr not in new_attrib: + attrib_changes[attr] = REMOVED_VALUE + + return attrib_changes + + +def prepare_entity_changes(old_entity, new_entity, replace=False): + """Prepare changes of AYON entities. + + Compare old and new entity to filter values from new data that changed. + + Args: + old_entity (dict[str, Any]): Data of entity queried from server. + new_entity (dict[str, Any]): Entity data with applied changes. + replace (bool): All attributes should be replaced by new values. So + all attribute values that are not on new entity will be removed. + + Returns: + Dict[str, Any]: Only values from new entity that changed. + """ + + changes = {} + for key, new_value in new_entity.items(): + if key == "attrib": + continue + + old_value = old_entity.get(key) + if old_value != new_value: + changes[key] = new_value + + if replace: + for key in old_entity: + if key not in new_entity: + changes[key] = REMOVED_VALUE + + attr_changes = prepare_attribute_changes(old_entity, new_entity, replace) + if attr_changes: + changes["attrib"] = attr_changes + return changes diff --git a/openpype/hooks/pre_add_last_workfile_arg.py b/client/ayon_core/hooks/pre_add_last_workfile_arg.py similarity index 95% rename from openpype/hooks/pre_add_last_workfile_arg.py rename to client/ayon_core/hooks/pre_add_last_workfile_arg.py index 6e255ae82a..8144afd401 100644 --- a/openpype/hooks/pre_add_last_workfile_arg.py +++ b/client/ayon_core/hooks/pre_add_last_workfile_arg.py @@ -1,6 +1,6 @@ import os -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class AddLastWorkfileToLaunchArgs(PreLaunchHook): diff --git a/openpype/hooks/pre_copy_template_workfile.py b/client/ayon_core/hooks/pre_copy_template_workfile.py similarity index 95% rename from openpype/hooks/pre_copy_template_workfile.py rename to client/ayon_core/hooks/pre_copy_template_workfile.py index 4d91d83c95..e5d2d4f640 100644 --- a/openpype/hooks/pre_copy_template_workfile.py +++ b/client/ayon_core/hooks/pre_copy_template_workfile.py @@ -1,8 +1,8 @@ import os import shutil -from openpype.settings import get_project_settings -from openpype.lib.applications import PreLaunchHook, LaunchTypes -from openpype.pipeline.workfile import ( +from ayon_core.settings import get_project_settings +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.pipeline.workfile import ( get_custom_workfile_template, get_custom_workfile_template_by_string_context ) diff --git a/openpype/hooks/pre_create_extra_workdir_folders.py b/client/ayon_core/hooks/pre_create_extra_workdir_folders.py similarity index 87% rename from openpype/hooks/pre_create_extra_workdir_folders.py rename to client/ayon_core/hooks/pre_create_extra_workdir_folders.py index 4c9d08b375..6116d5fbd3 100644 --- a/openpype/hooks/pre_create_extra_workdir_folders.py +++ b/client/ayon_core/hooks/pre_create_extra_workdir_folders.py @@ -1,6 +1,6 @@ import os -from openpype.lib.applications import PreLaunchHook, LaunchTypes -from openpype.pipeline.workfile import 
create_workdir_extra_folders +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.pipeline.workfile import create_workdir_extra_folders class CreateWorkdirExtraFolders(PreLaunchHook): diff --git a/openpype/hooks/pre_global_host_data.py b/client/ayon_core/hooks/pre_global_host_data.py similarity index 94% rename from openpype/hooks/pre_global_host_data.py rename to client/ayon_core/hooks/pre_global_host_data.py index 813df24af0..3422c87484 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/client/ayon_core/hooks/pre_global_host_data.py @@ -1,11 +1,11 @@ -from openpype.client import get_project, get_asset_by_name -from openpype.lib.applications import ( +from ayon_core.client import get_project, get_asset_by_name +from ayon_core.lib.applications import ( PreLaunchHook, EnvironmentPrepData, prepare_app_environments, prepare_context_environments ) -from openpype.pipeline import Anatomy +from ayon_core.pipeline import Anatomy class GlobalHostDataHook(PreLaunchHook): diff --git a/openpype/hooks/pre_mac_launch.py b/client/ayon_core/hooks/pre_mac_launch.py similarity index 86% rename from openpype/hooks/pre_mac_launch.py rename to client/ayon_core/hooks/pre_mac_launch.py index 402e9a5517..34680155f1 100644 --- a/openpype/hooks/pre_mac_launch.py +++ b/client/ayon_core/hooks/pre_mac_launch.py @@ -1,5 +1,5 @@ import os -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class LaunchWithTerminal(PreLaunchHook): @@ -22,9 +22,9 @@ def execute(self): return # Check if first argument match executable path - # - Few applications are not executed directly but through OpenPype - # process (Photoshop, AfterEffects, Harmony, ...). These should not - # use `open`. + # - Few applications are not executed directly but through AYON + # launcher process (Photoshop, AfterEffects, Harmony, ...). + # These should not use `open`. if self.launch_context.launch_args[0] != executable: return diff --git a/openpype/hooks/pre_new_console_apps.py b/client/ayon_core/hooks/pre_new_console_apps.py similarity index 94% rename from openpype/hooks/pre_new_console_apps.py rename to client/ayon_core/hooks/pre_new_console_apps.py index 9727b4fb78..c81b924573 100644 --- a/openpype/hooks/pre_new_console_apps.py +++ b/client/ayon_core/hooks/pre_new_console_apps.py @@ -1,5 +1,5 @@ import subprocess -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class LaunchNewConsoleApps(PreLaunchHook): diff --git a/openpype/hooks/pre_non_python_host_launch.py b/client/ayon_core/hooks/pre_non_python_host_launch.py similarity index 83% rename from openpype/hooks/pre_non_python_host_launch.py rename to client/ayon_core/hooks/pre_non_python_host_launch.py index d9e912c826..fed4c99447 100644 --- a/openpype/hooks/pre_non_python_host_launch.py +++ b/client/ayon_core/hooks/pre_non_python_host_launch.py @@ -1,13 +1,13 @@ import os -from openpype.lib import get_openpype_execute_args -from openpype.lib.applications import ( +from ayon_core.lib import get_ayon_launcher_args +from ayon_core.lib.applications import ( get_non_python_host_kwargs, PreLaunchHook, LaunchTypes, ) -from openpype import PACKAGE_DIR as OPENPYPE_DIR +from ayon_core import AYON_CORE_ROOT class NonPythonHostHook(PreLaunchHook): @@ -15,7 +15,7 @@ class NonPythonHostHook(PreLaunchHook): Non python host implementation do not launch host directly but use python script which launch the host. 
For these cases it is necessary to - prepend python (or openpype) executable and script path before application's. + prepend python (or ayon) executable and script path before application's. """ app_groups = {"harmony", "photoshop", "aftereffects"} @@ -32,12 +32,12 @@ def execute(self): remainders.append(self.launch_context.launch_args.pop(0)) script_path = os.path.join( - OPENPYPE_DIR, + AYON_CORE_ROOT, "scripts", "non_python_host_launch.py" ) - new_launch_args = get_openpype_execute_args( + new_launch_args = get_ayon_launcher_args( "run", script_path, executable_path ) # Add workfile path if exists diff --git a/openpype/hooks/pre_ocio_hook.py b/client/ayon_core/hooks/pre_ocio_hook.py similarity index 88% rename from openpype/hooks/pre_ocio_hook.py rename to client/ayon_core/hooks/pre_ocio_hook.py index e695cf3fe8..00ba9a3bcb 100644 --- a/openpype/hooks/pre_ocio_hook.py +++ b/client/ayon_core/hooks/pre_ocio_hook.py @@ -1,7 +1,7 @@ -from openpype.lib.applications import PreLaunchHook +from ayon_core.lib.applications import PreLaunchHook -from openpype.pipeline.colorspace import get_imageio_config -from openpype.pipeline.template_data import get_template_data_with_names +from ayon_core.pipeline.colorspace import get_imageio_config +from ayon_core.pipeline.template_data import get_template_data_with_names class OCIOEnvHook(PreLaunchHook): diff --git a/openpype/host/__init__.py b/client/ayon_core/host/__init__.py similarity index 100% rename from openpype/host/__init__.py rename to client/ayon_core/host/__init__.py diff --git a/openpype/host/dirmap.py b/client/ayon_core/host/dirmap.py similarity index 97% rename from openpype/host/dirmap.py rename to client/ayon_core/host/dirmap.py index 96a98e808e..cecd689a4c 100644 --- a/openpype/host/dirmap.py +++ b/client/ayon_core/host/dirmap.py @@ -12,10 +12,10 @@ import six -from openpype.lib import Logger -from openpype.modules import ModulesManager -from openpype.settings import get_project_settings -from openpype.settings.lib import get_site_local_overrides +from ayon_core.lib import Logger +from ayon_core.addon import AddonsManager +from ayon_core.settings import get_project_settings +from ayon_core.settings.lib import get_site_local_overrides @six.add_metaclass(ABCMeta) @@ -50,7 +50,7 @@ def __init__( def sync_module(self): if not self._sync_module_discovered: self._sync_module_discovered = True - manager = ModulesManager() + manager = AddonsManager() self._sync_module = manager.get("sync_server") return self._sync_module diff --git a/openpype/host/host.py b/client/ayon_core/host/host.py similarity index 95% rename from openpype/host/host.py rename to client/ayon_core/host/host.py index afe06d1f55..6d129e18d9 100644 --- a/openpype/host/host.py +++ b/client/ayon_core/host/host.py @@ -31,16 +31,16 @@ class HostBase(object): # Installation of host before (avalon concept): ```python - from openpype.pipeline import install_host - import openpype.hosts.maya.api as host + from ayon_core.pipeline import install_host + import ayon_core.hosts.maya.api as host install_host(host) ``` # Installation of host now: ```python - from openpype.pipeline import install_host - from openpype.hosts.maya.api import MayaHost + from ayon_core.pipeline import install_host + from ayon_core.hosts.maya.api import MayaHost host = MayaHost() install_host(host) @@ -83,7 +83,7 @@ def install(self): This is where should be added menu with tools, registered callbacks and other host integration initialization. 
-    It is called automatically when 'openpype.pipeline.install_host' is
+    It is called automatically when 'ayon_core.pipeline.install_host' is
+    triggered.
     """
diff --git a/openpype/host/interfaces.py b/client/ayon_core/host/interfaces.py
similarity index 100%
rename from openpype/host/interfaces.py
rename to client/ayon_core/host/interfaces.py
diff --git a/openpype/client/server/__init__.py b/client/ayon_core/hosts/__init__.py
similarity index 100%
rename from openpype/client/server/__init__.py
rename to client/ayon_core/hosts/__init__.py
diff --git a/openpype/hosts/aftereffects/__init__.py b/client/ayon_core/hosts/aftereffects/__init__.py
similarity index 100%
rename from openpype/hosts/aftereffects/__init__.py
rename to client/ayon_core/hosts/aftereffects/__init__.py
diff --git a/client/ayon_core/hosts/aftereffects/addon.py b/client/ayon_core/hosts/aftereffects/addon.py
new file mode 100644
index 0000000000..278f836a72
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/addon.py
@@ -0,0 +1,22 @@
+from ayon_core.modules import OpenPypeModule, IHostAddon
+
+
+class AfterEffectsAddon(OpenPypeModule, IHostAddon):
+    name = "aftereffects"
+    host_name = "aftereffects"
+
+    def initialize(self, module_settings):
+        self.enabled = True
+
+    def add_implementation_envs(self, env, _app):
+        """Modify environments to contain everything required for implementation."""
+        defaults = {
+            "AYON_LOG_NO_COLORS": "1",
+            "WEBSOCKET_URL": "ws://localhost:8097/ws/"
+        }
+        for key, value in defaults.items():
+            if not env.get(key):
+                env[key] = value
+
+    def get_workfile_extensions(self):
+        return [".aep"]
diff --git a/client/ayon_core/hosts/aftereffects/api/README.md b/client/ayon_core/hosts/aftereffects/api/README.md
new file mode 100644
index 0000000000..53eb896255
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/api/README.md
@@ -0,0 +1,68 @@
+# AfterEffects Integration
+
+Requirements: this extension requires the JavaScript engine, which is
+available since CC 16.0.
+Please check `File > Project Settings > Expressions > Expressions Engine`.
+
+## Setup
+
+The After Effects integration requires two components to work: `extension` and `server`.
+
+### Extension
+
+To install the extension, download the [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
+
+```
+ExManCmd /install {path to addon}/api/extension.zxp
+```
+OR
+download [Anastasiy's Extension Manager](https://install.anastasiy.com/)
+
+`{path to addon}` will most likely be in your AppData folder on Windows, or in your user data folder on Linux and macOS.
+
+### Server
+
+The easiest way to start the server and launch After Effects is:
+
+```
+python -c ^"import ayon_core.hosts.aftereffects;ayon_core.hosts.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
+```
+
+`ayon_core.hosts.aftereffects.launch` launches the application and the server, and also closes the server when After Effects exits.
+
+## Usage
+
+The After Effects extension can be found under `Window > Extensions > AYON`. Once launched you should be presented with a panel like this:
+
+![Ayon Panel](panel.png "Ayon Panel")
+
+
+## Developing
+
+### Extension
+When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).
+ +When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide). + +``` +ZXPSignCmd -selfSignedCert NA NA Ayon Avalon-After-Effects Ayon extension.p12 +ZXPSignCmd -sign {path to addon}/api/extension {path to addon}/api/extension.zxp extension.p12 Ayon +``` + +### Plugin Examples + +These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py). + +Expected deployed extension location on default Windows: +`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\io.ynput.AE.panel` + +For easier debugging of Javascript: +https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1 +Add (optional) --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome +then localhost:8092 + +Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 +## Resources + - https://javascript-tools-guide.readthedocs.io/introduction/index.html + - https://github.com/Adobe-CEP/Getting-Started-guides + - https://github.com/Adobe-CEP/CEP-Resources diff --git a/openpype/hosts/aftereffects/api/__init__.py b/client/ayon_core/hosts/aftereffects/api/__init__.py similarity index 100% rename from openpype/hosts/aftereffects/api/__init__.py rename to client/ayon_core/hosts/aftereffects/api/__init__.py diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/client/ayon_core/hosts/aftereffects/api/extension.zxp similarity index 100% rename from openpype/hosts/aftereffects/api/extension.zxp rename to client/ayon_core/hosts/aftereffects/api/extension.zxp diff --git a/openpype/hosts/aftereffects/api/extension/.debug b/client/ayon_core/hosts/aftereffects/api/extension/.debug similarity index 100% rename from openpype/hosts/aftereffects/api/extension/.debug rename to client/ayon_core/hosts/aftereffects/api/extension/.debug diff --git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/client/ayon_core/hosts/aftereffects/api/extension/CSXS/manifest.xml similarity index 100% rename from openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml rename to client/ayon_core/hosts/aftereffects/api/extension/CSXS/manifest.xml diff --git a/openpype/hosts/aftereffects/api/extension/css/boilerplate.css b/client/ayon_core/hosts/aftereffects/api/extension/css/boilerplate.css similarity index 100% rename from openpype/hosts/aftereffects/api/extension/css/boilerplate.css rename to client/ayon_core/hosts/aftereffects/api/extension/css/boilerplate.css diff --git a/openpype/hosts/aftereffects/api/extension/css/styles.css b/client/ayon_core/hosts/aftereffects/api/extension/css/styles.css similarity index 100% rename from openpype/hosts/aftereffects/api/extension/css/styles.css rename to client/ayon_core/hosts/aftereffects/api/extension/css/styles.css diff --git a/openpype/hosts/aftereffects/api/extension/css/topcoat-desktop-dark.min.css b/client/ayon_core/hosts/aftereffects/api/extension/css/topcoat-desktop-dark.min.css similarity index 100% rename from openpype/hosts/aftereffects/api/extension/css/topcoat-desktop-dark.min.css rename to client/ayon_core/hosts/aftereffects/api/extension/css/topcoat-desktop-dark.min.css diff --git 
a/openpype/hosts/aftereffects/api/extension/icons/ayon_logo.png b/client/ayon_core/hosts/aftereffects/api/extension/icons/ayon_logo.png similarity index 100% rename from openpype/hosts/aftereffects/api/extension/icons/ayon_logo.png rename to client/ayon_core/hosts/aftereffects/api/extension/icons/ayon_logo.png diff --git a/openpype/hosts/aftereffects/api/extension/icons/iconDarkNormal.png b/client/ayon_core/hosts/aftereffects/api/extension/icons/iconDarkNormal.png similarity index 100% rename from openpype/hosts/aftereffects/api/extension/icons/iconDarkNormal.png rename to client/ayon_core/hosts/aftereffects/api/extension/icons/iconDarkNormal.png diff --git a/openpype/hosts/aftereffects/api/extension/icons/iconDarkRollover.png b/client/ayon_core/hosts/aftereffects/api/extension/icons/iconDarkRollover.png similarity index 100% rename from openpype/hosts/aftereffects/api/extension/icons/iconDarkRollover.png rename to client/ayon_core/hosts/aftereffects/api/extension/icons/iconDarkRollover.png diff --git a/openpype/hosts/aftereffects/api/extension/icons/iconDisabled.png b/client/ayon_core/hosts/aftereffects/api/extension/icons/iconDisabled.png similarity index 100% rename from openpype/hosts/aftereffects/api/extension/icons/iconDisabled.png rename to client/ayon_core/hosts/aftereffects/api/extension/icons/iconDisabled.png diff --git a/openpype/hosts/aftereffects/api/extension/icons/iconNormal.png b/client/ayon_core/hosts/aftereffects/api/extension/icons/iconNormal.png similarity index 100% rename from openpype/hosts/aftereffects/api/extension/icons/iconNormal.png rename to client/ayon_core/hosts/aftereffects/api/extension/icons/iconNormal.png diff --git a/openpype/hosts/aftereffects/api/extension/icons/iconRollover.png b/client/ayon_core/hosts/aftereffects/api/extension/icons/iconRollover.png similarity index 100% rename from openpype/hosts/aftereffects/api/extension/icons/iconRollover.png rename to client/ayon_core/hosts/aftereffects/api/extension/icons/iconRollover.png diff --git a/openpype/hosts/aftereffects/api/extension/index.html b/client/ayon_core/hosts/aftereffects/api/extension/index.html similarity index 100% rename from openpype/hosts/aftereffects/api/extension/index.html rename to client/ayon_core/hosts/aftereffects/api/extension/index.html diff --git a/openpype/hosts/aftereffects/api/extension/js/libs/CSInterface.js b/client/ayon_core/hosts/aftereffects/api/extension/js/libs/CSInterface.js similarity index 100% rename from openpype/hosts/aftereffects/api/extension/js/libs/CSInterface.js rename to client/ayon_core/hosts/aftereffects/api/extension/js/libs/CSInterface.js diff --git a/openpype/hosts/aftereffects/api/extension/js/libs/jquery-2.0.2.min.js b/client/ayon_core/hosts/aftereffects/api/extension/js/libs/jquery-2.0.2.min.js similarity index 100% rename from openpype/hosts/aftereffects/api/extension/js/libs/jquery-2.0.2.min.js rename to client/ayon_core/hosts/aftereffects/api/extension/js/libs/jquery-2.0.2.min.js diff --git a/openpype/hosts/aftereffects/api/extension/js/libs/json.js b/client/ayon_core/hosts/aftereffects/api/extension/js/libs/json.js similarity index 100% rename from openpype/hosts/aftereffects/api/extension/js/libs/json.js rename to client/ayon_core/hosts/aftereffects/api/extension/js/libs/json.js diff --git a/openpype/hosts/aftereffects/api/extension/js/libs/loglevel.min.js b/client/ayon_core/hosts/aftereffects/api/extension/js/libs/loglevel.min.js similarity index 100% rename from openpype/hosts/aftereffects/api/extension/js/libs/loglevel.min.js rename to 
client/ayon_core/hosts/aftereffects/api/extension/js/libs/loglevel.min.js diff --git a/openpype/hosts/aftereffects/api/extension/js/libs/wsrpc.js b/client/ayon_core/hosts/aftereffects/api/extension/js/libs/wsrpc.js similarity index 100% rename from openpype/hosts/aftereffects/api/extension/js/libs/wsrpc.js rename to client/ayon_core/hosts/aftereffects/api/extension/js/libs/wsrpc.js diff --git a/openpype/hosts/aftereffects/api/extension/js/libs/wsrpc.min.js b/client/ayon_core/hosts/aftereffects/api/extension/js/libs/wsrpc.min.js similarity index 100% rename from openpype/hosts/aftereffects/api/extension/js/libs/wsrpc.min.js rename to client/ayon_core/hosts/aftereffects/api/extension/js/libs/wsrpc.min.js diff --git a/openpype/hosts/aftereffects/api/extension/js/main.js b/client/ayon_core/hosts/aftereffects/api/extension/js/main.js similarity index 99% rename from openpype/hosts/aftereffects/api/extension/js/main.js rename to client/ayon_core/hosts/aftereffects/api/extension/js/main.js index 643671b3e1..a9f91e436f 100644 --- a/openpype/hosts/aftereffects/api/extension/js/main.js +++ b/client/ayon_core/hosts/aftereffects/api/extension/js/main.js @@ -17,7 +17,7 @@ async function startUp(url){ var res = await promis; log.warn("res: " + res); - promis = runEvalScript("getEnv('OPENPYPE_DEBUG')"); + promis = runEvalScript("getEnv('AYON_DEBUG')"); var debug = await promis; log.warn("debug: " + debug); if (debug && debug.toString() == '3'){ diff --git a/openpype/hosts/aftereffects/api/extension/js/themeManager.js b/client/ayon_core/hosts/aftereffects/api/extension/js/themeManager.js similarity index 100% rename from openpype/hosts/aftereffects/api/extension/js/themeManager.js rename to client/ayon_core/hosts/aftereffects/api/extension/js/themeManager.js diff --git a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx b/client/ayon_core/hosts/aftereffects/api/extension/jsx/hostscript.jsx similarity index 100% rename from openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx rename to client/ayon_core/hosts/aftereffects/api/extension/jsx/hostscript.jsx diff --git a/client/ayon_core/hosts/aftereffects/api/launch_logic.py b/client/ayon_core/hosts/aftereffects/api/launch_logic.py new file mode 100644 index 0000000000..ad521c2f01 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/api/launch_logic.py @@ -0,0 +1,389 @@ +import os +import sys +import subprocess +import collections +import logging +import asyncio +import functools +import traceback + + +from wsrpc_aiohttp import ( + WebSocketRoute, + WebSocketAsync +) + +from qtpy import QtCore + +from ayon_core.lib import Logger +from ayon_core.tests.lib import is_in_tests +from ayon_core.pipeline import install_host, legacy_io +from ayon_core.addon import AddonsManager +from ayon_core.tools.utils import host_tools, get_ayon_qt_app +from ayon_core.tools.adobe_webserver.app import WebServerTool + +from .ws_stub import get_stub +from .lib import set_settings + +log = logging.getLogger(__name__) +log.setLevel(logging.DEBUG) + + +def safe_excepthook(*args): + traceback.print_exception(*args) + + +def main(*subprocess_args): + """Main entrypoint to AE launching, called from pre hook.""" + sys.excepthook = safe_excepthook + + from ayon_core.hosts.aftereffects.api import AfterEffectsHost + + host = AfterEffectsHost() + install_host(host) + + os.environ["AYON_LOG_NO_COLORS"] = "0" + app = get_ayon_qt_app() + app.setQuitOnLastWindowClosed(False) + + launcher = ProcessLauncher(subprocess_args) + launcher.start() + + if 
os.environ.get("HEADLESS_PUBLISH"): + manager = AddonsManager() + webpublisher_addon = manager["webpublisher"] + + launcher.execute_in_main_thread( + functools.partial( + webpublisher_addon.headless_publish, + log, + "CloseAE", + is_in_tests() + ) + ) + + elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True): + save = False + if os.getenv("WORKFILES_SAVE_AS"): + save = True + + launcher.execute_in_main_thread( + lambda: host_tools.show_tool_by_name("workfiles", save=save) + ) + + sys.exit(app.exec_()) + + +def show_tool_by_name(tool_name): + kwargs = {} + if tool_name == "loader": + kwargs["use_context"] = True + + host_tools.show_tool_by_name(tool_name, **kwargs) + + +class ProcessLauncher(QtCore.QObject): + """Launches webserver, connects to it, runs main thread.""" + route_name = "AfterEffects" + _main_thread_callbacks = collections.deque() + + def __init__(self, subprocess_args): + self._subprocess_args = subprocess_args + self._log = None + + super(ProcessLauncher, self).__init__() + + # Keep track if launcher was alreadu started + self._started = False + + self._process = None + self._websocket_server = None + + start_process_timer = QtCore.QTimer() + start_process_timer.setInterval(100) + + loop_timer = QtCore.QTimer() + loop_timer.setInterval(200) + + start_process_timer.timeout.connect(self._on_start_process_timer) + loop_timer.timeout.connect(self._on_loop_timer) + + self._start_process_timer = start_process_timer + self._loop_timer = loop_timer + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger("{}-launcher".format( + self.route_name)) + return self._log + + @property + def websocket_server_is_running(self): + if self._websocket_server is not None: + return self._websocket_server.is_running + return False + + @property + def is_process_running(self): + if self._process is not None: + return self._process.poll() is None + return False + + @property + def is_host_connected(self): + """Returns True if connected, False if app is not running at all.""" + if not self.is_process_running: + return False + + try: + + _stub = get_stub() + if _stub: + return True + except Exception: + pass + + return None + + @classmethod + def execute_in_main_thread(cls, callback): + cls._main_thread_callbacks.append(callback) + + def start(self): + if self._started: + return + self.log.info("Started launch logic of AfterEffects") + self._started = True + self._start_process_timer.start() + + def exit(self): + """ Exit whole application. """ + if self._start_process_timer.isActive(): + self._start_process_timer.stop() + if self._loop_timer.isActive(): + self._loop_timer.stop() + + if self._websocket_server is not None: + self._websocket_server.stop() + + if self._process: + self._process.kill() + self._process.wait() + + QtCore.QCoreApplication.exit() + + def _on_loop_timer(self): + # TODO find better way and catch errors + # Run only callbacks that are in queue at the moment + cls = self.__class__ + for _ in range(len(cls._main_thread_callbacks)): + if cls._main_thread_callbacks: + callback = cls._main_thread_callbacks.popleft() + callback() + + if not self.is_process_running: + self.log.info("Host process is not running. Closing") + self.exit() + + elif not self.websocket_server_is_running: + self.log.info("Websocket server is not running. 
Closing") + self.exit() + + def _on_start_process_timer(self): + # TODO add try except validations for each part in this method + # Start server as first thing + if self._websocket_server is None: + self._init_server() + return + + # TODO add waiting time + # Wait for webserver + if not self.websocket_server_is_running: + return + + # Start application process + if self._process is None: + self._start_process() + self.log.info("Waiting for host to connect") + return + + # TODO add waiting time + # Wait until host is connected + if self.is_host_connected: + self._start_process_timer.stop() + self._loop_timer.start() + elif ( + not self.is_process_running + or not self.websocket_server_is_running + ): + self.exit() + + def _init_server(self): + if self._websocket_server is not None: + return + + self.log.debug( + "Initialization of websocket server for host communication" + ) + + self._websocket_server = websocket_server = WebServerTool() + if websocket_server.port_occupied( + websocket_server.host_name, + websocket_server.port + ): + self.log.info( + "Server already running, sending actual context and exit." + ) + asyncio.run(websocket_server.send_context_change(self.route_name)) + self.exit() + return + + # Add Websocket route + websocket_server.add_route("*", "/ws/", WebSocketAsync) + # Add after effects route to websocket handler + + print("Adding {} route".format(self.route_name)) + WebSocketAsync.add_route( + self.route_name, AfterEffectsRoute + ) + self.log.info("Starting websocket server for host communication") + websocket_server.start_server() + + def _start_process(self): + if self._process is not None: + return + self.log.info("Starting host process") + try: + self._process = subprocess.Popen( + self._subprocess_args, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL + ) + except Exception: + self.log.info("exce", exc_info=True) + self.exit() + + +class AfterEffectsRoute(WebSocketRoute): + """ + One route, mimicking external application (like Harmony, etc). + All functions could be called from client. + 'do_notify' function calls function on the client - mimicking + notification after long running job on the server or similar + """ + instance = None + + def init(self, **kwargs): + # Python __init__ must be return "self". + # This method might return anything. + log.debug("someone called AfterEffects route") + self.instance = self + return kwargs + + # server functions + async def ping(self): + log.debug("someone called AfterEffects route ping") + + # This method calls function on the client side + # client functions + async def set_context(self, project, asset, task): + """ + Sets 'project' and 'asset' to envs, eg. 
setting context + + Args: + project (str) + asset (str) + """ + log.info("Setting context change") + log.info("project {} asset {} ".format(project, asset)) + if project: + legacy_io.Session["AVALON_PROJECT"] = project + os.environ["AVALON_PROJECT"] = project + if asset: + legacy_io.Session["AVALON_ASSET"] = asset + os.environ["AVALON_ASSET"] = asset + if task: + legacy_io.Session["AVALON_TASK"] = task + os.environ["AVALON_TASK"] = task + + async def read(self): + log.debug("aftereffects.read client calls server server calls " + "aftereffects client") + return await self.socket.call('aftereffects.read') + + # panel routes for tools + async def workfiles_route(self): + self._tool_route("workfiles") + + async def loader_route(self): + self._tool_route("loader") + + async def publish_route(self): + self._tool_route("publisher") + + async def sceneinventory_route(self): + self._tool_route("sceneinventory") + + async def setresolution_route(self): + self._settings_route(False, True) + + async def setframes_route(self): + self._settings_route(True, False) + + async def setall_route(self): + self._settings_route(True, True) + + async def experimental_tools_route(self): + self._tool_route("experimental_tools") + + def _tool_route(self, _tool_name): + """The address accessed when clicking on the buttons.""" + + partial_method = functools.partial(show_tool_by_name, + _tool_name) + + ProcessLauncher.execute_in_main_thread(partial_method) + + # Required return statement. + return "nothing" + + def _settings_route(self, frames, resolution): + partial_method = functools.partial(set_settings, + frames, + resolution) + + ProcessLauncher.execute_in_main_thread(partial_method) + + # Required return statement. + return "nothing" + + def create_placeholder_route(self): + from ayon_core.hosts.aftereffects.api.workfile_template_builder import \ + create_placeholder + partial_method = functools.partial(create_placeholder) + + ProcessLauncher.execute_in_main_thread(partial_method) + + # Required return statement. + return "nothing" + + def update_placeholder_route(self): + from ayon_core.hosts.aftereffects.api.workfile_template_builder import \ + update_placeholder + partial_method = functools.partial(update_placeholder) + + ProcessLauncher.execute_in_main_thread(partial_method) + + # Required return statement. + return "nothing" + + def build_workfile_template_route(self): + from ayon_core.hosts.aftereffects.api.workfile_template_builder import \ + build_workfile_template + partial_method = functools.partial(build_workfile_template) + + ProcessLauncher.execute_in_main_thread(partial_method) + + # Required return statement. 
+ return "nothing" diff --git a/client/ayon_core/hosts/aftereffects/api/lib.py b/client/ayon_core/hosts/aftereffects/api/lib.py new file mode 100644 index 0000000000..f5f2d98698 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/api/lib.py @@ -0,0 +1,160 @@ +import os +import re +import json +import contextlib +import logging + +from ayon_core.pipeline.context_tools import get_current_context +from ayon_core.client import get_asset_by_name +from .ws_stub import get_stub + +log = logging.getLogger(__name__) +log.setLevel(logging.DEBUG) + + +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context.""" + selection = get_stub().get_selected_items(True, False, False) + try: + yield selection + finally: + pass + + +def get_extension_manifest_path(): + return os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "extension", + "CSXS", + "manifest.xml" + ) + + +def get_unique_layer_name(layers, name): + """ + Gets all layer names and if 'name' is present in them, increases + suffix by 1 (eg. creates unique layer name - for Loader) + Args: + layers (list): of strings, names only + name (string): checked value + + Returns: + (string): name_00X (without version) + """ + names = {} + for layer in layers: + layer_name = re.sub(r'_\d{3}$', '', layer) + if layer_name in names.keys(): + names[layer_name] = names[layer_name] + 1 + else: + names[layer_name] = 1 + occurrences = names.get(name, 0) + + return "{}_{:0>3d}".format(name, occurrences + 1) + + +def get_background_layers(file_url): + """ + Pulls file name from background json file, enrich with folder url for + AE to be able import files. + + Order is important, follows order in json. + + Args: + file_url (str): abs url of background json + + Returns: + (list): of abs paths to images + """ + with open(file_url) as json_file: + data = json.load(json_file) + + layers = list() + bg_folder = os.path.dirname(file_url) + for child in data['children']: + if child.get("filename"): + layers.append(os.path.join(bg_folder, child.get("filename")). + replace("\\", "/")) + else: + for layer in child['children']: + if layer.get("filename"): + layers.append(os.path.join(bg_folder, + layer.get("filename")). + replace("\\", "/")) + return layers + + +def get_asset_settings(asset_doc): + """Get settings on current asset from database. + + Returns: + dict: Scene data. + + """ + asset_data = asset_doc["data"] + fps = asset_data.get("fps", 0) + frame_start = asset_data.get("frameStart", 0) + frame_end = asset_data.get("frameEnd", 0) + handle_start = asset_data.get("handleStart", 0) + handle_end = asset_data.get("handleEnd", 0) + resolution_width = asset_data.get("resolutionWidth", 0) + resolution_height = asset_data.get("resolutionHeight", 0) + duration = (frame_end - frame_start + 1) + handle_start + handle_end + + return { + "fps": fps, + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "duration": duration + } + + +def set_settings(frames, resolution, comp_ids=None, print_msg=True): + """Sets number of frames and resolution to selected comps. 
+
+    Args:
+        frames (bool): Set frame information if True.
+        resolution (bool): Set resolution if True.
+        comp_ids (list): Specific composition ids; if empty, currently
+            selected compositions are used.
+        print_msg (bool): Show a JS alert with the message if True.
+    """
+    frame_start = frames_duration = fps = width = height = None
+    current_context = get_current_context()
+
+    asset_doc = get_asset_by_name(current_context["project_name"],
+                                  current_context["asset_name"])
+    settings = get_asset_settings(asset_doc)
+
+    msg = ''
+    if frames:
+        frame_start = settings["frameStart"] - settings["handleStart"]
+        frames_duration = settings["duration"]
+        fps = settings["fps"]
+        msg += f"frame start:{frame_start}, duration:{frames_duration}, "\
+               f"fps:{fps}"
+    if resolution:
+        width = settings["resolutionWidth"]
+        height = settings["resolutionHeight"]
+        msg += f"width:{width} and height:{height}"
+
+    stub = get_stub()
+    if not comp_ids:
+        comps = stub.get_selected_items(True, False, False)
+        comp_ids = [comp.id for comp in comps]
+    if not comp_ids:
+        stub.print_msg("Select at least one composition to apply settings.")
+        return
+
+    for comp_id in comp_ids:
+        msg = f"Setting for comp {comp_id} " + msg
+        log.debug(msg)
+        stub.set_comp_properties(comp_id, frame_start, frames_duration,
+                                 fps, width, height)
+        if print_msg:
+            stub.print_msg(msg)
diff --git a/openpype/hosts/aftereffects/api/panel.png b/client/ayon_core/hosts/aftereffects/api/panel.png
similarity index 100%
rename from openpype/hosts/aftereffects/api/panel.png
rename to client/ayon_core/hosts/aftereffects/api/panel.png
diff --git a/openpype/hosts/aftereffects/api/panel_failure.png b/client/ayon_core/hosts/aftereffects/api/panel_failure.png
similarity index 100%
rename from openpype/hosts/aftereffects/api/panel_failure.png
rename to client/ayon_core/hosts/aftereffects/api/panel_failure.png
diff --git a/client/ayon_core/hosts/aftereffects/api/pipeline.py b/client/ayon_core/hosts/aftereffects/api/pipeline.py
new file mode 100644
index 0000000000..32e064d8cb
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/api/pipeline.py
@@ -0,0 +1,293 @@
+import os
+
+from qtpy import QtWidgets
+
+import pyblish.api
+
+from ayon_core.lib import Logger, register_event_callback
+from ayon_core.pipeline import (
+    register_loader_plugin_path,
+    register_creator_plugin_path,
+    AVALON_CONTAINER_ID,
+)
+from ayon_core.hosts.aftereffects.api.workfile_template_builder import (
+    AEPlaceholderLoadPlugin,
+    AEPlaceholderCreatePlugin
+)
+from ayon_core.pipeline.load import any_outdated_containers
+import ayon_core.hosts.aftereffects
+
+from ayon_core.host import (
+    HostBase,
+    IWorkfileHost,
+    ILoadHost,
+    IPublishHost
+)
+from ayon_core.tools.utils import get_ayon_qt_app
+
+from .launch_logic import get_stub
+from .ws_stub import ConnectionNotEstablishedYet
+
+log = Logger.get_logger(__name__)
+
+
+HOST_DIR = os.path.dirname(
+    os.path.abspath(ayon_core.hosts.aftereffects.__file__)
+)
+PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
+CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
+
+
+class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
+    name = "aftereffects"
+
+    def __init__(self):
+        self._stub = None
+        super(AfterEffectsHost, self).__init__()
+
+    @property
+    def stub(self):
+        """
+        Handle pulling stub from AfterEffects to run operations on host
+        Returns:
+            (AEServerStub) or None
+        """
+        if self._stub:
+            return self._stub
+
+        try:
+            stub = get_stub()  # only after AfterEffects 
is up
+        except ConnectionNotEstablishedYet:
+            print("Not connected yet, ignoring")
+            return
+
+        self._stub = stub
+        return self._stub
+
+    def install(self):
+        print("Installing Pype config...")
+
+        pyblish.api.register_host("aftereffects")
+        pyblish.api.register_plugin_path(PUBLISH_PATH)
+
+        register_loader_plugin_path(LOAD_PATH)
+        register_creator_plugin_path(CREATE_PATH)
+
+        register_event_callback("application.launched", application_launch)
+
+    def get_workfile_extensions(self):
+        return [".aep"]
+
+    def save_workfile(self, dst_path=None):
+        self.stub.saveAs(dst_path, True)
+
+    def open_workfile(self, filepath):
+        self.stub.open(filepath)
+
+        return True
+
+    def get_current_workfile(self):
+        try:
+            full_name = get_stub().get_active_document_full_name()
+            if full_name and full_name != "null":
+                return os.path.normpath(full_name).replace("\\", "/")
+        except ValueError:
+            print("Nothing opened")
+
+        return None
+
+    def get_containers(self):
+        return ls()
+
+    def get_context_data(self):
+        meta = self.stub.get_metadata()
+        for item in meta:
+            if item.get("id") == "publish_context":
+                item.pop("id")
+                return item
+
+        return {}
+
+    def update_context_data(self, data, changes):
+        item = data
+        item["id"] = "publish_context"
+        self.stub.imprint(item["id"], item)
+
+    def get_workfile_build_placeholder_plugins(self):
+        return [
+            AEPlaceholderLoadPlugin,
+            AEPlaceholderCreatePlugin
+        ]
+
+    # created instances section
+    def list_instances(self):
+        """List all created instances from the current workfile which
+        will be published.
+
+        Pulls from File > File Info.
+
+        For SubsetManager.
+
+        Returns:
+            (list) of dictionaries matching instances format
+        """
+        stub = self.stub
+        if not stub:
+            return []
+
+        instances = []
+        layers_meta = stub.get_metadata()
+
+        for instance in layers_meta:
+            if instance.get("id") == "pyblish.avalon.instance":
+                instances.append(instance)
+        return instances
+
+    def remove_instance(self, instance):
+        """Remove instance from current workfile metadata.
+
+        Updates metadata of current file in File > File Info and removes
+        icon highlight on group layer.
+
+        For SubsetManager.
+
+        Args:
+            instance (dict): instance representation from subsetmanager model
+        """
+        stub = self.stub
+
+        if not stub:
+            return
+
+        inst_id = instance.get("instance_id") or instance.get("uuid")  # legacy
+        if not inst_id:
+            log.warning("No instance identifier for {}".format(instance))
+            return
+
+        stub.remove_instance(inst_id)
+
+        if instance.get("members"):
+            item = stub.get_item(instance["members"][0])
+            if item:
+                stub.rename_item(item.id,
+                                 item.name.replace(stub.PUBLISH_ICON, ''))
+
+
+def application_launch():
+    """Triggered after start of the app."""
+    check_inventory()
+
+
+def ls():
+    """Yields containers from the active AfterEffects document.
+
+    This is the host-equivalent of api.ls(), but instead of listing
+    assets on disk, it lists assets already loaded in AE; once loaded
+    they are called 'containers'. Used in the Manage tool.
+
+    Containers can live on multiple levels: a single image or video as a
+    FootageItem, or multiple items - backgrounds (a folder with an
+    automatically created composition and all imported layers).
+
+    Yields:
+        dict: container
+
+    """
+    try:
+        stub = get_stub()  # only after AfterEffects is up
+    except ConnectionNotEstablishedYet:
+        print("Not connected yet, ignoring")
+        return
+
+    layers_meta = stub.get_metadata()
+    for item in stub.get_items(comps=True,
+                               folders=True,
+                               footages=True):
+        data = stub.read(item, layers_meta)
+        # Skip non-tagged layers.
+ if not data: + continue + + # Filter to only containers. + if "container" not in data["id"]: + continue + + # Append transient data + data["objectName"] = item.name.replace(stub.LOADED_ICON, '') + data["layer"] = item + yield data + + +def check_inventory(): + """Checks loaded containers if they are of highest version""" + if not any_outdated_containers(): + return + + # Warn about outdated containers. + _app = get_ayon_qt_app() + + message_box = QtWidgets.QMessageBox() + message_box.setIcon(QtWidgets.QMessageBox.Warning) + msg = "There are outdated containers in the scene." + message_box.setText(msg) + message_box.exec_() + + +def containerise(name, + namespace, + comp, + context, + loader=None, + suffix="_CON"): + """ + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Creates dictionary payloads that gets saved into file metadata. Each + container contains of who loaded (loader) and members (single or multiple + in case of background). + + Arguments: + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + comp (AEItem): Composition to containerise + context (dict): Asset information + loader (str, optional): Name of loader used to produce this container. + suffix (str, optional): Suffix of container, defaults to `_CON`. + + Returns: + container (str): Name of container assembly + """ + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace, + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + "members": comp.members or [comp.id] + } + + stub = get_stub() + stub.imprint(comp.id, data) + + return comp + + +def cache_and_get_instances(creator): + """Cache instances in shared data. + + Storing all instances as a list as legacy instances might be still present. + Args: + creator (Creator): Plugin which would like to get instances from host. 
+ Returns: + List[]: list of all instances stored in metadata + """ + shared_key = "openpype.photoshop.instances" + if shared_key not in creator.collection_shared_data: + creator.collection_shared_data[shared_key] = \ + creator.host.list_instances() + return creator.collection_shared_data[shared_key] diff --git a/client/ayon_core/hosts/aftereffects/api/plugin.py b/client/ayon_core/hosts/aftereffects/api/plugin.py new file mode 100644 index 0000000000..0ddb244645 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/api/plugin.py @@ -0,0 +1,12 @@ +import six +from abc import ABCMeta + +from ayon_core.pipeline import LoaderPlugin +from .launch_logic import get_stub + + +@six.add_metaclass(ABCMeta) +class AfterEffectsLoader(LoaderPlugin): + @staticmethod + def get_stub(): + return get_stub() diff --git a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py new file mode 100644 index 0000000000..aa2f36e8aa --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py @@ -0,0 +1,271 @@ +import os.path +import uuid +import shutil + +from ayon_core.pipeline import registered_host +from ayon_core.tools.workfile_template_build import ( + WorkfileBuildPlaceholderDialog, +) +from ayon_core.pipeline.workfile.workfile_template_builder import ( + AbstractTemplateBuilder, + PlaceholderPlugin, + LoadPlaceholderItem, + CreatePlaceholderItem, + PlaceholderLoadMixin, + PlaceholderCreateMixin +) +from ayon_core.hosts.aftereffects.api import get_stub +from ayon_core.hosts.aftereffects.api.lib import set_settings + +PLACEHOLDER_SET = "PLACEHOLDERS_SET" +PLACEHOLDER_ID = "openpype.placeholder" + + +class AETemplateBuilder(AbstractTemplateBuilder): + """Concrete implementation of AbstractTemplateBuilder for AE""" + + def import_template(self, path): + """Import template into current scene. + Block if a template is already loaded. + + Args: + path (str): A path to current template (usually given by + get_template_preset implementation) + + Returns: + bool: Whether the template was successfully imported or not + """ + stub = get_stub() + if not os.path.exists(path): + stub.print_msg(f"Template file on {path} doesn't exist.") + return + + stub.save() + workfile_path = stub.get_active_document_full_name() + shutil.copy2(path, workfile_path) + stub.open(workfile_path) + + return True + + +class AEPlaceholderPlugin(PlaceholderPlugin): + """Contains generic methods for all PlaceholderPlugins.""" + + def collect_placeholders(self): + """Collect info from file metadata about created placeholders. 
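+
+        Placeholders are stored in the same file metadata as instances
+        and containers, flagged with 'is_placeholder'.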
+ + Returns: + (list) (LoadPlaceholderItem) + """ + output = [] + scene_placeholders = self._collect_scene_placeholders() + for item in scene_placeholders: + if item.get("plugin_identifier") != self.identifier: + continue + + if isinstance(self, AEPlaceholderLoadPlugin): + item = LoadPlaceholderItem(item["uuid"], + item["data"], + self) + elif isinstance(self, AEPlaceholderCreatePlugin): + item = CreatePlaceholderItem(item["uuid"], + item["data"], + self) + else: + raise NotImplementedError(f"Not implemented for {type(self)}") + + output.append(item) + + return output + + def update_placeholder(self, placeholder_item, placeholder_data): + """Resave changed properties for placeholders""" + item_id, metadata_item = self._get_item(placeholder_item) + stub = get_stub() + if not item_id: + stub.print_msg("Cannot find item for " + f"{placeholder_item.scene_identifier}") + return + metadata_item["data"] = placeholder_data + stub.imprint(item_id, metadata_item) + + def _get_item(self, placeholder_item): + """Returns item id and item metadata for placeholder from file meta""" + stub = get_stub() + placeholder_uuid = placeholder_item.scene_identifier + for metadata_item in stub.get_metadata(): + if not metadata_item.get("is_placeholder"): + continue + if placeholder_uuid in metadata_item.get("uuid"): + return metadata_item["members"][0], metadata_item + return None, None + + def _collect_scene_placeholders(self): + """" Cache placeholder data to shared data. + Returns: + (list) of dicts + """ + placeholder_items = self.builder.get_shared_populate_data( + "placeholder_items" + ) + if not placeholder_items: + placeholder_items = [] + for item in get_stub().get_metadata(): + if not item.get("is_placeholder"): + continue + placeholder_items.append(item) + + self.builder.set_shared_populate_data( + "placeholder_items", placeholder_items + ) + return placeholder_items + + def _imprint_item(self, item_id, name, placeholder_data, stub): + if not item_id: + raise ValueError("Couldn't create a placeholder") + container_data = { + "id": "openpype.placeholder", + "name": name, + "is_placeholder": True, + "plugin_identifier": self.identifier, + "uuid": str(uuid.uuid4()), # scene_identifier + "data": placeholder_data, + "members": [item_id] + } + stub.imprint(item_id, container_data) + + +class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin): + """Adds Create placeholder. + + This adds composition and runs Create + """ + identifier = "aftereffects.create" + label = "AfterEffects create" + + def create_placeholder(self, placeholder_data): + stub = get_stub() + name = "CREATEPLACEHOLDER" + item_id = stub.add_item(name, "COMP") + + self._imprint_item(item_id, name, placeholder_data, stub) + + def populate_placeholder(self, placeholder): + """Replace 'placeholder' with publishable instance. + + Renames prepared composition name, creates publishable instance, sets + frame/duration settings according to DB. 
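+
+        Args:
+            placeholder (CreatePlaceholderItem): collected placeholder
+                item to populate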
+        """
+        pre_create_data = {"use_selection": True}
+        item_id, item = self._get_item(placeholder)
+        get_stub().select_items([item_id])
+        self.populate_create_placeholder(placeholder, pre_create_data)
+
+        # apply settings for populated composition
+        item_id, metadata_item = self._get_item(placeholder)
+        set_settings(True, True, [item_id])
+
+    def get_placeholder_options(self, options=None):
+        return self.get_create_plugin_options(options)
+
+
+class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
+    identifier = "aftereffects.load"
+    label = "AfterEffects load"
+
+    def create_placeholder(self, placeholder_data):
+        """Creates AE's Placeholder item in Project items list.
+
+        Sets dummy resolution/duration/fps settings, will be replaced when
+        populated.
+        """
+        stub = get_stub()
+        name = "LOADERPLACEHOLDER"
+        item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
+
+        self._imprint_item(item_id, name, placeholder_data, stub)
+
+    def populate_placeholder(self, placeholder):
+        """Use Openpype Loader from `placeholder` to create new FootageItems.
+
+        New FootageItems are created, files are imported.
+        """
+        self.populate_load_placeholder(placeholder)
+        errors = placeholder.get_errors()
+        stub = get_stub()
+        if errors:
+            stub.print_msg("\n".join(errors))
+        else:
+            if not placeholder.data["keep_placeholder"]:
+                metadata = stub.get_metadata()
+                for item in metadata:
+                    if not item.get("is_placeholder"):
+                        continue
+                    scene_identifier = item.get("uuid")
+                    if (scene_identifier and
+                            scene_identifier == placeholder.scene_identifier):
+                        stub.delete_item(item["members"][0])
+                stub.remove_instance(placeholder.scene_identifier, metadata)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
+
+    def load_succeed(self, placeholder, container):
+        placeholder_item_id, _ = self._get_item(placeholder)
+        item_id = container.id
+        get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
+
+
+def build_workfile_template(*args, **kwargs):
+    builder = AETemplateBuilder(registered_host())
+    builder.build_template(*args, **kwargs)
+
+
+def update_workfile_template(*args):
+    builder = AETemplateBuilder(registered_host())
+    builder.rebuild_template()
+
+
+def create_placeholder(*args):
+    """Called when a new workfile placeholder should be created."""
+    host = registered_host()
+    builder = AETemplateBuilder(host)
+    window = WorkfileBuildPlaceholderDialog(host, builder)
+    window.exec_()
+
+
+def update_placeholder(*args):
+    """Called after a placeholder item is selected to modify it."""
+    host = registered_host()
+    builder = AETemplateBuilder(host)
+
+    stub = get_stub()
+    selected_items = stub.get_selected_items(True, True, True)

+    if len(selected_items) != 1:
+        stub.print_msg("Please select just 1 placeholder")
+        return
+
+    selected_id = selected_items[0].id
+    placeholder_item = None
+
+    placeholder_items_by_id = {
+        placeholder_item.scene_identifier: placeholder_item
+        for placeholder_item in builder.get_placeholders()
+    }
+    for metadata_item in stub.get_metadata():
+        if not metadata_item.get("is_placeholder"):
+            continue
+        if selected_id in metadata_item.get("members"):
+            placeholder_item = placeholder_items_by_id.get(
+                metadata_item["uuid"])
+            break
+
+    if not placeholder_item:
+        stub.print_msg("Didn't find placeholder metadata. 
" + "Remove and re-create placeholder.") + return + + window = WorkfileBuildPlaceholderDialog(host, builder) + window.set_update_mode(placeholder_item) + window.exec_() diff --git a/client/ayon_core/hosts/aftereffects/api/ws_stub.py b/client/ayon_core/hosts/aftereffects/api/ws_stub.py new file mode 100644 index 0000000000..869acc3405 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/api/ws_stub.py @@ -0,0 +1,731 @@ +""" + Stub handling connection from server to client. + Used anywhere solution is calling client methods. +""" +import json +import logging + +import attr + +from wsrpc_aiohttp import WebSocketAsync +from ayon_core.tools.adobe_webserver.app import WebServerTool + + +class ConnectionNotEstablishedYet(Exception): + pass + + +@attr.s +class AEItem(object): + """ + Object denoting Item in AE. Each item is created in AE by any Loader, + but contains same fields, which are being used in later processing. + """ + # metadata + id = attr.ib() # id created by AE, could be used for querying + name = attr.ib() # name of item + item_type = attr.ib(default=None) # item type (footage, folder, comp) + # all imported elements, single for + # regular image, array for Backgrounds + members = attr.ib(factory=list) + frameStart = attr.ib(default=None) + framesDuration = attr.ib(default=None) + frameRate = attr.ib(default=None) + file_name = attr.ib(default=None) + instance_id = attr.ib(default=None) # New Publisher + width = attr.ib(default=None) + height = attr.ib(default=None) + is_placeholder = attr.ib(default=False) + uuid = attr.ib(default=False) + path = attr.ib(default=False) # path to FootageItem to validate + # list of composition Footage is in + containing_comps = attr.ib(factory=list) + + +class AfterEffectsServerStub(): + """ + Stub for calling function on client (Photoshop js) side. + Expects that client is already connected (started when avalon menu + is opened). + 'self.websocketserver.call' is used as async wrapper + """ + PUBLISH_ICON = '\u2117 ' + LOADED_ICON = '\u25bc' + + def __init__(self): + self.websocketserver = WebServerTool.get_instance() + self.client = self.get_client() + self.log = logging.getLogger(self.__class__.__name__) + + @staticmethod + def get_client(): + """ + Return first connected client to WebSocket + TODO implement selection by Route + :return: client + """ + clients = WebSocketAsync.get_clients() + client = None + if len(clients) > 0: + key = list(clients.keys())[0] + client = clients.get(key) + + return client + + def open(self, path): + """ + Open file located at 'path' (local). + Args: + path(string): file path locally + Returns: None + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.open', path=path)) + + return self._handle_return(res) + + def get_metadata(self): + """ + Get complete stored JSON with metadata from AE.Metadata.Label + field. + + It contains containers loaded by any Loader OR instances created + by Creator. + + Returns: + (list) + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.get_metadata')) + metadata = self._handle_return(res) + + return metadata or [] + + def read(self, item, layers_meta=None): + """ + Parses item metadata from Label field of active document. + Used as filter to pick metadata for specific 'item' only. 
+ + Args: + item (AEItem): pulled info from AE + layers_meta (dict): full list from Headline + (load and inject for better performance in loops) + Returns: + (dict): + """ + if layers_meta is None: + layers_meta = self.get_metadata() + for item_meta in layers_meta: + if 'container' in item_meta.get('id') and \ + str(item.id) == str(item_meta.get('members')[0]): + return item_meta + + self.log.debug("Couldn't find layer metadata") + + def imprint(self, item_id, data, all_items=None, items_meta=None): + """ + Save item metadata to Label field of metadata of active document + Args: + item_id (int|str): id of FootageItem or instance_id for workfiles + data(string): json representation for single layer + all_items (list of item): for performance, could be + injected for usage in loop, if not, single call will be + triggered + items_meta(string): json representation from Headline + (for performance - provide only if imprint is in + loop - value should be same) + Returns: None + """ + if not items_meta: + items_meta = self.get_metadata() + + result_meta = [] + # fix existing + is_new = True + + for item_meta in items_meta: + if ((item_meta.get('members') and + str(item_id) == str(item_meta.get('members')[0])) or + item_meta.get("instance_id") == item_id): + is_new = False + if data: + item_meta.update(data) + result_meta.append(item_meta) + else: + result_meta.append(item_meta) + + if is_new: + result_meta.append(data) + + # Ensure only valid ids are stored. + if not all_items: + # loaders create FootageItem now + all_items = self.get_items(comps=True, + folders=True, + footages=True) + item_ids = [int(item.id) for item in all_items] + cleaned_data = [] + for meta in result_meta: + # do not added instance with nonexistend item id + if meta.get("members"): + if int(meta["members"][0]) not in item_ids: + continue + + cleaned_data.append(meta) + + payload = json.dumps(cleaned_data, indent=4) + + res = self.websocketserver.call(self.client.call + ('AfterEffects.imprint', + payload=payload)) + return self._handle_return(res) + + def get_active_document_full_name(self): + """ + Returns absolute path of active document via ws call + Returns(string): file name + """ + res = self.websocketserver.call(self.client.call( + 'AfterEffects.get_active_document_full_name')) + + return self._handle_return(res) + + def get_active_document_name(self): + """ + Returns just a name of active document via ws call + Returns(string): file name + """ + res = self.websocketserver.call(self.client.call( + 'AfterEffects.get_active_document_name')) + + return self._handle_return(res) + + def get_items(self, comps, folders=False, footages=False): + """ + Get all items from Project panel according to arguments. 
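+        For example, get_items(comps=True) returns only CompItems (an
+        illustrative call; see the argument list below).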
+ There are multiple different types: + CompItem (could have multiple layers - source for Creator, + will be rendered) + FolderItem (collection type, currently used for Background + loading) + FootageItem (imported file - created by Loader) + Args: + comps (bool): return CompItems + folders (bool): return FolderItem + footages (bool: return FootageItem + + Returns: + (list) of namedtuples + """ + res = self.websocketserver.call( + self.client.call('AfterEffects.get_items', + comps=comps, + folders=folders, + footages=footages) + ) + return self._to_records(self._handle_return(res)) + + def select_items(self, items): + """ + Select items in Project list + Args: + items (list): of int item ids + """ + self.websocketserver.call( + self.client.call('AfterEffects.select_items', items=items)) + + + def get_selected_items(self, comps, folders=False, footages=False): + """ + Same as get_items but using selected items only + Args: + comps (bool): return CompItems + folders (bool): return FolderItem + footages (bool: return FootageItem + + Returns: + (list) of namedtuples + + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.get_selected_items', + comps=comps, + folders=folders, + footages=footages) + ) + return self._to_records(self._handle_return(res)) + + def add_item(self, name, item_type): + """ + Adds either composition or folder to project item list. + + Args: + name (str) + item_type (str): COMP|FOLDER + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.add_item', + name=name, + item_type=item_type)) + + return self._handle_return(res) + + def get_item(self, item_id): + """ + Returns metadata for particular 'item_id' or None + + Args: + item_id (int, or string) + """ + for item in self.get_items(True, True, True): + if str(item.id) == str(item_id): + return item + + return None + + def import_file(self, path, item_name, import_options=None): + """ + Imports file as a FootageItem. Used in Loader + Args: + path (string): absolute path for asset file + item_name (string): label for created FootageItem + import_options (dict): different files (img vs psd) need different + config + + """ + res = self.websocketserver.call( + self.client.call('AfterEffects.import_file', + path=path, + item_name=item_name, + import_options=import_options) + ) + records = self._to_records(self._handle_return(res)) + if records: + return records.pop() + + def replace_item(self, item_id, path, item_name): + """ Replace FootageItem with new file + + Args: + item_id (int): + path (string):absolute path + item_name (string): label on item in Project list + + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.replace_item', + item_id=item_id, + path=path, item_name=item_name)) + + return self._handle_return(res) + + def rename_item(self, item_id, item_name): + """ Replace item with item_name + + Args: + item_id (int): + item_name (string): label on item in Project list + + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.rename_item', + item_id=item_id, + item_name=item_name)) + + return self._handle_return(res) + + def delete_item(self, item_id): + """ Deletes *Item in a file + Args: + item_id (int): + + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.delete_item', + item_id=item_id)) + + return self._handle_return(res) + + def remove_instance(self, instance_id, metadata=None): + """ + Removes instance with 'instance_id' from file's metadata and + saves them. + + Keep matching item in file though. 
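+
+        A call sketch (the id value is illustrative):
+
+            stub.remove_instance("14fa81fc-1b9d-4e55-907b-6e0ba5b2e1bc")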
+ + Args: + instance_id(string): instance id + """ + cleaned_data = [] + + if metadata is None: + metadata = self.get_metadata() + + for instance in metadata: + inst_id = instance.get("instance_id") or instance.get("uuid") + if inst_id != instance_id: + cleaned_data.append(instance) + + payload = json.dumps(cleaned_data, indent=4) + res = self.websocketserver.call(self.client.call + ('AfterEffects.imprint', + payload=payload)) + + return self._handle_return(res) + + def is_saved(self): + # TODO + return True + + def set_label_color(self, item_id, color_idx): + """ + Used for highlight additional information in Project panel. + Green color is loaded asset, blue is created asset + Args: + item_id (int): + color_idx (int): 0-16 Label colors from AE Project view + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.set_label_color', + item_id=item_id, + color_idx=color_idx)) + + return self._handle_return(res) + + def get_comp_properties(self, comp_id): + """ Get composition information for render purposes + + Returns startFrame, frameDuration, fps, width, height. + + Args: + comp_id (int): + + Returns: + (AEItem) + + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.get_comp_properties', + item_id=comp_id + )) + + records = self._to_records(self._handle_return(res)) + if records: + return records.pop() + + def set_comp_properties(self, comp_id, start, duration, frame_rate, + width, height): + """ + Set work area to predefined values (from Ftrack). + Work area directs what gets rendered. + Beware of rounding, AE expects seconds, not frames directly. + + Args: + comp_id (int): + start (int): workAreaStart in frames + duration (int): in frames + frame_rate (float): frames in seconds + width (int): resolution width + height (int): resolution height + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.set_comp_properties', + item_id=comp_id, + start=start, + duration=duration, + frame_rate=frame_rate, + width=width, + height=height)) + return self._handle_return(res) + + def save(self): + """ + Saves active document + Returns: None + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.save')) + + return self._handle_return(res) + + def saveAs(self, project_path, as_copy): + """ + Saves active project to aep (copy) or png or jpg + Args: + project_path(string): full local path + as_copy: + Returns: None + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.saveAs', + image_path=project_path, + as_copy=as_copy)) + + return self._handle_return(res) + + def get_render_info(self, comp_id): + """ Get render queue info for render purposes + + Returns: + (list) of (AEItem): with 'file_name' field + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.get_render_info', + comp_id=comp_id)) + + records = self._to_records(self._handle_return(res)) + return records + + def get_audio_url(self, item_id): + """ Get audio layer absolute url for comp + + Args: + item_id (int): composition id + Returns: + (str): absolute path url + """ + res = self.websocketserver.call(self.client.call + ('AfterEffects.get_audio_url', + item_id=item_id)) + + return self._handle_return(res) + + def import_background(self, comp_id, comp_name, files): + """ + Imports backgrounds images to existing or new composition. + + If comp_id is not provided, new composition is created, basic + values (width, heights, frameRatio) takes from first imported + image. 
+
+        All images from background json are imported as a FootageItem and
+        a separate layer is created for each of them under the composition.
+
+        Order of imported 'files' is important.
+
+        Args:
+            comp_id (int): id of existing composition (null if new)
+            comp_name (str): used when creating a new composition
+            files (list): list of absolute paths to import and
+                add as layers
+
+        Returns:
+            (AEItem): object with id of created folder, all imported images
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.import_background',
+                                         comp_id=comp_id,
+                                         comp_name=comp_name,
+                                         files=files))
+
+        records = self._to_records(self._handle_return(res))
+        if records:
+            return records.pop()
+
+    def reload_background(self, comp_id, comp_name, files):
+        """
+        Reloads background images into an existing composition.
+
+        It actually deletes the complete folder with imported images and
+        the created composition, for safety.
+
+        Args:
+            comp_id (int): id of existing composition to be overwritten
+            comp_name (str): new name of composition (could be same as old
+                if version up only)
+            files (list): list of absolute paths to import and
+                add as layers
+        Returns:
+            (AEItem): object with id of created folder, all imported images
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.reload_background',
+                                         comp_id=comp_id,
+                                         comp_name=comp_name,
+                                         files=files))
+
+        records = self._to_records(self._handle_return(res))
+        if records:
+            return records.pop()
+
+    def add_item_as_layer(self, comp_id, item_id):
+        """
+        Adds already imported FootageItem ('item_id') as a new
+        layer to composition ('comp_id').
+
+        Args:
+            comp_id (int): id of target composition
+            item_id (int): FootageItem.id (comp already found previously)
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.add_item_as_layer',
+                                         comp_id=comp_id,
+                                         item_id=item_id))
+
+        records = self._to_records(self._handle_return(res))
+        if records:
+            return records.pop()
+
+    def add_item_instead_placeholder(self, placeholder_item_id, item_id):
+        """
+        Adds 'item_id' to layers where 'placeholder_item_id' is present.
+
+        1 placeholder could result in multiple loaded containers (e.g. items)
+
+        Args:
+            placeholder_item_id (int): id of placeholder item
+            item_id (int): loaded FootageItem id
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.add_item_instead_placeholder',  # noqa
+                                         placeholder_item_id=placeholder_item_id,  # noqa
+                                         item_id=item_id))
+
+        return self._handle_return(res)
+
+    def add_placeholder(self, name, width, height, fps, duration):
+        """
+        Adds new FootageItem as a placeholder for workfile builder
+
+        Placeholder requires width etc.; currently these are probably
+        only hardcoded values.
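+
+        A call sketch mirroring the defaults used by the workfile template
+        builder above (1920x1060, 25 fps, 10 frames):
+
+            item_id = stub.add_placeholder("LOADERPLACEHOLDER",
+                                           1920, 1060, 25, 10)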
+
+        Args:
+            name (str)
+            width (int)
+            height (int)
+            fps (float)
+            duration (int)
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.add_placeholder',
+                                         name=name,
+                                         width=width,
+                                         height=height,
+                                         fps=fps,
+                                         duration=duration))
+
+        return self._handle_return(res)
+
+    def render(self, folder_url, comp_id):
+        """
+        Renders all render queue items to 'folder_url'
+        Args:
+            folder_url(string): local folder path for collecting
+        Returns: None
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.render',
+                                         folder_url=folder_url,
+                                         comp_id=comp_id))
+        return self._handle_return(res)
+
+    def get_extension_version(self):
+        """Returns version number of installed extension."""
+        res = self.websocketserver.call(self.client.call(
+            'AfterEffects.get_extension_version'))
+
+        return self._handle_return(res)
+
+    def get_app_version(self):
+        """Returns version number of installed application (17.5...)."""
+        res = self.websocketserver.call(self.client.call(
+            'AfterEffects.get_app_version'))
+
+        return self._handle_return(res)
+
+    def close(self):
+        res = self.websocketserver.call(self.client.call('AfterEffects.close'))
+
+        return self._handle_return(res)
+
+    def print_msg(self, msg):
+        """Triggers Javascript alert dialog."""
+        self.websocketserver.call(self.client.call
+                                  ('AfterEffects.print_msg',
+                                   msg=msg))
+
+    def _handle_return(self, res):
+        """Wraps return, throws ValueError if 'error' key is present."""
+        if res and isinstance(res, str) and res != "undefined":
+            try:
+                parsed = json.loads(res)
+            except json.decoder.JSONDecodeError:
+                raise ValueError("Received broken JSON {}".format(res))
+
+            if not parsed:  # empty list
+                return parsed
+
+            first_item = parsed
+            if isinstance(parsed, list):
+                first_item = parsed[0]
+
+            if first_item:
+                if first_item.get("error"):
+                    raise ValueError(first_item["error"])
+                # singular values (file name etc)
+                if first_item.get("result") is not None:
+                    return first_item["result"]
+            return parsed
+        return res
+
+    def _to_records(self, payload):
+        """
+        Converts parsed JSON representation into a list of AEItem objects
+        so that dot notation access works.
+
+        Args:
+            payload (dict|list): parsed JSON, expected to come
+                from _handle_return
+        Returns:
+            (list) of AEItem
+        """
+        if not payload:
+            return []
+
+        if isinstance(payload, str):  # safety fallback
+            try:
+                payload = json.loads(payload)
+            except json.decoder.JSONDecodeError:
+                raise ValueError("Received broken JSON {}".format(payload))
+
+        if isinstance(payload, dict):
+            payload = [payload]
+
+        ret = []
+        # convert to AEItem to use dot notation
+        for d in payload:
+            if not d:
+                continue
+            # currently implemented and expected fields
+            item = AEItem(d.get('id'),
+                          d.get('name'),
+                          d.get('type'),
+                          d.get('members'),
+                          d.get('frameStart'),
+                          d.get('framesDuration'),
+                          d.get('frameRate'),
+                          d.get('file_name'),
+                          d.get("instance_id"),
+                          d.get("width"),
+                          d.get("height"),
+                          d.get("is_placeholder"),
+                          d.get("uuid"),
+                          d.get("path"),
+                          d.get("containing_comps"),)
+
+            ret.append(item)
+        return ret
+
+
+def get_stub():
+    """
+    Convenience function to get server RPC stub to call methods directed
+    for host (AfterEffects).
+    It expects already created connection, started from client.
+ Currently created when panel is opened (PS: Window>Extensions>Avalon) + :return: where functions could be called from + """ + ae_stub = AfterEffectsServerStub() + if not ae_stub.client: + raise ConnectionNotEstablishedYet("Connection is not created yet") + + return ae_stub diff --git a/openpype/hosts/__init__.py b/client/ayon_core/hosts/aftereffects/plugins/__init__.py similarity index 100% rename from openpype/hosts/__init__.py rename to client/ayon_core/hosts/aftereffects/plugins/__init__.py diff --git a/client/ayon_core/hosts/aftereffects/plugins/create/create_render.py b/client/ayon_core/hosts/aftereffects/plugins/create/create_render.py new file mode 100644 index 0000000000..78aa49a562 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/create/create_render.py @@ -0,0 +1,244 @@ +import re + +from ayon_core import resources +from ayon_core.lib import BoolDef, UISeparatorDef +from ayon_core.hosts.aftereffects import api +from ayon_core.pipeline import ( + Creator, + CreatedInstance, + CreatorError +) +from ayon_core.hosts.aftereffects.api.pipeline import cache_and_get_instances +from ayon_core.hosts.aftereffects.api.lib import set_settings +from ayon_core.lib import prepare_template_data +from ayon_core.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS + + +class RenderCreator(Creator): + """Creates 'render' instance for publishing. + + Result of 'render' instance is video or sequence of images for particular + composition based of configuration in its RenderQueue. + """ + identifier = "render" + label = "Render" + family = "render" + description = "Render creator" + + create_allow_context_change = True + + # Settings + mark_for_review = True + force_setting_values = True + + def create(self, subset_name_from_ui, data, pre_create_data): + stub = api.get_stub() # only after After Effects is up + + try: + _ = stub.get_active_document_full_name() + except ValueError: + raise CreatorError( + "Please save workfile via Workfile app first!" + ) + + if pre_create_data.get("use_selection"): + comps = stub.get_selected_items( + comps=True, folders=False, footages=False + ) + else: + comps = stub.get_items(comps=True, folders=False, footages=False) + + if not comps: + raise CreatorError( + "Nothing to create. Select composition in Project Bin if " + "'Use selection' is toggled or create at least " + "one composition." + ) + use_composition_name = (pre_create_data.get("use_composition_name") or + len(comps) > 1) + for comp in comps: + composition_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + comp.name + ) + if use_composition_name: + if "{composition}" not in subset_name_from_ui.lower(): + subset_name_from_ui += "{Composition}" + + dynamic_fill = prepare_template_data({"composition": + composition_name}) + subset_name = subset_name_from_ui.format(**dynamic_fill) + data["composition_name"] = composition_name + else: + subset_name = subset_name_from_ui + subset_name = re.sub(r"\{composition\}", '', subset_name, + flags=re.IGNORECASE) + + for inst in self.create_context.instances: + if subset_name == inst.subset_name: + raise CreatorError("{} already exists".format( + inst.subset_name)) + + data["members"] = [comp.id] + data["orig_comp_name"] = composition_name + + new_instance = CreatedInstance(self.family, subset_name, data, + self) + if "farm" in pre_create_data: + use_farm = pre_create_data["farm"] + new_instance.creator_attributes["farm"] = use_farm + + review = pre_create_data["mark_for_review"] + new_instance. 
creator_attributes["mark_for_review"] = review + + api.get_stub().imprint(new_instance.id, + new_instance.data_to_store()) + self._add_instance_to_context(new_instance) + + stub.rename_item(comp.id, subset_name) + if self.force_setting_values: + set_settings(True, True, [comp.id], print_msg=False) + + def get_pre_create_attr_defs(self): + output = [ + BoolDef("use_selection", + tooltip="Composition for publishable instance should be " + "selected by default.", + default=True, label="Use selection"), + BoolDef("use_composition_name", + label="Use composition name in subset"), + UISeparatorDef(), + BoolDef("farm", label="Render on farm"), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] + return output + + def get_instance_attr_defs(self): + return [ + BoolDef("farm", label="Render on farm"), + BoolDef( + "mark_for_review", + label="Review", + default=False + ) + ] + + def get_icon(self): + return resources.get_openpype_splash_filepath() + + def collect_instances(self): + for instance_data in cache_and_get_instances(self): + # legacy instances have family=='render' or 'renderLocal', use them + creator_id = (instance_data.get("creator_identifier") or + instance_data.get("family", '').replace("Local", '')) + if creator_id == self.identifier: + instance_data = self._handle_legacy(instance_data) + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + api.get_stub().imprint(created_inst.get("instance_id"), + created_inst.data_to_store()) + subset_change = _changes.get("subset") + if subset_change: + api.get_stub().rename_item(created_inst.data["members"][0], + subset_change.new_value) + + def remove_instances(self, instances): + """Removes metadata and renames to original comp name if available.""" + for instance in instances: + self._remove_instance_from_context(instance) + self.host.remove_instance(instance) + + comp_id = instance.data["members"][0] + comp = api.get_stub().get_item(comp_id) + orig_comp_name = instance.data.get("orig_comp_name") + if comp: + if orig_comp_name: + new_comp_name = orig_comp_name + else: + new_comp_name = "dummyCompName" + api.get_stub().rename_item(comp_id, + new_comp_name) + + def apply_settings(self, project_settings): + plugin_settings = ( + project_settings["aftereffects"]["create"]["RenderCreator"] + ) + + self.mark_for_review = plugin_settings["mark_for_review"] + self.default_variants = plugin_settings.get( + "default_variants", + plugin_settings.get("defaults") or [] + ) + + def get_detail_description(self): + return """Creator for Render instances + + Main publishable item in AfterEffects will be of `render` family. + Result of this item (instance) is picture sequence or video that could + be a final delivery product or loaded and used in another DCCs. + + Select single composition and create instance of 'render' family or + turn off 'Use selection' to create instance for all compositions. + + 'Use composition name in subset' allows to explicitly add composition + name into created subset name. + + Position of composition name could be set in + `project_settings/global/tools/creator/subset_name_profiles` with some + form of '{composition}' placeholder. + + Composition name will be used implicitly if multiple composition should + be handled at same time. 
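+
+        For example (illustrative): a subset name profile
+        'render{Composition}' with a composition named "Main" gives
+        subset 'renderMain'.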
+
+        If the {composition} placeholder is not used in
+        'subset_name_profiles', the composition name will be capitalized
+        and appended to the subset name if necessary.
+
+        If the composition name should be used, it will be cleaned up of
+        characters that would cause an issue in published file names.
+        """
+
+    def get_dynamic_data(self, variant, task_name, asset_doc,
+                         project_name, host_name, instance):
+        dynamic_data = {}
+        if instance is not None:
+            composition_name = instance.get("composition_name")
+            if composition_name:
+                dynamic_data["composition"] = composition_name
+        else:
+            dynamic_data["composition"] = "{composition}"
+
+        return dynamic_data
+
+    def _handle_legacy(self, instance_data):
+        """Converts old instances to new format."""
+        if not instance_data.get("members"):
+            instance_data["members"] = [instance_data.get("uuid")]
+
+        if instance_data.get("uuid"):
+            # uuid not needed, replaced with unique instance_id
+            api.get_stub().remove_instance(instance_data.get("uuid"))
+            instance_data.pop("uuid")
+
+        if not instance_data.get("task"):
+            instance_data["task"] = self.create_context.get_current_task_name()
+
+        if not instance_data.get("creator_attributes"):
+            is_old_farm = instance_data["family"] != "renderLocal"
+            instance_data["creator_attributes"] = {"farm": is_old_farm}
+            instance_data["family"] = self.family
+
+        if instance_data["creator_attributes"].get("mark_for_review") is None:
+            instance_data["creator_attributes"]["mark_for_review"] = True
+
+        return instance_data
diff --git a/client/ayon_core/hosts/aftereffects/plugins/create/workfile_creator.py b/client/ayon_core/hosts/aftereffects/plugins/create/workfile_creator.py
new file mode 100644
index 0000000000..49f965800d
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/plugins/create/workfile_creator.py
@@ -0,0 +1,85 @@
+import ayon_core.hosts.aftereffects.api as api
+from ayon_core.client import get_asset_by_name
+from ayon_core.pipeline import (
+    AutoCreator,
+    CreatedInstance
+)
+from ayon_core.hosts.aftereffects.api.pipeline import cache_and_get_instances
+
+
+class AEWorkfileCreator(AutoCreator):
+    identifier = "workfile"
+    family = "workfile"
+
+    default_variant = "Main"
+
+    def get_instance_attr_defs(self):
+        return []
+
+    def collect_instances(self):
+        for instance_data in cache_and_get_instances(self):
+            creator_id = instance_data.get("creator_identifier")
+            if creator_id == self.identifier:
+                subset_name = instance_data["subset"]
+                instance = CreatedInstance(
+                    self.family, subset_name, instance_data, self
+                )
+                self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        # nothing to change on workfiles
+        pass
+
+    def create(self, options=None):
+        existing_instance = None
+        for instance in self.create_context.instances:
+            if instance.family == self.family:
+                existing_instance = instance
+                break
+
+        context = self.create_context
+        project_name = context.get_current_project_name()
+        asset_name = context.get_current_asset_name()
+        task_name = context.get_current_task_name()
+        host_name = context.host_name
+
+        existing_asset_name = None
+        if existing_instance is not None:
+            existing_asset_name = existing_instance.get("folderPath")
+
+        if existing_instance is None:
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            subset_name = self.get_subset_name(
+                self.default_variant, task_name, asset_doc,
+                project_name, host_name
+            )
+            data = {
+                "folderPath": asset_name,
+                "task": task_name,
+                "variant": self.default_variant,
+            }
+            data.update(self.get_dynamic_data(
+                self.default_variant, task_name, 
asset_doc, + project_name, host_name, None + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(new_instance) + + api.get_stub().imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + + elif ( + existing_asset_name != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, task_name, asset_doc, + project_name, host_name + ) + existing_instance["folderPath"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/client/ayon_core/hosts/aftereffects/plugins/load/load_background.py b/client/ayon_core/hosts/aftereffects/plugins/load/load_background.py new file mode 100644 index 0000000000..f23d7ec0bd --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/load/load_background.py @@ -0,0 +1,108 @@ +import re + +from ayon_core.pipeline import get_representation_path +from ayon_core.hosts.aftereffects import api + +from ayon_core.hosts.aftereffects.api.lib import ( + get_background_layers, + get_unique_layer_name, +) + + +class BackgroundLoader(api.AfterEffectsLoader): + """ + Load images from Background family + Creates for each background separate folder with all imported images + from background json AND automatically created composition with layers, + each layer for separate image. + + For each load container is created and stored in project (.aep) + metadata + """ + label = "Load JSON Background" + families = ["background"] + representations = ["json"] + + def load(self, context, name=None, namespace=None, data=None): + stub = self.get_stub() + items = stub.get_items(comps=True) + existing_items = [layer.name.replace(stub.LOADED_ICON, '') + for layer in items] + + comp_name = get_unique_layer_name( + existing_items, + "{}_{}".format(context["asset"]["name"], name)) + + path = self.filepath_from_context(context) + layers = get_background_layers(path) + if not layers: + raise ValueError("No layers found in {}".format(path)) + + comp = stub.import_background(None, stub.LOADED_ICON + comp_name, + layers) + + if not comp: + raise ValueError("Import background failed. " + "Please contact support") + + self[:] = [comp] + namespace = namespace or comp_name + + return api.containerise( + name, + namespace, + comp, + context, + self.__class__.__name__ + ) + + def update(self, container, representation): + """ Switch asset or change version """ + stub = self.get_stub() + context = representation.get("context", {}) + _ = container.pop("layer") + + # without iterator number (_001, 002...) 
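+        # e.g. "bgMain_001" -> "bgMain", so a previously renamed comp still
+        # matches the freshly composed "{asset}_{subset}" name below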
+ namespace_from_container = re.sub(r'_\d{3}$', '', + container["namespace"]) + comp_name = "{}_{}".format(context["asset"], context["subset"]) + + # switching assets + if namespace_from_container != comp_name: + items = stub.get_items(comps=True) + existing_items = [layer.name for layer in items] + comp_name = get_unique_layer_name( + existing_items, + "{}_{}".format(context["asset"], context["subset"])) + else: # switching version - keep same name + comp_name = container["namespace"] + + path = get_representation_path(representation) + + layers = get_background_layers(path) + comp = stub.reload_background(container["members"][1], + stub.LOADED_ICON + comp_name, + layers) + + # update container + container["representation"] = str(representation["_id"]) + container["name"] = context["subset"] + container["namespace"] = comp_name + container["members"] = comp.members + + stub.imprint(comp.id, container) + + def remove(self, container): + """ + Removes element from scene: deletes layer + removes from file + metadata. + Args: + container (dict): container to be removed - used to get layer_id + """ + stub = self.get_stub() + layer = container.pop("layer") + stub.imprint(layer.id, {}) + stub.delete_item(layer.id) + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/client/ayon_core/hosts/aftereffects/plugins/load/load_file.py similarity index 95% rename from openpype/hosts/aftereffects/plugins/load/load_file.py rename to client/ayon_core/hosts/aftereffects/plugins/load/load_file.py index 8d52aac546..a8e67e9f88 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_file.py +++ b/client/ayon_core/hosts/aftereffects/plugins/load/load_file.py @@ -1,8 +1,8 @@ import re -from openpype.pipeline import get_representation_path -from openpype.hosts.aftereffects import api -from openpype.hosts.aftereffects.api.lib import get_unique_layer_name +from ayon_core.pipeline import get_representation_path +from ayon_core.hosts.aftereffects import api +from ayon_core.hosts.aftereffects.api.lib import get_unique_layer_name class FileLoader(api.AfterEffectsLoader): diff --git a/openpype/hosts/aftereffects/plugins/publish/add_publish_highlight.py b/client/ayon_core/hosts/aftereffects/plugins/publish/add_publish_highlight.py similarity index 90% rename from openpype/hosts/aftereffects/plugins/publish/add_publish_highlight.py rename to client/ayon_core/hosts/aftereffects/plugins/publish/add_publish_highlight.py index d437a6754b..331d5281ed 100644 --- a/openpype/hosts/aftereffects/plugins/publish/add_publish_highlight.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/add_publish_highlight.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.hosts.aftereffects.api import get_stub +from ayon_core.hosts.aftereffects.api import get_stub class AddPublishHighlight(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/aftereffects/plugins/publish/closeAE.py b/client/ayon_core/hosts/aftereffects/plugins/publish/closeAE.py similarity index 91% rename from openpype/hosts/aftereffects/plugins/publish/closeAE.py rename to client/ayon_core/hosts/aftereffects/plugins/publish/closeAE.py index 0be20d9f05..c00591729e 100644 --- a/openpype/hosts/aftereffects/plugins/publish/closeAE.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/closeAE.py @@ -2,7 +2,7 @@ """Close AE after publish. 
 For Webpublishing only."""
 import pyblish.api
 
-from openpype.hosts.aftereffects.api import get_stub
+from ayon_core.hosts.aftereffects.api import get_stub
 
 
 class CloseAE(pyblish.api.ContextPlugin):
diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_audio.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_audio.py
new file mode 100644
index 0000000000..c0ef0b71a4
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_audio.py
@@ -0,0 +1,27 @@
+import os
+
+import pyblish.api
+
+from ayon_core.hosts.aftereffects.api import get_stub
+
+
+class CollectAudio(pyblish.api.ContextPlugin):
+    """Inject audio file url for rendered composition into context.
+
+    Needs to run AFTER 'collect_render'. Uses the collected comp_id to check
+    if there is an AVLayer in this composition.
+    """
+
+    order = pyblish.api.CollectorOrder + 0.499
+    label = "Collect Audio"
+    hosts = ["aftereffects"]
+
+    def process(self, context):
+        for instance in context:
+            if 'render.farm' in instance.data.get("families", []):
+                comp_id = instance.data["comp_id"]
+                if not comp_id:
+                    self.log.debug("No comp_id filled in instance")
+                    continue
+                context.data["audioFile"] = os.path.normpath(
+                    get_stub().get_audio_url(comp_id)
+                ).replace("\\", "/")
diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_current_file.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_current_file.py
new file mode 100644
index 0000000000..aead872461
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_current_file.py
@@ -0,0 +1,18 @@
+import os
+
+import pyblish.api
+
+from ayon_core.hosts.aftereffects.api import get_stub
+
+
+class CollectCurrentFile(pyblish.api.ContextPlugin):
+    """Inject the current working file into context"""
+
+    order = pyblish.api.CollectorOrder - 0.49
+    label = "Current File"
+    hosts = ["aftereffects"]
+
+    def process(self, context):
+        context.data["currentFile"] = os.path.normpath(
+            get_stub().get_active_document_full_name()
+        ).replace("\\", "/")
diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_extension_version.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_extension_version.py
new file mode 100644
index 0000000000..5b8393a49a
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_extension_version.py
@@ -0,0 +1,58 @@
+import os
+import re
+import pyblish.api
+
+from ayon_core.hosts.aftereffects.api import (
+    get_stub,
+    get_extension_manifest_path
+)
+
+
+class CollectExtensionVersion(pyblish.api.ContextPlugin):
+    """ Pulls and compares version of installed extension.
+
+    It is recommended to use the same extension as provided with the
+    Openpype code.
+
+    Please use Anastasiy's Extension Manager or ZXPInstaller to update
+    the extension in case of an error.
+ + You can locate extension.zxp in your installed Openpype code in + `repos/avalon-core/avalon/aftereffects` + """ + # This technically should be a validator, but other collectors might be + # impacted with usage of obsolete extension, so collector that runs first + # was chosen + order = pyblish.api.CollectorOrder - 0.5 + label = "Collect extension version" + hosts = ["aftereffects"] + + optional = True + active = True + + def process(self, context): + installed_version = get_stub().get_extension_version() + + if not installed_version: + raise ValueError("Unknown version, probably old extension") + + manifest_url = get_extension_manifest_path() + + if not os.path.exists(manifest_url): + self.log.debug("Unable to locate extension manifest, not checking") + return + + expected_version = None + with open(manifest_url) as fp: + content = fp.read() + found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")', + content) + if found: + expected_version = found[0][1] + + if expected_version != installed_version: + msg = ( + "Expected version '{}' found '{}'\n Please update" + " your installed extension, it might not work properly." + ).format(expected_version, installed_version) + + raise ValueError(msg) diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py new file mode 100644 index 0000000000..a8a316ea80 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py @@ -0,0 +1,225 @@ +import os +import re +import tempfile +import attr + +import pyblish.api + +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import publish +from ayon_core.pipeline.publish import RenderInstance + +from ayon_core.hosts.aftereffects.api import get_stub + + +@attr.s +class AERenderInstance(RenderInstance): + # extend generic, composition name is needed + comp_name = attr.ib(default=None) + comp_id = attr.ib(default=None) + fps = attr.ib(default=None) + projectEntity = attr.ib(default=None) + stagingDir = attr.ib(default=None) + app_version = attr.ib(default=None) + publish_attributes = attr.ib(default={}) + file_names = attr.ib(default=[]) + + +class CollectAERender(publish.AbstractCollectRender): + + order = pyblish.api.CollectorOrder + 0.405 + label = "Collect After Effects Render Layers" + hosts = ["aftereffects"] + + padding_width = 6 + rendered_extension = 'png' + + _stub = None + + @classmethod + def get_stub(cls): + if not cls._stub: + cls._stub = get_stub() + return cls._stub + + def get_instances(self, context): + instances = [] + instances_to_remove = [] + + app_version = CollectAERender.get_stub().get_app_version() + app_version = app_version[0:4] + + current_file = context.data["currentFile"] + version = context.data["version"] + + project_entity = context.data["projectEntity"] + + compositions = CollectAERender.get_stub().get_items(True) + compositions_by_id = {item.id: item for item in compositions} + for inst in context: + if not inst.data.get("active", True): + continue + + family = inst.data["family"] + if family not in ["render", "renderLocal"]: # legacy + continue + + comp_id = int(inst.data["members"][0]) + + comp_info = CollectAERender.get_stub().get_comp_properties( + comp_id) + + if not comp_info: + self.log.warning("Orphaned instance, deleting metadata") + inst_id = inst.data.get("instance_id") or str(comp_id) + CollectAERender.get_stub().remove_instance(inst_id) + continue + + frame_start = comp_info.frameStart + frame_end = 
round(comp_info.frameStart + + comp_info.framesDuration) - 1 + fps = comp_info.frameRate + # TODO add resolution when supported by extension + + task_name = inst.data.get("task") # legacy + + render_q = CollectAERender.get_stub().get_render_info(comp_id) + if not render_q: + raise ValueError("No file extension set in Render Queue") + render_item = render_q[0] + + instance_families = inst.data.get("families", []) + subset_name = inst.data["subset"] + instance = AERenderInstance( + family="render", + families=instance_families, + version=version, + time="", + source=current_file, + label="{} - {}".format(subset_name, family), + subset=subset_name, + asset=inst.data["asset"], + task=task_name, + attachTo=False, + setMembers='', + publish=True, + name=subset_name, + resolutionWidth=render_item.width, + resolutionHeight=render_item.height, + pixelAspect=1, + tileRendering=False, + tilesX=0, + tilesY=0, + review="review" in instance_families, + frameStart=frame_start, + frameEnd=frame_end, + frameStep=1, + fps=fps, + app_version=app_version, + publish_attributes=inst.data.get("publish_attributes", {}), + file_names=[item.file_name for item in render_q] + ) + + comp = compositions_by_id.get(comp_id) + if not comp: + raise ValueError("There is no composition for item {}". + format(comp_id)) + instance.outputDir = self._get_output_dir(instance) + instance.comp_name = comp.name + instance.comp_id = comp_id + + is_local = "renderLocal" in inst.data["family"] # legacy + if inst.data.get("creator_attributes"): + is_local = not inst.data["creator_attributes"].get("farm") + if is_local: + # for local renders + instance = self._update_for_local(instance, project_entity) + else: + fam = "render.farm" + if fam not in instance.families: + instance.families.append(fam) + instance.renderer = "aerender" + instance.farm = True # to skip integrate + if "review" in instance.families: + # to skip ExtractReview locally + instance.families.remove("review") + + instances.append(instance) + instances_to_remove.append(inst) + + for instance in instances_to_remove: + context.remove(instance) + return instances + + def get_expected_files(self, render_instance): + """ + Returns list of rendered files that should be created by + Deadline. These are not published directly, they are source + for later 'submit_publish_job'. + + Args: + render_instance (RenderInstance): to pull anatomy and parts used + in url + + Returns: + (list) of absolute urls to rendered file + """ + start = render_instance.frameStart + end = render_instance.frameEnd + + base_dir = self._get_output_dir(render_instance) + expected_files = [] + for file_name in render_instance.file_names: + _, ext = os.path.splitext(os.path.basename(file_name)) + ext = ext.replace('.', '') + version_str = "v{:03d}".format(render_instance.version) + if "#" not in file_name: # single frame (mov)W + path = os.path.join(base_dir, "{}_{}_{}.{}".format( + render_instance.asset, + render_instance.subset, + version_str, + ext + )) + expected_files.append(path) + else: + for frame in range(start, end + 1): + path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format( + render_instance.asset, + render_instance.subset, + version_str, + str(frame).zfill(self.padding_width), + ext + )) + expected_files.append(path) + return expected_files + + def _get_output_dir(self, render_instance): + """ + Returns dir path of rendered files, used in submit_publish_job + for metadata.json location. + Should be in separate folder inside of work area. 
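+
+        A sketch of the resulting location (path shape only, illustrative):
+
+            <workfile_dir>/renders/aftereffects/<workfile_name>/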
+ + Args: + render_instance (RenderInstance): + + Returns: + (str): absolute path to rendered files + """ + # render to folder of workfile + base_dir = os.path.dirname(render_instance.source) + file_name, _ = os.path.splitext( + os.path.basename(render_instance.source)) + base_dir = os.path.join(base_dir, 'renders', 'aftereffects', file_name) + + # for submit_publish_job + return base_dir + + def _update_for_local(self, instance, project_entity): + """Update old saved instances to current publishing format""" + instance.stagingDir = tempfile.mkdtemp() + instance.projectEntity = project_entity + fam = "render.local" + if fam not in instance.families: + instance.families.append(fam) + + return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_review.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_review.py similarity index 100% rename from openpype/hosts/aftereffects/plugins/publish/collect_review.py rename to client/ayon_core/hosts/aftereffects/plugins/publish/collect_review.py diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_workfile.py new file mode 100644 index 0000000000..538d646ab4 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_workfile.py @@ -0,0 +1,100 @@ +import os + +import pyblish.api + +from ayon_core.client import get_asset_name_identifier +from ayon_core.pipeline.create import get_subset_name + + +class CollectWorkfile(pyblish.api.ContextPlugin): + """ Adds the AE render instances """ + + label = "Collect After Effects Workfile Instance" + order = pyblish.api.CollectorOrder + 0.1 + + default_variant = "Main" + + def process(self, context): + existing_instance = None + for instance in context: + if instance.data["family"] == "workfile": + self.log.debug("Workfile instance found, won't create new") + existing_instance = instance + break + + current_file = context.data["currentFile"] + staging_dir = os.path.dirname(current_file) + scene_file = os.path.basename(current_file) + if existing_instance is None: # old publish + instance = self._get_new_instance(context, scene_file) + else: + instance = existing_instance + + # creating representation + representation = { + 'name': 'aep', + 'ext': 'aep', + 'files': scene_file, + "stagingDir": staging_dir, + } + + if not instance.data.get("representations"): + instance.data["representations"] = [] + instance.data["representations"].append(representation) + + instance.data["publish"] = instance.data["active"] # for DL + + def _get_new_instance(self, context, scene_file): + task = context.data["task"] + version = context.data["version"] + asset_entity = context.data["assetEntity"] + project_entity = context.data["projectEntity"] + + asset_name = get_asset_name_identifier(asset_entity) + + instance_data = { + "active": True, + "asset": asset_name, + "task": task, + "frameStart": context.data['frameStart'], + "frameEnd": context.data['frameEnd'], + "handleStart": context.data['handleStart'], + "handleEnd": context.data['handleEnd'], + "fps": asset_entity["data"]["fps"], + "resolutionWidth": asset_entity["data"].get( + "resolutionWidth", + project_entity["data"]["resolutionWidth"]), + "resolutionHeight": asset_entity["data"].get( + "resolutionHeight", + project_entity["data"]["resolutionHeight"]), + "pixelAspect": 1, + "step": 1, + "version": version + } + + # workfile instance + family = "workfile" + subset = get_subset_name( + family, + self.default_variant, + 
context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] + ) + # Create instance + instance = context.create_instance(subset) + + # creating instance data + instance.data.update({ + "subset": subset, + "label": scene_file, + "family": family, + "families": [family], + "representations": list() + }) + + instance.data.update(instance_data) + + return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py b/client/ayon_core/hosts/aftereffects/plugins/publish/extract_local_render.py similarity index 96% rename from openpype/hosts/aftereffects/plugins/publish/extract_local_render.py rename to client/ayon_core/hosts/aftereffects/plugins/publish/extract_local_render.py index b44e986d83..c5e62a2f54 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/extract_local_render.py @@ -1,7 +1,7 @@ import os -from openpype.pipeline import publish -from openpype.hosts.aftereffects.api import get_stub +from ayon_core.pipeline import publish +from ayon_core.hosts.aftereffects.api import get_stub class ExtractLocalRender(publish.Extractor): diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/extract_save_scene.py b/client/ayon_core/hosts/aftereffects/plugins/publish/extract_save_scene.py new file mode 100644 index 0000000000..f0007f96d9 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/extract_save_scene.py @@ -0,0 +1,16 @@ +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.aftereffects.api import get_stub + + +class ExtractSaveScene(pyblish.api.ContextPlugin): + """Save scene before extraction.""" + + order = publish.Extractor.order - 0.48 + label = "Extract Save Scene" + hosts = ["aftereffects"] + + def process(self, context): + stub = get_stub() + stub.save() diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_footage_items.xml b/client/ayon_core/hosts/aftereffects/plugins/publish/help/validate_footage_items.xml similarity index 100% rename from openpype/hosts/aftereffects/plugins/publish/help/validate_footage_items.xml rename to client/ayon_core/hosts/aftereffects/plugins/publish/help/validate_footage_items.xml diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml b/client/ayon_core/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml similarity index 100% rename from openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml rename to client/ayon_core/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml b/client/ayon_core/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml similarity index 100% rename from openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml rename to client/ayon_core/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/increment_workfile.py b/client/ayon_core/hosts/aftereffects/plugins/publish/increment_workfile.py new file mode 100644 index 0000000000..fc51ff9176 --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/increment_workfile.py @@ -0,0 +1,30 @@ +import pyblish.api +from ayon_core.lib import version_up +from 
ayon_core.pipeline.publish import get_errored_plugins_from_context + +from ayon_core.hosts.aftereffects.api import get_stub + + +class IncrementWorkfile(pyblish.api.InstancePlugin): + """Increment the current workfile. + + Saves the current scene with an increased version number. + """ + + label = "Increment Workfile" + order = pyblish.api.IntegratorOrder + 9.0 + hosts = ["aftereffects"] + families = ["workfile"] + optional = True + + def process(self, instance): + errored_plugins = get_errored_plugins_from_context(instance.context) + if errored_plugins: + raise RuntimeError( + "Skipping incrementing current file because publishing failed." + ) + + scene_path = version_up(instance.context.data["currentFile"]) + get_stub().saveAs(scene_path, True) + + self.log.info("Incremented workfile to: {}".format(scene_path)) diff --git a/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py b/client/ayon_core/hosts/aftereffects/plugins/publish/pre_collect_render.py similarity index 96% rename from openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py rename to client/ayon_core/hosts/aftereffects/plugins/publish/pre_collect_render.py index 85a42830a4..de3c935dff 100644 --- a/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/pre_collect_render.py @@ -1,6 +1,6 @@ import json import pyblish.api -from openpype.hosts.aftereffects.api import AfterEffectsHost +from ayon_core.hosts.aftereffects.api import AfterEffectsHost class PreCollectRender(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/aftereffects/plugins/publish/remove_publish_highlight.py b/client/ayon_core/hosts/aftereffects/plugins/publish/remove_publish_highlight.py similarity index 89% rename from openpype/hosts/aftereffects/plugins/publish/remove_publish_highlight.py rename to client/ayon_core/hosts/aftereffects/plugins/publish/remove_publish_highlight.py index 370f916f04..70b6efecb0 100644 --- a/openpype/hosts/aftereffects/plugins/publish/remove_publish_highlight.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/remove_publish_highlight.py @@ -1,5 +1,5 @@ -from openpype.pipeline import publish -from openpype.hosts.aftereffects.api import get_stub +from ayon_core.pipeline import publish +from ayon_core.hosts.aftereffects.api import get_stub class RemovePublishHighlight(publish.Extractor): diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_footage_items.py b/client/ayon_core/hosts/aftereffects/plugins/publish/validate_footage_items.py similarity index 95% rename from openpype/hosts/aftereffects/plugins/publish/validate_footage_items.py rename to client/ayon_core/hosts/aftereffects/plugins/publish/validate_footage_items.py index 40a08a2c3f..ae20102417 100644 --- a/openpype/hosts/aftereffects/plugins/publish/validate_footage_items.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/validate_footage_items.py @@ -6,10 +6,10 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishXmlValidationError ) -from openpype.hosts.aftereffects.api import get_stub +from ayon_core.hosts.aftereffects.api import get_stub class ValidateFootageItems(pyblish.api.InstancePlugin): diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/client/ayon_core/hosts/aftereffects/plugins/publish/validate_instance_asset.py new file mode 100644 index 0000000000..c3938ecbda --- /dev/null +++ 
b/client/ayon_core/hosts/aftereffects/plugins/publish/validate_instance_asset.py
@@ -0,0 +1,64 @@
+import pyblish.api
+
+from ayon_core.pipeline import get_current_asset_name
+from ayon_core.pipeline.publish import (
+    ValidateContentsOrder,
+    PublishXmlValidationError,
+)
+from ayon_core.hosts.aftereffects.api import get_stub
+
+
+class ValidateInstanceAssetRepair(pyblish.api.Action):
+    """Repair the instance asset with value from Context."""
+
+    label = "Repair"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (result["error"] is not None and result["instance"] is not None
+                    and result["instance"] not in failed):
+                failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
+        stub = get_stub()
+        for instance in instances:
+            data = stub.read(instance[0])
+
+            data["asset"] = get_current_asset_name()
+            stub.imprint(instance[0].instance_id, data)
+
+
+class ValidateInstanceAsset(pyblish.api.InstancePlugin):
+    """Validate the instance asset is the currently selected context asset.
+
+    As multiple workfiles might be opened at the same time, switching
+    between them would mess with the selected context. (From Launcher
+    or Ftrack).
+
+    In that case outputs might be published under the wrong asset!
+
+    The Repair action will use the Context asset value (from Workfiles or
+    Launcher). Closing and reopening with Workfiles will refresh the
+    Context value.
+    """
+
+    label = "Validate Instance Asset"
+    hosts = ["aftereffects"]
+    actions = [ValidateInstanceAssetRepair]
+    order = ValidateContentsOrder
+
+    def process(self, instance):
+        instance_asset = instance.data["asset"]
+        current_asset = get_current_asset_name()
+        msg = (
+            f"Instance asset {instance_asset} is not the same "
+            f"as current context {current_asset}."
+        )
+
+        if instance_asset != current_asset:
+            raise PublishXmlValidationError(self, msg)
diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/client/ayon_core/hosts/aftereffects/plugins/publish/validate_scene_settings.py
new file mode 100644
index 0000000000..0a90ae2a5a
--- /dev/null
+++ b/client/ayon_core/hosts/aftereffects/plugins/publish/validate_scene_settings.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+"""Validate scene settings.
+Requires:
+    instance -> assetEntity
+    instance -> anatomyData
+"""
+import os
+import re
+
+import pyblish.api
+
+from ayon_core.pipeline import (
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin
+)
+from ayon_core.hosts.aftereffects.api import get_asset_settings
+
+
+class ValidateSceneSettings(OptionalPyblishPluginMixin,
+                            pyblish.api.InstancePlugin):
+    """
+    Ensures that Composition Settings (right mouse on comp) are the same
+    as in FTrack on the task.
+
+    By default checks only duration - how many frames should be rendered.
+    Compares:
+        Frame end - Frame start + 1 from FTrack
+    against
+        Duration in Composition Settings.
+
+    If this complains:
+        Check the error message for where the discrepancy is.
+        Check the FTrack task 'pype' section of task attributes for
+            expected values.
+        Check/modify rendered Composition Settings.
+
+    If you know what you are doing, run publishing again and uncheck this
+    validation before the Validation phase.
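+
+    Example (illustrative values): with frameStart 1001 and frameEnd 1100
+    in the DB, the expected Composition Settings duration is
+    1100 - 1001 + 1 = 100 frames.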
+    """
+
+    """
+    Dev docu:
+    Could be configured by 'presets/plugins/aftereffects/publish'
+
+    skip_timelines_check - fill in task names for which to skip validation of
+        frameStart
+        frameEnd
+        fps
+        handleStart
+        handleEnd
+    skip_resolution_check - fill in task names for which to skip validation of
+        resolutionWidth
+        resolutionHeight
+        TODO support in extension is missing for now
+
+    By default validates duration (how many frames should be published)
+    """
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Scene Settings"
+    families = ["render.farm", "render.local", "render"]
+    hosts = ["aftereffects"]
+    optional = True
+
+    skip_timelines_check = [".*"]  # * >> skip for all
+    skip_resolution_check = [".*"]
+
+    def process(self, instance):
+        """Plugin entry point."""
+        # Skip the instance if it is not active by data on the instance
+        if not self.is_active(instance.data):
+            return
+
+        asset_doc = instance.data["assetEntity"]
+        expected_settings = get_asset_settings(asset_doc)
+        self.log.info("config from DB::{}".format(expected_settings))
+
+        task_name = instance.data["anatomyData"]["task"]["name"]
+        if any(re.search(pattern, task_name)
+                for pattern in self.skip_resolution_check):
+            expected_settings.pop("resolutionWidth")
+            expected_settings.pop("resolutionHeight")
+
+        if any(re.search(pattern, task_name)
+                for pattern in self.skip_timelines_check):
+            expected_settings.pop('fps', None)
+            expected_settings.pop('frameStart', None)
+            expected_settings.pop('frameEnd', None)
+            expected_settings.pop('handleStart', None)
+            expected_settings.pop('handleEnd', None)
+
+        # handle case where ftrack uses only two decimal places
+        # 23.976023976023978 vs. 23.98
+        fps = instance.data.get("fps")
+        if fps:
+            if isinstance(fps, float):
+                fps = float(
+                    "{:.2f}".format(fps))
+            expected_settings["fps"] = fps
+
+        duration = instance.data.get("frameEndHandle") - \
+            instance.data.get("frameStartHandle") + 1
+
+        self.log.debug("validated items::{}".format(expected_settings))
+
+        current_settings = {
+            "fps": fps,
+            "frameStart": instance.data.get("frameStart"),
+            "frameEnd": instance.data.get("frameEnd"),
+            "handleStart": instance.data.get("handleStart"),
+            "handleEnd": instance.data.get("handleEnd"),
+            "frameStartHandle": instance.data.get("frameStartHandle"),
+            "frameEndHandle": instance.data.get("frameEndHandle"),
+            "resolutionWidth": instance.data.get("resolutionWidth"),
+            "resolutionHeight": instance.data.get("resolutionHeight"),
+            "duration": duration
+        }
+        self.log.info("current_settings:: {}".format(current_settings))
+
+        invalid_settings = []
+        invalid_keys = set()
+        for key, value in expected_settings.items():
+            if value != current_settings[key]:
+                msg = "'{}' expected: '{}' found: '{}'".format(
+                    key, value, current_settings[key])
+
+                if key == "duration" and expected_settings.get("handleStart"):
+                    msg += " Handles included in calculation. Remove " \
+                           "handles in DB or extend frame range in " \
+                           "Composition Setting."
+
+                invalid_settings.append(msg)
+                invalid_keys.add(key)
+
+        if invalid_settings:
+            msg = "Found invalid settings:\n{}".format(
+                "\n".join(invalid_settings)
+            )
+
+            invalid_keys_str = ",".join(invalid_keys)
+            break_str = "<br/>"
+            invalid_setting_str = "<b>Found invalid settings:</b><br/>
{}".\ + format(break_str.join(invalid_settings)) + + formatting_data = { + "invalid_setting_str": invalid_setting_str, + "invalid_keys_str": invalid_keys_str + } + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + if not os.path.exists(instance.data.get("source")): + scene_url = instance.data.get("source") + msg = "Scene file {} not found (saved under wrong name)".format( + scene_url + ) + formatting_data = { + "scene_url": scene_url + } + raise PublishXmlValidationError(self, msg, key="file_not_found", + formatting_data=formatting_data) diff --git a/openpype/hosts/aftereffects/resources/template.aep b/client/ayon_core/hosts/aftereffects/resources/template.aep similarity index 100% rename from openpype/hosts/aftereffects/resources/template.aep rename to client/ayon_core/hosts/aftereffects/resources/template.aep diff --git a/openpype/hosts/blender/__init__.py b/client/ayon_core/hosts/blender/__init__.py similarity index 100% rename from openpype/hosts/blender/__init__.py rename to client/ayon_core/hosts/blender/__init__.py diff --git a/client/ayon_core/hosts/blender/addon.py b/client/ayon_core/hosts/blender/addon.py new file mode 100644 index 0000000000..c3804382e5 --- /dev/null +++ b/client/ayon_core/hosts/blender/addon.py @@ -0,0 +1,72 @@ +import os +from ayon_core.modules import OpenPypeModule, IHostAddon + +BLENDER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class BlenderAddon(OpenPypeModule, IHostAddon): + name = "blender" + host_name = "blender" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + # Prepare path to implementation script + implementation_user_script_path = os.path.join( + BLENDER_ROOT_DIR, + "blender_addon" + ) + + # Add blender implementation script path to PYTHONPATH + python_path = env.get("PYTHONPATH") or "" + python_path_parts = [ + path + for path in python_path.split(os.pathsep) + if path + ] + python_path_parts.insert(0, implementation_user_script_path) + env["PYTHONPATH"] = os.pathsep.join(python_path_parts) + + # Modify Blender user scripts path + previous_user_scripts = set() + # Implementation path is added to set for easier paths check inside + # loops - will be removed at the end + previous_user_scripts.add(implementation_user_script_path) + + ayon_blender_user_scripts = ( + env.get("AYON_BLENDER_USER_SCRIPTS") or "" + ) + for path in ayon_blender_user_scripts.split(os.pathsep): + if path: + previous_user_scripts.add(os.path.normpath(path)) + + blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or "" + for path in blender_user_scripts.split(os.pathsep): + if path: + previous_user_scripts.add(os.path.normpath(path)) + + # Remove implementation path from user script paths as is set to + # `BLENDER_USER_SCRIPTS` + previous_user_scripts.remove(implementation_user_script_path) + env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path + + # Set custom user scripts env + env["AYON_BLENDER_USER_SCRIPTS"] = os.pathsep.join( + previous_user_scripts + ) + + # Define Qt binding if not defined + if not env.get("QT_PREFERRED_BINDING"): + env["QT_PREFERRED_BINDING"] = "PySide2" + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(BLENDER_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".blend"] diff --git a/openpype/hosts/blender/api/__init__.py b/client/ayon_core/hosts/blender/api/__init__.py 
similarity index 100% rename from openpype/hosts/blender/api/__init__.py rename to client/ayon_core/hosts/blender/api/__init__.py diff --git a/client/ayon_core/hosts/blender/api/action.py b/client/ayon_core/hosts/blender/api/action.py new file mode 100644 index 0000000000..865c2443e0 --- /dev/null +++ b/client/ayon_core/hosts/blender/api/action.py @@ -0,0 +1,47 @@ +import bpy + +import pyblish.api + +from ayon_core.pipeline.publish import get_errored_instances_from_context + + +class SelectInvalidAction(pyblish.api.Action): + """Select invalid objects in Blender when a publish plug-in failed.""" + label = "Select Invalid" + on = "failed" + icon = "search" + + def process(self, context, plugin): + errored_instances = get_errored_instances_from_context(context, + plugin=plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid nodes...") + invalid = list() + for instance in errored_instances: + invalid_nodes = plugin.get_invalid(instance) + if invalid_nodes: + if isinstance(invalid_nodes, (list, tuple)): + invalid.extend(invalid_nodes) + else: + self.log.warning( + "Failed plug-in doesn't have any selectable objects." + ) + + bpy.ops.object.select_all(action='DESELECT') + + # Make sure every node is only processed once + invalid = list(set(invalid)) + if not invalid: + self.log.info("No invalid nodes found.") + return + + invalid_names = [obj.name for obj in invalid] + self.log.info( + "Selecting invalid objects: %s", ", ".join(invalid_names) + ) + # Select the objects and also make the last one the active object. + for obj in invalid: + obj.select_set(True) + + bpy.context.view_layer.objects.active = invalid[-1] diff --git a/openpype/hosts/blender/api/capture.py b/client/ayon_core/hosts/blender/api/capture.py similarity index 100% rename from openpype/hosts/blender/api/capture.py rename to client/ayon_core/hosts/blender/api/capture.py diff --git a/openpype/hosts/blender/api/colorspace.py b/client/ayon_core/hosts/blender/api/colorspace.py similarity index 100% rename from openpype/hosts/blender/api/colorspace.py rename to client/ayon_core/hosts/blender/api/colorspace.py diff --git a/openpype/hosts/blender/api/icons/pyblish-32x32.png b/client/ayon_core/hosts/blender/api/icons/pyblish-32x32.png similarity index 100% rename from openpype/hosts/blender/api/icons/pyblish-32x32.png rename to client/ayon_core/hosts/blender/api/icons/pyblish-32x32.png diff --git a/client/ayon_core/hosts/blender/api/lib.py b/client/ayon_core/hosts/blender/api/lib.py new file mode 100644 index 0000000000..458a275b51 --- /dev/null +++ b/client/ayon_core/hosts/blender/api/lib.py @@ -0,0 +1,367 @@ +import os +import traceback +import importlib +import contextlib +from typing import Dict, List, Union + +import bpy +import addon_utils +from ayon_core.lib import Logger + +from . import pipeline + +log = Logger.get_logger(__name__) + + +def load_scripts(paths): + """Copy of `load_scripts` from Blender's implementation. + + It is possible that this function will be changed in future and usage will + be based on Blender version. + """ + import bpy_types + + loaded_modules = set() + + previous_classes = [ + cls + for cls in bpy.types.bpy_struct.__subclasses__() + ] + + def register_module_call(mod): + register = getattr(mod, "register", None) + if register: + try: + register() + except: + traceback.print_exc() + else: + print("\nWarning! 
'%s' has no register function, " + "this is now a requirement for registerable scripts" % + mod.__file__) + + def unregister_module_call(mod): + unregister = getattr(mod, "unregister", None) + if unregister: + try: + unregister() + except: + traceback.print_exc() + + def test_reload(mod): + # reloading this causes internal errors + # because the classes from this module are stored internally + # possibly to refresh internal references too but for now, best not to. + if mod == bpy_types: + return mod + + try: + return importlib.reload(mod) + except: + traceback.print_exc() + + def test_register(mod): + if mod: + register_module_call(mod) + bpy.utils._global_loaded_modules.append(mod.__name__) + + from bpy_restrict_state import RestrictBlend + + with RestrictBlend(): + for base_path in paths: + for path_subdir in bpy.utils._script_module_dirs: + path = os.path.join(base_path, path_subdir) + if not os.path.isdir(path): + continue + + bpy.utils._sys_path_ensure_prepend(path) + + # Only add to 'sys.modules' unless this is 'startup'. + if path_subdir != "startup": + continue + for mod in bpy.utils.modules_from_path(path, loaded_modules): + test_register(mod) + + addons_paths = [] + for base_path in paths: + addons_path = os.path.join(base_path, "addons") + if not os.path.exists(addons_path): + continue + addons_paths.append(addons_path) + addons_module_path = os.path.join(addons_path, "modules") + if os.path.exists(addons_module_path): + bpy.utils._sys_path_ensure_prepend(addons_module_path) + + if addons_paths: + # Fake addons + origin_paths = addon_utils.paths + + def new_paths(): + paths = origin_paths() + addons_paths + return paths + + addon_utils.paths = new_paths + addon_utils.modules_refresh() + + # load template (if set) + if any(bpy.utils.app_template_paths()): + import bl_app_template_utils + bl_app_template_utils.reset(reload_scripts=False) + del bl_app_template_utils + + for cls in bpy.types.bpy_struct.__subclasses__(): + if cls in previous_classes: + continue + if not getattr(cls, "is_registered", False): + continue + for subcls in cls.__subclasses__(): + if not subcls.is_registered: + print( + "Warning, unregistered class: %s(%s)" % + (subcls.__name__, cls.__name__) + ) + + +def append_user_scripts(): + user_scripts = os.environ.get("AYON_BLENDER_USER_SCRIPTS") + if not user_scripts: + return + + try: + load_scripts(user_scripts.split(os.pathsep)) + except Exception: + print("Couldn't load user scripts \"{}\"".format(user_scripts)) + traceback.print_exc() + + +def set_app_templates_path(): + # Blender requires the app templates to be in `BLENDER_USER_SCRIPTS`. + # After running Blender, we set that variable to our custom path, so + # that the user can use their custom app templates. + + # We look among the scripts paths for one of the paths that contains + # the app templates. The path must contain the subfolder + # `startup/bl_app_templates_user`. + paths = os.environ.get("AYON_BLENDER_USER_SCRIPTS").split(os.pathsep) + + app_templates_path = None + for path in paths: + if os.path.isdir( + os.path.join(path, "startup", "bl_app_templates_user")): + app_templates_path = path + break + + if app_templates_path and os.path.isdir(app_templates_path): + os.environ["BLENDER_USER_SCRIPTS"] = app_templates_path + + +def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict): + r"""Write `data` to `node` as userDefined attributes + + Arguments: + node: Long name of node + data: Dictionary of key/value pairs + + Example: + >>> import bpy + >>> def compute(): + ... return 6 + ... 
+        >>> bpy.ops.mesh.primitive_cube_add()
+        >>> cube = bpy.context.view_layer.objects.active
+        >>> imprint(cube, {
+        ...     "regularString": "myFamily",
+        ...     "computedValue": lambda: compute()
+        ... })
+        ...
+        >>> cube['avalon']['computedValue']
+        6
+    """
+
+    imprint_data = dict()
+
+    for key, value in data.items():
+        if value is None:
+            continue
+
+        if callable(value):
+            # Support values evaluated at imprint
+            value = value()
+
+        if not isinstance(value, (int, float, bool, str, list, dict)):
+            raise TypeError(f"Unsupported type: {type(value)}")
+
+        imprint_data[key] = value
+
+    pipeline.metadata_update(node, imprint_data)
+
+
+def lsattr(attr: str,
+           value: Union[str, int, bool, List, Dict, None] = None) -> List:
+    r"""Return nodes matching `attr` and `value`
+
+    Arguments:
+        attr: Name of Blender property
+        value: Value of attribute. If None
+            is provided, return all nodes with this attribute.
+
+    Example:
+        >>> lsattr("id", "myId")
+        ... [bpy.data.objects["myNode"]]
+        >>> lsattr("id")
+        ... [bpy.data.objects["myNode"], bpy.data.objects["myOtherNode"]]
+
+    Returns:
+        list
+    """
+
+    return lsattrs({attr: value})
+
+
+def lsattrs(attrs: Dict) -> List:
+    r"""Return nodes with the given attribute(s).
+
+    Arguments:
+        attrs: Name and value pairs of expected matches
+
+    Example:
+        >>> lsattrs({"age": 5})  # Return nodes with an `age` of 5
+        # Return nodes with both `age` and `color` of 5 and blue
+        >>> lsattrs({"age": 5, "color": "blue"})
+
+    Returns a list.
+
+    """
+
+    # For now return all objects, not filtered by scene/collection/view_layer.
+    matches = set()
+    for coll in dir(bpy.data):
+        if not isinstance(
+                getattr(bpy.data, coll),
+                bpy.types.bpy_prop_collection,
+        ):
+            continue
+        for node in getattr(bpy.data, coll):
+            for attr, value in attrs.items():
+                avalon_prop = node.get(pipeline.AVALON_PROPERTY)
+                if not avalon_prop:
+                    continue
+                if (avalon_prop.get(attr)
+                        and (value is None or avalon_prop.get(attr) == value)):
+                    matches.add(node)
+    return list(matches)
+
+
+def read(node: bpy.types.bpy_struct_meta_idprop):
+    """Return user-defined attributes from `node`"""
+
+    data = dict(node.get(pipeline.AVALON_PROPERTY, {}))
+
+    # Ignore hidden/internal data
+    data = {
+        key: value
+        for key, value in data.items() if not key.startswith("_")
+    }
+
+    return data
+
+
+def get_selected_collections():
+    """
+    Returns a list of the currently selected collections in the outliner.
+
+    Raises:
+        RuntimeError: If the outliner cannot be found in the main Blender
+            window.
+
+    Returns:
+        list: A list of `bpy.types.Collection` objects that are currently
+            selected in the outliner.
+    """
+    window = bpy.context.window or bpy.context.window_manager.windows[0]
+
+    try:
+        area = next(
+            area for area in window.screen.areas
+            if area.type == 'OUTLINER')
+        region = next(
+            region for region in area.regions
+            if region.type == 'WINDOW')
+    except StopIteration as e:
+        raise RuntimeError("Could not find outliner. An outliner space "
+                           "must be in the main Blender window.") from e
+
+    with bpy.context.temp_override(
+        window=window,
+        area=area,
+        region=region,
+        screen=window.screen
+    ):
+        ids = bpy.context.selected_ids
+
+    return [id for id in ids if isinstance(id, bpy.types.Collection)]
+
+
+def get_selection(include_collections: bool = False) -> List[bpy.types.Object]:
+    """
+    Returns a list of selected objects in the current Blender scene.
+
+    Args:
+        include_collections (bool, optional): Whether to include selected
+            collections in the result. Defaults to False.
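+
+    Example (illustrative; object and collection names are hypothetical):
+        >>> get_selection(include_collections=True)
+        [bpy.data.objects["Cube"], bpy.data.collections["Props"]]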
+ + Returns: + List[bpy.types.Object]: A list of selected objects. + """ + selection = [obj for obj in bpy.context.scene.objects if obj.select_get()] + + if include_collections: + selection.extend(get_selected_collections()) + + return selection + + +@contextlib.contextmanager +def maintained_selection(): + r"""Maintain selection during context + + Example: + >>> with maintained_selection(): + ... # Modify selection + ... bpy.ops.object.select_all(action='DESELECT') + >>> # Selection restored + """ + + previous_selection = get_selection() + previous_active = bpy.context.view_layer.objects.active + try: + yield + finally: + # Clear the selection + for node in get_selection(): + node.select_set(state=False) + if previous_selection: + for node in previous_selection: + try: + node.select_set(state=True) + except ReferenceError: + # This could happen if a selected node was deleted during + # the context. + log.exception("Failed to reselect") + continue + try: + bpy.context.view_layer.objects.active = previous_active + except ReferenceError: + # This could happen if the active node was deleted during the + # context. + log.exception("Failed to set active object.") + + +@contextlib.contextmanager +def maintained_time(): + """Maintain current frame during context.""" + current_time = bpy.context.scene.frame_current + try: + yield + finally: + bpy.context.scene.frame_current = current_time diff --git a/openpype/hosts/blender/api/ops.py b/client/ayon_core/hosts/blender/api/ops.py similarity index 91% rename from openpype/hosts/blender/api/ops.py rename to client/ayon_core/hosts/blender/api/ops.py index f4d96e563a..dcbc44bcad 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/client/ayon_core/hosts/blender/api/ops.py @@ -15,10 +15,9 @@ import bpy import bpy.utils.previews -from openpype import style -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import get_current_asset_name, get_current_task_name -from openpype.tools.utils import host_tools +from ayon_core import style +from ayon_core.pipeline import get_current_asset_name, get_current_task_name +from ayon_core.tools.utils import host_tools from .workio import OpenFileCacher from . import pipeline @@ -312,14 +311,6 @@ class LaunchLoader(LaunchQtApp): bl_label = "Load..." _tool_name = "loader" - def before_window_show(self): - if AYON_SERVER_ENABLED: - return - self._window.set_context( - {"asset": get_current_asset_name()}, - refresh=True - ) - class LaunchPublisher(LaunchQtApp): """Launch Avalon Publisher.""" @@ -339,11 +330,6 @@ class LaunchManager(LaunchQtApp): bl_label = "Manage..." _tool_name = "sceneinventory" - def before_window_show(self): - if AYON_SERVER_ENABLED: - return - self._window.refresh() - class LaunchLibrary(LaunchQtApp): """Launch Library Loader.""" @@ -352,11 +338,6 @@ class LaunchLibrary(LaunchQtApp): bl_label = "Library..." 
_tool_name = "libraryloader" - def before_window_show(self): - if AYON_SERVER_ENABLED: - return - self._window.refresh() - class LaunchWorkFiles(LaunchQtApp): """Launch Avalon Work Files.""" @@ -366,22 +347,7 @@ class LaunchWorkFiles(LaunchQtApp): _tool_name = "workfiles" def execute(self, context): - result = super().execute(context) - if not AYON_SERVER_ENABLED: - self._window.set_context({ - "asset": get_current_asset_name(), - "task": get_current_task_name() - }) - return result - - def before_window_show(self): - if AYON_SERVER_ENABLED: - return - self._window.root = str(Path( - os.environ.get("AVALON_WORKDIR", ""), - os.environ.get("AVALON_SCENEDIR", ""), - )) - self._window.refresh() + return super().execute(context) class SetFrameRange(bpy.types.Operator): @@ -408,7 +374,7 @@ class TOPBAR_MT_avalon(bpy.types.Menu): """Avalon menu.""" bl_idname = "TOPBAR_MT_avalon" - bl_label = os.environ.get("AVALON_LABEL") + bl_label = os.environ.get("AYON_MENU_LABEL") def draw(self, context): """Draw the menu in the UI.""" diff --git a/client/ayon_core/hosts/blender/api/pipeline.py b/client/ayon_core/hosts/blender/api/pipeline.py new file mode 100644 index 0000000000..6801b1f71b --- /dev/null +++ b/client/ayon_core/hosts/blender/api/pipeline.py @@ -0,0 +1,574 @@ +import os +import sys +import traceback +from typing import Callable, Dict, Iterator, List, Optional + +import bpy + +from . import lib +from . import ops + +import pyblish.api + +from ayon_core.host import ( + HostBase, + IWorkfileHost, + IPublishHost, + ILoadHost +) +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import ( + schema, + legacy_io, + get_current_project_name, + get_current_asset_name, + register_loader_plugin_path, + register_creator_plugin_path, + deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, +) +from ayon_core.lib import ( + Logger, + register_event_callback, + emit_event +) +import ayon_core.hosts.blender +from ayon_core.settings import get_project_settings +from .workio import ( + open_file, + save_file, + current_file, + has_unsaved_changes, + file_extensions, + work_root, +) + + +HOST_DIR = os.path.dirname(os.path.abspath(ayon_core.hosts.blender.__file__)) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") + +ORIGINAL_EXCEPTHOOK = sys.excepthook + +AVALON_INSTANCES = "AVALON_INSTANCES" +AVALON_CONTAINERS = "AVALON_CONTAINERS" +AVALON_PROPERTY = 'avalon' +IS_HEADLESS = bpy.app.background + +log = Logger.get_logger(__name__) + + +class BlenderHost(HostBase, IWorkfileHost, IPublishHost, ILoadHost): + name = "blender" + + def install(self): + """Override install method from HostBase. + Install Blender host functionality.""" + install() + + def get_containers(self) -> Iterator: + """List containers from active Blender scene.""" + return ls() + + def get_workfile_extensions(self) -> List[str]: + """Override get_workfile_extensions method from IWorkfileHost. + Get workfile possible extensions. + + Returns: + List[str]: Workfile extensions. + """ + return file_extensions() + + def save_workfile(self, dst_path: str = None): + """Override save_workfile method from IWorkfileHost. + Save currently opened workfile. + + Args: + dst_path (str): Where the current scene should be saved. Or use + current path if `None` is passed. 
+        """
+        save_file(dst_path if dst_path else bpy.data.filepath)
+
+    def open_workfile(self, filepath: str):
+        """Override open_workfile method from IWorkfileHost.
+        Open workfile at specified filepath in the host.
+
+        Args:
+            filepath (str): Path to workfile.
+        """
+        open_file(filepath)
+
+    def get_current_workfile(self) -> str:
+        """Override get_current_workfile method from IWorkfileHost.
+        Retrieve currently opened workfile path.
+
+        Returns:
+            str: Path to currently opened workfile.
+        """
+        return current_file()
+
+    def workfile_has_unsaved_changes(self) -> bool:
+        """Override workfile_has_unsaved_changes method from IWorkfileHost.
+        Return True if the opened workfile has unsaved changes.
+
+        Returns:
+            bool: True if the scene has unsaved modifications, False if
+                it is saved.
+        """
+        return has_unsaved_changes()
+
+    def work_root(self, session) -> str:
+        """Override work_root method from IWorkfileHost.
+        Modify workdir per host.
+
+        Args:
+            session (dict): Session context data.
+
+        Returns:
+            str: Path to new workdir.
+        """
+        return work_root(session)
+
+    def get_context_data(self) -> dict:
+        """Override abstract method from IPublishHost.
+        Get global data related to creation-publishing from workfile.
+
+        Returns:
+            dict: Context data stored using 'update_context_data'.
+        """
+        property = bpy.context.scene.get(AVALON_PROPERTY)
+        if property:
+            return property.to_dict()
+        return {}
+
+    def update_context_data(self, data: dict, changes: dict):
+        """Override abstract method from IPublishHost.
+        Store global context data to workfile.
+
+        Args:
+            data (dict): New data as they are.
+            changes (dict): Only data that has been changed. Each value has
+                tuple with '(<old>, <new>)' value.
+        """
+        bpy.context.scene[AVALON_PROPERTY] = data
+
+
+def pype_excepthook_handler(*args):
+    traceback.print_exception(*args)
+
+
+def install():
+    """Install Blender configuration for Avalon."""
+    sys.excepthook = pype_excepthook_handler
+
+    pyblish.api.register_host("blender")
+    pyblish.api.register_plugin_path(str(PUBLISH_PATH))
+
+    register_loader_plugin_path(str(LOAD_PATH))
+    register_creator_plugin_path(str(CREATE_PATH))
+
+    lib.append_user_scripts()
+    lib.set_app_templates_path()
+
+    register_event_callback("new", on_new)
+    register_event_callback("open", on_open)
+
+    _register_callbacks()
+    _register_events()
+
+    if not IS_HEADLESS:
+        ops.register()
+
+
+def uninstall():
+    """Uninstall Blender configuration for Avalon."""
+    sys.excepthook = ORIGINAL_EXCEPTHOOK
+
+    pyblish.api.deregister_host("blender")
+    pyblish.api.deregister_plugin_path(str(PUBLISH_PATH))
+
+    deregister_loader_plugin_path(str(LOAD_PATH))
+    deregister_creator_plugin_path(str(CREATE_PATH))
+
+    if not IS_HEADLESS:
+        ops.unregister()
+
+
+def show_message(title, message):
+    from ayon_core.tools.utils import show_message_dialog
+    from .ops import BlenderApplication
+
+    BlenderApplication.get_app()
+
+    show_message_dialog(
+        title=title,
+        message=message,
+        level="warning")
+
+
+def message_window(title, message):
+    from .ops import (
+        MainThreadItem,
+        execute_in_main_thread,
+        _process_app_events
+    )
+
+    mti = MainThreadItem(show_message, title, message)
+    execute_in_main_thread(mti)
+    _process_app_events()
+
+
+def get_asset_data():
+    project_name = get_current_project_name()
+    asset_name = get_current_asset_name()
+    asset_doc = get_asset_by_name(project_name, asset_name)
+
+    return asset_doc.get("data")
+
+
+def set_frame_range(data):
+    scene = bpy.context.scene
+
+    # Default scene settings
+    frameStart = scene.frame_start
+    frameEnd = scene.frame_end
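+    # Blender stores fractional frame rates as fps / fps_base; e.g. an
+    # effective 23.976 fps is fps=24 with fps_base=24/23.976 (~1.001).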
+    fps = scene.render.fps / scene.render.fps_base
+
+    if not data:
+        return
+
+    if data.get("frameStart"):
+        frameStart = data.get("frameStart")
+    if data.get("frameEnd"):
+        frameEnd = data.get("frameEnd")
+    if data.get("fps"):
+        fps = data.get("fps")
+
+    scene.frame_start = frameStart
+    scene.frame_end = frameEnd
+    scene.render.fps = round(fps)
+    scene.render.fps_base = round(fps) / fps
+
+
+def set_resolution(data):
+    scene = bpy.context.scene
+
+    # Default scene settings
+    resolution_x = scene.render.resolution_x
+    resolution_y = scene.render.resolution_y
+
+    if not data:
+        return
+
+    if data.get("resolutionWidth"):
+        resolution_x = data.get("resolutionWidth")
+    if data.get("resolutionHeight"):
+        resolution_y = data.get("resolutionHeight")
+
+    scene.render.resolution_x = resolution_x
+    scene.render.resolution_y = resolution_y
+
+
+def on_new():
+    project = os.environ.get("AVALON_PROJECT")
+    settings = get_project_settings(project).get("blender")
+
+    set_resolution_startup = settings.get("set_resolution_startup")
+    set_frames_startup = settings.get("set_frames_startup")
+
+    data = get_asset_data()
+
+    if set_resolution_startup:
+        set_resolution(data)
+    if set_frames_startup:
+        set_frame_range(data)
+
+    unit_scale_settings = settings.get("unit_scale_settings")
+    unit_scale_enabled = unit_scale_settings.get("enabled")
+    if unit_scale_enabled:
+        unit_scale = unit_scale_settings.get("base_file_unit_scale")
+        bpy.context.scene.unit_settings.scale_length = unit_scale
+
+
+def on_open():
+    project = os.environ.get("AVALON_PROJECT")
+    settings = get_project_settings(project).get("blender")
+
+    set_resolution_startup = settings.get("set_resolution_startup")
+    set_frames_startup = settings.get("set_frames_startup")
+
+    data = get_asset_data()
+
+    if set_resolution_startup:
+        set_resolution(data)
+    if set_frames_startup:
+        set_frame_range(data)
+
+    unit_scale_settings = settings.get("unit_scale_settings")
+    unit_scale_enabled = unit_scale_settings.get("enabled")
+    apply_on_opening = unit_scale_settings.get("apply_on_opening")
+    if unit_scale_enabled and apply_on_opening:
+        unit_scale = unit_scale_settings.get("base_file_unit_scale")
+        prev_unit_scale = bpy.context.scene.unit_settings.scale_length
+
+        if unit_scale != prev_unit_scale:
+            bpy.context.scene.unit_settings.scale_length = unit_scale
+
+            message_window(
+                "Base file unit scale changed",
+                "Base file unit scale changed to match the project settings.")
+
+
+@bpy.app.handlers.persistent
+def _on_save_pre(*args):
+    emit_event("before.save")
+
+
+@bpy.app.handlers.persistent
+def _on_save_post(*args):
+    emit_event("save")
+
+
+@bpy.app.handlers.persistent
+def _on_load_post(*args):
+    # Detect new file or opening an existing file
+    if bpy.data.filepath:
+        # Likely this was an open operation since it has a filepath
+        emit_event("open")
+    else:
+        emit_event("new")
+
+    ops.OpenFileCacher.post_load()
+
+
+def _register_callbacks():
+    """Register callbacks for certain events."""
+    def _remove_handler(handlers: List, callback: Callable):
+        """Remove the callback from the given handler list."""
+
+        try:
+            handlers.remove(callback)
+        except ValueError:
+            pass
+
+    # TODO (jasper): implement on_init callback?
+
+    # Be sure to remove existing ones first.
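+    # (otherwise re-running install() would append duplicate handlers)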
+    _remove_handler(bpy.app.handlers.save_pre, _on_save_pre)
+    _remove_handler(bpy.app.handlers.save_post, _on_save_post)
+    _remove_handler(bpy.app.handlers.load_post, _on_load_post)
+
+    bpy.app.handlers.save_pre.append(_on_save_pre)
+    bpy.app.handlers.save_post.append(_on_save_post)
+    bpy.app.handlers.load_post.append(_on_load_post)
+
+    log.info("Installed event handler _on_save_pre...")
+    log.info("Installed event handler _on_save_post...")
+    log.info("Installed event handler _on_load_post...")
+
+
+def _on_task_changed():
+    """Callback for when the task in the context is changed."""
+
+    # TODO (jasper): Blender has no concept of projects or workspace.
+    # It would be nice to override 'bpy.ops.wm.open_mainfile' so it takes the
+    # workdir as starting directory. But I don't know if that is possible.
+    # Another option would be to create a custom 'File Selector' and add the
+    # `directory` attribute, so it opens in that directory (does it?).
+    # https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector
+    # https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add
+    workdir = legacy_io.Session["AVALON_WORKDIR"]
+    log.debug("New working directory: %s", workdir)
+
+
+def _register_events():
+    """Install callbacks for specific events."""
+
+    register_event_callback("taskChanged", _on_task_changed)
+    log.info("Installed event callback for 'taskChanged'...")
+
+
+def _discover_gui() -> Optional[Callable]:
+    """Return the most desirable of the currently registered GUIs"""
+
+    # Prefer last registered
+    guis = reversed(pyblish.api.registered_guis())
+
+    for gui in guis:
+        try:
+            gui = __import__(gui).show
+        except (ImportError, AttributeError):
+            continue
+        else:
+            return gui
+
+    return None
+
+
+def add_to_avalon_container(container: bpy.types.Collection):
+    """Add the container to the Avalon container."""
+
+    avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+    if not avalon_container:
+        avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+
+        # Link the container to the scene so it's easily visible to the
+        # artist and can be managed easily. Otherwise it's only found in
+        # "Blender File" view and it will be removed by Blender's garbage
+        # collection, unless you set a 'fake user'.
+        bpy.context.scene.collection.children.link(avalon_container)
+
+    avalon_container.children.link(container)
+
+    # Disable Avalon containers for the view layers.
+    for view_layer in bpy.context.scene.view_layers:
+        for child in view_layer.layer_collection.children:
+            if child.collection == avalon_container:
+                child.exclude = True
+
+
+def metadata_update(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
+    """Imprint the node with metadata.
+
+    Existing metadata will be updated.
+    """
+
+    if not node.get(AVALON_PROPERTY):
+        node[AVALON_PROPERTY] = dict()
+    for key, value in data.items():
+        if value is None:
+            continue
+        node[AVALON_PROPERTY][key] = value
+
+
+def containerise(name: str,
+                 namespace: str,
+                 nodes: List,
+                 context: Dict,
+                 loader: Optional[str] = None,
+                 suffix: Optional[str] = "CON") -> bpy.types.Collection:
+    """Bundle `nodes` into an assembly and imprint it with metadata
+
+    Containerisation enables tracking of version, author and origin
+    for loaded assets.
+
+    Arguments:
+        name: Name of resulting assembly
+        namespace: Namespace under which to host container
+        nodes: Long names of nodes to containerise
+        context: Asset information
+        loader: Name of loader used to produce this container.
+ suffix: Suffix of container, defaults to `_CON`. + + Returns: + The container assembly + + """ + + node_name = f"{context['asset']['name']}_{name}" + if namespace: + node_name = f"{namespace}:{node_name}" + if suffix: + node_name = f"{node_name}_{suffix}" + container = bpy.data.collections.new(name=node_name) + # Link the children nodes + for obj in nodes: + container.objects.link(obj) + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + } + + metadata_update(container, data) + add_to_avalon_container(container) + + return container + + +def containerise_existing( + container: bpy.types.Collection, + name: str, + namespace: str, + context: Dict, + loader: Optional[str] = None, + suffix: Optional[str] = "CON") -> bpy.types.Collection: + """Imprint or update container with metadata. + + Arguments: + name: Name of resulting assembly + namespace: Namespace under which to host container + context: Asset information + loader: Name of loader used to produce this container. + suffix: Suffix of container, defaults to `_CON`. + + Returns: + The container assembly + """ + + node_name = container.name + if suffix: + node_name = f"{node_name}_{suffix}" + container.name = node_name + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + } + + metadata_update(container, data) + add_to_avalon_container(container) + + return container + + +def parse_container(container: bpy.types.Collection, + validate: bool = True) -> Dict: + """Return the container node's full container data. + + Args: + container: A container node name. + validate: turn the validation for the container on or off + + Returns: + The container schema data for this container node. + + """ + + data = lib.read(container) + + # Append transient data + data["objectName"] = container.name + + if validate: + schema.validate(data) + + return data + + +def ls() -> Iterator: + """List containers from active Blender scene. + + This is the host-equivalent of api.ls(), but instead of listing assets on + disk, it lists assets already loaded in Blender; once loaded they are + called containers. 
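+
+    Example (illustrative):
+        >>> for container in ls():
+        ...     print(container["name"], container["representation"])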
+ """ + + for container in lib.lsattr("id", AVALON_CONTAINER_ID): + yield parse_container(container) + + +def publish(): + """Shorthand to publish from within host.""" + + return pyblish.util.publish() diff --git a/client/ayon_core/hosts/blender/api/plugin.py b/client/ayon_core/hosts/blender/api/plugin.py new file mode 100644 index 0000000000..2cd8d1f291 --- /dev/null +++ b/client/ayon_core/hosts/blender/api/plugin.py @@ -0,0 +1,522 @@ +"""Shared functionality for pipeline plugins for Blender.""" + +import itertools +from pathlib import Path +from typing import Dict, List, Optional + +import bpy + +from ayon_core.pipeline import ( + Creator, + CreatedInstance, + LoaderPlugin, +) +from ayon_core.lib import BoolDef + +from .pipeline import ( + AVALON_CONTAINERS, + AVALON_INSTANCES, + AVALON_PROPERTY, +) +from .ops import ( + MainThreadItem, + execute_in_main_thread +) +from .lib import imprint + +VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx"] + + +def prepare_scene_name( + asset: str, subset: str, namespace: Optional[str] = None +) -> str: + """Return a consistent name for an asset.""" + name = f"{asset}" + if namespace: + name = f"{name}_{namespace}" + name = f"{name}_{subset}" + + # Blender name for a collection or object cannot be longer than 63 + # characters. If the name is longer, it will raise an error. + if len(name) > 63: + raise ValueError(f"Scene name '{name}' would be too long.") + + return name + + +def get_unique_number( + asset: str, subset: str +) -> str: + """Return a unique number based on the asset name.""" + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + return "01" + # Check the names of both object and collection containers + obj_asset_groups = avalon_container.objects + obj_group_names = { + c.name for c in obj_asset_groups + if c.type == 'EMPTY' and c.get(AVALON_PROPERTY)} + coll_asset_groups = avalon_container.children + coll_group_names = { + c.name for c in coll_asset_groups + if c.get(AVALON_PROPERTY)} + container_names = obj_group_names.union(coll_group_names) + count = 1 + name = f"{asset}_{count:0>2}_{subset}" + while name in container_names: + count += 1 + name = f"{asset}_{count:0>2}_{subset}" + return f"{count:0>2}" + + +def prepare_data(data, container_name=None): + name = data.name + local_data = data.make_local() + if container_name: + local_data.name = f"{container_name}:{name}" + else: + local_data.name = f"{name}" + return local_data + + +def create_blender_context(active: Optional[bpy.types.Object] = None, + selected: Optional[bpy.types.Object] = None, + window: Optional[bpy.types.Window] = None): + """Create a new Blender context. If an object is passed as + parameter, it is set as selected and active. 
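+
+    Example (illustrative; uses the pre-3.2 positional context-override
+    style):
+        >>> override = create_blender_context(active=obj, selected=[obj])
+        >>> bpy.ops.object.delete(override)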
+    """
+
+    if not isinstance(selected, list):
+        selected = [selected]
+
+    override_context = bpy.context.copy()
+
+    windows = [window] if window else bpy.context.window_manager.windows
+
+    for win in windows:
+        for area in win.screen.areas:
+            if area.type == 'VIEW_3D':
+                for region in area.regions:
+                    if region.type == 'WINDOW':
+                        override_context['window'] = win
+                        override_context['screen'] = win.screen
+                        override_context['area'] = area
+                        override_context['region'] = region
+                        override_context['scene'] = bpy.context.scene
+                        override_context['active_object'] = active
+                        override_context['selected_objects'] = selected
+                        return override_context
+    raise Exception("Could not create a custom Blender context.")
+
+
+def get_parent_collection(collection):
+    """Get the parent of the input collection"""
+    check_list = [bpy.context.scene.collection]
+
+    for c in check_list:
+        if collection.name in c.children.keys():
+            return c
+        check_list.extend(c.children)
+
+    return None
+
+
+def get_local_collection_with_name(name):
+    for collection in bpy.data.collections:
+        if collection.name == name and collection.library is None:
+            return collection
+    return None
+
+
+def deselect_all():
+    """Deselect all objects in the scene.
+
+    Blender gives a context error when trying to deselect an object that
+    isn't in object mode.
+    """
+    modes = []
+    active = bpy.context.view_layer.objects.active
+
+    for obj in bpy.data.objects:
+        if obj.mode != 'OBJECT':
+            modes.append((obj, obj.mode))
+            bpy.context.view_layer.objects.active = obj
+            bpy.ops.object.mode_set(mode='OBJECT')
+
+    bpy.ops.object.select_all(action='DESELECT')
+
+    for p in modes:
+        bpy.context.view_layer.objects.active = p[0]
+        bpy.ops.object.mode_set(mode=p[1])
+
+    bpy.context.view_layer.objects.active = active
+
+
+class BaseCreator(Creator):
+    """Base class for Blender Creator plug-ins."""
+    defaults = ['Main']
+
+    create_as_asset_group = False
+
+    @staticmethod
+    def cache_subsets(shared_data):
+        """Cache instances for Creators shared data.
+
+        Create `blender_cached_subsets` key when needed in shared data and
+        fill it with all collected instances from the scene under their
+        respective creator identifiers.
+
+        If legacy instances are detected in the scene, create a
+        `blender_cached_legacy_subsets` key and fill it with all legacy
+        instances keyed by their family.
+
+        Args:
+            shared_data(Dict[str, Any]): Shared data.
+
+        Return:
+            Dict[str, Any]: Shared data with cached subsets.
+        """
+        if not shared_data.get('blender_cached_subsets'):
+            cache = {}
+            cache_legacy = {}
+
+            avalon_instances = bpy.data.collections.get(AVALON_INSTANCES)
+            avalon_instance_objs = (
+                avalon_instances.objects if avalon_instances else []
+            )
+
+            for obj_or_col in itertools.chain(
+                avalon_instance_objs,
+                bpy.data.collections
+            ):
+                avalon_prop = obj_or_col.get(AVALON_PROPERTY, {})
+                if not avalon_prop:
+                    continue
+
+                if avalon_prop.get('id') != 'pyblish.avalon.instance':
+                    continue
+
+                creator_id = avalon_prop.get('creator_identifier')
+                if creator_id:
+                    # Creator instance
+                    cache.setdefault(creator_id, []).append(obj_or_col)
+                else:
+                    family = avalon_prop.get('family')
+                    if family:
+                        # Legacy creator instance
+                        cache_legacy.setdefault(family, []).append(obj_or_col)
+
+            shared_data["blender_cached_subsets"] = cache
+            shared_data["blender_cached_legacy_subsets"] = cache_legacy
+
+        return shared_data
+
+    def create(
+        self, subset_name: str, instance_data: dict, pre_create_data: dict
+    ):
+        """Override abstract method from Creator.
+        Create new instance and store it.
+ + Args: + subset_name(str): Subset name of created instance. + instance_data(dict): Instance base data. + pre_create_data(dict): Data based on pre creation attributes. + Those may affect how creator works. + """ + # Get Instance Container or create it if it does not exist + instances = bpy.data.collections.get(AVALON_INSTANCES) + if not instances: + instances = bpy.data.collections.new(name=AVALON_INSTANCES) + bpy.context.scene.collection.children.link(instances) + + # Create asset group + asset_name = instance_data["folderPath"].split("/")[-1] + + name = prepare_scene_name(asset_name, subset_name) + if self.create_as_asset_group: + # Create instance as empty + instance_node = bpy.data.objects.new(name=name, object_data=None) + instance_node.empty_display_type = 'SINGLE_ARROW' + instances.objects.link(instance_node) + else: + # Create instance collection + instance_node = bpy.data.collections.new(name=name) + instances.children.link(instance_node) + + self.set_instance_data(subset_name, instance_data) + + instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + instance.transient_data["instance_node"] = instance_node + self._add_instance_to_context(instance) + + imprint(instance_node, instance_data) + + return instance_node + + def collect_instances(self): + """Override abstract method from BaseCreator. + Collect existing instances related to this creator plugin.""" + + # Cache subsets in shared data + self.cache_subsets(self.collection_shared_data) + + # Get cached subsets + cached_subsets = self.collection_shared_data.get( + "blender_cached_subsets" + ) + if not cached_subsets: + return + + # Process only instances that were created by this creator + for instance_node in cached_subsets.get(self.identifier, []): + property = instance_node.get(AVALON_PROPERTY) + # Create instance object from existing data + instance = CreatedInstance.from_existing( + instance_data=property.to_dict(), + creator=self + ) + instance.transient_data["instance_node"] = instance_node + + # Add instance to create context + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + """Override abstract method from BaseCreator. + Store changes of existing instances so they can be recollected. + + Args: + update_list(List[UpdateData]): Changed instances + and their changes, as a list of tuples. + """ + + for created_instance, changes in update_list: + data = created_instance.data_to_store() + node = created_instance.transient_data["instance_node"] + if not node: + # We can't update if we don't know the node + self.log.error( + f"Unable to update instance {created_instance} " + f"without instance node." + ) + return + + # Rename the instance node in the scene if subset or asset changed. + # Do not rename the instance if the family is workfile, as the + # workfile instance is included in the AVALON_CONTAINER collection. 
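+            # (e.g. an instance node named 'sh010_modelMain' would be
+            # renamed to 'sh020_modelMain'; names here are only illustrative)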
+            if (
+                "subset" in changes.changed_keys
+                or "folderPath" in changes.changed_keys
+            ) and created_instance.family != "workfile":
+                asset_name = data["folderPath"].split("/")[-1]
+                name = prepare_scene_name(
+                    asset=asset_name, subset=data["subset"]
+                )
+                node.name = name
+
+            imprint(node, data)
+
+    def remove_instances(self, instances: List[CreatedInstance]):
+
+        for instance in instances:
+            node = instance.transient_data["instance_node"]
+
+            if isinstance(node, bpy.types.Collection):
+                for child in node.children_recursive:
+                    if isinstance(child, bpy.types.Collection):
+                        bpy.data.collections.remove(child)
+                    else:
+                        bpy.data.objects.remove(child)
+
+                bpy.data.collections.remove(node)
+            elif isinstance(node, bpy.types.Object):
+                bpy.data.objects.remove(node)
+
+            self._remove_instance_from_context(instance)
+
+    def set_instance_data(
+        self,
+        subset_name: str,
+        instance_data: dict
+    ):
+        """Fill instance data with required items.
+
+        Args:
+            subset_name(str): Subset name of created instance.
+            instance_data(dict): Instance base data.
+        """
+        if not instance_data:
+            instance_data = {}
+
+        instance_data.update(
+            {
+                "id": "pyblish.avalon.instance",
+                "creator_identifier": self.identifier,
+                "subset": subset_name,
+            }
+        )
+
+    def get_pre_create_attr_defs(self):
+        return [
+            BoolDef("use_selection",
+                    label="Use selection",
+                    default=True)
+        ]
+
+
+class Loader(LoaderPlugin):
+    """Base class for Loader plug-ins."""
+
+    hosts = ["blender"]
+
+
+class AssetLoader(LoaderPlugin):
+    """A basic AssetLoader for Blender
+
+    This will implement the basic logic for linking/appending assets
+    into another Blender scene.
+
+    The `update` method should be implemented by a sub-class, because
+    it's different for different types (e.g. model, rig, animation,
+    etc.).
+    """
+
+    @staticmethod
+    def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]:
+        """Get the 'instance empty' that holds the collection instance."""
+        for node in nodes:
+            if not isinstance(node, bpy.types.Object):
+                continue
+            if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION'
+                    and node.instance_collection and node.name == instance_name):
+                return node
+        return None
+
+    @staticmethod
+    def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]:
+        """Get the 'instance collection' (container) for this asset."""
+        for node in nodes:
+            if not isinstance(node, bpy.types.Collection):
+                continue
+            if node.name == instance_name:
+                return node
+        return None
+
+    @staticmethod
+    def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library:
+        """Find the library file from the container.
+
+        It traverses the objects from this collection, checks that there is
+        only one library from which the objects come, and returns that
+        library.
+
+        Warning:
+            No nested collections are supported at the moment!
+        """
+        assert not container.children, "Nested collections are not supported."
+        assert container.objects, "The collection doesn't contain any objects."
+        libraries = set()
+        for obj in container.objects:
+            assert obj.library, f"'{obj.name}' is not linked."
+            libraries.add(obj.library)
+
+        assert len(
+            libraries) == 1, f"'{container.name}' contains objects from more than 1 library."
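+        # Exactly one library remains here, e.g. the one '.blend' file all
+        # of the container's objects were linked from.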
+ + return list(libraries)[0] + + def process_asset(self, + context: dict, + name: str, + namespace: Optional[str] = None, + options: Optional[Dict] = None): + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") + + def load(self, + context: dict, + name: Optional[str] = None, + namespace: Optional[str] = None, + options: Optional[Dict] = None) -> Optional[bpy.types.Collection]: + """ Run the loader on Blender main thread""" + mti = MainThreadItem(self._load, context, name, namespace, options) + execute_in_main_thread(mti) + + def _load(self, + context: dict, + name: Optional[str] = None, + namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[bpy.types.Collection]: + """Load asset via database + + Arguments: + context: Full parenthood of representation to load + name: Use pre-defined name + namespace: Use pre-defined namespace + options: Additional settings dictionary + """ + # TODO: make it possible to add the asset several times by + # just re-using the collection + filepath = self.filepath_from_context(context) + assert Path(filepath).exists(), f"{filepath} doesn't exist." + + asset = context["asset"]["name"] + subset = context["subset"]["name"] + unique_number = get_unique_number( + asset, subset + ) + namespace = namespace or f"{asset}_{unique_number}" + name = name or prepare_scene_name( + asset, subset, unique_number + ) + + nodes = self.process_asset( + context=context, + name=name, + namespace=namespace, + options=options, + ) + + # Only containerise if anything was loaded by the Loader. + if not nodes: + return None + + # Only containerise if it's not already a collection from a .blend file. + # representation = context["representation"]["name"] + # if representation != "blend": + # from ayon_core.hosts.blender.api.pipeline import containerise + # return containerise( + # name=name, + # namespace=namespace, + # nodes=nodes, + # context=context, + # loader=self.__class__.__name__, + # ) + + # asset = context["asset"]["name"] + # subset = context["subset"]["name"] + # instance_name = prepare_scene_name( + # asset, subset, unique_number + # ) + '_CON' + + # return self._get_instance_collection(instance_name, nodes) + + def exec_update(self, container: Dict, representation: Dict): + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") + + def update(self, container: Dict, representation: Dict): + """ Run the update on Blender main thread""" + mti = MainThreadItem(self.exec_update, container, representation) + execute_in_main_thread(mti) + + def exec_remove(self, container: Dict) -> bool: + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") + + def remove(self, container: Dict) -> bool: + """ Run the remove on Blender main thread""" + mti = MainThreadItem(self.exec_remove, container) + execute_in_main_thread(mti) diff --git a/openpype/hosts/blender/api/render_lib.py b/client/ayon_core/hosts/blender/api/render_lib.py similarity index 98% rename from openpype/hosts/blender/api/render_lib.py rename to client/ayon_core/hosts/blender/api/render_lib.py index b437078ad8..35aa1f12c9 100644 --- a/openpype/hosts/blender/api/render_lib.py +++ b/client/ayon_core/hosts/blender/api/render_lib.py @@ -2,8 +2,8 @@ import bpy -from openpype.settings import get_project_settings -from openpype.pipeline import get_current_project_name +from ayon_core.settings import get_project_settings +from ayon_core.pipeline 
import get_current_project_name
 
 
 def get_default_render_folder(settings):
diff --git a/openpype/hosts/blender/api/workio.py b/client/ayon_core/hosts/blender/api/workio.py
similarity index 100%
rename from openpype/hosts/blender/api/workio.py
rename to client/ayon_core/hosts/blender/api/workio.py
diff --git a/client/ayon_core/hosts/blender/blender_addon/startup/init.py b/client/ayon_core/hosts/blender/blender_addon/startup/init.py
new file mode 100644
index 0000000000..816f30f73f
--- /dev/null
+++ b/client/ayon_core/hosts/blender/blender_addon/startup/init.py
@@ -0,0 +1,10 @@
+from ayon_core.pipeline import install_host
+from ayon_core.hosts.blender.api import BlenderHost
+
+
+def register():
+    install_host(BlenderHost())
+
+
+def unregister():
+    pass
diff --git a/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py b/client/ayon_core/hosts/blender/hooks/pre_add_run_python_script_arg.py
similarity index 96%
rename from openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py
rename to client/ayon_core/hosts/blender/hooks/pre_add_run_python_script_arg.py
index 68c9bfdd57..00b297f998 100644
--- a/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py
+++ b/client/ayon_core/hosts/blender/hooks/pre_add_run_python_script_arg.py
@@ -1,6 +1,6 @@
 from pathlib import Path
 
-from openpype.lib.applications import PreLaunchHook, LaunchTypes
+from ayon_core.lib.applications import PreLaunchHook, LaunchTypes
 
 
 class AddPythonScriptToLaunchArgs(PreLaunchHook):
diff --git a/client/ayon_core/hosts/blender/hooks/pre_pyside_install.py b/client/ayon_core/hosts/blender/hooks/pre_pyside_install.py
new file mode 100644
index 0000000000..c80a1bd669
--- /dev/null
+++ b/client/ayon_core/hosts/blender/hooks/pre_pyside_install.py
@@ -0,0 +1,231 @@
+import os
+import re
+import subprocess
+from platform import system
+from ayon_core.lib.applications import PreLaunchHook, LaunchTypes
+
+
+class InstallPySideToBlender(PreLaunchHook):
+    """Install the Qt binding into Blender's python packages.
+
+    This prelaunch hook does two things:
+    1.) Pushes Blender's python packages to the beginning of PYTHONPATH.
+    2.) Checks if Blender has PySide2 installed, and tries to install it if not.
+
+    The pipeline implementation requires a Qt binding to be installed in
+    Blender's python packages.
+    """
+
+    app_groups = {"blender"}
+    launch_types = {LaunchTypes.local}
+
+    def execute(self):
+        # Prelaunch hook is not crucial
+        try:
+            self.inner_execute()
+        except Exception:
+            self.log.warning(
+                "Processing of {} crashed.".format(self.__class__.__name__),
+                exc_info=True
+            )
+
+    def inner_execute(self):
+        # Get blender's python directory
+        version_regex = re.compile(r"^[2-4]\.[0-9]+$")
+
+        platform = system().lower()
+        executable = self.launch_context.executable.executable_path
+        expected_executable = "blender"
+        if platform == "windows":
+            expected_executable += ".exe"
+
+        if os.path.basename(executable).lower() != expected_executable:
+            self.log.info((
+                f"Executable does not lead to {expected_executable} file. "
+                "Can't determine blender's python to check/install PySide2."
+            ))
+            return
+
+        versions_dir = os.path.dirname(executable)
+        if platform == "darwin":
+            versions_dir = os.path.join(
+                os.path.dirname(versions_dir), "Resources"
+            )
+        version_subfolders = []
+        for dir_entry in os.scandir(versions_dir):
+            if dir_entry.is_dir() and version_regex.match(dir_entry.name):
+                version_subfolders.append(dir_entry.name)
+
+        if not version_subfolders:
+            self.log.info(
+                "Didn't find a version subfolder next to the Blender"
+                " executable."
+            )
+            return
+
+        if len(version_subfolders) > 1:
+            self.log.info((
+                "Found more than one version subfolder next"
+                " to the Blender executable. {}"
+            ).format(", ".join([
+                '"./{}"'.format(name)
+                for name in version_subfolders
+            ])))
+            return
+
+        version_subfolder = version_subfolders[0]
+
+        python_dir = os.path.join(versions_dir, version_subfolder, "python")
+        python_lib = os.path.join(python_dir, "lib")
+        python_version = "python"
+
+        if platform != "windows":
+            for dir_entry in os.scandir(python_lib):
+                if dir_entry.is_dir() and dir_entry.name.startswith("python"):
+                    python_lib = dir_entry.path
+                    python_version = dir_entry.name
+                    break
+
+        # Change PYTHONPATH so that blender's packages come first
+        python_paths = [
+            python_lib,
+            os.path.join(python_lib, "site-packages"),
+        ]
+        python_path = self.launch_context.env.get("PYTHONPATH") or ""
+        for path in python_path.split(os.pathsep):
+            if path:
+                python_paths.append(path)
+
+        self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths)
+
+        # Get blender's python executable
+        python_bin = os.path.join(python_dir, "bin")
+        if platform == "windows":
+            python_executable = os.path.join(python_bin, "python.exe")
+        else:
+            python_executable = os.path.join(python_bin, python_version)
+            # Check for python with enabled 'pymalloc'
+            if not os.path.exists(python_executable):
+                python_executable += "m"
+
+        if not os.path.exists(python_executable):
+            self.log.warning(
+                "Couldn't find python executable for blender. {}".format(
+                    executable
+                )
+            )
+            return
+
+        # Check if PySide2 is installed and skip if yes
+        if self.is_pyside_installed(python_executable):
+            self.log.debug("Blender already has PySide2 installed.")
+            return
+
+        # Install PySide2 in blender's python
+        if platform == "windows":
+            result = self.install_pyside_windows(python_executable)
+        else:
+            result = self.install_pyside(python_executable)
+
+        if result:
+            self.log.info("Successfully installed PySide2 module to blender.")
+        else:
+            self.log.warning("Failed to install PySide2 module to blender.")
+
+    def install_pyside_windows(self, python_executable):
+        """Install PySide2 python module to blender's python.
+
+        Installation requires administration rights, which is why the
+        "pywin32" module is used; it can execute commands and ask for
+        administration rights.
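+
+        The elevated process runs ``python -m pip install --ignore-installed
+        PySide2`` and waits for it to finish so the exit code can be
+        reported.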
+        """
+        try:
+            import win32api
+            import win32con
+            import win32process
+            import win32event
+            import pywintypes
+            from win32comext.shell.shell import ShellExecuteEx
+            from win32comext.shell import shellcon
+        except Exception:
+            self.log.warning("Couldn't import \"pywin32\" modules")
+            return
+
+        try:
+            # Parameters
+            # - run pip as a module ("-m pip") to install PySide2; the
+            #   "--ignore-installed" argument forces the install into
+            #   blender's site-packages and makes sure it is binary
+            #   compatible
+            parameters = "-m pip install --ignore-installed PySide2"
+
+            # Execute the command and ask for administrator's rights
+            process_info = ShellExecuteEx(
+                nShow=win32con.SW_SHOWNORMAL,
+                fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
+                lpVerb="runas",
+                lpFile=python_executable,
+                lpParameters=parameters,
+                lpDirectory=os.path.dirname(python_executable)
+            )
+            process_handle = process_info["hProcess"]
+            win32event.WaitForSingleObject(process_handle, win32event.INFINITE)
+            returncode = win32process.GetExitCodeProcess(process_handle)
+            return returncode == 0
+        except pywintypes.error:
+            pass
+
+    def install_pyside(self, python_executable):
+        """Install PySide2 python module to blender's python."""
+        try:
+            # Parameters
+            # - run pip as a module ("-m pip") to install PySide2; the
+            #   "--ignore-installed" argument forces the install into
+            #   blender's site-packages and makes sure it is binary
+            #   compatible
+            args = [
+                python_executable,
+                "-m",
+                "pip",
+                "install",
+                "--ignore-installed",
+                "PySide2",
+            ]
+            process = subprocess.Popen(
+                args, stdout=subprocess.PIPE, universal_newlines=True
+            )
+            process.communicate()
+            return process.returncode == 0
+        except PermissionError:
+            self.log.warning(
+                "Permission denied with command: "
+                "\"{}\".".format(" ".join(args))
+            )
+        except OSError as error:
+            self.log.warning(f"OS error occurred: \"{error}\".")
+        except subprocess.SubprocessError:
+            pass
+
+    def is_pyside_installed(self, python_executable):
+        """Check if the PySide2 module is in blender's pip list.
+
+        Check that PySide2 is installed directly in blender's site-packages.
+        It is possible that it is installed in the user's site-packages, but
+        that may be incompatible with blender's python.
+        """
+        # Get pip list from blender's python executable
+        args = [python_executable, "-m", "pip", "list"]
+        process = subprocess.Popen(args, stdout=subprocess.PIPE)
+        stdout, _ = process.communicate()
+        lines = stdout.decode().split(os.linesep)
+        # The second line contains dashes that define the maximum length of
+        # the module name; the second column of dashes defines the maximum
+        # length of the module version.
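+        # Expected layout of the output (based on pip's default tabular
+        # format), for illustration:
+        #
+        #     Package    Version
+        #     ---------- -------
+        #     PySide2    5.15.2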
+        package_dashes, *_ = lines[1].split(" ")
+        package_len = len(package_dashes)
+
+        # Go through the printed lines, starting at the third line
+        for idx in range(2, len(lines)):
+            line = lines[idx]
+            if not line:
+                continue
+            package_name = line[0:package_len].strip()
+            if package_name.lower() == "pyside2":
+                return True
+        return False
diff --git a/openpype/hosts/blender/hooks/pre_windows_console.py b/client/ayon_core/hosts/blender/hooks/pre_windows_console.py
similarity index 94%
rename from openpype/hosts/blender/hooks/pre_windows_console.py
rename to client/ayon_core/hosts/blender/hooks/pre_windows_console.py
index 2161b7a2f5..e3a8593cd9 100644
--- a/openpype/hosts/blender/hooks/pre_windows_console.py
+++ b/client/ayon_core/hosts/blender/hooks/pre_windows_console.py
@@ -1,5 +1,5 @@
 import subprocess
-from openpype.lib.applications import PreLaunchHook, LaunchTypes
+from ayon_core.lib.applications import PreLaunchHook, LaunchTypes
 
 
 class BlenderConsoleWindows(PreLaunchHook):
diff --git a/client/ayon_core/hosts/blender/plugins/create/convert_legacy.py b/client/ayon_core/hosts/blender/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..fcd4a7a26e
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/create/convert_legacy.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+"""Converter for legacy Blender subsets."""
+from ayon_core.pipeline.create.creator_plugins import SubsetConvertorPlugin
+from ayon_core.hosts.blender.api.lib import imprint
+
+
+class BlenderLegacyConvertor(SubsetConvertorPlugin):
+    """Find and convert any legacy subsets in the scene.
+
+    This converter finds all legacy subsets in the scene and transforms
+    them to the current system. Since the old subsets don't retain any
+    information about their original creators, the only mapping we can do
+    is based on their families.
+
+    Its limitation is that multiple creators can create subsets of the
+    same family, and there is no way to tell them apart. This code should
+    nevertheless cover all creators that came with OpenPype.
+
+    """
+    identifier = "io.openpype.creators.blender.legacy"
+    family_to_id = {
+        "action": "io.openpype.creators.blender.action",
+        "camera": "io.openpype.creators.blender.camera",
+        "animation": "io.openpype.creators.blender.animation",
+        "blendScene": "io.openpype.creators.blender.blendscene",
+        "layout": "io.openpype.creators.blender.layout",
+        "model": "io.openpype.creators.blender.model",
+        "pointcache": "io.openpype.creators.blender.pointcache",
+        "render": "io.openpype.creators.blender.render",
+        "review": "io.openpype.creators.blender.review",
+        "rig": "io.openpype.creators.blender.rig",
+    }
+
+    def __init__(self, *args, **kwargs):
+        super(BlenderLegacyConvertor, self).__init__(*args, **kwargs)
+        self.legacy_subsets = {}
+
+    def find_instances(self):
+        """Find legacy subsets in the scene.
+
+        Legacy subsets are the ones that don't have a `creator_identifier`
+        parameter on them.
+
+        This uses the cached entries created in
+        :py:meth:`~BaseCreator.cache_subsets()`
+
+        """
+        self.legacy_subsets = self.collection_shared_data.get(
+            "blender_cached_legacy_subsets")
+        if not self.legacy_subsets:
+            return
+        self.add_convertor_item(
+            "Found {} incompatible subset{}".format(
+                len(self.legacy_subsets),
+                "s" if len(self.legacy_subsets) > 1 else ""
+            )
+        )
+
+    def convert(self):
+        """Convert all legacy subsets to the current system.
+
+        It is enough to add `creator_identifier` and `instance_node`.
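+        For example, a legacy "model" instance is imprinted with the creator
+        identifier "io.openpype.creators.blender.model".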
+ + """ + if not self.legacy_subsets: + return + + for family, instance_nodes in self.legacy_subsets.items(): + if family in self.family_to_id: + for instance_node in instance_nodes: + creator_identifier = self.family_to_id[family] + self.log.info( + "Converting {} to {}".format(instance_node.name, + creator_identifier) + ) + imprint(instance_node, data={ + "creator_identifier": creator_identifier + }) diff --git a/openpype/hosts/blender/plugins/create/create_action.py b/client/ayon_core/hosts/blender/plugins/create/create_action.py similarity index 95% rename from openpype/hosts/blender/plugins/create/create_action.py rename to client/ayon_core/hosts/blender/plugins/create/create_action.py index caaa72fe8d..2331daf7b7 100644 --- a/openpype/hosts/blender/plugins/create/create_action.py +++ b/client/ayon_core/hosts/blender/plugins/create/create_action.py @@ -2,7 +2,7 @@ import bpy -from openpype.hosts.blender.api import lib, plugin +from ayon_core.hosts.blender.api import lib, plugin class CreateAction(plugin.BaseCreator): diff --git a/client/ayon_core/hosts/blender/plugins/create/create_animation.py b/client/ayon_core/hosts/blender/plugins/create/create_animation.py new file mode 100644 index 0000000000..8671d3bfdb --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/create/create_animation.py @@ -0,0 +1,32 @@ +"""Create an animation asset.""" + +from ayon_core.hosts.blender.api import plugin, lib + + +class CreateAnimation(plugin.BaseCreator): + """Animation output for character rigs.""" + + identifier = "io.openpype.creators.blender.animation" + label = "Animation" + family = "animation" + icon = "male" + + def create( + self, subset_name: str, instance_data: dict, pre_create_data: dict + ): + # Run parent create method + collection = super().create( + subset_name, instance_data, pre_create_data + ) + + if pre_create_data.get("use_selection"): + selected = lib.get_selection() + for obj in selected: + collection.objects.link(obj) + elif pre_create_data.get("asset_group"): + # Use for Load Blend automated creation of animation instances + # upon loading rig files + obj = pre_create_data.get("asset_group") + collection.objects.link(obj) + + return collection diff --git a/openpype/hosts/blender/plugins/create/create_blendScene.py b/client/ayon_core/hosts/blender/plugins/create/create_blendScene.py similarity index 94% rename from openpype/hosts/blender/plugins/create/create_blendScene.py rename to client/ayon_core/hosts/blender/plugins/create/create_blendScene.py index e1026282c0..6afb2ca6a0 100644 --- a/openpype/hosts/blender/plugins/create/create_blendScene.py +++ b/client/ayon_core/hosts/blender/plugins/create/create_blendScene.py @@ -2,7 +2,7 @@ import bpy -from openpype.hosts.blender.api import plugin, lib +from ayon_core.hosts.blender.api import plugin, lib class CreateBlendScene(plugin.BaseCreator): diff --git a/client/ayon_core/hosts/blender/plugins/create/create_camera.py b/client/ayon_core/hosts/blender/plugins/create/create_camera.py new file mode 100644 index 0000000000..c63a294cf9 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/create/create_camera.py @@ -0,0 +1,42 @@ +"""Create a camera asset.""" + +import bpy + +from ayon_core.hosts.blender.api import plugin, lib +from ayon_core.hosts.blender.api.pipeline import AVALON_INSTANCES + + +class CreateCamera(plugin.BaseCreator): + """Polygonal static geometry.""" + + identifier = "io.openpype.creators.blender.camera" + label = "Camera" + family = "camera" + icon = "video-camera" + + create_as_asset_group = True + + 
def create( + self, subset_name: str, instance_data: dict, pre_create_data: dict + ): + + asset_group = super().create(subset_name, + instance_data, + pre_create_data) + + bpy.context.view_layer.objects.active = asset_group + if pre_create_data.get("use_selection"): + for obj in lib.get_selection(): + obj.parent = asset_group + else: + plugin.deselect_all() + camera = bpy.data.cameras.new(subset_name) + camera_obj = bpy.data.objects.new(subset_name, camera) + + instances = bpy.data.collections.get(AVALON_INSTANCES) + instances.objects.link(camera_obj) + + bpy.context.view_layer.objects.active = asset_group + camera_obj.parent = asset_group + + return asset_group diff --git a/client/ayon_core/hosts/blender/plugins/create/create_layout.py b/client/ayon_core/hosts/blender/plugins/create/create_layout.py new file mode 100644 index 0000000000..3da3916aef --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/create/create_layout.py @@ -0,0 +1,32 @@ +"""Create a layout asset.""" + +import bpy + +from ayon_core.hosts.blender.api import plugin, lib + + +class CreateLayout(plugin.BaseCreator): + """Layout output for character rigs.""" + + identifier = "io.openpype.creators.blender.layout" + label = "Layout" + family = "layout" + icon = "cubes" + + create_as_asset_group = True + + def create( + self, subset_name: str, instance_data: dict, pre_create_data: dict + ): + + asset_group = super().create(subset_name, + instance_data, + pre_create_data) + + # Add selected objects to instance + if pre_create_data.get("use_selection"): + bpy.context.view_layer.objects.active = asset_group + for obj in lib.get_selection(): + obj.parent = asset_group + + return asset_group diff --git a/client/ayon_core/hosts/blender/plugins/create/create_model.py b/client/ayon_core/hosts/blender/plugins/create/create_model.py new file mode 100644 index 0000000000..b6d89c8862 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/create/create_model.py @@ -0,0 +1,31 @@ +"""Create a model asset.""" + +import bpy + +from ayon_core.hosts.blender.api import plugin, lib + + +class CreateModel(plugin.BaseCreator): + """Polygonal static geometry.""" + + identifier = "io.openpype.creators.blender.model" + label = "Model" + family = "model" + icon = "cube" + + create_as_asset_group = True + + def create( + self, subset_name: str, instance_data: dict, pre_create_data: dict + ): + asset_group = super().create(subset_name, + instance_data, + pre_create_data) + + # Add selected objects to instance + if pre_create_data.get("use_selection"): + bpy.context.view_layer.objects.active = asset_group + for obj in lib.get_selection(): + obj.parent = asset_group + + return asset_group diff --git a/client/ayon_core/hosts/blender/plugins/create/create_pointcache.py b/client/ayon_core/hosts/blender/plugins/create/create_pointcache.py new file mode 100644 index 0000000000..20ef3fbde4 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/create/create_pointcache.py @@ -0,0 +1,29 @@ +"""Create a pointcache asset.""" + +from ayon_core.hosts.blender.api import plugin, lib + + +class CreatePointcache(plugin.BaseCreator): + """Polygonal static geometry.""" + + identifier = "io.openpype.creators.blender.pointcache" + label = "Point Cache" + family = "pointcache" + icon = "gears" + + def create( + self, subset_name: str, instance_data: dict, pre_create_data: dict + ): + # Run parent create method + collection = super().create( + subset_name, instance_data, pre_create_data + ) + + if pre_create_data.get("use_selection"): + objects = 
lib.get_selection()
+            for obj in objects:
+                collection.objects.link(obj)
+                if obj.type == 'EMPTY':
+                    objects.extend(obj.children)
+
+        return collection
diff --git a/client/ayon_core/hosts/blender/plugins/create/create_render.py b/client/ayon_core/hosts/blender/plugins/create/create_render.py
new file mode 100644
index 0000000000..82337a47f2
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/create/create_render.py
@@ -0,0 +1,42 @@
+"""Create render."""
+import bpy
+
+from ayon_core.hosts.blender.api import plugin
+from ayon_core.hosts.blender.api.render_lib import prepare_rendering
+
+
+class CreateRenderlayer(plugin.BaseCreator):
+    """Render layer output."""
+
+    identifier = "io.openpype.creators.blender.render"
+    label = "Render"
+    family = "render"
+    icon = "eye"
+
+    def create(
+        self, subset_name: str, instance_data: dict, pre_create_data: dict
+    ):
+        try:
+            # Run parent create method
+            collection = super().create(
+                subset_name, instance_data, pre_create_data
+            )
+
+            prepare_rendering(collection)
+        except Exception:
+            # Remove the instance if there was an error
+            bpy.data.collections.remove(collection)
+            raise
+
+        # TODO: this is undesirable, but it's the only way to be sure that
+        # the file is saved before the render starts.
+        # Blender, by design, doesn't set the file as dirty if modifications
+        # happen by script. So, when creating the instance and setting the
+        # render settings, the file is not marked as dirty. This means that
+        # there is a risk of sending a file to Deadline without the right
+        # settings. Even the validator that checks that the file is saved
+        # will detect the file as saved, even if it isn't. The only solution
+        # for now is to force the file to be saved.
+        bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
+
+        return collection
diff --git a/client/ayon_core/hosts/blender/plugins/create/create_review.py b/client/ayon_core/hosts/blender/plugins/create/create_review.py
new file mode 100644
index 0000000000..cf94819b3e
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/create/create_review.py
@@ -0,0 +1,27 @@
+"""Create review."""
+
+from ayon_core.hosts.blender.api import plugin, lib
+
+
+class CreateReview(plugin.BaseCreator):
+    """Review output."""
+
+    identifier = "io.openpype.creators.blender.review"
+    label = "Review"
+    family = "review"
+    icon = "video-camera"
+
+    def create(
+        self, subset_name: str, instance_data: dict, pre_create_data: dict
+    ):
+        # Run parent create method
+        collection = super().create(
+            subset_name, instance_data, pre_create_data
+        )
+
+        if pre_create_data.get("use_selection"):
+            selected = lib.get_selection()
+            for obj in selected:
+                collection.objects.link(obj)
+
+        return collection
diff --git a/client/ayon_core/hosts/blender/plugins/create/create_rig.py b/client/ayon_core/hosts/blender/plugins/create/create_rig.py
new file mode 100644
index 0000000000..07b33fe4ba
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/create/create_rig.py
@@ -0,0 +1,31 @@
+"""Create a rig asset."""
+
+import bpy
+
+from ayon_core.hosts.blender.api import plugin, lib
+
+
+class CreateRig(plugin.BaseCreator):
+    """Artist-friendly rig with controls to direct motion."""
+
+    identifier = "io.openpype.creators.blender.rig"
+    label = "Rig"
+    family = "rig"
+    icon = "wheelchair"
+
+    create_as_asset_group = True
+
+    def create(
+        self, subset_name: str, instance_data: dict, pre_create_data: dict
+    ):
+        asset_group = super().create(subset_name,
+                                     instance_data,
+                                     pre_create_data)
+
+        # Add selected objects to instance
+        
if pre_create_data.get("use_selection"): + bpy.context.view_layer.objects.active = asset_group + for obj in lib.get_selection(): + obj.parent = asset_group + + return asset_group diff --git a/client/ayon_core/hosts/blender/plugins/create/create_workfile.py b/client/ayon_core/hosts/blender/plugins/create/create_workfile.py new file mode 100644 index 0000000000..09947f85d1 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/create/create_workfile.py @@ -0,0 +1,114 @@ +import bpy + +from ayon_core.pipeline import CreatedInstance, AutoCreator +from ayon_core.client import get_asset_by_name +from ayon_core.hosts.blender.api.plugin import BaseCreator +from ayon_core.hosts.blender.api.pipeline import ( + AVALON_PROPERTY, + AVALON_CONTAINERS +) + + +class CreateWorkfile(BaseCreator, AutoCreator): + """Workfile auto-creator. + + The workfile instance stores its data on the `AVALON_CONTAINERS` collection + as custom attributes, because unlike other instances it doesn't have an + instance node of its own. + + """ + identifier = "io.openpype.creators.blender.workfile" + label = "Workfile" + family = "workfile" + icon = "fa5.file" + + def create(self): + """Create workfile instances.""" + workfile_instance = next( + ( + instance for instance in self.create_context.instances + if instance.creator_identifier == self.identifier + ), + None, + ) + + project_name = self.project_name + asset_name = self.create_context.get_current_asset_name() + task_name = self.create_context.get_current_task_name() + host_name = self.create_context.host_name + + existing_asset_name = None + if workfile_instance is not None: + existing_asset_name = workfile_instance.get("folderPath") + + if not workfile_instance: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + task_name, task_name, asset_doc, project_name, host_name + ) + data = { + "folderPath": asset_name, + "task": task_name, + "variant": task_name, + } + data.update( + self.get_dynamic_data( + task_name, + task_name, + asset_doc, + project_name, + host_name, + workfile_instance, + ) + ) + self.log.info("Auto-creating workfile instance...") + workfile_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(workfile_instance) + + elif ( + existing_asset_name != asset_name + or workfile_instance["task"] != task_name + ): + # Update instance context if it's different + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + task_name, task_name, asset_doc, project_name, host_name + ) + + workfile_instance["folderPath"] = asset_name + workfile_instance["task"] = task_name + workfile_instance["subset"] = subset_name + + instance_node = bpy.data.collections.get(AVALON_CONTAINERS) + if not instance_node: + instance_node = bpy.data.collections.new(name=AVALON_CONTAINERS) + workfile_instance.transient_data["instance_node"] = instance_node + + def collect_instances(self): + + instance_node = bpy.data.collections.get(AVALON_CONTAINERS) + if not instance_node: + return + + property = instance_node.get(AVALON_PROPERTY) + if not property: + return + + # Create instance object from existing data + instance = CreatedInstance.from_existing( + instance_data=property.to_dict(), + creator=self + ) + instance.transient_data["instance_node"] = instance_node + + # Add instance to create context + self._add_instance_to_context(instance) + + def remove_instances(self, instances): + for instance in instances: + node = 
instance.transient_data["instance_node"] + del node[AVALON_PROPERTY] + + self._remove_instance_from_context(instance) diff --git a/openpype/hosts/blender/plugins/load/import_workfile.py b/client/ayon_core/hosts/blender/plugins/load/import_workfile.py similarity index 98% rename from openpype/hosts/blender/plugins/load/import_workfile.py rename to client/ayon_core/hosts/blender/plugins/load/import_workfile.py index 331f6a8bdb..061c6108ad 100644 --- a/openpype/hosts/blender/plugins/load/import_workfile.py +++ b/client/ayon_core/hosts/blender/plugins/load/import_workfile.py @@ -1,6 +1,6 @@ import bpy -from openpype.hosts.blender.api import plugin +from ayon_core.hosts.blender.api import plugin def append_workfile(context, fname, do_import): diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/client/ayon_core/hosts/blender/plugins/load/load_abc.py similarity index 98% rename from openpype/hosts/blender/plugins/load/load_abc.py rename to client/ayon_core/hosts/blender/plugins/load/load_abc.py index d7e82d1900..b25f4eb277 100644 --- a/openpype/hosts/blender/plugins/load/load_abc.py +++ b/client/ayon_core/hosts/blender/plugins/load/load_abc.py @@ -6,16 +6,16 @@ import bpy -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AVALON_CONTAINER_ID, ) -from openpype.hosts.blender.api.pipeline import ( +from ayon_core.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, ) -from openpype.hosts.blender.api import plugin, lib +from ayon_core.hosts.blender.api import plugin, lib class CacheModelLoader(plugin.AssetLoader): diff --git a/openpype/hosts/blender/plugins/load/load_action.py b/client/ayon_core/hosts/blender/plugins/load/load_action.py similarity index 97% rename from openpype/hosts/blender/plugins/load/load_action.py rename to client/ayon_core/hosts/blender/plugins/load/load_action.py index f7d32f92a5..5c8ba0df44 100644 --- a/openpype/hosts/blender/plugins/load/load_action.py +++ b/client/ayon_core/hosts/blender/plugins/load/load_action.py @@ -6,14 +6,14 @@ from typing import Dict, List, Optional import bpy -from openpype.pipeline import get_representation_path -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.pipeline import ( +from ayon_core.pipeline import get_representation_path +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.pipeline import ( containerise_existing, AVALON_PROPERTY, ) -logger = logging.getLogger("openpype").getChild("blender").getChild("load_action") +logger = logging.getLogger("ayon").getChild("blender").getChild("load_action") class BlendActionLoader(plugin.AssetLoader): diff --git a/client/ayon_core/hosts/blender/plugins/load/load_animation.py b/client/ayon_core/hosts/blender/plugins/load/load_animation.py new file mode 100644 index 0000000000..b805790c28 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/load/load_animation.py @@ -0,0 +1,70 @@ +"""Load an animation in Blender.""" + +from typing import Dict, List, Optional + +import bpy + +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.pipeline import AVALON_PROPERTY + + +class BlendAnimationLoader(plugin.AssetLoader): + """Load animations from a .blend file. + + Warning: + Loading the same asset more then once is not properly supported at the + moment. 
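+        Only the first object and the first action found in the library
+        file are used.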
+    """
+
+    families = ["animation"]
+    representations = ["blend"]
+
+    label = "Link Animation"
+    icon = "code-fork"
+    color = "orange"
+
+    def process_asset(
+            self, context: dict, name: str, namespace: Optional[str] = None,
+            options: Optional[Dict] = None
+    ) -> Optional[List]:
+        """
+        Arguments:
+            name: Use pre-defined name
+            namespace: Use pre-defined namespace
+            context: Full parenthood of representation to load
+            options: Additional settings dictionary
+        """
+        libpath = self.filepath_from_context(context)
+
+        with bpy.data.libraries.load(
+            libpath, link=True, relative=False
+        ) as (data_from, data_to):
+            data_to.objects = data_from.objects
+            data_to.actions = data_from.actions
+
+        container = data_to.objects[0]
+
+        assert container, "No asset group found"
+
+        target_namespace = container.get(AVALON_PROPERTY).get('namespace')
+
+        action = data_to.actions[0].make_local().copy()
+
+        for obj in bpy.data.objects:
+            if obj.get(AVALON_PROPERTY) and obj.get(AVALON_PROPERTY).get(
+                    'namespace') == target_namespace:
+                if obj.children:
+                    if not obj.children[0].animation_data:
+                        obj.children[0].animation_data_create()
+                    obj.children[0].animation_data.action = action
+                break
+
+        bpy.data.objects.remove(container)
+
+        filename = bpy.path.basename(libpath)
+        # Blender has a limit of 63 characters for any data name.
+        # If the filename is longer, it will be truncated.
+        if len(filename) > 63:
+            filename = filename[:63]
+        library = bpy.data.libraries.get(filename)
+        bpy.data.libraries.remove(library)
diff --git a/client/ayon_core/hosts/blender/plugins/load/load_audio.py b/client/ayon_core/hosts/blender/plugins/load/load_audio.py
new file mode 100644
index 0000000000..007889f6f6
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/load/load_audio.py
@@ -0,0 +1,224 @@
+"""Load audio in Blender."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+import bpy
+
+from ayon_core.pipeline import (
+    get_representation_path,
+    AVALON_CONTAINER_ID,
+)
+from ayon_core.hosts.blender.api import plugin
+from ayon_core.hosts.blender.api.pipeline import (
+    AVALON_CONTAINERS,
+    AVALON_PROPERTY,
+)
+
+
+class AudioLoader(plugin.AssetLoader):
+    """Load audio in Blender."""
+
+    families = ["audio"]
+    representations = ["wav"]
+
+    label = "Load Audio"
+    icon = "volume-up"
+    color = "orange"
+
+    def process_asset(
+        self, context: dict, name: str, namespace: Optional[str] = None,
+        options: Optional[Dict] = None
+    ) -> Optional[List]:
+        """
+        Arguments:
+            name: Use pre-defined name
+            namespace: Use pre-defined namespace
+            context: Full parenthood of representation to load
+            options: Additional settings dictionary
+        """
+        libpath = self.filepath_from_context(context)
+        asset = context["asset"]["name"]
+        subset = context["subset"]["name"]
+
+        asset_name = plugin.prepare_scene_name(asset, subset)
+        unique_number = plugin.get_unique_number(asset, subset)
+        group_name = plugin.prepare_scene_name(asset, subset, unique_number)
+        namespace = namespace or f"{asset}_{unique_number}"
+
+        avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+        if not avalon_container:
+            avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+            bpy.context.scene.collection.children.link(avalon_container)
+
+        asset_group = bpy.data.objects.new(group_name, object_data=None)
+        avalon_container.objects.link(asset_group)
+
+        # Blender needs the Sequence Editor in the current window to be able
+        # to load the audio. We take one of the areas in the window, save its
+        # type, and switch to the Sequence Editor. After loading the audio,
+        # we switch back to the previous area.
+        window_manager = bpy.context.window_manager
+        old_type = window_manager.windows[-1].screen.areas[0].type
+        window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR"
+
+        # We override the context to load the audio in the sequence editor.
+        oc = bpy.context.copy()
+        oc["area"] = window_manager.windows[-1].screen.areas[0]
+
+        with bpy.context.temp_override(**oc):
+            bpy.ops.sequencer.sound_strip_add(filepath=libpath, frame_start=1)
+
+        window_manager.windows[-1].screen.areas[0].type = old_type
+
+        p = Path(libpath)
+        audio = p.name
+
+        asset_group[AVALON_PROPERTY] = {
+            "schema": "openpype:container-2.0",
+            "id": AVALON_CONTAINER_ID,
+            "name": name,
+            "namespace": namespace or '',
+            "loader": str(self.__class__.__name__),
+            "representation": str(context["representation"]["_id"]),
+            "libpath": libpath,
+            "asset_name": asset_name,
+            "parent": str(context["representation"]["parent"]),
+            "family": context["representation"]["context"]["family"],
+            "objectName": group_name,
+            "audio": audio
+        }
+
+        objects = []
+        self[:] = objects
+        return [objects]
+
+    def exec_update(self, container: Dict, representation: Dict):
+        """Update an audio strip in the sequence editor.
+
+        Arguments:
+            container (openpype:container-1.0): Container to update,
+                from `host.ls()`.
+            representation (openpype:representation-1.0): Representation to
+                update, from `host.ls()`.
+        """
+        object_name = container["objectName"]
+        asset_group = bpy.data.objects.get(object_name)
+        libpath = Path(get_representation_path(representation))
+
+        self.log.info(
+            "Container: %s\nRepresentation: %s",
+            pformat(container, indent=2),
+            pformat(representation, indent=2),
+        )
+
+        assert asset_group, (
+            f"The asset is not loaded: {container['objectName']}"
+        )
+        assert libpath, (
+            f"No existing library file found for {container['objectName']}"
+        )
+        assert libpath.is_file(), (
+            f"The file doesn't exist: {libpath}"
+        )
+
+        metadata = asset_group.get(AVALON_PROPERTY)
+        group_libpath = metadata["libpath"]
+
+        normalized_group_libpath = (
+            str(Path(bpy.path.abspath(group_libpath)).resolve())
+        )
+        normalized_libpath = (
+            str(Path(bpy.path.abspath(str(libpath))).resolve())
+        )
+        self.log.debug(
+            "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+            normalized_group_libpath,
+            normalized_libpath,
+        )
+        if normalized_group_libpath == normalized_libpath:
+            self.log.info("Library already loaded, not updating...")
+            return
+
+        old_audio = container["audio"]
+        p = Path(libpath)
+        new_audio = p.name
+
+        # Blender needs the Sequence Editor in the current window to be able
+        # to update the audio. We take one of the areas in the window, save
+        # its type, and switch to the Sequence Editor. After updating the
+        # audio, we switch back to the previous area.
+        window_manager = bpy.context.window_manager
+        old_type = window_manager.windows[-1].screen.areas[0].type
+        window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR"
+
+        # We override the context to update the audio in the sequence editor.
+        oc = bpy.context.copy()
+        oc["area"] = window_manager.windows[-1].screen.areas[0]
+
+        with bpy.context.temp_override(**oc):
+            # We deselect all sequencer strips, and then select the one we
+            # need to remove.
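+            # Removing the strip through the sequencer operator keeps the
+            # sequence editor state consistent; the sound datablock itself
+            # is freed right after the deletion below.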
+ bpy.ops.sequencer.select_all(action='DESELECT') + scene = bpy.context.scene + scene.sequence_editor.sequences_all[old_audio].select = True + + bpy.ops.sequencer.delete() + bpy.data.sounds.remove(bpy.data.sounds[old_audio]) + + bpy.ops.sequencer.sound_strip_add( + filepath=str(libpath), frame_start=1) + + window_manager.windows[-1].screen.areas[0].type = old_type + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + metadata["parent"] = str(representation["parent"]) + metadata["audio"] = new_audio + + def exec_remove(self, container: Dict) -> bool: + """Remove an audio strip from the sequence editor and the container. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + + if not asset_group: + return False + + audio = container["audio"] + + # Blender needs the Sequence Editor in the current window, to be able + # to remove the audio. We take one of the areas in the window, save its + # type, and switch to the Sequence Editor. After removing the audio, + # we switch back to the previous area. + window_manager = bpy.context.window_manager + old_type = window_manager.windows[-1].screen.areas[0].type + window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR" + + # We override the context to load the audio in the sequence editor. + oc = bpy.context.copy() + oc["area"] = window_manager.windows[-1].screen.areas[0] + + with bpy.context.temp_override(**oc): + # We deselect all sequencer strips, and then select the one we + # need to remove. + bpy.ops.sequencer.select_all(action='DESELECT') + scene = bpy.context.scene + scene.sequence_editor.sequences_all[audio].select = True + bpy.ops.sequencer.delete() + + window_manager.windows[-1].screen.areas[0].type = old_type + + bpy.data.sounds.remove(bpy.data.sounds[audio]) + + bpy.data.objects.remove(asset_group) + + return True diff --git a/openpype/hosts/blender/plugins/load/load_blend.py b/client/ayon_core/hosts/blender/plugins/load/load_blend.py similarity index 97% rename from openpype/hosts/blender/plugins/load/load_blend.py rename to client/ayon_core/hosts/blender/plugins/load/load_blend.py index 1a84f5afbb..c9862f9841 100644 --- a/openpype/hosts/blender/plugins/load/load_blend.py +++ b/client/ayon_core/hosts/blender/plugins/load/load_blend.py @@ -3,15 +3,15 @@ import bpy -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AVALON_CONTAINER_ID, registered_host ) -from openpype.pipeline.create import CreateContext -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.lib import imprint -from openpype.hosts.blender.api.pipeline import ( +from ayon_core.pipeline.create import CreateContext +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.lib import imprint +from ayon_core.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, ) diff --git a/openpype/hosts/blender/plugins/load/load_blendscene.py b/client/ayon_core/hosts/blender/plugins/load/load_blendscene.py similarity index 97% rename from openpype/hosts/blender/plugins/load/load_blendscene.py rename to client/ayon_core/hosts/blender/plugins/load/load_blendscene.py index fba0245af1..248bf5a901 100644 --- a/openpype/hosts/blender/plugins/load/load_blendscene.py +++ b/client/ayon_core/hosts/blender/plugins/load/load_blendscene.py @@ -3,13 
+3,13 @@ import bpy -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AVALON_CONTAINER_ID, ) -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.lib import imprint -from openpype.hosts.blender.api.pipeline import ( +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.lib import imprint +from ayon_core.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, ) diff --git a/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py new file mode 100644 index 0000000000..8f0bd6741d --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py @@ -0,0 +1,211 @@ +"""Load an asset in Blender from an Alembic file.""" + +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +import bpy + +from ayon_core.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) +from ayon_core.hosts.blender.api import plugin, lib +from ayon_core.hosts.blender.api.pipeline import ( + AVALON_CONTAINERS, + AVALON_PROPERTY, +) + + +class AbcCameraLoader(plugin.AssetLoader): + """Load a camera from Alembic file. + + Stores the imported asset in an empty named after the asset. + """ + + families = ["camera"] + representations = ["abc"] + + label = "Load Camera (ABC)" + icon = "code-fork" + color = "orange" + + def _remove(self, asset_group): + objects = list(asset_group.children) + + for obj in objects: + if obj.type == "CAMERA": + bpy.data.cameras.remove(obj.data) + elif obj.type == "EMPTY": + objects.extend(obj.children) + bpy.data.objects.remove(obj) + + def _process(self, libpath, asset_group, group_name): + plugin.deselect_all() + + bpy.ops.wm.alembic_import(filepath=libpath) + + objects = lib.get_selection() + + for obj in objects: + obj.parent = asset_group + + for obj in objects: + name = obj.name + obj.name = f"{group_name}:{name}" + if obj.type != "EMPTY": + name_data = obj.data.name + obj.data.name = f"{group_name}:{name_data}" + + if not obj.get(AVALON_PROPERTY): + obj[AVALON_PROPERTY] = dict() + + avalon_info = obj[AVALON_PROPERTY] + avalon_info.update({"container_name": group_name}) + + plugin.deselect_all() + + return objects + + def process_asset( + self, + context: dict, + name: str, + namespace: Optional[str] = None, + options: Optional[Dict] = None, + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + + libpath = self.filepath_from_context(context) + + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + asset_name = plugin.prepare_scene_name(asset, subset) + unique_number = plugin.get_unique_number(asset, subset) + group_name = plugin.prepare_scene_name(asset, subset, unique_number) + namespace = namespace or f"{asset}_{unique_number}" + + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_container) + + asset_group = bpy.data.objects.new(group_name, object_data=None) + avalon_container.objects.link(asset_group) + + self._process(libpath, asset_group, group_name) + + objects = [] + nodes = list(asset_group.children) + + for obj in nodes: + objects.append(obj) + nodes.extend(list(obj.children)) + + 
bpy.context.scene.collection.objects.link(asset_group) + + asset_group[AVALON_PROPERTY] = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or "", + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name, + } + + self[:] = objects + return objects + + def exec_update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! + """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = Path(get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert asset_group, ( + f"The asset is not loaded: {container['objectName']}") + assert libpath, ( + f"No existing library file found for {container['objectName']}") + assert libpath.is_file(), f"The file doesn't exist: {libpath}" + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}") + + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] + + normalized_group_libpath = str( + Path(bpy.path.abspath(group_libpath)).resolve()) + normalized_libpath = str( + Path(bpy.path.abspath(str(libpath))).resolve()) + self.log.debug( + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, + normalized_libpath, + ) + if normalized_group_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + mat = asset_group.matrix_basis.copy() + + self._remove(asset_group) + self._process(str(libpath), asset_group, object_name) + + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! 
+        """
+        object_name = container["objectName"]
+        asset_group = bpy.data.objects.get(object_name)
+
+        if not asset_group:
+            return False
+
+        self._remove(asset_group)
+
+        bpy.data.objects.remove(asset_group)
+
+        return True
diff --git a/client/ayon_core/hosts/blender/plugins/load/load_camera_fbx.py b/client/ayon_core/hosts/blender/plugins/load/load_camera_fbx.py
new file mode 100644
index 0000000000..7642871dc7
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/load/load_camera_fbx.py
@@ -0,0 +1,221 @@
+"""Load a camera asset in Blender from an FBX file."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+import bpy
+
+from ayon_core.pipeline import (
+    get_representation_path,
+    AVALON_CONTAINER_ID,
+)
+from ayon_core.hosts.blender.api import plugin, lib
+from ayon_core.hosts.blender.api.pipeline import (
+    AVALON_CONTAINERS,
+    AVALON_PROPERTY,
+)
+
+
+class FbxCameraLoader(plugin.AssetLoader):
+    """Load a camera from FBX.
+
+    Stores the imported asset in an empty named after the asset.
+    """
+
+    families = ["camera"]
+    representations = ["fbx"]
+
+    label = "Load Camera (FBX)"
+    icon = "code-fork"
+    color = "orange"
+
+    def _remove(self, asset_group):
+        objects = list(asset_group.children)
+
+        for obj in objects:
+            if obj.type == 'CAMERA':
+                bpy.data.cameras.remove(obj.data)
+            elif obj.type == 'EMPTY':
+                objects.extend(obj.children)
+            bpy.data.objects.remove(obj)
+
+    def _process(self, libpath, asset_group, group_name):
+        plugin.deselect_all()
+
+        collection = bpy.context.view_layer.active_layer_collection.collection
+
+        bpy.ops.import_scene.fbx(filepath=libpath)
+
+        parent = bpy.context.scene.collection
+
+        objects = lib.get_selection()
+
+        for obj in objects:
+            obj.parent = asset_group
+
+        for obj in objects:
+            parent.objects.link(obj)
+            collection.objects.unlink(obj)
+
+        for obj in objects:
+            name = obj.name
+            obj.name = f"{group_name}:{name}"
+            if obj.type != 'EMPTY':
+                name_data = obj.data.name
+                obj.data.name = f"{group_name}:{name_data}"
+
+            if not obj.get(AVALON_PROPERTY):
+                obj[AVALON_PROPERTY] = dict()
+
+            avalon_info = obj[AVALON_PROPERTY]
+            avalon_info.update({"container_name": group_name})
+
+        plugin.deselect_all()
+
+        return objects
+
+    def process_asset(
+        self, context: dict, name: str, namespace: Optional[str] = None,
+        options: Optional[Dict] = None
+    ) -> Optional[List]:
+        """
+        Arguments:
+            name: Use pre-defined name
+            namespace: Use pre-defined namespace
+            context: Full parenthood of representation to load
+            options: Additional settings dictionary
+        """
+        libpath = self.filepath_from_context(context)
+        asset = context["asset"]["name"]
+        subset = context["subset"]["name"]
+
+        asset_name = plugin.prepare_scene_name(asset, subset)
+        unique_number = plugin.get_unique_number(asset, subset)
+        group_name = plugin.prepare_scene_name(asset, subset, unique_number)
+        namespace = namespace or f"{asset}_{unique_number}"
+
+        avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+        if not avalon_container:
+            avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+            bpy.context.scene.collection.children.link(avalon_container)
+
+        asset_group = bpy.data.objects.new(group_name, object_data=None)
+        avalon_container.objects.link(asset_group)
+
+        self._process(libpath, asset_group, group_name)
+
+        objects = []
+        nodes = list(asset_group.children)
+
+        for obj in nodes:
+            objects.append(obj)
+            nodes.extend(list(obj.children))
+
+        bpy.context.scene.collection.objects.link(asset_group)
+
+        asset_group[AVALON_PROPERTY] 
= { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name + } + + self[:] = objects + return objects + + def exec_update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! + """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = Path(get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert asset_group, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] + + normalized_group_libpath = ( + str(Path(bpy.path.abspath(group_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + self.log.debug( + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, + normalized_libpath, + ) + if normalized_group_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + mat = asset_group.matrix_basis.copy() + + self._remove(asset_group) + self._process(str(libpath), asset_group, object_name) + + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! 
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + + if not asset_group: + return False + + self._remove(asset_group) + + bpy.data.objects.remove(asset_group) + + return True diff --git a/client/ayon_core/hosts/blender/plugins/load/load_fbx.py b/client/ayon_core/hosts/blender/plugins/load/load_fbx.py new file mode 100644 index 0000000000..03993c9f5e --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/load/load_fbx.py @@ -0,0 +1,276 @@ +"""Load an asset in Blender from an Alembic file.""" + +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +import bpy + +from ayon_core.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) +from ayon_core.hosts.blender.api import plugin, lib +from ayon_core.hosts.blender.api.pipeline import ( + AVALON_CONTAINERS, + AVALON_PROPERTY, +) + + +class FbxModelLoader(plugin.AssetLoader): + """Load FBX models. + + Stores the imported asset in an empty named after the asset. + """ + + families = ["model", "rig"] + representations = ["fbx"] + + label = "Load FBX" + icon = "code-fork" + color = "orange" + + def _remove(self, asset_group): + objects = list(asset_group.children) + + for obj in objects: + if obj.type == 'MESH': + for material_slot in list(obj.material_slots): + if material_slot.material: + bpy.data.materials.remove(material_slot.material) + bpy.data.meshes.remove(obj.data) + elif obj.type == 'ARMATURE': + objects.extend(obj.children) + bpy.data.armatures.remove(obj.data) + elif obj.type == 'CURVE': + bpy.data.curves.remove(obj.data) + elif obj.type == 'EMPTY': + objects.extend(obj.children) + bpy.data.objects.remove(obj) + + def _process(self, libpath, asset_group, group_name, action): + plugin.deselect_all() + + collection = bpy.context.view_layer.active_layer_collection.collection + + bpy.ops.import_scene.fbx(filepath=libpath) + + parent = bpy.context.scene.collection + + imported = lib.get_selection() + + empties = [obj for obj in imported if obj.type == 'EMPTY'] + + container = None + + for empty in empties: + if not empty.parent: + container = empty + break + + assert container, "No asset group found" + + # Children must be linked before parents, + # otherwise the hierarchy will break + objects = [] + nodes = list(container.children) + + for obj in nodes: + obj.parent = asset_group + + bpy.data.objects.remove(container) + + for obj in nodes: + objects.append(obj) + nodes.extend(list(obj.children)) + + objects.reverse() + + for obj in objects: + parent.objects.link(obj) + collection.objects.unlink(obj) + + for obj in objects: + name = obj.name + obj.name = f"{group_name}:{name}" + if obj.type != 'EMPTY': + name_data = obj.data.name + obj.data.name = f"{group_name}:{name_data}" + + if obj.type == 'MESH': + for material_slot in obj.material_slots: + name_mat = material_slot.material.name + material_slot.material.name = f"{group_name}:{name_mat}" + elif obj.type == 'ARMATURE': + anim_data = obj.animation_data + if action is not None: + anim_data.action = action + elif anim_data.action is not None: + name_action = anim_data.action.name + anim_data.action.name = f"{group_name}:{name_action}" + + if not obj.get(AVALON_PROPERTY): + obj[AVALON_PROPERTY] = dict() + + avalon_info = obj[AVALON_PROPERTY] + avalon_info.update({"container_name": group_name}) + + plugin.deselect_all() + + return objects + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + 
""" + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + libpath = self.filepath_from_context(context) + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + asset_name = plugin.prepare_scene_name(asset, subset) + unique_number = plugin.get_unique_number(asset, subset) + group_name = plugin.prepare_scene_name(asset, subset, unique_number) + namespace = namespace or f"{asset}_{unique_number}" + + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_container) + + asset_group = bpy.data.objects.new(group_name, object_data=None) + avalon_container.objects.link(asset_group) + + objects = self._process(libpath, asset_group, group_name, None) + + objects = [] + nodes = list(asset_group.children) + + for obj in nodes: + objects.append(obj) + nodes.extend(list(obj.children)) + + bpy.context.scene.collection.objects.link(asset_group) + + asset_group[AVALON_PROPERTY] = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or '', + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name + } + + self[:] = objects + return objects + + def exec_update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! 
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = Path(get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert asset_group, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] + + normalized_group_libpath = ( + str(Path(bpy.path.abspath(group_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + self.log.debug( + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, + normalized_libpath, + ) + if normalized_group_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + # Get the armature of the rig + objects = asset_group.children + armatures = [obj for obj in objects if obj.type == 'ARMATURE'] + action = None + + if armatures: + armature = armatures[0] + + if armature.animation_data and armature.animation_data.action: + action = armature.animation_data.action + + mat = asset_group.matrix_basis.copy() + self._remove(asset_group) + + self._process(str(libpath), asset_group, object_name, action) + + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! 
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + + if not asset_group: + return False + + self._remove(asset_group) + + bpy.data.objects.remove(asset_group) + + return True diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/client/ayon_core/hosts/blender/plugins/load/load_layout_json.py similarity index 98% rename from openpype/hosts/blender/plugins/load/load_layout_json.py rename to client/ayon_core/hosts/blender/plugins/load/load_layout_json.py index 748ac619b6..f48862a803 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_json.py +++ b/client/ayon_core/hosts/blender/plugins/load/load_layout_json.py @@ -7,7 +7,7 @@ import bpy -from openpype.pipeline import ( +from ayon_core.pipeline import ( discover_loader_plugins, remove_container, load_container, @@ -15,12 +15,12 @@ loaders_from_representation, AVALON_CONTAINER_ID, ) -from openpype.hosts.blender.api.pipeline import ( +from ayon_core.hosts.blender.api.pipeline import ( AVALON_INSTANCES, AVALON_CONTAINERS, AVALON_PROPERTY, ) -from openpype.hosts.blender.api import plugin +from ayon_core.hosts.blender.api import plugin class JsonLayoutLoader(plugin.AssetLoader): diff --git a/client/ayon_core/hosts/blender/plugins/load/load_look.py b/client/ayon_core/hosts/blender/plugins/load/load_look.py new file mode 100644 index 0000000000..f9ebf98912 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/load/load_look.py @@ -0,0 +1,222 @@ +"""Load a model asset in Blender.""" + +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +import os +import json +import bpy + +from ayon_core.pipeline import get_representation_path +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.pipeline import ( + containerise_existing, + AVALON_PROPERTY +) + + +class BlendLookLoader(plugin.AssetLoader): + """Load models from a .blend file. + + Because they come from a .blend file we can simply link the collection that + contains the model. There is no further need to 'containerise' it. 
+ """ + + families = ["look"] + representations = ["json"] + + label = "Load Look" + icon = "code-fork" + color = "orange" + + def get_all_children(self, obj): + children = list(obj.children) + + for child in children: + children.extend(child.children) + + return children + + def _process(self, libpath, container_name, objects): + with open(libpath, "r") as fp: + data = json.load(fp) + + path = os.path.dirname(libpath) + materials_path = f"{path}/resources" + + materials = [] + + for entry in data: + file = entry.get('fbx_filename') + if file is None: + continue + + bpy.ops.import_scene.fbx(filepath=f"{materials_path}/{file}") + + mesh = [o for o in bpy.context.scene.objects if o.select_get()][0] + material = mesh.data.materials[0] + material.name = f"{material.name}:{container_name}" + + texture_file = entry.get('tga_filename') + if texture_file: + node_tree = material.node_tree + pbsdf = node_tree.nodes['Principled BSDF'] + base_color = pbsdf.inputs[0] + tex_node = base_color.links[0].from_node + tex_node.image.filepath = f"{materials_path}/{texture_file}" + + materials.append(material) + + for obj in objects: + for child in self.get_all_children(obj): + mesh_name = child.name.split(':')[0] + if mesh_name == material.name.split(':')[0]: + child.data.materials.clear() + child.data.materials.append(material) + break + + bpy.data.objects.remove(mesh) + + return materials, objects + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + + libpath = self.filepath_from_context(context) + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + lib_container = plugin.prepare_scene_name( + asset, subset + ) + unique_number = plugin.get_unique_number( + asset, subset + ) + namespace = namespace or f"{asset}_{unique_number}" + container_name = plugin.prepare_scene_name( + asset, subset, unique_number + ) + + container = bpy.data.collections.new(lib_container) + container.name = container_name + containerise_existing( + container, + name, + namespace, + context, + self.__class__.__name__, + ) + + metadata = container.get(AVALON_PROPERTY) + + metadata["libpath"] = libpath + metadata["lib_container"] = lib_container + + selected = [o for o in bpy.context.scene.objects if o.select_get()] + + materials, objects = self._process(libpath, container_name, selected) + + # Save the list of imported materials in the metadata container + metadata["objects"] = objects + metadata["materials"] = materials + + metadata["parent"] = str(context["representation"]["parent"]) + metadata["family"] = context["representation"]["context"]["family"] + + nodes = list(container.objects) + nodes.append(container) + self[:] = nodes + return nodes + + def update(self, container: Dict, representation: Dict): + collection = bpy.data.collections.get(container["objectName"]) + libpath = Path(get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert collection, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert not (collection.children), ( + "Nested collections are not supported." 
+        )
+        assert libpath, (
+            f"No existing library file found for {container['objectName']}"
+        )
+        assert libpath.is_file(), (
+            f"The file doesn't exist: {libpath}"
+        )
+        assert extension in plugin.VALID_EXTENSIONS, (
+            f"Unsupported file: {libpath}"
+        )
+
+        collection_metadata = collection.get(AVALON_PROPERTY)
+        collection_libpath = collection_metadata["libpath"]
+
+        normalized_collection_libpath = (
+            str(Path(bpy.path.abspath(collection_libpath)).resolve())
+        )
+        normalized_libpath = (
+            str(Path(bpy.path.abspath(str(libpath))).resolve())
+        )
+        self.log.debug(
+            "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
+            normalized_collection_libpath,
+            normalized_libpath,
+        )
+        if normalized_collection_libpath == normalized_libpath:
+            self.log.info("Library already loaded, not updating...")
+            return
+
+        for obj in collection_metadata['objects']:
+            for child in self.get_all_children(obj):
+                child.data.materials.clear()
+
+        for material in collection_metadata['materials']:
+            bpy.data.materials.remove(material)
+
+        namespace = collection_metadata['namespace']
+        name = collection_metadata['name']
+
+        container_name = f"{namespace}_{name}"
+
+        materials, objects = self._process(
+            libpath, container_name, collection_metadata['objects'])
+
+        collection_metadata["objects"] = objects
+        collection_metadata["materials"] = materials
+        collection_metadata["libpath"] = str(libpath)
+        collection_metadata["representation"] = str(representation["_id"])
+
+    def remove(self, container: Dict) -> bool:
+        collection = bpy.data.collections.get(container["objectName"])
+        if not collection:
+            return False
+
+        collection_metadata = collection.get(AVALON_PROPERTY)
+
+        for obj in collection_metadata['objects']:
+            for child in self.get_all_children(obj):
+                child.data.materials.clear()
+
+        for material in collection_metadata['materials']:
+            bpy.data.materials.remove(material)
+
+        bpy.data.collections.remove(collection)
+
+        return True
diff --git a/client/ayon_core/hosts/blender/plugins/publish/collect_current_file.py b/client/ayon_core/hosts/blender/plugins/publish/collect_current_file.py
new file mode 100644
index 0000000000..7370f6cbe8
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/publish/collect_current_file.py
@@ -0,0 +1,15 @@
+import pyblish.api
+from ayon_core.hosts.blender.api import workio
+
+
+class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
+    """Inject the current working file into context"""
+
+    order = pyblish.api.CollectorOrder - 0.5
+    label = "Blender Current File"
+    hosts = ["blender"]
+
+    def process(self, context):
+        """Inject the current working file"""
+        current_file = workio.current_file()
+        context.data["currentFile"] = current_file
diff --git a/openpype/hosts/blender/plugins/publish/collect_instance.py b/client/ayon_core/hosts/blender/plugins/publish/collect_instance.py
similarity index 92%
rename from openpype/hosts/blender/plugins/publish/collect_instance.py
rename to client/ayon_core/hosts/blender/plugins/publish/collect_instance.py
index 4685472213..f9338cd30a 100644
--- a/openpype/hosts/blender/plugins/publish/collect_instance.py
+++ b/client/ayon_core/hosts/blender/plugins/publish/collect_instance.py
@@ -2,8 +2,8 @@
 
 import pyblish.api
 
-from openpype.pipeline.publish import KnownPublishError
-from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
+from ayon_core.pipeline.publish import KnownPublishError
+from ayon_core.hosts.blender.api.pipeline import AVALON_PROPERTY
 
 
 class CollectBlenderInstanceData(pyblish.api.InstancePlugin):
diff --git a/client/ayon_core/hosts/blender/plugins/publish/collect_render.py b/client/ayon_core/hosts/blender/plugins/publish/collect_render.py
new file mode 100644
index 0000000000..1ad2de2b7d
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/publish/collect_render.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+"""Collect render data."""
+
+import os
+import re
+
+import bpy
+
+from ayon_core.hosts.blender.api import colorspace
+import pyblish.api
+
+
+class CollectBlenderRender(pyblish.api.InstancePlugin):
+    """Gather all publishable render instances."""
+
+    order = pyblish.api.CollectorOrder + 0.01
+    hosts = ["blender"]
+    families = ["render"]
+    label = "Collect Render"
+    sync_workfile_version = False
+
+    @staticmethod
+    def generate_expected_beauty(
+        render_product, frame_start, frame_end, frame_step, ext
+    ):
+        """
+        Generate the expected files for the beauty render product.
+        This returns a list of files that should be rendered, replacing
+        the sequence of `#` with the frame number.
+        """
+        path = os.path.dirname(render_product)
+        file = os.path.basename(render_product)
+
+        expected_files = []
+
+        for frame in range(frame_start, frame_end + 1, frame_step):
+            frame_str = str(frame).rjust(4, "0")
+            filename = re.sub("#+", frame_str, file)
+            expected_file = f"{os.path.join(path, filename)}.{ext}"
+            expected_files.append(expected_file.replace("\\", "/"))
+
+        return {
+            "beauty": expected_files
+        }
+
+    @staticmethod
+    def generate_expected_aovs(
+        aov_file_product, frame_start, frame_end, frame_step, ext
+    ):
+        """
+        Generate the expected files for each AOV render product.
+        This returns a list of files per AOV that should be rendered,
+        replacing the sequence of `#` with the frame number.
+        """
+        expected_files = {}
+
+        for aov_name, aov_file in aov_file_product:
+            path = os.path.dirname(aov_file)
+            file = os.path.basename(aov_file)
+
+            aov_files = []
+
+            for frame in range(frame_start, frame_end + 1, frame_step):
+                frame_str = str(frame).rjust(4, "0")
+                filename = re.sub("#+", frame_str, file)
+                expected_file = f"{os.path.join(path, filename)}.{ext}"
+                aov_files.append(expected_file.replace("\\", "/"))
+
+            expected_files[aov_name] = aov_files
+
+        return expected_files
+
+    def process(self, instance):
+        context = instance.context
+
+        instance_node = instance.data["transientData"]["instance_node"]
+        render_data = instance_node.get("render_data")
+
+        assert render_data, "No render data found."
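+        # Illustrative sketch (hypothetical values): for a render product of
+        # "renders/beauty.####" rendered to "exr" from frame 1001 to 1002
+        # with step 1, generate_expected_beauty() returns:
+        #     {"beauty": ["renders/beauty.1001.exr",
+        #                 "renders/beauty.1002.exr"]}
+        # generate_expected_aovs() builds the same per-AOV mapping from the
+        # (aov_name, aov_file) pairs.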
+ + render_product = render_data.get("render_product") + aov_file_product = render_data.get("aov_file_product") + ext = render_data.get("image_format") + multilayer = render_data.get("multilayer_exr") + + frame_start = context.data["frameStart"] + frame_end = context.data["frameEnd"] + frame_handle_start = context.data["frameStartHandle"] + frame_handle_end = context.data["frameEndHandle"] + + expected_beauty = self.generate_expected_beauty( + render_product, int(frame_start), int(frame_end), + int(bpy.context.scene.frame_step), ext) + + expected_aovs = self.generate_expected_aovs( + aov_file_product, int(frame_start), int(frame_end), + int(bpy.context.scene.frame_step), ext) + + expected_files = expected_beauty | expected_aovs + + instance.data.update({ + "families": ["render", "render.farm"], + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartHandle": frame_handle_start, + "frameEndHandle": frame_handle_end, + "fps": context.data["fps"], + "byFrameStep": bpy.context.scene.frame_step, + "review": render_data.get("review", False), + "multipartExr": ext == "exr" and multilayer, + "farm": True, + "expectedFiles": [expected_files], + # OCIO not currently implemented in Blender, but the following + # settings are required by the schema, so it is hardcoded. + # TODO: Implement OCIO in Blender + "colorspaceConfig": "", + "colorspaceDisplay": "sRGB", + "colorspaceView": "ACES 1.0 SDR-video", + "renderProducts": colorspace.ARenderProduct(), + }) diff --git a/openpype/hosts/blender/plugins/publish/collect_review.py b/client/ayon_core/hosts/blender/plugins/publish/collect_review.py similarity index 100% rename from openpype/hosts/blender/plugins/publish/collect_review.py rename to client/ayon_core/hosts/blender/plugins/publish/collect_review.py diff --git a/openpype/hosts/blender/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/blender/plugins/publish/collect_workfile.py similarity index 100% rename from openpype/hosts/blender/plugins/publish/collect_workfile.py rename to client/ayon_core/hosts/blender/plugins/publish/collect_workfile.py diff --git a/openpype/hosts/blender/plugins/publish/extract_abc.py b/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py similarity index 95% rename from openpype/hosts/blender/plugins/publish/extract_abc.py rename to client/ayon_core/hosts/blender/plugins/publish/extract_abc.py index 0e242e9d53..2f89426e56 100644 --- a/openpype/hosts/blender/plugins/publish/extract_abc.py +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py @@ -2,8 +2,8 @@ import bpy -from openpype.pipeline import publish -from openpype.hosts.blender.api import plugin +from ayon_core.pipeline import publish +from ayon_core.hosts.blender.api import plugin class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin): diff --git a/openpype/hosts/blender/plugins/publish/extract_abc_animation.py b/client/ayon_core/hosts/blender/plugins/publish/extract_abc_animation.py similarity index 96% rename from openpype/hosts/blender/plugins/publish/extract_abc_animation.py rename to client/ayon_core/hosts/blender/plugins/publish/extract_abc_animation.py index 2bf75aa05e..41ad2a99b8 100644 --- a/openpype/hosts/blender/plugins/publish/extract_abc_animation.py +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_abc_animation.py @@ -2,8 +2,8 @@ import bpy -from openpype.pipeline import publish -from openpype.hosts.blender.api import plugin +from ayon_core.pipeline import publish +from ayon_core.hosts.blender.api import plugin class 
ExtractAnimationABC( diff --git a/openpype/hosts/blender/plugins/publish/extract_blend.py b/client/ayon_core/hosts/blender/plugins/publish/extract_blend.py similarity index 98% rename from openpype/hosts/blender/plugins/publish/extract_blend.py rename to client/ayon_core/hosts/blender/plugins/publish/extract_blend.py index 94e87d537c..00e4074f55 100644 --- a/openpype/hosts/blender/plugins/publish/extract_blend.py +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_blend.py @@ -2,7 +2,7 @@ import bpy -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractBlend(publish.Extractor, publish.OptionalPyblishPluginMixin): diff --git a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py b/client/ayon_core/hosts/blender/plugins/publish/extract_blend_animation.py similarity index 98% rename from openpype/hosts/blender/plugins/publish/extract_blend_animation.py rename to client/ayon_core/hosts/blender/plugins/publish/extract_blend_animation.py index 11eb268271..6b0d6195b6 100644 --- a/openpype/hosts/blender/plugins/publish/extract_blend_animation.py +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_blend_animation.py @@ -2,7 +2,7 @@ import bpy -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractBlendAnimation( diff --git a/openpype/hosts/blender/plugins/publish/extract_camera_abc.py b/client/ayon_core/hosts/blender/plugins/publish/extract_camera_abc.py similarity index 92% rename from openpype/hosts/blender/plugins/publish/extract_camera_abc.py rename to client/ayon_core/hosts/blender/plugins/publish/extract_camera_abc.py index df68668eae..efa1faa87c 100644 --- a/openpype/hosts/blender/plugins/publish/extract_camera_abc.py +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_camera_abc.py @@ -2,9 +2,9 @@ import bpy -from openpype.pipeline import publish -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY +from ayon_core.pipeline import publish +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.pipeline import AVALON_PROPERTY class ExtractCameraABC(publish.Extractor, publish.OptionalPyblishPluginMixin): diff --git a/openpype/hosts/blender/plugins/publish/extract_camera_fbx.py b/client/ayon_core/hosts/blender/plugins/publish/extract_camera_fbx.py similarity index 96% rename from openpype/hosts/blender/plugins/publish/extract_camera_fbx.py rename to client/ayon_core/hosts/blender/plugins/publish/extract_camera_fbx.py index f904b79ddb..b2b6cd602d 100644 --- a/openpype/hosts/blender/plugins/publish/extract_camera_fbx.py +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_camera_fbx.py @@ -2,8 +2,8 @@ import bpy -from openpype.pipeline import publish -from openpype.hosts.blender.api import plugin +from ayon_core.pipeline import publish +from ayon_core.hosts.blender.api import plugin class ExtractCamera(publish.Extractor, publish.OptionalPyblishPluginMixin): diff --git a/client/ayon_core/hosts/blender/plugins/publish/extract_fbx.py b/client/ayon_core/hosts/blender/plugins/publish/extract_fbx.py new file mode 100644 index 0000000000..7e8c13fea8 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_fbx.py @@ -0,0 +1,92 @@ +import os + +import bpy + +from ayon_core.pipeline import publish +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.pipeline import AVALON_PROPERTY + + +class ExtractFBX(publish.Extractor, 
publish.OptionalPyblishPluginMixin): + """Extract as FBX.""" + + label = "Extract FBX" + hosts = ["blender"] + families = ["model", "rig"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + # Define extract output file path + stagingdir = self.staging_dir(instance) + asset_name = instance.data["assetEntity"]["name"] + subset = instance.data["subset"] + instance_name = f"{asset_name}_{subset}" + filename = f"{instance_name}.fbx" + filepath = os.path.join(stagingdir, filename) + + # Perform extraction + self.log.debug("Performing extraction..") + + plugin.deselect_all() + + asset_group = instance.data["transientData"]["instance_node"] + + selected = [] + for obj in instance: + obj.select_set(True) + selected.append(obj) + + context = plugin.create_blender_context( + active=asset_group, selected=selected) + + new_materials = [] + new_materials_objs = [] + objects = list(asset_group.children) + + for obj in objects: + objects.extend(obj.children) + if obj.type == 'MESH' and len(obj.data.materials) == 0: + mat = bpy.data.materials.new(obj.name) + obj.data.materials.append(mat) + new_materials.append(mat) + new_materials_objs.append(obj) + + scale_length = bpy.context.scene.unit_settings.scale_length + bpy.context.scene.unit_settings.scale_length = 0.01 + + with bpy.context.temp_override(**context): + # We export the fbx + bpy.ops.export_scene.fbx( + filepath=filepath, + use_active_collection=False, + use_selection=True, + mesh_smooth_type='FACE', + add_leaf_bones=False + ) + + bpy.context.scene.unit_settings.scale_length = scale_length + + plugin.deselect_all() + + for mat in new_materials: + bpy.data.materials.remove(mat) + + for obj in new_materials_objs: + obj.data.materials.pop() + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + + self.log.debug("Extracted instance '%s' to: %s", + instance.name, representation) diff --git a/client/ayon_core/hosts/blender/plugins/publish/extract_fbx_animation.py b/client/ayon_core/hosts/blender/plugins/publish/extract_fbx_animation.py new file mode 100644 index 0000000000..3ad4cc3aa9 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_fbx_animation.py @@ -0,0 +1,227 @@ +import os +import json + +import bpy +import bpy_extras +import bpy_extras.anim_utils + +from ayon_core.pipeline import publish +from ayon_core.hosts.blender.api import plugin +from ayon_core.hosts.blender.api.pipeline import AVALON_PROPERTY + + +def get_all_parents(obj): + """Get all recursive parents of object""" + result = [] + while True: + obj = obj.parent + if not obj: + break + result.append(obj) + return result + + +def get_highest_root(objects): + # Get the highest object that is also in the collection + included_objects = {obj.name_full for obj in objects} + num_parents_to_obj = {} + for obj in objects: + if isinstance(obj, bpy.types.Object): + parents = get_all_parents(obj) + # included parents + parents = [parent for parent in parents if + parent.name_full in included_objects] + if not parents: + # A node without parents must be a highest root + return obj + + num_parents_to_obj.setdefault(len(parents), obj) + + minimum_parent = min(num_parents_to_obj) + return num_parents_to_obj[minimum_parent] + + +class ExtractAnimationFBX( + publish.Extractor, + publish.OptionalPyblishPluginMixin, +): + """Extract as 
animation.""" + + label = "Extract FBX" + hosts = ["blender"] + families = ["animation"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + # Define extract output file path + stagingdir = self.staging_dir(instance) + + # Perform extraction + self.log.debug("Performing extraction..") + + asset_group = instance.data["transientData"]["instance_node"] + + # Get objects in this collection (but not in children collections) + # and for those objects include the children hierarchy + # TODO: Would it make more sense for the Collect Instance collector + # to also always retrieve all the children? + objects = set(asset_group.objects) + + # From the direct children of the collection find the 'root' node + # that we want to export - it is the 'highest' node in a hierarchy + root = get_highest_root(objects) + + for obj in list(objects): + objects.update(obj.children_recursive) + + # Find all armatures among the objects, assume to find only one + armatures = [obj for obj in objects if obj.type == "ARMATURE"] + if not armatures: + raise RuntimeError( + f"Unable to find ARMATURE in collection: " + f"{asset_group.name}" + ) + elif len(armatures) > 1: + self.log.warning( + "Found more than one ARMATURE, using " + f"only first of: {armatures}" + ) + armature = armatures[0] + + object_action_pairs = [] + original_actions = [] + + starting_frames = [] + ending_frames = [] + + # For each armature, we make a copy of the current action + if armature.animation_data and armature.animation_data.action: + curr_action = armature.animation_data.action + copy_action = curr_action.copy() + + curr_frame_range = curr_action.frame_range + + starting_frames.append(curr_frame_range[0]) + ending_frames.append(curr_frame_range[1]) + else: + self.log.info( + f"Armature '{armature.name}' has no animation, " + f"skipping FBX animation extraction for {instance}." 
+            )
+            return
+
+        asset_group_name = asset_group.name
+        asset_name = asset_group.get(AVALON_PROPERTY).get("asset_name")
+        if asset_name:
+            # Rename for the export; this data is only present when loaded
+            # from a JSON Layout (layout family)
+            asset_group.name = asset_name
+
+        # Remove : from the armature name for the export
+        armature_name = armature.name
+        original_name = armature_name.split(':')[1]
+        armature.name = original_name
+
+        object_action_pairs.append((armature, copy_action))
+        original_actions.append(curr_action)
+
+        # We compute the frame range to bake from the collected actions
+        min_frame = min(starting_frames)
+        max_frame = max(ending_frames)
+
+        # We bake the copy of the current action for each object;
+        # range() excludes its end, so add 1 to keep the last frame
+        bpy_extras.anim_utils.bake_action_objects(
+            object_action_pairs,
+            frames=range(int(min_frame), int(max_frame) + 1),
+            do_object=False,
+            do_clean=False
+        )
+
+        for obj in bpy.data.objects:
+            obj.select_set(False)
+
+        root.select_set(True)
+        armature.select_set(True)
+        asset_name = instance.data["assetEntity"]["name"]
+        subset = instance.data["subset"]
+        instance_name = f"{asset_name}_{subset}"
+        fbx_filename = f"{instance_name}_{armature.name}.fbx"
+        filepath = os.path.join(stagingdir, fbx_filename)
+
+        override = plugin.create_blender_context(
+            active=root, selected=[root, armature])
+
+        with bpy.context.temp_override(**override):
+            # We export the fbx
+            bpy.ops.export_scene.fbx(
+                filepath=filepath,
+                use_active_collection=False,
+                use_selection=True,
+                bake_anim_use_nla_strips=False,
+                bake_anim_use_all_actions=False,
+                add_leaf_bones=False,
+                armature_nodetype='ROOT',
+                object_types={'EMPTY', 'ARMATURE'}
+            )
+
+        armature.name = armature_name
+        asset_group.name = asset_group_name
+        # Deselect the exported objects again
+        root.select_set(False)
+        armature.select_set(False)
+
+        # We delete the baked action and set the original one back
+        for i in range(0, len(object_action_pairs)):
+            pair = object_action_pairs[i]
+            action = original_actions[i]
+
+            if action:
+                pair[0].animation_data.action = action
+
+            if pair[1]:
+                pair[1].user_clear()
+                bpy.data.actions.remove(pair[1])
+
+        json_filename = f"{instance_name}.json"
+        json_path = os.path.join(stagingdir, json_filename)
+
+        json_dict = {
+            "instance_name": asset_group.get(AVALON_PROPERTY).get("objectName")
+        }
+
+        # collection = instance.data.get("name")
+        # container = None
+        # for obj in bpy.data.collections[collection].objects:
+        #     if obj.type == "ARMATURE":
+        #         container_name = obj.get("avalon").get("container_name")
+        #         container = bpy.data.collections[container_name]
+        # if container:
+        #     json_dict = {
+        #         "instance_name": container.get("avalon").get("instance_name")
+        #     }
+
+        with open(json_path, "w+") as file:
+            json.dump(json_dict, fp=file, indent=2)
+
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        fbx_representation = {
+            'name': 'fbx',
+            'ext': 'fbx',
+            'files': fbx_filename,
+            "stagingDir": stagingdir,
+        }
+        json_representation = {
+            'name': 'json',
+            'ext': 'json',
+            'files': json_filename,
+            "stagingDir": stagingdir,
+        }
+        instance.data["representations"].append(fbx_representation)
+        instance.data["representations"].append(json_representation)
+
+        self.log.debug("Extracted instance '{}' to: {}".format(
+            instance.name, fbx_representation))
diff --git a/client/ayon_core/hosts/blender/plugins/publish/extract_layout.py b/client/ayon_core/hosts/blender/plugins/publish/extract_layout.py
new file mode 100644
index 0000000000..f868db3e74
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/publish/extract_layout.py
@@ -0,0 +1,266 @@
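+# A sketch of the produced layout JSON, with hypothetical values; the key
+# names are taken from the code below. One element is written per asset:
+#     {"reference": "<blend id>", "family": "rig", "instance_name": "...",
+#      "asset_name": "...", "file_path": "...", "transform": {...},
+#      "transform_matrix": [...], "basis": [...], "animation": "000.fbx"}
+# The "animation" key is only present for rigs with exported actions.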
+import os
+import json
+
+import bpy
+import bpy_extras
+import bpy_extras.anim_utils
+
+from ayon_core.client import get_representation_by_name
+from ayon_core.pipeline import publish
+from ayon_core.hosts.blender.api import plugin
+from ayon_core.hosts.blender.api.pipeline import AVALON_PROPERTY
+
+
+class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
+    """Extract a layout."""
+
+    label = "Extract Layout (JSON)"
+    hosts = ["blender"]
+    families = ["layout"]
+    optional = True
+
+    def _export_animation(self, asset, instance, stagingdir, fbx_count):
+        n = fbx_count
+
+        for obj in asset.children:
+            if obj.type != "ARMATURE":
+                continue
+
+            object_action_pairs = []
+            original_actions = []
+
+            starting_frames = []
+            ending_frames = []
+
+            # For each armature, we make a copy of the current action
+            curr_action = None
+            copy_action = None
+
+            if obj.animation_data and obj.animation_data.action:
+                curr_action = obj.animation_data.action
+                copy_action = curr_action.copy()
+
+                curr_frame_range = curr_action.frame_range
+
+                starting_frames.append(curr_frame_range[0])
+                ending_frames.append(curr_frame_range[1])
+            else:
+                self.log.info(f"Object {obj.name} has no animation.")
+                continue
+
+            asset_group_name = asset.name
+            asset.name = asset.get(AVALON_PROPERTY).get("asset_name")
+
+            armature_name = obj.name
+            original_name = armature_name.split(':')[1]
+            obj.name = original_name
+
+            object_action_pairs.append((obj, copy_action))
+            original_actions.append(curr_action)
+
+            # We compute the frame range to bake from the collected actions
+            min_frame = min(starting_frames)
+            max_frame = max(ending_frames)
+
+            # We bake the copy of the current action for each object;
+            # range() excludes its end, so add 1 to keep the last frame
+            bpy_extras.anim_utils.bake_action_objects(
+                object_action_pairs,
+                frames=range(int(min_frame), int(max_frame) + 1),
+                do_object=False,
+                do_clean=False
+            )
+
+            for o in bpy.data.objects:
+                o.select_set(False)
+
+            asset.select_set(True)
+            obj.select_set(True)
+            fbx_filename = f"{n:03d}.fbx"
+            filepath = os.path.join(stagingdir, fbx_filename)
+
+            override = plugin.create_blender_context(
+                active=asset, selected=[asset, obj])
+            with bpy.context.temp_override(**override):
+                # We export the fbx
+                bpy.ops.export_scene.fbx(
+                    filepath=filepath,
+                    use_active_collection=False,
+                    use_selection=True,
+                    bake_anim_use_nla_strips=False,
+                    bake_anim_use_all_actions=False,
+                    add_leaf_bones=False,
+                    armature_nodetype='ROOT',
+                    object_types={'EMPTY', 'ARMATURE'}
+                )
+            obj.name = armature_name
+            asset.name = asset_group_name
+            asset.select_set(False)
+            obj.select_set(False)
+
+            # We delete the baked action and set the original one back
+            for i in range(0, len(object_action_pairs)):
+                pair = object_action_pairs[i]
+                action = original_actions[i]
+
+                if action:
+                    pair[0].animation_data.action = action
+
+                if pair[1]:
+                    pair[1].user_clear()
+                    bpy.data.actions.remove(pair[1])
+
+            return fbx_filename, n + 1
+
+        return None, n
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
+        # Define extract output file path
+        stagingdir = self.staging_dir(instance)
+
+        # Perform extraction
+        self.log.debug("Performing extraction..")
+
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        json_data = []
+        fbx_files = []
+
+        asset_group = instance.data["transientData"]["instance_node"]
+
+        fbx_count = 0
+
+        project_name = instance.context.data["projectEntity"]["name"]
+        for asset in asset_group.children:
+            metadata = asset.get(AVALON_PROPERTY)
+            if not metadata:
+                # Avoid raising error directly if there's just invalid data
+
# inside the instance; better to log it to the artist + # TODO: This should actually be validated in a validator + self.log.warning( + f"Found content in layout that is not a loaded " + f"asset, skipping: {asset.name_full}" + ) + continue + + version_id = metadata["parent"] + family = metadata["family"] + + self.log.debug("Parent: {}".format(version_id)) + # Get blend reference + blend = get_representation_by_name( + project_name, "blend", version_id, fields=["_id"] + ) + blend_id = None + if blend: + blend_id = blend["_id"] + # Get fbx reference + fbx = get_representation_by_name( + project_name, "fbx", version_id, fields=["_id"] + ) + fbx_id = None + if fbx: + fbx_id = fbx["_id"] + # Get abc reference + abc = get_representation_by_name( + project_name, "abc", version_id, fields=["_id"] + ) + abc_id = None + if abc: + abc_id = abc["_id"] + + json_element = {} + if blend_id: + json_element["reference"] = str(blend_id) + if fbx_id: + json_element["reference_fbx"] = str(fbx_id) + if abc_id: + json_element["reference_abc"] = str(abc_id) + json_element["family"] = family + json_element["instance_name"] = asset.name + json_element["asset_name"] = metadata["asset_name"] + json_element["file_path"] = metadata["libpath"] + + json_element["transform"] = { + "translation": { + "x": asset.location.x, + "y": asset.location.y, + "z": asset.location.z + }, + "rotation": { + "x": asset.rotation_euler.x, + "y": asset.rotation_euler.y, + "z": asset.rotation_euler.z + }, + "scale": { + "x": asset.scale.x, + "y": asset.scale.y, + "z": asset.scale.z + } + } + + json_element["transform_matrix"] = [] + + for row in list(asset.matrix_world.transposed()): + json_element["transform_matrix"].append(list(row)) + + json_element["basis"] = [ + [1, 0, 0, 0], + [0, -1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1] + ] + + # Extract the animation as well + if family == "rig": + f, n = self._export_animation( + asset, instance, stagingdir, fbx_count) + if f: + fbx_files.append(f) + json_element["animation"] = f + fbx_count = n + + json_data.append(json_element) + + asset_name = instance.data["assetEntity"]["name"] + subset = instance.data["subset"] + instance_name = f"{asset_name}_{subset}" + json_filename = f"{instance_name}.json" + + json_path = os.path.join(stagingdir, json_filename) + + with open(json_path, "w+") as file: + json.dump(json_data, fp=file, indent=2) + + json_representation = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(json_representation) + + self.log.debug(fbx_files) + + if len(fbx_files) == 1: + fbx_representation = { + 'name': 'fbx', + 'ext': '000.fbx', + 'files': fbx_files[0], + "stagingDir": stagingdir, + } + instance.data["representations"].append(fbx_representation) + elif len(fbx_files) > 1: + fbx_representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': fbx_files, + "stagingDir": stagingdir, + } + instance.data["representations"].append(fbx_representation) + + self.log.debug("Extracted instance '%s' to: %s", + instance.name, json_representation) diff --git a/client/ayon_core/hosts/blender/plugins/publish/extract_playblast.py b/client/ayon_core/hosts/blender/plugins/publish/extract_playblast.py new file mode 100644 index 0000000000..83e6b26fbe --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_playblast.py @@ -0,0 +1,124 @@ +import os +import clique + +import bpy + +import pyblish.api +from ayon_core.pipeline import publish +from ayon_core.hosts.blender.api import capture +from 
ayon_core.hosts.blender.api.lib import maintained_time
+
+
+class ExtractPlayblast(publish.Extractor, publish.OptionalPyblishPluginMixin):
+    """Extract viewport playblast.
+
+    Takes the review camera and creates a review image sequence from a
+    viewport capture.
+    """
+
+    label = "Extract Playblast"
+    hosts = ["blender"]
+    families = ["review"]
+    optional = True
+    order = pyblish.api.ExtractorOrder + 0.01
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
+        # get scene fps
+        fps = instance.data.get("fps")
+        if fps is None:
+            fps = bpy.context.scene.render.fps
+            instance.data["fps"] = fps
+
+        self.log.debug(f"fps: {fps}")
+
+        # If start and end frames cannot be determined,
+        # get them from Blender timeline.
+        start = instance.data.get("frameStart", bpy.context.scene.frame_start)
+        end = instance.data.get("frameEnd", bpy.context.scene.frame_end)
+
+        self.log.debug(f"start: {start}, end: {end}")
+        assert end > start, "Invalid time range!"
+
+        # get review camera
+        camera = instance.data.get("review_camera")
+
+        # get isolate objects list
+        isolate = instance.data.get("isolate")
+
+        # get output path
+        stagingdir = self.staging_dir(instance)
+        asset_name = instance.data["assetEntity"]["name"]
+        subset = instance.data["subset"]
+        filename = f"{asset_name}_{subset}"
+
+        path = os.path.join(stagingdir, filename)
+
+        self.log.debug(f"Outputting images to {path}")
+
+        project_settings = instance.context.data["project_settings"]["blender"]
+        presets = project_settings["publish"]["ExtractPlayblast"]["presets"]
+        preset = presets.get("default") or {}
+        preset.update({
+            "camera": camera,
+            "start_frame": start,
+            "end_frame": end,
+            "filename": path,
+            "overwrite": True,
+            "isolate": isolate,
+        })
+        preset.setdefault(
+            "image_settings",
+            {
+                "file_format": "PNG",
+                "color_mode": "RGB",
+                "color_depth": "8",
+                "compression": 15,
+            },
+        )
+
+        with maintained_time():
+            path = capture(**preset)
+
+        self.log.debug(f"playblast path {path}")
+
+        collected_files = os.listdir(stagingdir)
+        collections, remainder = clique.assemble(
+            collected_files,
+            patterns=[f"{filename}\\.{clique.DIGITS_PATTERN}\\.png$"],
+        )
+
+        if len(collections) > 1:
+            raise RuntimeError(
+                f"More than one collection found in stagingdir: {stagingdir}"
+            )
+        elif len(collections) == 0:
+            raise RuntimeError(
+                f"No collection found in stagingdir: {stagingdir}"
+            )
+
+        frame_collection = collections[0]
+
+        self.log.debug(f"Found collection of interest {frame_collection}")
+
+        instance.data.setdefault("representations", [])
+
+        tags = ["review"]
+        if not instance.data.get("keepImages"):
+            tags.append("delete")
+
+        representation = {
+            "name": "png",
+            "ext": "png",
+            "files": list(frame_collection),
+            "stagingDir": stagingdir,
+            "frameStart": start,
+            "frameEnd": end,
+            "fps": fps,
+            "tags": tags,
+            "camera_name": camera
+        }
+        instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/blender/plugins/publish/extract_thumbnail.py b/client/ayon_core/hosts/blender/plugins/publish/extract_thumbnail.py
new file mode 100644
index 0000000000..7b445a0113
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/publish/extract_thumbnail.py
@@ -0,0 +1,106 @@
+import os
+import glob
+
+import pyblish.api
+from ayon_core.pipeline import publish
+from ayon_core.hosts.blender.api import capture
+from ayon_core.hosts.blender.api.lib import maintained_time
+
+import bpy
+
+
+class ExtractThumbnail(publish.Extractor):
+    """Extract viewport thumbnail.
+
+    Takes review camera and creates a thumbnail based on viewport
+    capture.
+
+    """
+
+    label = "Extract Thumbnail"
+    hosts = ["blender"]
+    families = ["review"]
+    order = pyblish.api.ExtractorOrder + 0.01
+    presets = {}
+
+    def process(self, instance):
+        self.log.debug("Extracting capture..")
+
+        if instance.data.get("thumbnailSource"):
+            self.log.debug("Thumbnail source found, skipping...")
+            return
+
+        stagingdir = self.staging_dir(instance)
+        asset_name = instance.data["assetEntity"]["name"]
+        subset = instance.data["subset"]
+        filename = f"{asset_name}_{subset}"
+
+        path = os.path.join(stagingdir, filename)
+
+        self.log.debug(f"Outputting images to {path}")
+
+        camera = instance.data.get("review_camera", "AUTO")
+        start = instance.data.get("frameStart", bpy.context.scene.frame_start)
+        family = instance.data.get("family")
+        isolate = instance.data.get("isolate")
+
+        preset = self.presets.get(family, {})
+
+        preset.update({
+            "camera": camera,
+            "start_frame": start,
+            "end_frame": start,
+            "filename": path,
+            "overwrite": True,
+            "isolate": isolate,
+        })
+        preset.setdefault(
+            "image_settings",
+            {
+                "file_format": "JPEG",
+                "color_mode": "RGB",
+                "quality": 100,
+            },
+        )
+
+        with maintained_time():
+            path = capture(**preset)
+
+        thumbnail = os.path.basename(self._fix_output_path(path))
+
+        self.log.debug(f"thumbnail: {thumbnail}")
+
+        instance.data.setdefault("representations", [])
+
+        representation = {
+            "name": "thumbnail",
+            "ext": "jpg",
+            "files": thumbnail,
+            "stagingDir": stagingdir,
+            "thumbnail": True
+        }
+        instance.data["representations"].append(representation)
+
+    def _fix_output_path(self, filepath):
+        """Workaround to return the path of the file actually written.
+
+        The path reported by the capture may not match the file on disk,
+        so glob for matching file extensions and assume the most recently
+        modified file is the correct one.
+        """
+        # Catch cancelled playblast
+        if filepath is None:
+            self.log.warning(
+                "Playblast did not result in output path. "
+                "Playblast is probably interrupted."
+ ) + return None + + if not os.path.exists(filepath): + files = glob.glob(f"{filepath}.*.jpg") + + if not files: + raise RuntimeError(f"Couldn't find playblast from: {filepath}") + filepath = max(files, key=os.path.getmtime) + + return filepath diff --git a/client/ayon_core/hosts/blender/plugins/publish/increment_workfile_version.py b/client/ayon_core/hosts/blender/plugins/publish/increment_workfile_version.py new file mode 100644 index 0000000000..b6e0ea3e19 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/publish/increment_workfile_version.py @@ -0,0 +1,32 @@ +import pyblish.api +from ayon_core.pipeline.publish import OptionalPyblishPluginMixin +from ayon_core.hosts.blender.api.workio import save_file + + +class IncrementWorkfileVersion( + pyblish.api.ContextPlugin, + OptionalPyblishPluginMixin +): + """Increment current workfile version.""" + + order = pyblish.api.IntegratorOrder + 0.9 + label = "Increment Workfile Version" + optional = True + hosts = ["blender"] + families = ["animation", "model", "rig", "action", "layout", "blendScene", + "pointcache", "render.farm"] + + def process(self, context): + if not self.is_active(context.data): + return + + assert all(result["success"] for result in context.data["results"]), ( + "Publishing not successful so version is not increased.") + + from ayon_core.lib import version_up + path = context.data["currentFile"] + filepath = version_up(path) + + save_file(filepath, copy=False) + + self.log.debug('Incrementing blender workfile version') diff --git a/openpype/hosts/blender/plugins/publish/integrate_animation.py b/client/ayon_core/hosts/blender/plugins/publish/integrate_animation.py similarity index 95% rename from openpype/hosts/blender/plugins/publish/integrate_animation.py rename to client/ayon_core/hosts/blender/plugins/publish/integrate_animation.py index 623da9c585..c461d56e7c 100644 --- a/openpype/hosts/blender/plugins/publish/integrate_animation.py +++ b/client/ayon_core/hosts/blender/plugins/publish/integrate_animation.py @@ -1,7 +1,7 @@ import json import pyblish.api -from openpype.pipeline.publish import OptionalPyblishPluginMixin +from ayon_core.pipeline.publish import OptionalPyblishPluginMixin class IntegrateAnimation( diff --git a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py b/client/ayon_core/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py similarity index 91% rename from openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py rename to client/ayon_core/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py index 9b6e513897..cce95e9cf9 100644 --- a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py +++ b/client/ayon_core/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py @@ -4,8 +4,8 @@ import pyblish.api -import openpype.hosts.blender.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.blender.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError, OptionalPyblishPluginMixin @@ -25,7 +25,7 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin, hosts = ["blender"] families = ["camera"] label = "Zero Keyframe" - actions = [openpype.hosts.blender.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.blender.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance) -> List: diff --git a/client/ayon_core/hosts/blender/plugins/publish/validate_deadline_publish.py 
b/client/ayon_core/hosts/blender/plugins/publish/validate_deadline_publish.py
new file mode 100644
index 0000000000..b9310f9da0
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/publish/validate_deadline_publish.py
@@ -0,0 +1,47 @@
+import os
+
+import bpy
+
+import pyblish.api
+from ayon_core.pipeline.publish import (
+    RepairAction,
+    ValidateContentsOrder,
+    PublishValidationError,
+    OptionalPyblishPluginMixin
+)
+from ayon_core.hosts.blender.api.render_lib import prepare_rendering
+
+
+class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
+                              OptionalPyblishPluginMixin):
+    """Validate that the render output directory is derived from the scene
+    name, so repeated submissions do not write to the same directory.
+    """
+
+    order = ValidateContentsOrder
+    families = ["render"]
+    hosts = ["blender"]
+    label = "Validate Render Output for Deadline"
+    optional = True
+    actions = [RepairAction]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+        filepath = bpy.data.filepath
+        file = os.path.basename(filepath)
+        filename, ext = os.path.splitext(file)
+        if filename not in bpy.context.scene.render.filepath:
+            raise PublishValidationError(
+                "The render output folder doesn't match the Blender scene "
+                "name. Use the Repair action to fix the output path."
+            )
+
+    @classmethod
+    def repair(cls, instance):
+        container = instance.data["transientData"]["instance_node"]
+        prepare_rendering(container)
+        bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
+        cls.log.debug("Reset the render output folder...")
diff --git a/client/ayon_core/hosts/blender/plugins/publish/validate_file_saved.py b/client/ayon_core/hosts/blender/plugins/publish/validate_file_saved.py
new file mode 100644
index 0000000000..0b8762fed5
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/publish/validate_file_saved.py
@@ -0,0 +1,61 @@
+import bpy
+
+import pyblish.api
+
+from ayon_core.pipeline.publish import (
+    OptionalPyblishPluginMixin,
+    PublishValidationError
+)
+
+
+class SaveWorkfileAction(pyblish.api.Action):
+    """Save Workfile."""
+    label = "Save Workfile"
+    on = "failed"
+    icon = "save"
+
+    def process(self, context, plugin):
+        bpy.ops.wm.avalon_workfiles()
+
+
+class ValidateFileSaved(pyblish.api.ContextPlugin,
+                        OptionalPyblishPluginMixin):
+    """Validate that the workfile has been saved."""
+
+    order = pyblish.api.ValidatorOrder - 0.01
+    hosts = ["blender"]
+    label = "Validate File Saved"
+    optional = False
+    exclude_families = []
+    actions = [SaveWorkfileAction]
+
+    def process(self, context):
+        if not self.is_active(context.data):
+            return
+
+        if not context.data["currentFile"]:
+            # File has not been saved at all and has no filename
+            raise PublishValidationError(
+                "Current file is empty. Save the file before continuing."
+ ) + + # Do not validate workfile has unsaved changes if only instances + # present of families that should be excluded + families = { + instance.data["family"] for instance in context + # Consider only enabled instances + if instance.data.get("publish", True) + and instance.data.get("active", True) + } + + def is_excluded(family): + return any(family in exclude_family + for exclude_family in self.exclude_families) + + if all(is_excluded(family) for family in families): + self.log.debug("Only excluded families found, skipping workfile " + "unsaved changes validation..") + return + + if bpy.data.is_dirty: + raise PublishValidationError("Workfile has unsaved changes.") diff --git a/openpype/hosts/blender/plugins/publish/validate_instance_empty.py b/client/ayon_core/hosts/blender/plugins/publish/validate_instance_empty.py similarity index 92% rename from openpype/hosts/blender/plugins/publish/validate_instance_empty.py rename to client/ayon_core/hosts/blender/plugins/publish/validate_instance_empty.py index 51a1dcf6ca..f0f4106379 100644 --- a/openpype/hosts/blender/plugins/publish/validate_instance_empty.py +++ b/client/ayon_core/hosts/blender/plugins/publish/validate_instance_empty.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError class ValidateInstanceEmpty(pyblish.api.InstancePlugin): diff --git a/client/ayon_core/hosts/blender/plugins/publish/validate_mesh_has_uv.py b/client/ayon_core/hosts/blender/plugins/publish/validate_mesh_has_uv.py new file mode 100644 index 0000000000..9871dfeb4e --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/publish/validate_mesh_has_uv.py @@ -0,0 +1,66 @@ +from typing import List + +import bpy + +import pyblish.api + +from ayon_core.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin, + PublishValidationError +) +import ayon_core.hosts.blender.api.action + + +class ValidateMeshHasUvs( + pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin, +): + """Validate that the current mesh has UV's.""" + + order = ValidateContentsOrder + hosts = ["blender"] + families = ["model"] + label = "Mesh Has UVs" + actions = [ayon_core.hosts.blender.api.action.SelectInvalidAction] + optional = True + + @staticmethod + def has_uvs(obj: bpy.types.Object) -> bool: + """Check if an object has uv's.""" + if not obj.data.uv_layers: + return False + for uv_layer in obj.data.uv_layers: + for polygon in obj.data.polygons: + for loop_index in polygon.loop_indices: + if ( + loop_index >= len(uv_layer.data) + or not uv_layer.data[loop_index].uv + ): + return False + + return True + + @classmethod + def get_invalid(cls, instance) -> List: + invalid = [] + for obj in instance: + if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': + if obj.mode != "OBJECT": + cls.log.warning( + f"Mesh object {obj.name} should be in 'OBJECT' mode" + " to be properly checked." 
+ ) + if not cls.has_uvs(obj): + invalid.append(obj) + return invalid + + def process(self, instance): + if not self.is_active(instance.data): + return + + invalid = self.get_invalid(instance) + if invalid: + raise PublishValidationError( + f"Meshes found in instance without valid UV's: {invalid}" + ) diff --git a/client/ayon_core/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py b/client/ayon_core/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py new file mode 100644 index 0000000000..63b7dc7530 --- /dev/null +++ b/client/ayon_core/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py @@ -0,0 +1,43 @@ +from typing import List + +import bpy + +import pyblish.api + +from ayon_core.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin, + PublishValidationError +) +import ayon_core.hosts.blender.api.action + + +class ValidateMeshNoNegativeScale(pyblish.api.Validator, + OptionalPyblishPluginMixin): + """Ensure that meshes don't have a negative scale.""" + + order = ValidateContentsOrder + hosts = ["blender"] + families = ["model"] + label = "Mesh No Negative Scale" + actions = [ayon_core.hosts.blender.api.action.SelectInvalidAction] + + @staticmethod + def get_invalid(instance) -> List: + invalid = [] + for obj in instance: + if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': + if any(v < 0 for v in obj.scale): + invalid.append(obj) + return invalid + + def process(self, instance): + if not self.is_active(instance.data): + return + + invalid = self.get_invalid(instance) + if invalid: + names = ", ".join(obj.name for obj in invalid) + raise PublishValidationError( + f"Meshes found in instance with negative scale: {names}" + ) diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/client/ayon_core/hosts/blender/plugins/publish/validate_no_colons_in_name.py similarity index 89% rename from openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py rename to client/ayon_core/hosts/blender/plugins/publish/validate_no_colons_in_name.py index caf555b535..5620946f1e 100644 --- a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py +++ b/client/ayon_core/hosts/blender/plugins/publish/validate_no_colons_in_name.py @@ -4,8 +4,8 @@ import pyblish.api -import openpype.hosts.blender.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.blender.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, OptionalPyblishPluginMixin, PublishValidationError @@ -25,7 +25,7 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin, hosts = ["blender"] families = ["model", "rig"] label = "No Colons in names" - actions = [openpype.hosts.blender.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.blender.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance) -> List: diff --git a/openpype/hosts/blender/plugins/publish/validate_object_mode.py b/client/ayon_core/hosts/blender/plugins/publish/validate_object_mode.py similarity index 87% rename from openpype/hosts/blender/plugins/publish/validate_object_mode.py rename to client/ayon_core/hosts/blender/plugins/publish/validate_object_mode.py index ab5f4bb467..d215ffc1be 100644 --- a/openpype/hosts/blender/plugins/publish/validate_object_mode.py +++ b/client/ayon_core/hosts/blender/plugins/publish/validate_object_mode.py @@ -3,11 +3,11 @@ import bpy import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( OptionalPyblishPluginMixin, 
     PublishValidationError
 )
-import openpype.hosts.blender.api.action
+import ayon_core.hosts.blender.api.action


 class ValidateObjectIsInObjectMode(
@@ -20,7 +20,7 @@ class ValidateObjectIsInObjectMode(
     hosts = ["blender"]
     families = ["model", "rig", "layout"]
     label = "Validate Object Mode"
-    actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
+    actions = [ayon_core.hosts.blender.api.action.SelectInvalidAction]
     optional = False

     @staticmethod
diff --git a/openpype/hosts/blender/plugins/publish/validate_render_camera_is_set.py b/client/ayon_core/hosts/blender/plugins/publish/validate_render_camera_is_set.py
similarity index 94%
rename from openpype/hosts/blender/plugins/publish/validate_render_camera_is_set.py
rename to client/ayon_core/hosts/blender/plugins/publish/validate_render_camera_is_set.py
index 86d1fcc681..46bfe45739 100644
--- a/openpype/hosts/blender/plugins/publish/validate_render_camera_is_set.py
+++ b/client/ayon_core/hosts/blender/plugins/publish/validate_render_camera_is_set.py
@@ -2,7 +2,7 @@

 import pyblish.api

-from openpype.pipeline.publish import (
+from ayon_core.pipeline.publish import (
     OptionalPyblishPluginMixin,
     PublishValidationError
 )
diff --git a/client/ayon_core/hosts/blender/plugins/publish/validate_transform_zero.py b/client/ayon_core/hosts/blender/plugins/publish/validate_transform_zero.py
new file mode 100644
index 0000000000..267eff47e4
--- /dev/null
+++ b/client/ayon_core/hosts/blender/plugins/publish/validate_transform_zero.py
@@ -0,0 +1,55 @@
+from typing import List
+
+import mathutils
+import bpy
+
+import pyblish.api
+
+import ayon_core.hosts.blender.api.action
+from ayon_core.pipeline.publish import (
+    ValidateContentsOrder,
+    OptionalPyblishPluginMixin,
+    PublishValidationError
+)
+
+
+class ValidateTransformZero(pyblish.api.InstancePlugin,
+                            OptionalPyblishPluginMixin):
+    """Transforms can't have any values other than the defaults.
+
+    To solve this issue, try freezing the transforms. As long
+    as the translation and rotation values are zero and the
+    scale values are one, you're all good.
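+
+    Under the hood the object's local ``matrix_basis`` is compared against
+    the identity matrix (``mathutils.Matrix()``), so any residual local
+    transform marks the object as invalid.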
+ + """ + + order = ValidateContentsOrder + hosts = ["blender"] + families = ["model"] + label = "Transform Zero" + actions = [ayon_core.hosts.blender.api.action.SelectInvalidAction] + + _identity = mathutils.Matrix() + + @classmethod + def get_invalid(cls, instance) -> List: + invalid = [] + for obj in instance: + if ( + isinstance(obj, bpy.types.Object) + and obj.matrix_basis != cls._identity + ): + invalid.append(obj) + return invalid + + def process(self, instance): + if not self.is_active(instance.data): + return + + invalid = self.get_invalid(instance) + if invalid: + names = ", ".join(obj.name for obj in invalid) + raise PublishValidationError( + "Objects found in instance which do not" + f" have transform set to zero: {names}" + ) diff --git a/openpype/hosts/celaction/__init__.py b/client/ayon_core/hosts/celaction/__init__.py similarity index 100% rename from openpype/hosts/celaction/__init__.py rename to client/ayon_core/hosts/celaction/__init__.py diff --git a/client/ayon_core/hosts/celaction/addon.py b/client/ayon_core/hosts/celaction/addon.py new file mode 100644 index 0000000000..4573ee7e56 --- /dev/null +++ b/client/ayon_core/hosts/celaction/addon.py @@ -0,0 +1,31 @@ +import os +from ayon_core.modules import OpenPypeModule, IHostAddon + +CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class CelactionAddon(OpenPypeModule, IHostAddon): + name = "celaction" + host_name = "celaction" + + def initialize(self, module_settings): + self.enabled = True + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(CELACTION_ROOT_DIR, "hooks") + ] + + def add_implementation_envs(self, env, _app): + # Set default values if are not already set via settings + defaults = { + "LOGLEVEL": "DEBUG" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_workfile_extensions(self): + return [".scn"] diff --git a/openpype/hosts/celaction/hooks/pre_celaction_setup.py b/client/ayon_core/hosts/celaction/hooks/pre_celaction_setup.py similarity index 95% rename from openpype/hosts/celaction/hooks/pre_celaction_setup.py rename to client/ayon_core/hosts/celaction/hooks/pre_celaction_setup.py index 83aeab7c58..bf1b4937cd 100644 --- a/openpype/hosts/celaction/hooks/pre_celaction_setup.py +++ b/client/ayon_core/hosts/celaction/hooks/pre_celaction_setup.py @@ -2,9 +2,9 @@ import shutil import winreg import subprocess -from openpype.lib import get_openpype_execute_args -from openpype.lib.applications import PreLaunchHook, LaunchTypes -from openpype.hosts.celaction import CELACTION_ROOT_DIR +from ayon_core.lib import get_ayon_launcher_args +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.hosts.celaction import CELACTION_ROOT_DIR class CelactionPrelaunchHook(PreLaunchHook): @@ -38,7 +38,7 @@ def execute(self): path_to_cli = os.path.join( CELACTION_ROOT_DIR, "scripts", "publish_cli.py" ) - subprocess_args = get_openpype_execute_args("run", path_to_cli) + subprocess_args = get_ayon_launcher_args("run", path_to_cli) openpype_executable = subprocess_args.pop(0) workfile_settings = self.get_workfile_settings() diff --git a/openpype/hosts/aftereffects/plugins/__init__.py b/client/ayon_core/hosts/celaction/plugins/__init__.py similarity index 100% rename from openpype/hosts/aftereffects/plugins/__init__.py rename to client/ayon_core/hosts/celaction/plugins/__init__.py diff --git a/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py 
b/client/ayon_core/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py similarity index 100% rename from openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py rename to client/ayon_core/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py diff --git a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py b/client/ayon_core/hosts/celaction/plugins/publish/collect_celaction_instances.py similarity index 98% rename from openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py rename to client/ayon_core/hosts/celaction/plugins/publish/collect_celaction_instances.py index 875f15fcc5..d0f4c59290 100644 --- a/openpype/hosts/celaction/plugins/publish/collect_celaction_instances.py +++ b/client/ayon_core/hosts/celaction/plugins/publish/collect_celaction_instances.py @@ -1,7 +1,7 @@ import os import pyblish.api -from openpype.client import get_asset_name_identifier +from ayon_core.client import get_asset_name_identifier class CollectCelactionInstances(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/celaction/plugins/publish/collect_render_path.py b/client/ayon_core/hosts/celaction/plugins/publish/collect_render_path.py similarity index 100% rename from openpype/hosts/celaction/plugins/publish/collect_render_path.py rename to client/ayon_core/hosts/celaction/plugins/publish/collect_render_path.py diff --git a/openpype/hosts/celaction/plugins/publish/integrate_version_up.py b/client/ayon_core/hosts/celaction/plugins/publish/integrate_version_up.py similarity index 87% rename from openpype/hosts/celaction/plugins/publish/integrate_version_up.py rename to client/ayon_core/hosts/celaction/plugins/publish/integrate_version_up.py index dc08127a8a..c165b0c871 100644 --- a/openpype/hosts/celaction/plugins/publish/integrate_version_up.py +++ b/client/ayon_core/hosts/celaction/plugins/publish/integrate_version_up.py @@ -1,7 +1,9 @@ import shutil -import openpype + import pyblish.api +from ayon_core.lib import version_up + class VersionUpScene(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder + 0.5 @@ -12,7 +14,7 @@ class VersionUpScene(pyblish.api.ContextPlugin): def process(self, context): current_file = context.data.get('currentFile') - v_up = openpype.lib.version_up(current_file) + v_up = version_up(current_file) self.log.debug('Current file is: {}'.format(current_file)) self.log.debug('Version up: {}'.format(v_up)) diff --git a/openpype/hosts/celaction/resources/celaction_template_scene.scn b/client/ayon_core/hosts/celaction/resources/celaction_template_scene.scn similarity index 100% rename from openpype/hosts/celaction/resources/celaction_template_scene.scn rename to client/ayon_core/hosts/celaction/resources/celaction_template_scene.scn diff --git a/openpype/hosts/celaction/plugins/__init__.py b/client/ayon_core/hosts/celaction/scripts/__init__.py similarity index 100% rename from openpype/hosts/celaction/plugins/__init__.py rename to client/ayon_core/hosts/celaction/scripts/__init__.py diff --git a/client/ayon_core/hosts/celaction/scripts/publish_cli.py b/client/ayon_core/hosts/celaction/scripts/publish_cli.py new file mode 100644 index 0000000000..92019b8702 --- /dev/null +++ b/client/ayon_core/hosts/celaction/scripts/publish_cli.py @@ -0,0 +1,37 @@ +import os +import sys + +import pyblish.api +import pyblish.util + +import ayon_core.hosts.celaction +from ayon_core.lib import Logger +from ayon_core.tools.utils import host_tools +from ayon_core.pipeline import install_ayon_plugins + + +log = 
Logger.get_logger("celaction") + +PUBLISH_HOST = "celaction" +HOST_DIR = os.path.dirname(os.path.abspath(ayon_core.hosts.celaction.__file__)) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") + + +def main(): + # Registers pype's Global pyblish plugins + install_ayon_plugins() + + if os.path.exists(PUBLISH_PATH): + log.info(f"Registering path: {PUBLISH_PATH}") + pyblish.api.register_plugin_path(PUBLISH_PATH) + + pyblish.api.register_host(PUBLISH_HOST) + pyblish.api.register_target("local") + + return host_tools.show_publish() + + +if __name__ == "__main__": + result = main() + sys.exit(not bool(result)) diff --git a/openpype/hosts/flame/__init__.py b/client/ayon_core/hosts/flame/__init__.py similarity index 100% rename from openpype/hosts/flame/__init__.py rename to client/ayon_core/hosts/flame/__init__.py diff --git a/client/ayon_core/hosts/flame/addon.py b/client/ayon_core/hosts/flame/addon.py new file mode 100644 index 0000000000..e30d7cab08 --- /dev/null +++ b/client/ayon_core/hosts/flame/addon.py @@ -0,0 +1,35 @@ +import os +from ayon_core.modules import OpenPypeModule, IHostAddon + +HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class FlameAddon(OpenPypeModule, IHostAddon): + name = "flame" + host_name = "flame" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to DL_PYTHON_HOOK_PATH + env["DL_PYTHON_HOOK_PATH"] = os.path.join(HOST_DIR, "startup") + env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) + + # Set default values if are not already set via settings + defaults = { + "LOGLEVEL": "DEBUG" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(HOST_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".otoc"] diff --git a/openpype/hosts/flame/api/__init__.py b/client/ayon_core/hosts/flame/api/__init__.py similarity index 100% rename from openpype/hosts/flame/api/__init__.py rename to client/ayon_core/hosts/flame/api/__init__.py diff --git a/openpype/hosts/flame/api/batch_utils.py b/client/ayon_core/hosts/flame/api/batch_utils.py similarity index 100% rename from openpype/hosts/flame/api/batch_utils.py rename to client/ayon_core/hosts/flame/api/batch_utils.py diff --git a/openpype/hosts/flame/api/constants.py b/client/ayon_core/hosts/flame/api/constants.py similarity index 100% rename from openpype/hosts/flame/api/constants.py rename to client/ayon_core/hosts/flame/api/constants.py diff --git a/client/ayon_core/hosts/flame/api/lib.py b/client/ayon_core/hosts/flame/api/lib.py new file mode 100644 index 0000000000..efa23fe01e --- /dev/null +++ b/client/ayon_core/hosts/flame/api/lib.py @@ -0,0 +1,1272 @@ +import sys +import os +import re +import json +import pickle +import clique +import tempfile +import traceback +import itertools +import contextlib +import xml.etree.cElementTree as cET +from copy import deepcopy, copy +from xml.etree import ElementTree as ET +from pprint import pformat + +from ayon_core.lib import Logger, run_subprocess + +from .constants import ( + MARKER_COLOR, + MARKER_DURATION, + MARKER_NAME, + COLOR_MAP, + MARKER_PUBLISH_DEFAULT +) + +log = Logger.get_logger(__name__) + +FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") + + +class CTX: + # singleton used for passing data between api modules + app_framework = None + flame_apps = [] + selection = None + + 
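+# Illustrative usage (a sketch, not verbatim from this module): menu hooks
+# are expected to stash the active Flame selection on CTX before opening a
+# tool, so that other api modules can read it later; see
+# callback_selection() in api/menu.py:
+#
+#     CTX.selection = selection
+#     tools_helper.show_publish()
+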
+@contextlib.contextmanager +def io_preferences_file(klass, filepath, write=False): + try: + flag = "w" if write else "r" + yield open(filepath, flag) + + except IOError as _error: + klass.log.info("Unable to work with preferences `{}`: {}".format( + filepath, _error)) + + +class FlameAppFramework(object): + # flameAppFramework class takes care of preferences + + class prefs_dict(dict): + + def __init__(self, master, name, **kwargs): + self.name = name + self.master = master + if not self.master.get(self.name): + self.master[self.name] = {} + self.master[self.name].__init__() + + def __getitem__(self, k): + return self.master[self.name].__getitem__(k) + + def __setitem__(self, k, v): + return self.master[self.name].__setitem__(k, v) + + def __delitem__(self, k): + return self.master[self.name].__delitem__(k) + + def get(self, k, default=None): + return self.master[self.name].get(k, default) + + def setdefault(self, k, default=None): + return self.master[self.name].setdefault(k, default) + + def pop(self, *args, **kwargs): + return self.master[self.name].pop(*args, **kwargs) + + def update(self, mapping=(), **kwargs): + self.master[self.name].update(mapping, **kwargs) + + def __contains__(self, k): + return self.master[self.name].__contains__(k) + + def copy(self): # don"t delegate w/ super - dict.copy() -> dict :( + return type(self)(self) + + def keys(self): + return self.master[self.name].keys() + + @classmethod + def fromkeys(cls, keys, v=None): + return cls.master[cls.name].fromkeys(keys, v) + + def __repr__(self): + return "{0}({1})".format( + type(self).__name__, self.master[self.name].__repr__()) + + def master_keys(self): + return self.master.keys() + + def __init__(self): + self.name = self.__class__.__name__ + self.bundle_name = "OpenPypeFlame" + # self.prefs scope is limited to flame project and user + self.prefs = {} + self.prefs_user = {} + self.prefs_global = {} + self.log = log + + try: + import flame + self.flame = flame + self.flame_project_name = self.flame.project.current_project.name + self.flame_user_name = flame.users.current_user.name + except Exception: + self.flame = None + self.flame_project_name = None + self.flame_user_name = None + + import socket + self.hostname = socket.gethostname() + + if sys.platform == "darwin": + self.prefs_folder = os.path.join( + os.path.expanduser("~"), + "Library", + "Caches", + "OpenPype", + self.bundle_name + ) + elif sys.platform.startswith("linux"): + self.prefs_folder = os.path.join( + os.path.expanduser("~"), + ".OpenPype", + self.bundle_name) + + self.prefs_folder = os.path.join( + self.prefs_folder, + self.hostname, + ) + + self.log.info("[{}] waking up".format(self.__class__.__name__)) + + try: + self.load_prefs() + except RuntimeError: + self.save_prefs() + + # menu auto-refresh defaults + if not self.prefs_global.get("menu_auto_refresh"): + self.prefs_global["menu_auto_refresh"] = { + "media_panel": True, + "batch": True, + "main_menu": True, + "timeline_menu": True + } + + self.apps = [] + + def get_pref_file_paths(self): + + prefix = self.prefs_folder + os.path.sep + self.bundle_name + prefs_file_path = "_".join([ + prefix, self.flame_user_name, + self.flame_project_name]) + ".prefs" + prefs_user_file_path = "_".join([ + prefix, self.flame_user_name]) + ".prefs" + prefs_global_file_path = prefix + ".prefs" + + return (prefs_file_path, prefs_user_file_path, prefs_global_file_path) + + def load_prefs(self): + + (proj_pref_path, user_pref_path, + glob_pref_path) = self.get_pref_file_paths() + + with 
io_preferences_file(self, proj_pref_path) as prefs_file: + self.prefs = pickle.load(prefs_file) + self.log.info( + "Project - preferences contents:\n{}".format( + pformat(self.prefs) + )) + + with io_preferences_file(self, user_pref_path) as prefs_file: + self.prefs_user = pickle.load(prefs_file) + self.log.info( + "User - preferences contents:\n{}".format( + pformat(self.prefs_user) + )) + + with io_preferences_file(self, glob_pref_path) as prefs_file: + self.prefs_global = pickle.load(prefs_file) + self.log.info( + "Global - preferences contents:\n{}".format( + pformat(self.prefs_global) + )) + + return True + + def save_prefs(self): + # make sure the preference folder is available + if not os.path.isdir(self.prefs_folder): + try: + os.makedirs(self.prefs_folder) + except Exception: + self.log.info("Unable to create folder {}".format( + self.prefs_folder)) + return False + + # get all pref file paths + (proj_pref_path, user_pref_path, + glob_pref_path) = self.get_pref_file_paths() + + with io_preferences_file(self, proj_pref_path, True) as prefs_file: + pickle.dump(self.prefs, prefs_file) + self.log.info( + "Project - preferences contents:\n{}".format( + pformat(self.prefs) + )) + + with io_preferences_file(self, user_pref_path, True) as prefs_file: + pickle.dump(self.prefs_user, prefs_file) + self.log.info( + "User - preferences contents:\n{}".format( + pformat(self.prefs_user) + )) + + with io_preferences_file(self, glob_pref_path, True) as prefs_file: + pickle.dump(self.prefs_global, prefs_file) + self.log.info( + "Global - preferences contents:\n{}".format( + pformat(self.prefs_global) + )) + + return True + + +def get_current_project(): + import flame + return flame.project.current_project + + +def get_current_sequence(selection): + import flame + + def segment_to_sequence(_segment): + track = _segment.parent + version = track.parent + return version.parent + + process_timeline = None + + if len(selection) == 1: + if isinstance(selection[0], flame.PySequence): + process_timeline = selection[0] + if isinstance(selection[0], flame.PySegment): + process_timeline = segment_to_sequence(selection[0]) + else: + for segment in selection: + if isinstance(segment, flame.PySegment): + process_timeline = segment_to_sequence(segment) + break + + return process_timeline + + +def rescan_hooks(): + import flame + try: + flame.execute_shortcut("Rescan Python Hooks") + except Exception: + pass + + +def get_metadata(project_name, _log=None): + # TODO: can be replaced by MediaInfoFile class method + from adsk.libwiretapPythonClientAPI import ( + WireTapClient, + WireTapServerHandle, + WireTapNodeHandle, + WireTapStr + ) + + class GetProjectColorPolicy(object): + def __init__(self, host_name=None, _log=None): + # Create a connection to the Backburner manager using the Wiretap + # python API. 
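+            # (Note: the server handle below actually targets the host's
+            # IFFFS project database, "<hostname>:IFFFS", defaulting to
+            # localhost.)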
+            #
+            self.log = _log or log
+            self.host_name = host_name or "localhost"
+            self._wiretap_client = WireTapClient()
+            if not self._wiretap_client.init():
+                raise Exception("Could not initialize Wiretap Client")
+            self._server = WireTapServerHandle(
+                "{}:IFFFS".format(self.host_name))
+
+        def process(self, project_name):
+            policy_node_handle = WireTapNodeHandle(
+                self._server,
+                "/projects/{}/syncolor/policy".format(project_name)
+            )
+            self.log.info(policy_node_handle)
+
+            policy = WireTapStr()
+            if not policy_node_handle.getNodeTypeStr(policy):
+                self.log.warning(
+                    "Could not retrieve policy of '%s': %s" % (
+                        policy_node_handle.getNodeId().id(),
+                        policy_node_handle.lastError()
+                    )
+                )
+
+            return policy.c_str()
+
+    policy_wiretap = GetProjectColorPolicy(_log=_log)
+    return policy_wiretap.process(project_name)
+
+
+def get_segment_data_marker(segment, with_marker=None):
+    """
+    Get the OpenPype track item tag created by a creator or loader plugin.
+
+    Args:
+        segment (flame.PySegment): flame api object
+        with_marker (bool)[optional]: if true, also return the marker object
+
+    Returns:
+        dict: OpenPype tag data
+
+    Returns(with_marker=True):
+        flame.PyMarker, dict
+    """
+    for marker in segment.markers:
+        comment = marker.comment.get_value()
+        color = marker.colour.get_value()
+        name = marker.name.get_value()
+
+        if (name == MARKER_NAME) and (
+                color == COLOR_MAP[MARKER_COLOR]):
+            if not with_marker:
+                return json.loads(comment)
+            else:
+                return marker, json.loads(comment)
+
+
+def set_segment_data_marker(segment, data=None):
+    """
+    Set the OpenPype track item tag on the input segment.
+
+    Args:
+        segment (flame.PySegment): flame api object
+        data (dict)[optional]: tag data to store; it is merged with any
+            existing tag data and serialized to json in the marker comment
+    """
+    data = data or dict()
+
+    marker_data = get_segment_data_marker(segment, True)
+
+    if marker_data:
+        # get available openpype tag if any
+        marker, tag_data = marker_data
+        # update tag data with new data
+        tag_data.update(data)
+        # update marker with tag data
+        marker.comment = json.dumps(tag_data)
+    else:
+        # create a new marker and store the data on it
+        marker = create_segment_data_marker(segment)
+        # add tag data to marker's comment
+        marker.comment = json.dumps(data)
+
+
+def set_publish_attribute(segment, value):
+    """ Set Publish attribute in input Tag object
+
+    Args:
+        segment (flame.PySegment): flame api object
+        value (bool): True or False
+    """
+    tag_data = get_segment_data_marker(segment)
+    tag_data["publish"] = value
+
+    # set data to the publish attribute
+    set_segment_data_marker(segment, tag_data)
+
+
+def get_publish_attribute(segment):
+    """ Get Publish attribute from input Tag object
+
+    Args:
+        segment (flame.PySegment): flame api object
+
+    Returns:
+        bool: True or False
+    """
+    tag_data = get_segment_data_marker(segment)
+
+    if not tag_data:
+        set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT)
+        return MARKER_PUBLISH_DEFAULT
+
+    return tag_data["publish"]
+
+
+def create_segment_data_marker(segment):
+    """ Create OpenPype marker on a segment.
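+
+    The marker is placed at the midpoint of the segment's record range and
+    uses the predefined marker name, duration and colour constants.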
+ + Attributes: + segment (flame.PySegment): flame api object + + Returns: + flame.PyMarker: flame api object + """ + # get duration of segment + duration = segment.record_duration.relative_frame + # calculate start frame of the new marker + start_frame = int(segment.record_in.relative_frame) + int(duration / 2) + # create marker + marker = segment.create_marker(start_frame) + # set marker name + marker.name = MARKER_NAME + # set duration + marker.duration = MARKER_DURATION + # set colour + marker.colour = COLOR_MAP[MARKER_COLOR] # Red + + return marker + + +def get_sequence_segments(sequence, selected=False): + segments = [] + # loop versions in sequence + for ver in sequence.versions: + # loop track in versions + for track in ver.tracks: + # ignore all empty tracks and hidden too + if len(track.segments) == 0 and track.hidden: + continue + # loop all segment in remaining tracks + for segment in track.segments: + if segment.name.get_value() == "": + continue + if segment.hidden.get_value() is True: + continue + if ( + selected is True + and segment.selected.get_value() is not True + ): + continue + # add it to original selection + segments.append(segment) + return segments + + +@contextlib.contextmanager +def maintained_segment_selection(sequence): + """Maintain selection during context + + Attributes: + sequence (flame.PySequence): python api object + + Yield: + list of flame.PySegment + + Example: + >>> with maintained_segment_selection(sequence) as selected_segments: + ... for segment in selected_segments: + ... segment.selected = False + >>> print(segment.selected) + True + """ + selected_segments = get_sequence_segments(sequence, True) + try: + # do the operation on selected segments + yield selected_segments + finally: + # reset all selected clips + reset_segment_selection(sequence) + # select only original selection of segments + for segment in selected_segments: + segment.selected = True + + +def reset_segment_selection(sequence): + """Deselect all selected nodes + """ + for ver in sequence.versions: + for track in ver.tracks: + if len(track.segments) == 0 and track.hidden: + continue + for segment in track.segments: + segment.selected = False + + +def _get_shot_tokens_values(clip, tokens): + old_value = None + output = {} + + if not clip.shot_name: + return output + + old_value = clip.shot_name.get_value() + + for token in tokens: + clip.shot_name.set_value(token) + _key = str(re.sub("[<>]", "", token)).replace(" ", "_") + + try: + output[_key] = int(clip.shot_name.get_value()) + except ValueError: + output[_key] = clip.shot_name.get_value() + + clip.shot_name.set_value(old_value) + + return output + + +def get_segment_attributes(segment): + if segment.name.get_value() == "": + return None + + # Add timeline segment to tree + clip_data = { + "shot_name": segment.shot_name.get_value(), + "segment_name": segment.name.get_value(), + "segment_comment": segment.comment.get_value(), + "tape_name": segment.tape_name, + "source_name": segment.source_name, + "fpath": segment.file_path, + "PySegment": segment + } + + # head and tail with forward compatibility + if segment.head: + # `infinite` can be also returned + if isinstance(segment.head, str): + clip_data["segment_head"] = 0 + else: + clip_data["segment_head"] = int(segment.head) + if segment.tail: + # `infinite` can be also returned + if isinstance(segment.tail, str): + clip_data["segment_tail"] = 0 + else: + clip_data["segment_tail"] = int(segment.tail) + + # add all available shot tokens + shot_tokens = 
_get_shot_tokens_values(segment, [ + "", "", "", "", "", + "", "" + ]) + clip_data.update(shot_tokens) + + # populate shot source metadata + segment_attrs = [ + "record_duration", "record_in", "record_out", + "source_duration", "source_in", "source_out" + ] + segment_attrs_data = {} + for attr_name in segment_attrs: + if not hasattr(segment, attr_name): + continue + attr = getattr(segment, attr_name) + segment_attrs_data[attr_name] = str(attr).replace("+", ":") + + if attr_name in ["record_in", "record_out"]: + clip_data[attr_name] = attr.relative_frame + else: + clip_data[attr_name] = attr.frame + + clip_data["segment_timecodes"] = segment_attrs_data + + return clip_data + + +def get_clips_in_reels(project): + output_clips = [] + project_desktop = project.current_workspace.desktop + + for reel_group in project_desktop.reel_groups: + for reel in reel_group.reels: + for clip in reel.clips: + clip_data = { + "PyClip": clip, + "fps": float(str(clip.frame_rate)[:-4]) + } + + attrs = [ + "name", "width", "height", + "ratio", "sample_rate", "bit_depth" + ] + + for attr in attrs: + val = getattr(clip, attr) + clip_data[attr] = val + + version = clip.versions[-1] + track = version.tracks[-1] + for segment in track.segments: + segment_data = get_segment_attributes(segment) + clip_data.update(segment_data) + + output_clips.append(clip_data) + + return output_clips + + +def get_reformated_filename(filename, padded=True): + """ + Return fixed python expression path + + Args: + filename (str): file name + + Returns: + type: string with reformated path + + Example: + get_reformated_filename("plate.1001.exr") > plate.%04d.exr + + """ + found = FRAME_PATTERN.search(filename) + + if not found: + log.info("File name is not sequence: {}".format(filename)) + return filename + + padding = get_padding_from_filename(filename) + + replacement = "%0{}d".format(padding) if padded else "%d" + start_idx, end_idx = found.span(1) + + return replacement.join( + [filename[:start_idx], filename[end_idx:]] + ) + + +def get_padding_from_filename(filename): + """ + Return padding number from Flame path style + + Args: + filename (str): file name + + Returns: + int: padding number + + Example: + get_padding_from_filename("plate.0001.exr") > 4 + + """ + found = get_frame_from_filename(filename) + + return len(found) if found else None + + +def get_frame_from_filename(filename): + """ + Return sequence number from Flame path style + + Args: + filename (str): file name + + Returns: + int: sequence frame number + + Example: + def get_frame_from_filename(path): + ("plate.0001.exr") > 0001 + + """ + + found = re.findall(FRAME_PATTERN, filename) + + return found.pop() if found else None + + +@contextlib.contextmanager +def maintained_object_duplication(item): + """Maintain input item duplication + + Attributes: + item (any flame.PyObject): python api object + + Yield: + duplicate input PyObject type + """ + import flame + # Duplicate the clip to avoid modifying the original clip + duplicate = flame.duplicate(item) + + try: + # do the operation on selected segments + yield duplicate + finally: + # delete the item at the end + flame.delete(duplicate) + + +@contextlib.contextmanager +def maintained_temp_file_path(suffix=None): + _suffix = suffix or "" + + try: + # Store dumped json to temporary file + temporary_file = tempfile.mktemp( + suffix=_suffix, prefix="flame_maintained_") + yield temporary_file.replace("\\", "/") + + except IOError as _error: + raise IOError( + "Not able to create temp json file: {}".format(_error)) + + 
finally: + # Remove the temporary json + os.remove(temporary_file) + + +def get_clip_segment(flame_clip): + name = flame_clip.name.get_value() + version = flame_clip.versions[0] + track = version.tracks[0] + segments = track.segments + + if len(segments) < 1: + raise ValueError("Clip `{}` has no segments!".format(name)) + + if len(segments) > 1: + raise ValueError("Clip `{}` has too many segments!".format(name)) + + return segments[0] + + +def get_batch_group_from_desktop(name): + project = get_current_project() + project_desktop = project.current_workspace.desktop + + for bgroup in project_desktop.batch_groups: + if bgroup.name.get_value() in name: + return bgroup + + +class MediaInfoFile(object): + """Class to get media info file clip data + + Raises: + IOError: MEDIA_SCRIPT_PATH path doesn't exists + TypeError: Not able to generate clip xml data file + ET.ParseError: Missing clip in xml clip data + IOError: Not able to save xml clip data to file + + Attributes: + str: `MEDIA_SCRIPT_PATH` path to flame binary + logging.Logger: `log` logger + + TODO: add method for getting metadata to dict + """ + MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info" + + log = log + + _clip_data = None + _start_frame = None + _fps = None + _drop_mode = None + _file_pattern = None + + def __init__(self, path, logger=None): + + # replace log if any + if logger: + self.log = logger + + # test if `dl_get_media_info` path exists + self._validate_media_script_path() + + # derivate other feed variables + feed_basename = os.path.basename(path) + feed_dir = os.path.dirname(path) + feed_ext = os.path.splitext(feed_basename)[1][1:].lower() + + with maintained_temp_file_path(".clip") as tmp_path: + self.log.info("Temp File: {}".format(tmp_path)) + self._generate_media_info_file(tmp_path, feed_ext, feed_dir) + + # get collection containing feed_basename from path + self.file_pattern = self._get_collection( + feed_basename, feed_dir, feed_ext) + + if ( + not self.file_pattern + and os.path.exists(os.path.join(feed_dir, feed_basename)) + ): + self.file_pattern = feed_basename + + # get clip data and make them single if there is multiple + # clips data + xml_data = self._make_single_clip_media_info( + tmp_path, feed_basename, self.file_pattern) + self.log.debug("xml_data: {}".format(xml_data)) + self.log.debug("type: {}".format(type(xml_data))) + + # get all time related data and assign them + self._get_time_info_from_origin(xml_data) + self.log.debug("start_frame: {}".format(self.start_frame)) + self.log.debug("fps: {}".format(self.fps)) + self.log.debug("drop frame: {}".format(self.drop_mode)) + self.clip_data = xml_data + + def _get_collection(self, feed_basename, feed_dir, feed_ext): + """ Get collection string + + Args: + feed_basename (str): file base name + feed_dir (str): file's directory + feed_ext (str): file extension + + Raises: + AttributeError: feed_ext is not matching feed_basename + + Returns: + str: collection basename with range of sequence + """ + partialname = self._separate_file_head(feed_basename, feed_ext) + self.log.debug("__ partialname: {}".format(partialname)) + + # make sure partial input basename is having correct extensoon + if not partialname: + raise AttributeError( + "Wrong input attributes. 
Basename - {}, Ext - {}".format(
+                    feed_basename, feed_ext
+                )
+            )
+
+        # get all related files
+        files = [
+            f for f in os.listdir(feed_dir)
+            if partialname == self._separate_file_head(f, feed_ext)
+        ]
+
+        # ignore remainders as we don't need them
+        collections = clique.assemble(files)[0]
+
+        # in case no collection found return None
+        # it is probably just a single file
+        if not collections:
+            return
+
+        # we expect only one collection
+        collection = collections[0]
+
+        self.log.debug("__ collection: {}".format(collection))
+
+        if collection.is_contiguous():
+            return self._format_collection(collection)
+
+        # add `[` in front to make sure it won't capture
+        # a shot name with the same number
+        number_from_path = self._separate_number(feed_basename, feed_ext)
+        search_number_pattern = "[" + number_from_path
+        # convert to multiple collections
+        _continues_colls = collection.separate()
+        for _coll in _continues_colls:
+            coll_to_text = self._format_collection(
+                _coll, len(number_from_path))
+            self.log.debug("__ coll_to_text: {}".format(coll_to_text))
+            if search_number_pattern in coll_to_text:
+                return coll_to_text
+
+    @staticmethod
+    def _format_collection(collection, padding=None):
+        padding = padding or collection.padding
+        # if no holes then return collection
+        head = collection.format("{head}")
+        tail = collection.format("{tail}")
+        range_template = "[{{:0{0}d}}-{{:0{0}d}}]".format(
+            padding)
+        ranges = range_template.format(
+            min(collection.indexes),
+            max(collection.indexes)
+        )
+        # return as "head[first-last]tail"
+        return "{}{}{}".format(head, ranges, tail)
+
+    def _separate_file_head(self, basename, extension):
+        """ Get only the head, without sequence number and extension
+
+        Args:
+            basename (str): file base name
+            extension (str): file extension
+
+        Returns:
+            str: file head
+        """
+        # in case of a sequence file
+        found = re.findall(
+            r"(.*)[._][\d]*(?=.{})".format(extension),
+            basename,
+        )
+        if found:
+            return found.pop()
+
+        # in case of a single file
+        name, ext = os.path.splitext(basename)
+
+        if extension == ext[1:]:
+            return name
+
+    def _separate_number(self, basename, extension):
+        """ Get only the sequence number as a string
+
+        Args:
+            basename (str): file base name
+            extension (str): file extension
+
+        Returns:
+            str: number with padding
+        """
+        # in case of a sequence file
+        found = re.findall(
+            r"[._]([\d]*)(?=.{})".format(extension),
+            basename,
+        )
+        if found:
+            return found.pop()
+
+    @property
+    def clip_data(self):
+        """Clip's xml clip data
+
+        Returns:
+            xml.etree.ElementTree: xml data
+        """
+        return self._clip_data
+
+    @clip_data.setter
+    def clip_data(self, data):
+        self._clip_data = data
+
+    @property
+    def start_frame(self):
+        """ Clip's starting frame found in timecode
+
+        Returns:
+            int: number of frames
+        """
+        return self._start_frame
+
+    @start_frame.setter
+    def start_frame(self, number):
+        self._start_frame = int(number)
+
+    @property
+    def fps(self):
+        """ Clip's frame rate
+
+        Returns:
+            float: frame rate
+        """
+        return self._fps
+
+    @fps.setter
+    def fps(self, fl_number):
+        self._fps = float(fl_number)
+
+    @property
+    def drop_mode(self):
+        """ Clip's drop frame mode
+
+        Returns:
+            str: drop frame flag
+        """
+        return self._drop_mode
+
+    @drop_mode.setter
+    def drop_mode(self, text):
+        self._drop_mode = str(text)
+
+    @property
+    def file_pattern(self):
+        """Clip's file pattern
+
+        Returns:
+            str: file pattern. ex.
file.[1-2].exr + """ + return self._file_pattern + + @file_pattern.setter + def file_pattern(self, fpattern): + self._file_pattern = fpattern + + def _validate_media_script_path(self): + if not os.path.isfile(self.MEDIA_SCRIPT_PATH): + raise IOError("Media Script does not exist: `{}`".format( + self.MEDIA_SCRIPT_PATH)) + + def _generate_media_info_file(self, fpath, feed_ext, feed_dir): + """ Generate media info xml .clip file + + Args: + fpath (str): .clip file path + feed_ext (str): file extension to be filtered + feed_dir (str): look up directory + + Raises: + TypeError: Type error if it fails + """ + # Create cmd arguments for gettig xml file info file + cmd_args = [ + self.MEDIA_SCRIPT_PATH, + "-e", feed_ext, + "-o", fpath, + feed_dir + ] + + try: + # execute creation of clip xml template data + run_subprocess(cmd_args) + except TypeError as error: + raise TypeError( + "Error creating `{}` due: {}".format(fpath, error)) + + def _make_single_clip_media_info(self, fpath, feed_basename, path_pattern): + """ Separate only relative clip object form .clip file + + Args: + fpath (str): clip file path + feed_basename (str): search basename + path_pattern (str): search file pattern (file.[1-2].exr) + + Raises: + ET.ParseError: if nothing found + + Returns: + ET.Element: xml element data of matching clip + """ + with open(fpath) as f: + lines = f.readlines() + _added_root = itertools.chain( + "", deepcopy(lines)[1:], "") + new_root = ET.fromstringlist(_added_root) + + # find the clip which is matching to my input name + xml_clips = new_root.findall("clip") + matching_clip = None + for xml_clip in xml_clips: + clip_name = xml_clip.find("name").text + self.log.debug("__ clip_name: `{}`".format(clip_name)) + if clip_name not in feed_basename: + continue + + # test path pattern + for out_track in xml_clip.iter("track"): + for out_feed in out_track.iter("feed"): + for span in out_feed.iter("span"): + # start frame + span_path = span.find("path") + self.log.debug( + "__ span_path.text: {}, path_pattern: {}".format( + span_path.text, path_pattern + ) + ) + if path_pattern in span_path.text: + matching_clip = xml_clip + + if matching_clip is None: + # return warning there is missing clip + raise ET.ParseError( + "Missing clip in `{}`. 
Available clips {}".format( + feed_basename, [ + xml_clip.find("name").text + for xml_clip in xml_clips + ] + )) + + return matching_clip + + def _get_time_info_from_origin(self, xml_data): + """Set time info to class attributes + + Args: + xml_data (ET.Element): clip data + """ + try: + for out_track in xml_data.iter("track"): + for out_feed in out_track.iter("feed"): + # start frame + out_feed_nb_ticks_obj = out_feed.find( + "startTimecode/nbTicks") + self.start_frame = out_feed_nb_ticks_obj.text + + # fps + out_feed_fps_obj = out_feed.find( + "startTimecode/rate") + self.fps = out_feed_fps_obj.text + + # drop frame mode + out_feed_drop_mode_obj = out_feed.find( + "startTimecode/dropMode") + self.drop_mode = out_feed_drop_mode_obj.text + break + except Exception as msg: + self.log.warning(msg) + + @staticmethod + def write_clip_data_to_file(fpath, xml_element_data): + """ Write xml element of clip data to file + + Args: + fpath (string): file path + xml_element_data (xml.etree.ElementTree.Element): xml data + + Raises: + IOError: If data could not be written to file + """ + try: + # save it as new file + tree = cET.ElementTree(xml_element_data) + tree.write( + fpath, xml_declaration=True, + method="xml", encoding="UTF-8" + ) + except IOError as error: + raise IOError( + "Not able to write data to file: {}".format(error)) + + +class TimeEffectMetadata(object): + log = log + _data = {} + _retime_modes = { + 0: "speed", + 1: "timewarp", + 2: "duration" + } + + def __init__(self, segment, logger=None): + if logger: + self.log = logger + + self._data = self._get_metadata(segment) + + @property + def data(self): + """ Returns timewarp effect data + + Returns: + dict: retime data + """ + return self._data + + def _get_metadata(self, segment): + effects = segment.effects or [] + for effect in effects: + if effect.type == "Timewarp": + with maintained_temp_file_path(".timewarp_node") as tmp_path: + self.log.info("Temp File: {}".format(tmp_path)) + effect.save_setup(tmp_path) + return self._get_attributes_from_xml(tmp_path) + + return {} + + def _get_attributes_from_xml(self, tmp_path): + with open(tmp_path, "r") as tw_setup_file: + tw_setup_string = tw_setup_file.read() + tw_setup_file.close() + + tw_setup_xml = ET.fromstring(tw_setup_string) + tw_setup = self._dictify(tw_setup_xml) + # pprint(tw_setup) + try: + tw_setup_state = tw_setup["Setup"]["State"][0] + mode = int( + tw_setup_state["TW_RetimerMode"][0]["_text"] + ) + r_data = { + "type": self._retime_modes[mode], + "effectStart": int( + tw_setup["Setup"]["Base"][0]["Range"][0]["Start"]), + "effectEnd": int( + tw_setup["Setup"]["Base"][0]["Range"][0]["End"]) + } + + if mode == 0: # speed + r_data[self._retime_modes[mode]] = float( + tw_setup_state["TW_Speed"] + [0]["Channel"][0]["Value"][0]["_text"] + ) / 100 + elif mode == 1: # timewarp + print("timing") + r_data[self._retime_modes[mode]] = self._get_anim_keys( + tw_setup_state["TW_Timing"] + ) + elif mode == 2: # duration + r_data[self._retime_modes[mode]] = { + "start": { + "source": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][0]["Value"][0]["_text"] + ), + "timeline": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][0]["Frame"][0]["_text"] + ) + }, + "end": { + "source": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][1]["Value"][0]["_text"] + ), + "timeline": int( + tw_setup_state["TW_DurationTiming"][0]["Channel"] + [0]["KFrames"][0]["Key"][1]["Frame"][0]["_text"] + ) + } + } 
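+            # Any unexpected setup structure lands in the except branch
+            # below: the traceback is logged and no retime data is returned.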
+ except Exception: + lines = traceback.format_exception(*sys.exc_info()) + self.log.error("\n".join(lines)) + return + + return r_data + + def _get_anim_keys(self, setup_cat, index=None): + return_data = { + "extrapolation": ( + setup_cat[0]["Channel"][0]["Extrap"][0]["_text"] + ), + "animKeys": [] + } + for key in setup_cat[0]["Channel"][0]["KFrames"][0]["Key"]: + if index and int(key["Index"]) != index: + continue + key_data = { + "source": float(key["Value"][0]["_text"]), + "timeline": float(key["Frame"][0]["_text"]), + "index": int(key["Index"]), + "curveMode": key["CurveMode"][0]["_text"], + "curveOrder": key["CurveOrder"][0]["_text"] + } + if key.get("TangentMode"): + key_data["tangentMode"] = key["TangentMode"][0]["_text"] + + return_data["animKeys"].append(key_data) + + return return_data + + def _dictify(self, xml_, root=True): + """ Convert xml object to dictionary + + Args: + xml_ (xml.etree.ElementTree.Element): xml data + root (bool, optional): is root available. Defaults to True. + + Returns: + dict: dictionarized xml + """ + + if root: + return {xml_.tag: self._dictify(xml_, False)} + + d = copy(xml_.attrib) + if xml_.text: + d["_text"] = xml_.text + + for x in xml_.findall("./*"): + if x.tag not in d: + d[x.tag] = [] + d[x.tag].append(self._dictify(x, False)) + return d diff --git a/client/ayon_core/hosts/flame/api/menu.py b/client/ayon_core/hosts/flame/api/menu.py new file mode 100644 index 0000000000..7e880483f5 --- /dev/null +++ b/client/ayon_core/hosts/flame/api/menu.py @@ -0,0 +1,256 @@ +from copy import deepcopy +from pprint import pformat + +from qtpy import QtWidgets + +from ayon_core.pipeline import get_current_project_name +from ayon_core.tools.utils.host_tools import HostToolsHelper + +menu_group_name = 'OpenPype' + +default_flame_export_presets = { + 'Publish': { + 'PresetVisibility': 2, + 'PresetType': 0, + 'PresetFile': 'OpenEXR/OpenEXR (16-bit fp PIZ).xml' + }, + 'Preview': { + 'PresetVisibility': 3, + 'PresetType': 2, + 'PresetFile': 'Generate Preview.xml' + }, + 'Thumbnail': { + 'PresetVisibility': 3, + 'PresetType': 0, + 'PresetFile': 'Generate Thumbnail.xml' + } +} + + +def callback_selection(selection, function): + import ayon_core.hosts.flame.api as opfapi + opfapi.CTX.selection = selection + print("Hook Selection: \n\t{}".format( + pformat({ + index: (type(item), item.name) + for index, item in enumerate(opfapi.CTX.selection)}) + )) + function() + + +class _FlameMenuApp(object): + def __init__(self, framework): + self.name = self.__class__.__name__ + self.framework = framework + self.log = framework.log + self.menu_group_name = menu_group_name + self.dynamic_menu_data = {} + + # flame module is only available when a + # flame project is loaded and initialized + self.flame = None + try: + import flame + self.flame = flame + except ImportError: + self.flame = None + + self.flame_project_name = flame.project.current_project.name + self.prefs = self.framework.prefs_dict(self.framework.prefs, self.name) + self.prefs_user = self.framework.prefs_dict( + self.framework.prefs_user, self.name) + self.prefs_global = self.framework.prefs_dict( + self.framework.prefs_global, self.name) + + self.mbox = QtWidgets.QMessageBox() + project_name = get_current_project_name() + self.menu = { + "actions": [{ + 'name': project_name or "project", + 'isEnabled': False + }], + "name": self.menu_group_name + } + self.tools_helper = HostToolsHelper() + + def __getattr__(self, name): + def method(*args, **kwargs): + print('calling %s' % name) + return method + + def rescan(self, 
*args, **kwargs): + if not self.flame: + try: + import flame + self.flame = flame + except ImportError: + self.flame = None + + if self.flame: + self.flame.execute_shortcut('Rescan Python Hooks') + self.log.info('Rescan Python Hooks') + + +class FlameMenuProjectConnect(_FlameMenuApp): + + # flameMenuProjectconnect app takes care of the preferences dialog as well + + def __init__(self, framework): + _FlameMenuApp.__init__(self, framework) + + def __getattr__(self, name): + def method(*args, **kwargs): + project = self.dynamic_menu_data.get(name) + if project: + self.link_project(project) + return method + + def build_menu(self): + if not self.flame: + return [] + + menu = deepcopy(self.menu) + + menu['actions'].append({ + "name": "Workfiles...", + "execute": lambda x: self.tools_helper.show_workfiles() + }) + menu['actions'].append({ + "name": "Load...", + "execute": lambda x: self.tools_helper.show_loader() + }) + menu['actions'].append({ + "name": "Manage...", + "execute": lambda x: self.tools_helper.show_scene_inventory() + }) + menu['actions'].append({ + "name": "Library...", + "execute": lambda x: self.tools_helper.show_library_loader() + }) + return menu + + def refresh(self, *args, **kwargs): + self.rescan() + + def rescan(self, *args, **kwargs): + if not self.flame: + try: + import flame + self.flame = flame + except ImportError: + self.flame = None + + if self.flame: + self.flame.execute_shortcut('Rescan Python Hooks') + self.log.info('Rescan Python Hooks') + + +class FlameMenuTimeline(_FlameMenuApp): + + # flameMenuProjectconnect app takes care of the preferences dialog as well + + def __init__(self, framework): + _FlameMenuApp.__init__(self, framework) + + def __getattr__(self, name): + def method(*args, **kwargs): + project = self.dynamic_menu_data.get(name) + if project: + self.link_project(project) + return method + + def build_menu(self): + if not self.flame: + return [] + + menu = deepcopy(self.menu) + + menu['actions'].append({ + "name": "Create...", + "execute": lambda x: callback_selection( + x, self.tools_helper.show_creator) + }) + menu['actions'].append({ + "name": "Publish...", + "execute": lambda x: callback_selection( + x, self.tools_helper.show_publish) + }) + menu['actions'].append({ + "name": "Load...", + "execute": lambda x: self.tools_helper.show_loader() + }) + menu['actions'].append({ + "name": "Manage...", + "execute": lambda x: self.tools_helper.show_scene_inventory() + }) + menu['actions'].append({ + "name": "Library...", + "execute": lambda x: self.tools_helper.show_library_loader() + }) + return menu + + def refresh(self, *args, **kwargs): + self.rescan() + + def rescan(self, *args, **kwargs): + if not self.flame: + try: + import flame + self.flame = flame + except ImportError: + self.flame = None + + if self.flame: + self.flame.execute_shortcut('Rescan Python Hooks') + self.log.info('Rescan Python Hooks') + + +class FlameMenuUniversal(_FlameMenuApp): + + # flameMenuProjectconnect app takes care of the preferences dialog as well + + def __init__(self, framework): + _FlameMenuApp.__init__(self, framework) + + def __getattr__(self, name): + def method(*args, **kwargs): + project = self.dynamic_menu_data.get(name) + if project: + self.link_project(project) + return method + + def build_menu(self): + if not self.flame: + return [] + + menu = deepcopy(self.menu) + + menu['actions'].append({ + "name": "Load...", + "execute": lambda x: callback_selection( + x, self.tools_helper.show_loader) + }) + menu['actions'].append({ + "name": "Manage...", + "execute": 
lambda x: self.tools_helper.show_scene_inventory() + }) + menu['actions'].append({ + "name": "Library...", + "execute": lambda x: self.tools_helper.show_library_loader() + }) + return menu + + def refresh(self, *args, **kwargs): + self.rescan() + + def rescan(self, *args, **kwargs): + if not self.flame: + try: + import flame + self.flame = flame + except ImportError: + self.flame = None + + if self.flame: + self.flame.execute_shortcut('Rescan Python Hooks') + self.log.info('Rescan Python Hooks') diff --git a/client/ayon_core/hosts/flame/api/pipeline.py b/client/ayon_core/hosts/flame/api/pipeline.py new file mode 100644 index 0000000000..88375f829f --- /dev/null +++ b/client/ayon_core/hosts/flame/api/pipeline.py @@ -0,0 +1,179 @@ +""" +Basic avalon integration +""" +import os +import contextlib +from pyblish import api as pyblish + +from ayon_core.lib import Logger +from ayon_core.pipeline import ( + register_loader_plugin_path, + register_creator_plugin_path, + deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, +) +from .lib import ( + set_segment_data_marker, + set_publish_attribute, + maintained_segment_selection, + get_current_sequence, + reset_segment_selection +) +from .. import HOST_DIR + +API_DIR = os.path.join(HOST_DIR, "api") +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") + +AVALON_CONTAINERS = "AVALON_CONTAINERS" + +log = Logger.get_logger(__name__) + + +def install(): + pyblish.register_host("flame") + pyblish.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + log.info("OpenPype Flame plug-ins registered ...") + + # register callback for switching publishable + pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) + + log.info("OpenPype Flame host installed ...") + + +def uninstall(): + pyblish.deregister_host("flame") + + log.info("Deregistering Flame plug-ins..") + pyblish.deregister_plugin_path(PUBLISH_PATH) + deregister_loader_plugin_path(LOAD_PATH) + deregister_creator_plugin_path(CREATE_PATH) + + # register callback for switching publishable + pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) + + log.info("OpenPype Flame host uninstalled ...") + + +def containerise(flame_clip_segment, + name, + namespace, + context, + loader=None, + data=None): + + data_imprint = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + } + + if data: + for k, v in data.items(): + data_imprint[k] = v + + log.debug("_ data_imprint: {}".format(data_imprint)) + + set_segment_data_marker(flame_clip_segment, data_imprint) + + return True + + +def ls(): + """List available containers. + """ + return [] + + +def parse_container(tl_segment, validate=True): + """Return container data from timeline_item's openpype tag. + """ + # TODO: parse_container + pass + + +def update_container(tl_segment, data=None): + """Update container data to input timeline_item's openpype tag. 
+ """ + # TODO: update_container + pass + + +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node passthrough states on instance toggles.""" + + log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( + instance, old_value, new_value)) + + # from ayon_core.hosts.resolve import ( + # set_publish_attribute + # ) + + # # Whether instances should be passthrough based on new value + # timeline_item = instance.data["item"] + # set_publish_attribute(timeline_item, new_value) + + +def remove_instance(instance): + """Remove instance marker from track item.""" + # TODO: remove_instance + pass + + +def list_instances(): + """List all created instances from current workfile.""" + # TODO: list_instances + pass + + +def imprint(segment, data=None): + """ + Adding openpype data to Flame timeline segment. + + Also including publish attribute into tag. + + Arguments: + segment (flame.PySegment)): flame api object + data (dict): Any data which needst to be imprinted + + Examples: + data = { + 'asset': 'sq020sh0280', + 'family': 'render', + 'subset': 'subsetMain' + } + """ + data = data or {} + + set_segment_data_marker(segment, data) + + # add publish attribute + set_publish_attribute(segment, True) + + +@contextlib.contextmanager +def maintained_selection(): + import flame + from .lib import CTX + + # check if segment is selected + if isinstance(CTX.selection[0], flame.PySegment): + sequence = get_current_sequence(CTX.selection) + + try: + with maintained_segment_selection(sequence) as selected: + yield + finally: + # reset all selected clips + reset_segment_selection(sequence) + # select only original selection of segments + for segment in selected: + segment.selected = True diff --git a/client/ayon_core/hosts/flame/api/plugin.py b/client/ayon_core/hosts/flame/api/plugin.py new file mode 100644 index 0000000000..720e6792b0 --- /dev/null +++ b/client/ayon_core/hosts/flame/api/plugin.py @@ -0,0 +1,1089 @@ +import os +import re +import shutil +from copy import deepcopy +from xml.etree import ElementTree as ET + +import qargparse +from qtpy import QtCore, QtWidgets + +from ayon_core import style +from ayon_core.lib import Logger, StringTemplate +from ayon_core.pipeline import LegacyCreator, LoaderPlugin +from ayon_core.pipeline.colorspace import get_remapped_colorspace_to_native +from ayon_core.settings import get_current_project_settings + +from . import constants +from . import lib as flib +from . 
import pipeline as fpipeline + +log = Logger.get_logger(__name__) + + +class CreatorWidget(QtWidgets.QDialog): + + # output items + items = dict() + _results_back = None + + def __init__(self, name, info, ui_inputs, parent=None): + super(CreatorWidget, self).__init__(parent) + + self.setObjectName(name) + + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.CustomizeWindowHint + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowStaysOnTopHint + ) + self.setWindowTitle(name or "Pype Creator Input") + self.resize(500, 700) + + # Where inputs and labels are set + self.content_widget = [QtWidgets.QWidget(self)] + top_layout = QtWidgets.QFormLayout(self.content_widget[0]) + top_layout.setObjectName("ContentLayout") + top_layout.addWidget(Spacer(5, self)) + + # first add widget tag line + top_layout.addWidget(QtWidgets.QLabel(info)) + + # main dynamic layout + self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAsNeeded) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOn) + self.scroll_area.setHorizontalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOff) + self.scroll_area.setWidgetResizable(True) + + self.content_widget.append(self.scroll_area) + + scroll_widget = QtWidgets.QWidget(self) + in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) + self.content_layout = [in_scroll_area] + + # add preset data into input widget layout + self.items = self.populate_widgets(ui_inputs) + self.scroll_area.setWidget(scroll_widget) + + # Confirmation buttons + btns_widget = QtWidgets.QWidget(self) + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + + cancel_btn = QtWidgets.QPushButton("Cancel") + btns_layout.addWidget(cancel_btn) + + ok_btn = QtWidgets.QPushButton("Ok") + btns_layout.addWidget(ok_btn) + + # Main layout of the dialog + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(10, 10, 10, 10) + main_layout.setSpacing(0) + + # adding content widget + for w in self.content_widget: + main_layout.addWidget(w) + + main_layout.addWidget(btns_widget) + + ok_btn.clicked.connect(self._on_ok_clicked) + cancel_btn.clicked.connect(self._on_cancel_clicked) + + self.setStyleSheet(style.load_stylesheet()) + + @classmethod + def set_results_back(cls, value): + cls._results_back = value + + @classmethod + def get_results_back(cls): + return cls._results_back + + def _on_ok_clicked(self): + log.debug("ok is clicked: {}".format(self.items)) + results_back = self._values(self.items) + self.set_results_back(results_back) + self.close() + + def _on_cancel_clicked(self): + self.set_results_back(None) + self.close() + + def showEvent(self, event): + self.set_results_back(None) + super(CreatorWidget, self).showEvent(event) + + def _values(self, data, new_data=None): + new_data = new_data or dict() + for k, v in data.items(): + new_data[k] = { + "target": None, + "value": None + } + if v["type"] == "dict": + new_data[k]["target"] = v["target"] + new_data[k]["value"] = self._values(v["value"]) + if v["type"] == "section": + new_data.pop(k) + new_data = self._values(v["value"], new_data) + elif getattr(v["value"], "currentText", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].currentText() + elif getattr(v["value"], "isChecked", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].isChecked() + elif getattr(v["value"], "value", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = 
v["value"].value() + elif getattr(v["value"], "text", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].text() + + return new_data + + def camel_case_split(self, text): + matches = re.finditer( + '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) + return " ".join([str(m.group(0)).capitalize() for m in matches]) + + def create_row(self, layout, type_name, text, **kwargs): + # get type attribute from qwidgets + attr = getattr(QtWidgets, type_name) + + # convert label text to normal capitalized text with spaces + label_text = self.camel_case_split(text) + + # assign the new text to label widget + label = QtWidgets.QLabel(label_text) + label.setObjectName("LineLabel") + + # create attribute name text strip of spaces + attr_name = text.replace(" ", "") + + # create attribute and assign default values + setattr( + self, + attr_name, + attr(parent=self)) + + # assign the created attribute to variable + item = getattr(self, attr_name) + for func, val in kwargs.items(): + if getattr(item, func): + func_attr = getattr(item, func) + func_attr(val) + + # add to layout + layout.addRow(label, item) + + return item + + def populate_widgets(self, data, content_layout=None): + """ + Populate widget from input dict. + + Each plugin has its own set of widget rows defined in dictionary + each row values should have following keys: `type`, `target`, + `label`, `order`, `value` and optionally also `toolTip`. + + Args: + data (dict): widget rows or organized groups defined + by types `dict` or `section` + content_layout (QtWidgets.QFormLayout)[optional]: used when nesting + + Returns: + dict: redefined data dict updated with created widgets + + """ + + content_layout = content_layout or self.content_layout[-1] + # fix order of process by defined order value + ordered_keys = list(data.keys()) + for k, v in data.items(): + try: + # try removing a key from index which should + # be filled with new + ordered_keys.pop(v["order"]) + except IndexError: + pass + # add key into correct order + ordered_keys.insert(v["order"], k) + + # process ordered + for k in ordered_keys: + v = data[k] + tool_tip = v.get("toolTip", "") + if v["type"] == "dict": + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) + + # adding nested layout with label + self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + + nested_content_layout = QtWidgets.QFormLayout( + self.content_layout[-1]) + nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) + + # add nested key as label + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + if v["type"] == "section": + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) + + # adding nested layout with label + self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + + nested_content_layout = QtWidgets.QFormLayout( + self.content_layout[-1]) + 
nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) + + # add nested key as label + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + elif v["type"] == "QLineEdit": + data[k]["value"] = self.create_row( + content_layout, "QLineEdit", v["label"], + setText=v["value"], setToolTip=tool_tip) + elif v["type"] == "QComboBox": + data[k]["value"] = self.create_row( + content_layout, "QComboBox", v["label"], + addItems=v["value"], setToolTip=tool_tip) + elif v["type"] == "QCheckBox": + data[k]["value"] = self.create_row( + content_layout, "QCheckBox", v["label"], + setChecked=v["value"], setToolTip=tool_tip) + elif v["type"] == "QSpinBox": + data[k]["value"] = self.create_row( + content_layout, "QSpinBox", v["label"], + setValue=v["value"], setMinimum=0, + setMaximum=100000, setToolTip=tool_tip) + return data + + +class Spacer(QtWidgets.QWidget): + def __init__(self, height, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + + self.setFixedHeight(height) + + real_spacer = QtWidgets.QWidget(self) + real_spacer.setObjectName("Spacer") + real_spacer.setFixedHeight(height) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(real_spacer) + + self.setLayout(layout) + + +class Creator(LegacyCreator): + """Creator class wrapper + """ + clip_color = constants.COLOR_MAP["purple"] + rename_index = None + + def __init__(self, *args, **kwargs): + super(Creator, self).__init__(*args, **kwargs) + self.presets = get_current_project_settings()[ + "flame"]["create"].get(self.__class__.__name__, {}) + + # adding basic current context flame objects + self.project = flib.get_current_project() + self.sequence = flib.get_current_sequence(flib.CTX.selection) + + if (self.options or {}).get("useSelection"): + self.selected = flib.get_sequence_segments(self.sequence, True) + else: + self.selected = flib.get_sequence_segments(self.sequence) + + def create_widget(self, *args, **kwargs): + widget = CreatorWidget(*args, **kwargs) + widget.exec_() + return widget.get_results_back() + + +class PublishableClip: + """ + Convert a segment to publishable instance + + Args: + segment (flame.PySegment): flame api object + kwargs (optional): additional data needed for rename=True (presets) + + Returns: + flame.PySegment: flame api object + """ + vertical_clip_match = {} + marker_data = {} + types = { + "shot": "shot", + "folder": "folder", + "episode": "episode", + "sequence": "sequence", + "track": "sequence", + } + + # parents search pattern + parents_search_pattern = r"\{([a-z]*?)\}" + + # default templates for non-ui use + rename_default = False + hierarchy_default = "{_folder_}/{_sequence_}/{_track_}" + clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" + subset_name_default = "[ track name ]" + review_track_default = "[ none ]" + subset_family_default = "plate" + count_from_default = 10 + count_steps_default = 10 + vertical_sync_default = False + driving_layer_default = "" + index_from_segment_default = False + use_shot_name_default = False + include_handles_default = False + retimed_handles_default = True + retimed_framerange_default = True + + def __init__(self, segment, **kwargs): + self.rename_index = kwargs["rename_index"] + self.family = kwargs["family"] + self.log = kwargs["log"] + + # get main parent objects + self.current_segment = segment + sequence_name = flib.get_current_sequence([segment]).name.get_value() + self.sequence_name = 
str(sequence_name).replace(" ", "_") + + self.clip_data = flib.get_segment_attributes(segment) + # segment (clip) main attributes + self.cs_name = self.clip_data["segment_name"] + self.cs_index = int(self.clip_data["segment"]) + self.shot_name = self.clip_data["shot_name"] + + # get track name and index + self.track_index = int(self.clip_data["track"]) + track_name = self.clip_data["track_name"] + self.track_name = str(track_name).replace(" ", "_").replace( + "*", "noname{}".format(self.track_index)) + + # adding tag.family into tag + if kwargs.get("avalon"): + self.marker_data.update(kwargs["avalon"]) + + # add publish attribute to marker data + self.marker_data.update({"publish": True}) + + # adding ui inputs if any + self.ui_inputs = kwargs.get("ui_inputs", {}) + + self.log.info("Inside of plugin: {}".format( + self.marker_data + )) + # populate default data before we get other attributes + self._populate_segment_default_data() + + # use all populated default data to create all important attributes + self._populate_attributes() + + # create parents with correct types + self._create_parents() + + def convert(self): + + # solve segment data and add them to marker data + self._convert_to_marker_data() + + # if track name is in review track name and also if driving track name + # is not in review track name: skip tag creation + if (self.track_name in self.review_layer) and ( + self.driving_layer not in self.review_layer): + return + + # deal with clip name + new_name = self.marker_data.pop("newClipName") + + if self.rename and not self.use_shot_name: + # rename segment + self.current_segment.name = str(new_name) + self.marker_data["asset"] = str(new_name) + elif self.use_shot_name: + self.marker_data["asset"] = self.shot_name + self.marker_data["hierarchyData"]["shot"] = self.shot_name + else: + self.marker_data["asset"] = self.cs_name + self.marker_data["hierarchyData"]["shot"] = self.cs_name + + if self.marker_data["heroTrack"] and self.review_layer: + self.marker_data["reviewTrack"] = self.review_layer + else: + self.marker_data["reviewTrack"] = None + + # create pype tag on track_item and add data + fpipeline.imprint(self.current_segment, self.marker_data) + + return self.current_segment + + def _populate_segment_default_data(self): + """ Populate default formatting data from segment. """ + + self.current_segment_default_data = { + "_folder_": "shots", + "_sequence_": self.sequence_name, + "_track_": self.track_name, + "_clip_": self.cs_name, + "_trackIndex_": self.track_index, + "_clipIndex_": self.cs_index + } + + def _populate_attributes(self): + """ Populate main object attributes. 
""" + # segment frame range and parent track name for vertical sync check + self.clip_in = int(self.clip_data["record_in"]) + self.clip_out = int(self.clip_data["record_out"]) + + # define ui inputs if non gui mode was used + self.shot_num = self.cs_index + self.log.debug( + "____ self.shot_num: {}".format(self.shot_num)) + + # ui_inputs data or default values if gui was not used + self.rename = self.ui_inputs.get( + "clipRename", {}).get("value") or self.rename_default + self.use_shot_name = self.ui_inputs.get( + "useShotName", {}).get("value") or self.use_shot_name_default + self.clip_name = self.ui_inputs.get( + "clipName", {}).get("value") or self.clip_name_default + self.hierarchy = self.ui_inputs.get( + "hierarchy", {}).get("value") or self.hierarchy_default + self.hierarchy_data = self.ui_inputs.get( + "hierarchyData", {}).get("value") or \ + self.current_segment_default_data.copy() + self.index_from_segment = self.ui_inputs.get( + "segmentIndex", {}).get("value") or self.index_from_segment_default + self.count_from = self.ui_inputs.get( + "countFrom", {}).get("value") or self.count_from_default + self.count_steps = self.ui_inputs.get( + "countSteps", {}).get("value") or self.count_steps_default + self.subset_name = self.ui_inputs.get( + "subsetName", {}).get("value") or self.subset_name_default + self.subset_family = self.ui_inputs.get( + "subsetFamily", {}).get("value") or self.subset_family_default + self.vertical_sync = self.ui_inputs.get( + "vSyncOn", {}).get("value") or self.vertical_sync_default + self.driving_layer = self.ui_inputs.get( + "vSyncTrack", {}).get("value") or self.driving_layer_default + self.review_track = self.ui_inputs.get( + "reviewTrack", {}).get("value") or self.review_track_default + self.audio = self.ui_inputs.get( + "audio", {}).get("value") or False + self.include_handles = self.ui_inputs.get( + "includeHandles", {}).get("value") or self.include_handles_default + self.retimed_handles = ( + self.ui_inputs.get("retimedHandles", {}).get("value") + or self.retimed_handles_default + ) + self.retimed_framerange = ( + self.ui_inputs.get("retimedFramerange", {}).get("value") + or self.retimed_framerange_default + ) + + # build subset name from layer name + if self.subset_name == "[ track name ]": + self.subset_name = self.track_name + + # create subset for publishing + self.subset = self.subset_family + self.subset_name.capitalize() + + def _replace_hash_to_expression(self, name, text): + """ Replace hash with number in correct padding. """ + _spl = text.split("#") + _len = (len(_spl) - 1) + _repl = "{{{0}:0>{1}}}".format(name, _len) + return text.replace(("#" * _len), _repl) + + def _convert_to_marker_data(self): + """ Convert internal data to marker data. 
+    def _convert_to_marker_data(self):
+        """ Convert internal data to marker data.
+
+        Populates the marker data into internal variable self.marker_data.
+        """
+        # define vertical sync attributes
+        hero_track = True
+        self.review_layer = ""
+        if self.vertical_sync and self.track_name not in self.driving_layer:
+            # track is not driving the vertical sync, so it is not hero
+            hero_track = False
+
+        # increase steps by index of rename iteration
+        if not self.index_from_segment:
+            self.count_steps *= self.rename_index
+
+        hierarchy_formatting_data = {}
+        hierarchy_data = deepcopy(self.hierarchy_data)
+        _data = self.current_segment_default_data.copy()
+        if self.ui_inputs:
+            # adding tag metadata from ui
+            for _k, _v in self.ui_inputs.items():
+                if _v["target"] == "tag":
+                    self.marker_data[_k] = _v["value"]
+
+            # driving layer is set as positive match
+            if hero_track or self.vertical_sync:
+                # mark review layer
+                if self.review_track and (
+                        self.review_track not in self.review_track_default):
+                    # review layer is defined and not the same as default
+                    self.review_layer = self.review_track
+
+                # calculate shot number
+                if self.index_from_segment:
+                    # use clip index from timeline
+                    self.shot_num = self.count_steps * self.cs_index
+                else:
+                    if self.rename_index == 0:
+                        self.shot_num = self.count_from
+                    else:
+                        self.shot_num = self.count_from + self.count_steps
+
+            # clip name sequence number
+            _data.update({"shot": self.shot_num})
+
+            # solve `#` in text to a pythonic format expression
+            for _k, _v in hierarchy_data.items():
+                if "#" not in _v["value"]:
+                    continue
+                hierarchy_data[
+                    _k]["value"] = self._replace_hash_to_expression(
+                        _k, _v["value"])
+
+            # fill up pythonic expressions in hierarchy data
+            for k, _v in hierarchy_data.items():
+                hierarchy_formatting_data[k] = _v["value"].format(**_data)
+        else:
+            # no gui mode, just pass default data
+            hierarchy_formatting_data = hierarchy_data
+
+        tag_hierarchy_data = self._solve_tag_hierarchy_data(
+            hierarchy_formatting_data
+        )
+
+        tag_hierarchy_data.update({"heroTrack": True})
+        if hero_track and self.vertical_sync:
+            self.vertical_clip_match.update({
+                (self.clip_in, self.clip_out): tag_hierarchy_data
+            })
+
+        if not hero_track and self.vertical_sync:
+            # driving layer is set as negative match
+            for (_in, _out), hero_data in self.vertical_clip_match.items():
+                # Only one hero clip instance is expected in
+                # `self.vertical_clip_match`, so this loops only until the
+                # current non-hero clip is matched against the hero clip.
+                # `tag_hierarchy_data` is set only once for every clip
+                # which is not the hero clip.
+                _hero_data = deepcopy(hero_data)
+                _hero_data.update({"heroTrack": False})
+                if _in <= self.clip_in and _out >= self.clip_out:
+                    data_subset = hero_data["subset"]
+                    # add track index in case of duplicate names in hero data
+                    if self.subset in data_subset:
+                        _hero_data["subset"] = self.subset + str(
+                            self.track_index)
+                    # if track name and subset name are the same, keep the
+                    # plain subset name
+                    if self.subset_name == self.track_name:
+                        _hero_data["subset"] = self.subset
+                    # assign data to return hierarchy data to tag
+                    tag_hierarchy_data = _hero_data
+                    break
+
+        # add data to return data dict
+        self.marker_data.update(tag_hierarchy_data)
+
+    def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
+        """ Solve marker data from hierarchy data and templates.
""" + # fill up clip name and hierarchy keys + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) + + # remove shot from hierarchy data: is not needed anymore + hierarchy_formatting_data.pop("shot") + + return { + "newClipName": clip_name_filled, + "hierarchy": hierarchy_filled, + "parents": self.parents, + "hierarchyData": hierarchy_formatting_data, + "subset": self.subset, + "family": self.subset_family, + "families": [self.family] + } + + def _convert_to_entity(self, type, template): + """ Converting input key to key with type. """ + # convert to entity type + entity_type = self.types.get(type, None) + + assert entity_type, "Missing entity type for `{}`".format( + type + ) + + # first collect formatting data to use for formatting template + formatting_data = {} + for _k, _v in self.hierarchy_data.items(): + value = _v["value"].format( + **self.current_segment_default_data) + formatting_data[_k] = value + + return { + "entity_type": entity_type, + "entity_name": template.format( + **formatting_data + ) + } + + def _create_parents(self): + """ Create parents and return it in list. """ + self.parents = [] + + pattern = re.compile(self.parents_search_pattern) + + par_split = [(pattern.findall(t).pop(), t) + for t in self.hierarchy.split("/")] + + for type, template in par_split: + parent = self._convert_to_entity(type, template) + self.parents.append(parent) + + +# Publishing plugin functions + +# Loader plugin functions +class ClipLoader(LoaderPlugin): + """A basic clip loader for Flame + + This will implement the basic behavior for a loader to inherit from that + will containerize the reference and will implement the `remove` and + `update` logic. + + """ + log = log + + options = [ + qargparse.Boolean( + "handles", + label="Set handles", + default=0, + help="Also set handles to clip as In/Out marks" + ) + ] + + _mapping = None + _host_settings = None + + def apply_settings(cls, project_settings, system_settings): + + plugin_type_settings = ( + project_settings + .get("flame", {}) + .get("load", {}) + ) + + if not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + print(" - is disabled by preset") + elif option == "representations": + continue + else: + print(" - setting `{}`: `{}`".format(option, value)) + setattr(cls, option, value) + + def get_colorspace(self, context): + """Get colorspace name + + Look either to version data or representation data. + + Args: + context (dict): version context data + + Returns: + str: colorspace name or None + """ + version = context['version'] + version_data = version.get("data", {}) + colorspace = version_data.get( + "colorspace", None + ) + + if ( + not colorspace + or colorspace == "Unknown" + ): + colorspace = context["representation"]["data"].get( + "colorspace", None) + + return colorspace + + @classmethod + def get_native_colorspace(cls, input_colorspace): + """Return native colorspace name. 
+ + Args: + input_colorspace (str | None): colorspace name + + Returns: + str: native colorspace name defined in mapping or None + """ + # TODO: rewrite to support only pipeline's remapping + if not cls._host_settings: + cls._host_settings = get_current_project_settings()["flame"] + + # [Deprecated] way of remapping + if not cls._mapping: + mapping = ( + cls._host_settings["imageio"]["profilesMapping"]["inputs"]) + cls._mapping = { + input["ocioName"]: input["flameName"] + for input in mapping + } + + native_name = cls._mapping.get(input_colorspace) + + if not native_name: + native_name = get_remapped_colorspace_to_native( + input_colorspace, "flame", cls._host_settings["imageio"]) + + return native_name + + +class OpenClipSolver(flib.MediaInfoFile): + create_new_clip = False + + log = log + + def __init__(self, openclip_file_path, feed_data, logger=None): + self.out_file = openclip_file_path + + # replace log if any + if logger: + self.log = logger + + # new feed variables: + feed_path = feed_data.pop("path") + + # initialize parent class + super(OpenClipSolver, self).__init__( + feed_path, + logger=logger + ) + + # get other metadata + self.feed_version_name = feed_data["version"] + self.feed_colorspace = feed_data.get("colorspace") + self.log.debug("feed_version_name: {}".format(self.feed_version_name)) + + # layer rename variables + self.layer_rename_template = feed_data["layer_rename_template"] + self.layer_rename_patterns = feed_data["layer_rename_patterns"] + self.context_data = feed_data["context_data"] + + # derivate other feed variables + self.feed_basename = os.path.basename(feed_path) + self.feed_dir = os.path.dirname(feed_path) + self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() + self.log.debug("feed_ext: {}".format(self.feed_ext)) + self.log.debug("out_file: {}".format(self.out_file)) + if not self._is_valid_tmp_file(self.out_file): + self.create_new_clip = True + + def _is_valid_tmp_file(self, file): + # check if file exists + if os.path.isfile(file): + # test also if file is not empty + with open(file) as f: + lines = f.readlines() + + if len(lines) > 2: + return True + + # file is probably corrupted + os.remove(file) + return False + + def make(self): + + if self.create_new_clip: + # New openClip + self._create_new_open_clip() + else: + self._update_open_clip() + + def _clear_handler(self, xml_object): + for handler in xml_object.findall("./handler"): + self.log.info("Handler found") + xml_object.remove(handler) + + def _create_new_open_clip(self): + self.log.info("Building new openClip") + + for tmp_xml_track in self.clip_data.iter("track"): + # solve track (layer) name + self._rename_track_name(tmp_xml_track) + + tmp_xml_feeds = tmp_xml_track.find('feeds') + tmp_xml_feeds.set('currentVersion', self.feed_version_name) + + for tmp_feed in tmp_xml_track.iter("feed"): + tmp_feed.set('vuid', self.feed_version_name) + + # add colorspace if any is set + if self.feed_colorspace: + self._add_colorspace(tmp_feed, self.feed_colorspace) + + self._clear_handler(tmp_feed) + + tmp_xml_versions_obj = self.clip_data.find('versions') + tmp_xml_versions_obj.set('currentVersion', self.feed_version_name) + for xml_new_version in tmp_xml_versions_obj: + xml_new_version.set('uid', self.feed_version_name) + xml_new_version.set('type', 'version') + + self._clear_handler(self.clip_data) + self.log.info("Adding feed version: {}".format(self.feed_basename)) + + self.write_clip_data_to_file(self.out_file, self.clip_data) + + def _get_xml_track_obj_by_uid(self, xml_data, uid): + # 
loop all tracks of input xml data + for xml_track in xml_data.iter("track"): + track_uid = xml_track.get("uid") + self.log.debug( + ">> track_uid:uid: {}:{}".format(track_uid, uid)) + + # get matching uids + if uid == track_uid: + return xml_track + + def _rename_track_name(self, xml_track_data): + layer_uid = xml_track_data.get("uid") + name_obj = xml_track_data.find("name") + layer_name = name_obj.text + + if ( + self.layer_rename_patterns + and not any( + re.search(lp_.lower(), layer_name.lower()) + for lp_ in self.layer_rename_patterns + ) + ): + return + + formatting_data = self._update_formatting_data( + layerName=layer_name, + layerUID=layer_uid + ) + name_obj.text = StringTemplate( + self.layer_rename_template + ).format(formatting_data) + + def _update_formatting_data(self, **kwargs): + """ Updating formatting data for layer rename + + Attributes: + key=value (optional): will be included to formatting data + as {key: value} + Returns: + dict: anatomy context data for formatting + """ + self.log.debug(">> self.clip_data: {}".format(self.clip_data)) + clip_name_obj = self.clip_data.find("name") + data = { + "originalBasename": clip_name_obj.text + } + # include version context data + data.update(self.context_data) + # include input kwargs data + data.update(kwargs) + return data + + def _update_open_clip(self): + self.log.info("Updating openClip ..") + + out_xml = ET.parse(self.out_file) + out_xml = out_xml.getroot() + + self.log.debug(">> out_xml: {}".format(out_xml)) + # loop tmp tracks + updated_any = False + for tmp_xml_track in self.clip_data.iter("track"): + # solve track (layer) name + self._rename_track_name(tmp_xml_track) + + # get tmp track uid + tmp_track_uid = tmp_xml_track.get("uid") + self.log.debug(">> tmp_track_uid: {}".format(tmp_track_uid)) + + # get out data track by uid + out_track_element = self._get_xml_track_obj_by_uid( + out_xml, tmp_track_uid) + self.log.debug( + ">> out_track_element: {}".format(out_track_element)) + + # loop tmp feeds + for tmp_xml_feed in tmp_xml_track.iter("feed"): + new_path_obj = tmp_xml_feed.find( + "spans/span/path") + new_path = new_path_obj.text + + # check if feed path already exists in track's feeds + if ( + out_track_element is not None + and self._feed_exists(out_track_element, new_path) + ): + continue + + # rename versions on feeds + tmp_xml_feed.set('vuid', self.feed_version_name) + self._clear_handler(tmp_xml_feed) + + # update fps from MediaInfoFile class + if self.fps is not None: + tmp_feed_fps_obj = tmp_xml_feed.find( + "startTimecode/rate") + tmp_feed_fps_obj.text = str(self.fps) + + # update start_frame from MediaInfoFile class + if self.start_frame is not None: + tmp_feed_nb_ticks_obj = tmp_xml_feed.find( + "startTimecode/nbTicks") + tmp_feed_nb_ticks_obj.text = str(self.start_frame) + + # update drop_mode from MediaInfoFile class + if self.drop_mode is not None: + tmp_feed_drop_mode_obj = tmp_xml_feed.find( + "startTimecode/dropMode") + tmp_feed_drop_mode_obj.text = str(self.drop_mode) + + # add colorspace if any is set + if self.feed_colorspace is not None: + self._add_colorspace(tmp_xml_feed, self.feed_colorspace) + + # then append/update feed to correct track in output + if out_track_element: + self.log.debug("updating track element ..") + # update already present track + out_feeds = out_track_element.find('feeds') + out_feeds.set('currentVersion', self.feed_version_name) + out_feeds.append(tmp_xml_feed) + + self.log.info( + "Appending new feed: {}".format( + self.feed_version_name)) + else: + 
self.log.debug("adding new track element ..") + # create new track as it doesnt exists yet + # set current version to feeds on tmp + tmp_xml_feeds = tmp_xml_track.find('feeds') + tmp_xml_feeds.set('currentVersion', self.feed_version_name) + out_tracks = out_xml.find("tracks") + out_tracks.append(tmp_xml_track) + + updated_any = True + + if updated_any: + # Append vUID to versions + out_xml_versions_obj = out_xml.find('versions') + out_xml_versions_obj.set( + 'currentVersion', self.feed_version_name) + new_version_obj = ET.Element( + "version", {"type": "version", "uid": self.feed_version_name}) + out_xml_versions_obj.insert(0, new_version_obj) + + self._clear_handler(out_xml) + + # fist create backup + self._create_openclip_backup_file(self.out_file) + + self.log.info("Adding feed version: {}".format( + self.feed_version_name)) + + self.write_clip_data_to_file(self.out_file, out_xml) + + self.log.debug("OpenClip Updated: {}".format(self.out_file)) + + def _feed_exists(self, xml_data, path): + # loop all available feed paths and check if + # the path is not already in file + for src_path in xml_data.iter('path'): + if path == src_path.text: + self.log.warning( + "Not appending file as it already is in .clip file") + return True + + def _create_openclip_backup_file(self, file): + bck_file = "{}.bak".format(file) + # if backup does not exist + if not os.path.isfile(bck_file): + shutil.copy2(file, bck_file) + else: + # in case it exists and is already multiplied + created = False + for _i in range(1, 99): + bck_file = "{name}.bak.{idx:0>2}".format( + name=file, + idx=_i) + # create numbered backup file + if not os.path.isfile(bck_file): + shutil.copy2(file, bck_file) + created = True + break + # in case numbered does not exists + if not created: + bck_file = "{}.bak.last".format(file) + shutil.copy2(file, bck_file) + + def _add_colorspace(self, feed_obj, profile_name): + feed_storage_obj = feed_obj.find("storageFormat") + feed_clr_obj = feed_storage_obj.find("colourSpace") + if feed_clr_obj is not None: + feed_clr_obj = ET.Element( + "colourSpace", {"type": "string"}) + feed_clr_obj.text = profile_name + feed_storage_obj.append(feed_clr_obj) diff --git a/openpype/hosts/flame/api/render_utils.py b/client/ayon_core/hosts/flame/api/render_utils.py similarity index 99% rename from openpype/hosts/flame/api/render_utils.py rename to client/ayon_core/hosts/flame/api/render_utils.py index 7e50c2b23e..a0c77cb155 100644 --- a/openpype/hosts/flame/api/render_utils.py +++ b/client/ayon_core/hosts/flame/api/render_utils.py @@ -1,6 +1,6 @@ import os from xml.etree import ElementTree as ET -from openpype.lib import Logger +from ayon_core.lib import Logger log = Logger.get_logger(__name__) diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/client/ayon_core/hosts/flame/api/scripts/wiretap_com.py similarity index 99% rename from openpype/hosts/flame/api/scripts/wiretap_com.py rename to client/ayon_core/hosts/flame/api/scripts/wiretap_com.py index a74172c405..cffc6ec782 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/client/ayon_core/hosts/flame/api/scripts/wiretap_com.py @@ -44,7 +44,7 @@ def __init__(self, host_name=None, volume_name=None, group_name=None): self.group_name = group_name or "staff" # wiretap tools dir path - self.wiretap_tools_dir = os.getenv("OPENPYPE_WIRETAP_TOOLS") + self.wiretap_tools_dir = os.getenv("AYON_WIRETAP_TOOLS") # initialize WireTap client WireTapClientInit() diff --git a/client/ayon_core/hosts/flame/api/utils.py 
diff --git a/client/ayon_core/hosts/flame/api/utils.py b/client/ayon_core/hosts/flame/api/utils.py
new file mode 100644
index 0000000000..91584456a6
--- /dev/null
+++ b/client/ayon_core/hosts/flame/api/utils.py
@@ -0,0 +1,142 @@
+"""
+Flame utils for syncing scripts
+"""
+
+import os
+import shutil
+from ayon_core.lib import Logger
+log = Logger.get_logger(__name__)
+
+
+def _sync_utility_scripts(env=None):
+    """ Synchronize basic utility scripts for Flame.
+
+    To be able to start OpenPype within Flame we have to copy
+    all utility_scripts and additional FLAME_SCRIPT_DIRS content into
+    `/opt/Autodesk/shared/python`. Those folders are synchronized on
+    every launch.
+    """
+    from .. import HOST_DIR
+
+    env = env or os.environ
+
+    # initiate inputs
+    scripts = {}
+    fsd_env = env.get("FLAME_SCRIPT_DIRS", "")
+    flame_shared_dir = "/opt/Autodesk/shared/python"
+
+    fsd_paths = [os.path.join(
+        HOST_DIR,
+        "api",
+        "utility_scripts"
+    )]
+
+    # collect script dirs
+    log.info("FLAME_SCRIPT_DIRS: `{fsd_env}`".format(**locals()))
+    log.info("fsd_paths: `{fsd_paths}`".format(**locals()))
+
+    # add application environment setting for FLAME_SCRIPT_DIRS
+    # to script path search
+    for _dirpath in fsd_env.split(os.pathsep):
+        if not os.path.isdir(_dirpath):
+            log.warning("Path is not a valid dir: `{_dirpath}`".format(
+                **locals()))
+            continue
+        fsd_paths.append(_dirpath)
+
+    # collect scripts from dirs
+    for path in fsd_paths:
+        scripts.update({path: os.listdir(path)})
+
+    remove_black_list = []
+    for _k, s_list in scripts.items():
+        remove_black_list += s_list
+
+    log.info("remove_black_list: `{remove_black_list}`".format(**locals()))
+    log.info("Additional Flame script paths: `{fsd_paths}`".format(**locals()))
+    log.info("Flame Scripts: `{scripts}`".format(**locals()))
+
+    # make sure no stale script file is left in the shared folder
+    if next(iter(os.listdir(flame_shared_dir)), None):
+        for _itm in os.listdir(flame_shared_dir):
+            skip = False
+            # resolve against the shared dir, a bare name would be
+            # checked relative to the current working directory
+            path = os.path.join(flame_shared_dir, _itm)
+
+            # skip all scripts and folders which are not maintained
+            if _itm not in remove_black_list:
+                skip = True
+
+            # do not skip if pyc in extension
+            if not os.path.isdir(path) and "pyc" in os.path.splitext(_itm)[-1]:
+                skip = False
+
+            # continue if skip is true
+            if skip:
+                continue
+
+            log.info("Removing `{path}`...".format(**locals()))
+
+            try:
+                if os.path.isdir(path):
+                    shutil.rmtree(path, onerror=None)
+                else:
+                    os.remove(path)
+            except PermissionError as msg:
+                log.warning(
+                    "Not able to remove: `{}`, Problem with: `{}`".format(
+                        path,
+                        msg
+                    )
+                )
+
+    # copy scripts into Flame's shared python dir
+    for dirpath, scriptlist in scripts.items():
+        # directory and scripts list
+        for _script in scriptlist:
+            # script in script list
+            src = os.path.join(dirpath, _script)
+            dst = os.path.join(flame_shared_dir, _script)
+            log.info("Copying `{src}` to `{dst}`...".format(**locals()))
+
+            try:
+                if os.path.isdir(src):
+                    shutil.copytree(
+                        src, dst, symlinks=False,
+                        ignore=None, ignore_dangling_symlinks=False
+                    )
+                else:
+                    shutil.copy2(src, dst)
+            except (PermissionError, FileExistsError) as msg:
+                log.warning(
+                    "Not able to copy to: `{}`, Problem with: `{}`".format(
+                        dst,
+                        msg
+                    )
+                )
+
+
+def setup(env=None):
+    """ Wrapper installer started from
+    `flame/hooks/pre_flame_setup.py`
+    """
+    env = env or os.environ
+
+    # synchronize flame utility scripts
+    _sync_utility_scripts(env)
+
+    log.info("Flame OpenPype wrapper has been installed")
+
+
+def get_flame_version():
+    import flame
+
+    return {
+        "full": flame.get_version(),
+        "major": flame.get_version_major(),
+        "minor": 
flame.get_version_minor(), + "patch": flame.get_version_patch() + } + + +def get_flame_install_root(): + return "/opt/Autodesk" diff --git a/client/ayon_core/hosts/flame/api/workio.py b/client/ayon_core/hosts/flame/api/workio.py new file mode 100644 index 0000000000..0e3cb7f5fd --- /dev/null +++ b/client/ayon_core/hosts/flame/api/workio.py @@ -0,0 +1,37 @@ +"""Host API required Work Files tool""" + +import os +from ayon_core.lib import Logger +# from .. import ( +# get_project_manager, +# get_current_project +# ) + + +log = Logger.get_logger(__name__) + +exported_projet_ext = ".otoc" + + +def file_extensions(): + return [exported_projet_ext] + + +def has_unsaved_changes(): + pass + + +def save_file(filepath): + pass + + +def open_file(filepath): + pass + + +def current_file(): + pass + + +def work_root(session): + return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/client/ayon_core/hosts/flame/hooks/pre_flame_setup.py similarity index 95% rename from openpype/hosts/flame/hooks/pre_flame_setup.py rename to client/ayon_core/hosts/flame/hooks/pre_flame_setup.py index 850569cfdd..391332d368 100644 --- a/openpype/hosts/flame/hooks/pre_flame_setup.py +++ b/client/ayon_core/hosts/flame/hooks/pre_flame_setup.py @@ -5,12 +5,12 @@ import socket from pprint import pformat -from openpype.lib import ( - get_openpype_username, +from ayon_core.lib import ( + get_ayon_username, run_subprocess, ) -from openpype.lib.applications import PreLaunchHook, LaunchTypes -from openpype.hosts import flame as opflame +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.hosts import flame as opflame class FlamePrelaunch(PreLaunchHook): @@ -32,8 +32,8 @@ def __init__(self, *args, **kwargs): def execute(self): _env = self.launch_context.env - self.flame_python_exe = _env["OPENPYPE_FLAME_PYTHON_EXEC"] - self.flame_pythonpath = _env["OPENPYPE_FLAME_PYTHONPATH"] + self.flame_python_exe = _env["AYON_FLAME_PYTHON_EXEC"] + self.flame_pythonpath = _env["AYON_FLAME_PYTHONPATH"] """Hook entry method.""" project_doc = self.data["project_doc"] @@ -57,7 +57,7 @@ def execute(self): colormanaged = True # get user name and host name - user_name = get_openpype_username() + user_name = get_ayon_username() user_name = user_name.replace(".", "_") hostname = socket.gethostname() # not returning wiretap host name diff --git a/openpype/hosts/celaction/scripts/__init__.py b/client/ayon_core/hosts/flame/otio/__init__.py similarity index 100% rename from openpype/hosts/celaction/scripts/__init__.py rename to client/ayon_core/hosts/flame/otio/__init__.py diff --git a/openpype/hosts/flame/otio/flame_export.py b/client/ayon_core/hosts/flame/otio/flame_export.py similarity index 99% rename from openpype/hosts/flame/otio/flame_export.py rename to client/ayon_core/hosts/flame/otio/flame_export.py index 6d6b33d2a1..e5ea4dcf5e 100644 --- a/openpype/hosts/flame/otio/flame_export.py +++ b/client/ayon_core/hosts/flame/otio/flame_export.py @@ -275,7 +275,7 @@ def create_otio_reference(clip_data, fps=None): def create_otio_clip(clip_data): - from openpype.hosts.flame.api import MediaInfoFile, TimeEffectMetadata + from ayon_core.hosts.flame.api import MediaInfoFile, TimeEffectMetadata segment = clip_data["PySegment"] diff --git a/openpype/hosts/flame/otio/utils.py b/client/ayon_core/hosts/flame/otio/utils.py similarity index 100% rename from openpype/hosts/flame/otio/utils.py rename to client/ayon_core/hosts/flame/otio/utils.py diff --git 
a/client/ayon_core/hosts/flame/plugins/create/create_shot_clip.py b/client/ayon_core/hosts/flame/plugins/create/create_shot_clip.py
new file mode 100644
index 0000000000..ee99040ca3
--- /dev/null
+++ b/client/ayon_core/hosts/flame/plugins/create/create_shot_clip.py
@@ -0,0 +1,307 @@
+from copy import deepcopy
+import ayon_core.hosts.flame.api as opfapi
+
+
+class CreateShotClip(opfapi.Creator):
+    """Publishable clip"""
+
+    label = "Create Publishable Clip"
+    family = "clip"
+    icon = "film"
+    defaults = ["Main"]
+
+    presets = None
+
+    def process(self):
+        # create a copy of object attributes that are modified during `process`
+        presets = deepcopy(self.presets)
+        gui_inputs = self.get_gui_inputs()
+
+        # get key pairs from presets and match them to ui inputs
+        for k, v in gui_inputs.items():
+            if v["type"] in ("dict", "section"):
+                # nested dictionary (only one level allowed
+                # for sections and dict)
+                for _k, _v in v["value"].items():
+                    if presets.get(_k) is not None:
+                        gui_inputs[k][
+                            "value"][_k]["value"] = presets[_k]
+
+            if presets.get(k) is not None:
+                gui_inputs[k]["value"] = presets[k]
+
+        # open widget for plugin inputs
+        results_back = self.create_widget(
+            "Pype publish attributes creator",
+            "Define sequential rename and fill hierarchy data.",
+            gui_inputs
+        )
+
+        if len(self.selected) < 1:
+            return
+
+        if not results_back:
+            print("Operation aborted")
+            return
+
+        # get ui output for track name for vertical sync
+        v_sync_track = results_back["vSyncTrack"]["value"]
+
+        # sort selected segments, hero (v-sync) track first
+        sorted_selected_segments = []
+        unsorted_selected_segments = []
+        for _segment in self.selected:
+            if _segment.parent.name.get_value() in v_sync_track:
+                sorted_selected_segments.append(_segment)
+            else:
+                unsorted_selected_segments.append(_segment)
+
+        sorted_selected_segments.extend(unsorted_selected_segments)
+
+        kwargs = {
+            "log": self.log,
+            "ui_inputs": results_back,
+            "avalon": self.data,
+            "family": self.data["family"]
+        }
+
+        for i, segment in enumerate(sorted_selected_segments):
+            kwargs["rename_index"] = i
+            # convert segment to publishable instance
+            opfapi.PublishableClip(segment, **kwargs).convert()
+
+    def get_gui_inputs(self):
+        gui_tracks = self._get_video_track_names(
+            opfapi.get_current_sequence(opfapi.CTX.selection)
+        )
+        return deepcopy({
+            "renameHierarchy": {
+                "type": "section",
+                "label": "Shot Hierarchy And Rename Settings",
+                "target": "ui",
+                "order": 0,
+                "value": {
+                    "hierarchy": {
+                        "value": "{folder}/{sequence}",
+                        "type": "QLineEdit",
+                        "label": "Shot Parent Hierarchy",
+                        "target": "tag",
+                        "toolTip": "Parent path of the shot root folder; template is filled from the `Shot Template Keywords` section",  # noqa
+                        "order": 0},
+                    "useShotName": {
+                        "value": True,
+                        "type": "QCheckBox",
+                        "label": "Use Shot Name",
+                        "target": "ui",
+                        "toolTip": "Use name from the Shot name clip attribute",  # noqa
+                        "order": 1},
+                    "clipRename": {
+                        "value": False,
+                        "type": "QCheckBox",
+                        "label": "Rename clips",
+                        "target": "ui",
+                        "toolTip": "Rename selected clips on the fly",  # noqa
+                        "order": 2},
+                    "clipName": {
+                        "value": "{sequence}{shot}",
+                        "type": "QLineEdit",
+                        "label": "Clip Name Template",
+                        "target": "ui",
+                        "toolTip": "Template for creating shot names used for renaming (turn `Rename clips` on)",  # noqa
+                        "order": 3},
+                    "segmentIndex": {
+                        "value": True,
+                        "type": "QCheckBox",
+                        "label": "Segment index",
+                        "target": "ui",
+                        "toolTip": "Take number from segment index",  # noqa
+                        "order": 4},
+                    "countFrom": {
+                        "value": 10,
+                        "type": "QSpinBox",
+                        "label": "Count sequence from",
"target": "ui", + "toolTip": "Set when the sequence number stafrom", # noqa + "order": 5}, + "countSteps": { + "value": 10, + "type": "QSpinBox", + "label": "Stepping number", + "target": "ui", + "toolTip": "What number is adding every new step", # noqa + "order": 6}, + } + }, + "hierarchyData": { + "type": "dict", + "label": "Shot Template Keywords", + "target": "tag", + "order": 1, + "value": { + "folder": { + "value": "shots", + "type": "QLineEdit", + "label": "{folder}", + "target": "tag", + "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 0}, + "episode": { + "value": "ep01", + "type": "QLineEdit", + "label": "{episode}", + "target": "tag", + "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 1}, + "sequence": { + "value": "sq01", + "type": "QLineEdit", + "label": "{sequence}", + "target": "tag", + "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 2}, + "track": { + "value": "{_track_}", + "type": "QLineEdit", + "label": "{track}", + "target": "tag", + "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 3}, + "shot": { + "value": "sh###", + "type": "QLineEdit", + "label": "{shot}", + "target": "tag", + "toolTip": "Name of shot. `#` is converted to paded number. 
+            "verticalSync": {
+                "type": "section",
+                "label": "Vertical Synchronization Of Attributes",
+                "target": "ui",
+                "order": 2,
+                "value": {
+                    "vSyncOn": {
+                        "value": True,
+                        "type": "QCheckBox",
+                        "label": "Enable Vertical Sync",
+                        "target": "ui",
+                        "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
+                        "order": 0},
+                    "vSyncTrack": {
+                        "value": gui_tracks,  # noqa
+                        "type": "QComboBox",
+                        "label": "Hero track",
+                        "target": "ui",
+                        "toolTip": "Select the driving track name which should be hero for all others",  # noqa
+                        "order": 1}
+                }
+            },
+            "publishSettings": {
+                "type": "section",
+                "label": "Publish Settings",
+                "target": "ui",
+                "order": 3,
+                "value": {
+                    "subsetName": {
+                        "value": ["[ track name ]", "main", "bg", "fg",
+                                  "animatic"],
+                        "type": "QComboBox",
+                        "label": "Subset Name",
+                        "target": "ui",
+                        "toolTip": "Choose a subset name pattern; if [ track name ] is selected, the name of the track layer will be used",  # noqa
+                        "order": 0},
+                    "subsetFamily": {
+                        "value": ["plate", "take"],
+                        "type": "QComboBox",
+                        "label": "Subset Family",
+                        "target": "ui",
+                        "toolTip": "What this subset will be used for",  # noqa
+                        "order": 1},
+                    "reviewTrack": {
+                        "value": ["< none >"] + gui_tracks,
+                        "type": "QComboBox",
+                        "label": "Use Review Track",
+                        "target": "ui",
+                        "toolTip": "Generate preview videos on the fly; if `< none >` is selected, nothing will be generated",  # noqa
+                        "order": 2},
+                    "audio": {
+                        "value": False,
+                        "type": "QCheckBox",
+                        "label": "Include audio",
+                        "target": "tag",
+                        "toolTip": "Process subsets with corresponding audio",  # noqa
+                        "order": 3},
+                    "sourceResolution": {
+                        "value": False,
+                        "type": "QCheckBox",
+                        "label": "Source resolution",
+                        "target": "tag",
+                        "toolTip": "Is resolution taken from timeline or source?",  # noqa
+                        "order": 4},
+                }
+            },
+            "frameRangeAttr": {
+                "type": "section",
+                "label": "Shot Attributes",
+                "target": "ui",
+                "order": 4,
+                "value": {
+                    "workfileFrameStart": {
+                        "value": 1001,
+                        "type": "QSpinBox",
+                        "label": "Workfiles Start Frame",
+                        "target": "tag",
+                        "toolTip": "Set workfile starting frame number",  # noqa
+                        "order": 0
+                    },
+                    "handleStart": {
+                        "value": 0,
+                        "type": "QSpinBox",
+                        "label": "Handle Start",
+                        "target": "tag",
+                        "toolTip": "Handle at start of clip",  # noqa
+                        "order": 1
+                    },
+                    "handleEnd": {
+                        "value": 0,
+                        "type": "QSpinBox",
+                        "label": "Handle End",
+                        "target": "tag",
+                        "toolTip": "Handle at end of clip",  # noqa
+                        "order": 2
+                    },
+                    "includeHandles": {
+                        "value": False,
+                        "type": "QCheckBox",
+                        "label": "Include handles",
+                        "target": "tag",
+                        "toolTip": "By default handles are excluded",  # noqa
+                        "order": 3
+                    },
+                    "retimedHandles": {
+                        "value": True,
+                        "type": "QCheckBox",
+                        "label": "Retimed handles",
+                        "target": "tag",
+                        "toolTip": "By default handles are retimed.",  # noqa
+                        "order": 4
+                    },
+                    "retimedFramerange": {
+                        "value": True,
+                        "type": "QCheckBox",
+                        "label": "Retimed framerange",
+                        "target": "tag",
+                        "toolTip": "By default framerange is retimed.",  # noqa
+                        "order": 5
+                    }
+                }
+            }
+        })
+
+    def _get_video_track_names(self, sequence):
+        track_names = []
+        for ver in sequence.versions:
+            for track in ver.tracks:
+                track_names.append(track.name.get_value())
+
+        return track_names
diff --git a/client/ayon_core/hosts/flame/plugins/load/load_clip.py b/client/ayon_core/hosts/flame/plugins/load/load_clip.py
new file mode 
100644 index 0000000000..6f35196932 --- /dev/null +++ b/client/ayon_core/hosts/flame/plugins/load/load_clip.py @@ -0,0 +1,275 @@ +from copy import deepcopy +import os +import flame +from pprint import pformat +import ayon_core.hosts.flame.api as opfapi +from ayon_core.lib import StringTemplate +from ayon_core.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) + + +class LoadClip(opfapi.ClipLoader): + """Load a subset to timeline as clip + + Place clip to timeline on its asset origin timings collected + during conforming to project + """ + + families = ["render2d", "source", "plate", "render", "review"] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) + + label = "Load as clip" + order = -10 + icon = "code-fork" + color = "orange" + + # settings + reel_group_name = "OpenPype_Reels" + reel_name = "Loaded" + clip_name_template = "{asset}_{subset}<_{output}>" + + """ Anatomy keys from version context data and dynamically added: + - {layerName} - original layer name token + - {layerUID} - original layer UID token + - {originalBasename} - original clip name taken from file + """ + layer_rename_template = "{asset}_{subset}<_{output}>" + layer_rename_patterns = [] + + def load(self, context, name, namespace, options): + + # get flame objects + fproject = flame.project.current_project + self.fpd = fproject.current_workspace.desktop + + # load clip to timeline and get main variables + version = context['version'] + version_data = version.get("data", {}) + version_name = version.get("name", None) + colorspace = self.get_colorspace(context) + + # in case output is not in context replace key to representation + if not context["representation"]["context"].get("output"): + self.clip_name_template = self.clip_name_template.replace( + "output", "representation") + self.layer_rename_template = self.layer_rename_template.replace( + "output", "representation") + + formatting_data = deepcopy(context["representation"]["context"]) + clip_name = StringTemplate(self.clip_name_template).format( + formatting_data) + + # convert colorspace with ocio to flame mapping + # in imageio flame section + colorspace = self.get_native_colorspace(colorspace) + self.log.info("Loading with colorspace: `{}`".format(colorspace)) + + # create workfile path + workfile_dir = os.environ["AVALON_WORKDIR"] + openclip_dir = os.path.join( + workfile_dir, clip_name + ) + openclip_path = os.path.join( + openclip_dir, clip_name + ".clip" + ) + if not os.path.exists(openclip_dir): + os.makedirs(openclip_dir) + + # prepare clip data from context ad send it to openClipLoader + path = self.filepath_from_context(context) + loading_context = { + "path": path.replace("\\", "/"), + "colorspace": colorspace, + "version": "v{:0>3}".format(version_name), + "layer_rename_template": self.layer_rename_template, + "layer_rename_patterns": self.layer_rename_patterns, + "context_data": formatting_data + } + self.log.debug(pformat( + loading_context + )) + self.log.debug(openclip_path) + + # make openpype clip file + opfapi.OpenClipSolver( + openclip_path, loading_context, logger=self.log).make() + + # prepare Reel group in actual desktop + opc = self._get_clip( + clip_name, + openclip_path + ) + + # add additional metadata from the version to imprint Avalon knob + add_keys = [ + "frameStart", "frameEnd", "source", "author", + "fps", "handleStart", "handleEnd" + ] + + # move all version data keys to tag data + data_imprint = {} + for key in add_keys: + data_imprint.update({ 
+ key: version_data.get(key, str(None)) + }) + + # add variables related to version context + data_imprint.update({ + "version": version_name, + "colorspace": colorspace, + "objectName": clip_name + }) + + # TODO: finish the containerisation + # opc_segment = opfapi.get_clip_segment(opc) + + # return opfapi.containerise( + # opc_segment, + # name, namespace, context, + # self.__class__.__name__, + # data_imprint) + + return opc + + def _get_clip(self, name, clip_path): + reel = self._get_reel() + # with maintained openclip as opc + matching_clip = [cl for cl in reel.clips + if cl.name.get_value() == name] + if matching_clip: + return matching_clip.pop() + else: + created_clips = flame.import_clips(str(clip_path), reel) + return created_clips.pop() + + def _get_reel(self): + + matching_rgroup = [ + rg for rg in self.fpd.reel_groups + if rg.name.get_value() == self.reel_group_name + ] + + if not matching_rgroup: + reel_group = self.fpd.create_reel_group(str(self.reel_group_name)) + for _r in reel_group.reels: + if "reel" not in _r.name.get_value().lower(): + continue + self.log.debug("Removing: {}".format(_r.name)) + flame.delete(_r) + else: + reel_group = matching_rgroup.pop() + + matching_reel = [ + re for re in reel_group.reels + if re.name.get_value() == self.reel_name + ] + + if not matching_reel: + reel_group = reel_group.create_reel(str(self.reel_name)) + else: + reel_group = matching_reel.pop() + + return reel_group + + def _get_segment_from_clip(self, clip): + # unwrapping segment from input clip + pass + + # def switch(self, container, representation): + # self.update(container, representation) + + # def update(self, container, representation): + # """ Updating previously loaded clips + # """ + + # # load clip to timeline and get main variables + # name = container['name'] + # namespace = container['namespace'] + # track_item = phiero.get_track_items( + # track_item_name=namespace) + # version = io.find_one({ + # "type": "version", + # "_id": representation["parent"] + # }) + # version_data = version.get("data", {}) + # version_name = version.get("name", None) + # colorspace = version_data.get("colorspace", None) + # object_name = "{}_{}".format(name, namespace) + # file = get_representation_path(representation).replace("\\", "/") + # clip = track_item.source() + + # # reconnect media to new path + # clip.reconnectMedia(file) + + # # set colorspace + # if colorspace: + # clip.setSourceMediaColourTransform(colorspace) + + # # add additional metadata from the version to imprint Avalon knob + # add_keys = [ + # "frameStart", "frameEnd", "source", "author", + # "fps", "handleStart", "handleEnd" + # ] + + # # move all version data keys to tag data + # data_imprint = {} + # for key in add_keys: + # data_imprint.update({ + # key: version_data.get(key, str(None)) + # }) + + # # add variables related to version context + # data_imprint.update({ + # "representation": str(representation["_id"]), + # "version": version_name, + # "colorspace": colorspace, + # "objectName": object_name + # }) + + # # update color of clip regarding the version order + # self.set_item_color(track_item, version) + + # return phiero.update_container(track_item, data_imprint) + + # def remove(self, container): + # """ Removing previously loaded clips + # """ + # # load clip to timeline and get main variables + # namespace = container['namespace'] + # track_item = phiero.get_track_items( + # track_item_name=namespace) + # track = track_item.parent() + + # # remove track item from track + # track.removeItem(track_item) 
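
A side note on the `<_{output}>` part of the templates used above: `StringTemplate` treats the angle-bracketed chunk as optional, so it drops out when its key is missing from the data, assuming the optional-part behavior matches the anatomy templates. A small sketch with the same formatting keys as in `load()`:

    from ayon_core.lib import StringTemplate

    template = StringTemplate("{asset}_{subset}<_{output}>")
    print(template.format({"asset": "sh010", "subset": "plateMain"}))
    # sh010_plateMain
    print(template.format(
        {"asset": "sh010", "subset": "plateMain", "output": "exr"}))
    # sh010_plateMain_exr
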
+ + # @classmethod + # def multiselection(cls, track_item): + # if not cls.track: + # cls.track = track_item.parent() + # cls.sequence = cls.track.parent() + + # @classmethod + # def set_item_color(cls, track_item, version): + + # clip = track_item.source() + # # define version name + # version_name = version.get("name", None) + # # get all versions in list + # versions = io.find({ + # "type": "version", + # "parent": version["parent"] + # }).distinct('name') + + # max_version = max(versions) + + # # set clip colour + # if version_name == max_version: + # clip.binItem().setColor(cls.clip_color_last) + # else: + # clip.binItem().setColor(cls.clip_color) diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/client/ayon_core/hosts/flame/plugins/load/load_clip_batch.py similarity index 97% rename from openpype/hosts/flame/plugins/load/load_clip_batch.py rename to client/ayon_core/hosts/flame/plugins/load/load_clip_batch.py index 1f3a017d72..a66bf53622 100644 --- a/openpype/hosts/flame/plugins/load/load_clip_batch.py +++ b/client/ayon_core/hosts/flame/plugins/load/load_clip_batch.py @@ -2,9 +2,9 @@ import os import flame from pprint import pformat -import openpype.hosts.flame.api as opfapi -from openpype.lib import StringTemplate -from openpype.lib.transcoding import ( +import ayon_core.hosts.flame.api as opfapi +from ayon_core.lib import StringTemplate +from ayon_core.lib.transcoding import ( VIDEO_EXTENSIONS, IMAGE_EXTENSIONS ) diff --git a/openpype/hosts/flame/plugins/publish/collect_test_selection.py b/client/ayon_core/hosts/flame/plugins/publish/collect_test_selection.py similarity index 94% rename from openpype/hosts/flame/plugins/publish/collect_test_selection.py rename to client/ayon_core/hosts/flame/plugins/publish/collect_test_selection.py index 9f982321cc..0fb41eab78 100644 --- a/openpype/hosts/flame/plugins/publish/collect_test_selection.py +++ b/client/ayon_core/hosts/flame/plugins/publish/collect_test_selection.py @@ -1,8 +1,8 @@ import os import pyblish.api import tempfile -import openpype.hosts.flame.api as opfapi -from openpype.hosts.flame.otio import flame_export as otio_export +import ayon_core.hosts.flame.api as opfapi +from ayon_core.hosts.flame.otio import flame_export as otio_export import opentimelineio as otio from pprint import pformat reload(otio_export) # noqa diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/client/ayon_core/hosts/flame/plugins/publish/collect_timeline_instances.py similarity index 98% rename from openpype/hosts/flame/plugins/publish/collect_timeline_instances.py rename to client/ayon_core/hosts/flame/plugins/publish/collect_timeline_instances.py index e14f960a2b..636cbd8031 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/client/ayon_core/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -1,9 +1,9 @@ import re from types import NoneType import pyblish -import openpype.hosts.flame.api as opfapi -from openpype.hosts.flame.otio import flame_export -from openpype.pipeline.editorial import ( +import ayon_core.hosts.flame.api as opfapi +from ayon_core.hosts.flame.otio import flame_export +from ayon_core.pipeline.editorial import ( is_overlapping_otio_ranges, get_media_range_with_retimes ) diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/client/ayon_core/hosts/flame/plugins/publish/collect_timeline_otio.py similarity index 90% rename from openpype/hosts/flame/plugins/publish/collect_timeline_otio.py rename to 
client/ayon_core/hosts/flame/plugins/publish/collect_timeline_otio.py index 20ac048986..6a3e99aa55 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py +++ b/client/ayon_core/hosts/flame/plugins/publish/collect_timeline_otio.py @@ -1,9 +1,9 @@ import pyblish.api -from openpype.client import get_asset_name_identifier -import openpype.hosts.flame.api as opfapi -from openpype.hosts.flame.otio import flame_export -from openpype.pipeline.create import get_subset_name +from ayon_core.client import get_asset_name_identifier +import ayon_core.hosts.flame.api as opfapi +from ayon_core.hosts.flame.otio import flame_export +from ayon_core.pipeline.create import get_subset_name class CollecTimelineOTIO(pyblish.api.ContextPlugin): diff --git a/client/ayon_core/hosts/flame/plugins/publish/extract_otio_file.py b/client/ayon_core/hosts/flame/plugins/publish/extract_otio_file.py new file mode 100644 index 0000000000..41ae981cba --- /dev/null +++ b/client/ayon_core/hosts/flame/plugins/publish/extract_otio_file.py @@ -0,0 +1,43 @@ +import os +import pyblish.api +import opentimelineio as otio +from ayon_core.pipeline import publish + + +class ExtractOTIOFile(publish.Extractor): + """ + Extractor export OTIO file + """ + + label = "Extract OTIO file" + order = pyblish.api.ExtractorOrder - 0.45 + families = ["workfile"] + hosts = ["flame"] + + def process(self, instance): + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + name = instance.data["name"] + staging_dir = self.staging_dir(instance) + + otio_timeline = instance.context.data["otioTimeline"] + # create otio timeline representation + otio_file_name = name + ".otio" + otio_file_path = os.path.join(staging_dir, otio_file_name) + + # export otio file to temp dir + otio.adapters.write_to_file(otio_timeline, otio_file_path) + + representation_otio = { + 'name': "otio", + 'ext': "otio", + 'files': otio_file_name, + "stagingDir": staging_dir, + } + + instance.data["representations"].append(representation_otio) + + self.log.info("Added OTIO file representation: {}".format( + representation_otio)) diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/client/ayon_core/hosts/flame/plugins/publish/extract_subset_resources.py similarity index 99% rename from openpype/hosts/flame/plugins/publish/extract_subset_resources.py rename to client/ayon_core/hosts/flame/plugins/publish/extract_subset_resources.py index a7979ab4d5..af699fd03a 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/client/ayon_core/hosts/flame/plugins/publish/extract_subset_resources.py @@ -5,10 +5,10 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.flame import api as opfapi -from openpype.hosts.flame.api import MediaInfoFile -from openpype.pipeline.editorial import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.flame import api as opfapi +from ayon_core.hosts.flame.api import MediaInfoFile +from ayon_core.pipeline.editorial import ( get_media_range_with_retimes ) diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/client/ayon_core/hosts/flame/plugins/publish/integrate_batch_group.py similarity index 98% rename from openpype/hosts/flame/plugins/publish/integrate_batch_group.py rename to client/ayon_core/hosts/flame/plugins/publish/integrate_batch_group.py index 4f3945bb0f..3458bd3002 100644 --- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py +++ 
b/client/ayon_core/hosts/flame/plugins/publish/integrate_batch_group.py @@ -3,9 +3,9 @@ from collections import OrderedDict from pprint import pformat import pyblish -import openpype.hosts.flame.api as opfapi -import openpype.pipeline as op_pipeline -from openpype.pipeline.workfile import get_workdir +import ayon_core.hosts.flame.api as opfapi +import ayon_core.pipeline as op_pipeline +from ayon_core.pipeline.workfile import get_workdir class IntegrateBatchGroup(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml rename to client/ayon_core/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_thumbnails_jpg.xml diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml rename to client/ayon_core/hosts/flame/startup/openpype_babypublisher/export_preset/openpype_seg_video_h264.xml diff --git a/openpype/hosts/flame/otio/__init__.py b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/__init__.py similarity index 100% rename from openpype/hosts/flame/otio/__init__.py rename to client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/__init__.py diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/modules/app_utils.py b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/app_utils.py similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/modules/app_utils.py rename to client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/app_utils.py diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/modules/ftrack_lib.py b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/ftrack_lib.py similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/modules/ftrack_lib.py rename to client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/ftrack_lib.py diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/modules/panel_app.py b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/panel_app.py similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/modules/panel_app.py rename to client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/panel_app.py diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/modules/uiwidgets.py b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/uiwidgets.py similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/modules/uiwidgets.py rename to client/ayon_core/hosts/flame/startup/openpype_babypublisher/modules/uiwidgets.py diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/openpype_babypublisher.py b/client/ayon_core/hosts/flame/startup/openpype_babypublisher/openpype_babypublisher.py similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/openpype_babypublisher.py rename to 
client/ayon_core/hosts/flame/startup/openpype_babypublisher/openpype_babypublisher.py
diff --git a/openpype/hosts/flame/startup/openpype_in_flame.py b/client/ayon_core/hosts/flame/startup/openpype_in_flame.py
similarity index 98%
rename from openpype/hosts/flame/startup/openpype_in_flame.py
rename to client/ayon_core/hosts/flame/startup/openpype_in_flame.py
index 39869333aa..cf0a24ede2 100644
--- a/openpype/hosts/flame/startup/openpype_in_flame.py
+++ b/client/ayon_core/hosts/flame/startup/openpype_in_flame.py
@@ -4,8 +4,8 @@
 from pprint import pformat
 import atexit
 
-import openpype.hosts.flame.api as opfapi
-from openpype.pipeline import (
+import ayon_core.hosts.flame.api as opfapi
+from ayon_core.pipeline import (
     install_host,
     registered_host,
 )
diff --git a/openpype/hosts/fusion/__init__.py b/client/ayon_core/hosts/fusion/__init__.py
similarity index 100%
rename from openpype/hosts/fusion/__init__.py
rename to client/ayon_core/hosts/fusion/__init__.py
diff --git a/client/ayon_core/hosts/fusion/addon.py b/client/ayon_core/hosts/fusion/addon.py
new file mode 100644
index 0000000000..7eff2d93c8
--- /dev/null
+++ b/client/ayon_core/hosts/fusion/addon.py
@@ -0,0 +1,72 @@
+import os
+import re
+from ayon_core.modules import OpenPypeModule, IHostAddon
+from ayon_core.lib import Logger
+
+FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
+
+# FUSION_VERSIONS_DICT is used by the pre-launch hooks.
+# The keys correspond to all currently supported Fusion versions.
+# Each value is a tuple of the corresponding Python home variable and a
+# profile number, used by the profile hook to set Fusion profile variables.
+FUSION_VERSIONS_DICT = {
+    9: ("FUSION_PYTHON36_HOME", 9),
+    16: ("FUSION16_PYTHON36_HOME", 16),
+    17: ("FUSION16_PYTHON36_HOME", 16),
+    18: ("FUSION_PYTHON3_HOME", 16),
+}
+
+
+def get_fusion_version(app_name):
+    """
+    The function is triggered by the prelaunch hooks to get the Fusion version.
+
+    `app_name` is obtained by prelaunch hooks from the
+    `launch_context.env.get("AVALON_APP_NAME")`.
+
+    To get a correct Fusion version, a version number should be present
+    in the `applications/fusion/variants` key
+    of the Blackmagic Fusion Application Settings.
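+
+    For example, with a hypothetical variant name whose digits match a
+    supported version (illustrative only, not a required naming scheme):
+
+        >>> get_fusion_version("fusion/18-5")
+        18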
+ """ + + log = Logger.get_logger(__name__) + + if not app_name: + return + + app_version_candidates = re.findall(r"\d+", app_name) + if not app_version_candidates: + return + for app_version in app_version_candidates: + if int(app_version) in FUSION_VERSIONS_DICT: + return int(app_version) + else: + log.info( + "Unsupported Fusion version: {app_version}".format( + app_version=app_version + ) + ) + + +class FusionAddon(OpenPypeModule, IHostAddon): + name = "fusion" + host_name = "fusion" + + def initialize(self, module_settings): + self.enabled = True + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [os.path.join(FUSION_HOST_DIR, "hooks")] + + def add_implementation_envs(self, env, app): + # Set default values if are not already set via settings + + defaults = {"AYON_LOG_NO_COLORS": "1"} + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_workfile_extensions(self): + return [".comp"] diff --git a/openpype/hosts/fusion/api/__init__.py b/client/ayon_core/hosts/fusion/api/__init__.py similarity index 100% rename from openpype/hosts/fusion/api/__init__.py rename to client/ayon_core/hosts/fusion/api/__init__.py diff --git a/client/ayon_core/hosts/fusion/api/action.py b/client/ayon_core/hosts/fusion/api/action.py new file mode 100644 index 0000000000..1643f1ce03 --- /dev/null +++ b/client/ayon_core/hosts/fusion/api/action.py @@ -0,0 +1,60 @@ +import pyblish.api + + +from ayon_core.hosts.fusion.api.lib import get_current_comp +from ayon_core.pipeline.publish import get_errored_instances_from_context + + +class SelectInvalidAction(pyblish.api.Action): + """Select invalid nodes in Fusion when plug-in failed. + + To retrieve the invalid nodes this assumes a static `get_invalid()` + method is available on the plugin. + + """ + + label = "Select invalid" + on = "failed" # This action is only available on a failed plug-in + icon = "search" # Icon from Awesome Icon + + def process(self, context, plugin): + errored_instances = get_errored_instances_from_context( + context, + plugin=plugin, + ) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid nodes..") + invalid = list() + for instance in errored_instances: + invalid_nodes = plugin.get_invalid(instance) + if invalid_nodes: + if isinstance(invalid_nodes, (list, tuple)): + invalid.extend(invalid_nodes) + else: + self.log.warning( + "Plug-in returned to be invalid, " + "but has no selectable nodes." 
+ ) + + if not invalid: + # Assume relevant comp is current comp and clear selection + self.log.info("No invalid tools found.") + comp = get_current_comp() + flow = comp.CurrentFrame.FlowView + flow.Select() # No args equals clearing selection + return + + # Assume a single comp + first_tool = invalid[0] + comp = first_tool.Comp() + flow = comp.CurrentFrame.FlowView + flow.Select() # No args equals clearing selection + names = set() + for tool in invalid: + flow.Select(tool, True) + comp.SetActiveTool(tool) + names.add(tool.Name) + self.log.info( + "Selecting invalid tools: %s" % ", ".join(sorted(names)) + ) diff --git a/client/ayon_core/hosts/fusion/api/lib.py b/client/ayon_core/hosts/fusion/api/lib.py new file mode 100644 index 0000000000..b31f812c1b --- /dev/null +++ b/client/ayon_core/hosts/fusion/api/lib.py @@ -0,0 +1,295 @@ +import os +import sys +import re +import contextlib + +from ayon_core.lib import Logger +from ayon_core.client import ( + get_asset_by_name, + get_subset_by_name, + get_last_version_by_subset_id, + get_representation_by_id, + get_representation_by_name, + get_representation_parents, +) +from ayon_core.pipeline import ( + switch_container, + get_current_project_name, +) +from ayon_core.pipeline.context_tools import get_current_project_asset + +self = sys.modules[__name__] +self._project = None + + +def update_frame_range(start, end, comp=None, set_render_range=True, + handle_start=0, handle_end=0): + """Set Fusion comp's start and end frame range + + Args: + start (float, int): start frame + end (float, int): end frame + comp (object, Optional): comp object from fusion + set_render_range (bool, Optional): When True this will also set the + composition's render start and end frame. + handle_start (float, int, Optional): frame handles before start frame + handle_end (float, int, Optional): frame handles after end frame + + Returns: + None + + """ + + if not comp: + comp = get_current_comp() + + # Convert any potential none type to zero + handle_start = handle_start or 0 + handle_end = handle_end or 0 + + attrs = { + "COMPN_GlobalStart": start - handle_start, + "COMPN_GlobalEnd": end + handle_end + } + + # set frame range + if set_render_range: + attrs.update({ + "COMPN_RenderStart": start, + "COMPN_RenderEnd": end + }) + + with comp_lock_and_undo_chunk(comp): + comp.SetAttrs(attrs) + + +def set_asset_framerange(): + """Set Comp's frame range based on current asset""" + asset_doc = get_current_project_asset() + start = asset_doc["data"]["frameStart"] + end = asset_doc["data"]["frameEnd"] + handle_start = asset_doc["data"]["handleStart"] + handle_end = asset_doc["data"]["handleEnd"] + update_frame_range(start, end, set_render_range=True, + handle_start=handle_start, + handle_end=handle_end) + + +def set_asset_resolution(): + """Set Comp's resolution width x height default based on current asset""" + asset_doc = get_current_project_asset() + width = asset_doc["data"]["resolutionWidth"] + height = asset_doc["data"]["resolutionHeight"] + comp = get_current_comp() + + print("Setting comp frame format resolution to {}x{}".format(width, + height)) + comp.SetPrefs({ + "Comp.FrameFormat.Width": width, + "Comp.FrameFormat.Height": height, + }) + + +def validate_comp_prefs(comp=None, force_repair=False): + """Validate current comp defaults with asset settings. + + Validates fps, resolutionWidth, resolutionHeight, aspectRatio. + + This does *not* validate frameStart, frameEnd, handleStart and handleEnd. 
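+
+    Args:
+        comp (object, Optional): Comp to validate; the current comp is
+            used when not provided.
+        force_repair (bool, Optional): When True, apply the asset
+            settings directly instead of prompting the user to repair.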
+ """ + + if comp is None: + comp = get_current_comp() + + log = Logger.get_logger("validate_comp_prefs") + + fields = [ + "name", + "data.fps", + "data.resolutionWidth", + "data.resolutionHeight", + "data.pixelAspect" + ] + asset_doc = get_current_project_asset(fields=fields) + asset_data = asset_doc["data"] + + comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat") + + # Pixel aspect ratio in Fusion is set as AspectX and AspectY so we convert + # the data to something that is more sensible to Fusion + asset_data["pixelAspectX"] = asset_data.pop("pixelAspect") + asset_data["pixelAspectY"] = 1.0 + + validations = [ + ("fps", "Rate", "FPS"), + ("resolutionWidth", "Width", "Resolution Width"), + ("resolutionHeight", "Height", "Resolution Height"), + ("pixelAspectX", "AspectX", "Pixel Aspect Ratio X"), + ("pixelAspectY", "AspectY", "Pixel Aspect Ratio Y") + ] + + invalid = [] + for key, comp_key, label in validations: + asset_value = asset_data[key] + comp_value = comp_frame_format_prefs.get(comp_key) + if asset_value != comp_value: + invalid_msg = "{} {} should be {}".format(label, + comp_value, + asset_value) + invalid.append(invalid_msg) + + if not force_repair: + # Do not log warning if we force repair anyway + log.warning( + "Comp {pref} {value} does not match asset " + "'{asset_name}' {pref} {asset_value}".format( + pref=label, + value=comp_value, + asset_name=asset_doc["name"], + asset_value=asset_value) + ) + + if invalid: + + def _on_repair(): + attributes = dict() + for key, comp_key, _label in validations: + value = asset_data[key] + comp_key_full = "Comp.FrameFormat.{}".format(comp_key) + attributes[comp_key_full] = value + comp.SetPrefs(attributes) + + if force_repair: + log.info("Applying default Comp preferences..") + _on_repair() + return + + from . 
import menu + from ayon_core.tools.utils import SimplePopup + from ayon_core.style import load_stylesheet + dialog = SimplePopup(parent=menu.menu) + dialog.setWindowTitle("Fusion comp has invalid configuration") + + msg = "Comp preferences mismatches '{}'".format(asset_doc["name"]) + msg += "\n" + "\n".join(invalid) + dialog.set_message(msg) + dialog.set_button_text("Repair") + dialog.on_clicked.connect(_on_repair) + dialog.show() + dialog.raise_() + dialog.activateWindow() + dialog.setStyleSheet(load_stylesheet()) + + +@contextlib.contextmanager +def maintained_selection(comp=None): + """Reset comp selection from before the context after the context""" + if comp is None: + comp = get_current_comp() + + previous_selection = comp.GetToolList(True).values() + try: + yield + finally: + flow = comp.CurrentFrame.FlowView + flow.Select() # No args equals clearing selection + if previous_selection: + for tool in previous_selection: + flow.Select(tool, True) + + +@contextlib.contextmanager +def maintained_comp_range(comp=None, + global_start=True, + global_end=True, + render_start=True, + render_end=True): + """Reset comp frame ranges from before the context after the context""" + if comp is None: + comp = get_current_comp() + + comp_attrs = comp.GetAttrs() + preserve_attrs = {} + if global_start: + preserve_attrs["COMPN_GlobalStart"] = comp_attrs["COMPN_GlobalStart"] + if global_end: + preserve_attrs["COMPN_GlobalEnd"] = comp_attrs["COMPN_GlobalEnd"] + if render_start: + preserve_attrs["COMPN_RenderStart"] = comp_attrs["COMPN_RenderStart"] + if render_end: + preserve_attrs["COMPN_RenderEnd"] = comp_attrs["COMPN_RenderEnd"] + + try: + yield + finally: + comp.SetAttrs(preserve_attrs) + + +def get_frame_path(path): + """Get filename for the Fusion Saver with padded number as '#' + + >>> get_frame_path("C:/test.exr") + ('C:/test', 4, '.exr') + + >>> get_frame_path("filename.00.tif") + ('filename.', 2, '.tif') + + >>> get_frame_path("foobar35.tif") + ('foobar', 2, '.tif') + + Args: + path (str): The path to render to. 
+ + Returns: + tuple: head, padding, tail (extension) + + """ + filename, ext = os.path.splitext(path) + + # Find a final number group + match = re.match('.*?([0-9]+)$', filename) + if match: + padding = len(match.group(1)) + # remove number from end since fusion + # will swap it with the frame number + filename = filename[:-padding] + else: + padding = 4 # default Fusion padding + + return filename, padding, ext + + +def get_fusion_module(): + """Get current Fusion instance""" + fusion = getattr(sys.modules["__main__"], "fusion", None) + return fusion + + +def get_bmd_library(): + """Get bmd library""" + bmd = getattr(sys.modules["__main__"], "bmd", None) + return bmd + + +def get_current_comp(): + """Get current comp in this session""" + fusion = get_fusion_module() + if fusion is not None: + comp = fusion.CurrentComp + return comp + + +@contextlib.contextmanager +def comp_lock_and_undo_chunk( + comp, + undo_queue_name="Script CMD", + keep_undo=True, +): + """Lock comp and open an undo chunk during the context""" + try: + comp.Lock() + comp.StartUndo(undo_queue_name) + yield + finally: + comp.Unlock() + comp.EndUndo(keep_undo) diff --git a/client/ayon_core/hosts/fusion/api/menu.py b/client/ayon_core/hosts/fusion/api/menu.py new file mode 100644 index 0000000000..a2b0a7b628 --- /dev/null +++ b/client/ayon_core/hosts/fusion/api/menu.py @@ -0,0 +1,190 @@ +import os +import sys + +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core.tools.utils import host_tools +from ayon_core.style import load_stylesheet +from ayon_core.lib import register_event_callback +from ayon_core.hosts.fusion.scripts import ( + duplicate_with_inputs, +) +from ayon_core.hosts.fusion.api.lib import ( + set_asset_framerange, + set_asset_resolution, +) +from ayon_core.pipeline import get_current_asset_name +from ayon_core.resources import get_ayon_icon_filepath +from ayon_core.tools.utils import get_qt_app + +from .pipeline import FusionEventHandler +from .pulse import FusionPulse + + +MENU_LABEL = os.environ["AYON_MENU_LABEL"] + + +self = sys.modules[__name__] +self.menu = None + + +class OpenPypeMenu(QtWidgets.QWidget): + def __init__(self, *args, **kwargs): + super(OpenPypeMenu, self).__init__(*args, **kwargs) + + self.setObjectName(f"{MENU_LABEL}Menu") + + icon_path = get_ayon_icon_filepath() + icon = QtGui.QIcon(icon_path) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.CustomizeWindowHint + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowMinimizeButtonHint + | QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowStaysOnTopHint + ) + self.render_mode_widget = None + self.setWindowTitle(MENU_LABEL) + + asset_label = QtWidgets.QLabel("Context", self) + asset_label.setStyleSheet( + """QLabel { + font-size: 14px; + font-weight: 600; + color: #5f9fb8; + }""" + ) + asset_label.setAlignment(QtCore.Qt.AlignHCenter) + + workfiles_btn = QtWidgets.QPushButton("Workfiles...", self) + create_btn = QtWidgets.QPushButton("Create...", self) + load_btn = QtWidgets.QPushButton("Load...", self) + publish_btn = QtWidgets.QPushButton("Publish...", self) + manager_btn = QtWidgets.QPushButton("Manage...", self) + libload_btn = QtWidgets.QPushButton("Library...", self) + set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self) + set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self) + duplicate_with_inputs_btn = QtWidgets.QPushButton( + "Duplicate with input connections", self + ) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(10, 20, 10, 20) + + 
layout.addWidget(asset_label) + + layout.addSpacing(20) + + layout.addWidget(workfiles_btn) + + layout.addSpacing(20) + + layout.addWidget(create_btn) + layout.addWidget(load_btn) + layout.addWidget(publish_btn) + layout.addWidget(manager_btn) + + layout.addSpacing(20) + + layout.addWidget(libload_btn) + + layout.addSpacing(20) + + layout.addWidget(set_framerange_btn) + layout.addWidget(set_resolution_btn) + + layout.addSpacing(20) + + layout.addWidget(duplicate_with_inputs_btn) + + self.setLayout(layout) + + # Store reference so we can update the label + self.asset_label = asset_label + + workfiles_btn.clicked.connect(self.on_workfile_clicked) + create_btn.clicked.connect(self.on_create_clicked) + publish_btn.clicked.connect(self.on_publish_clicked) + load_btn.clicked.connect(self.on_load_clicked) + manager_btn.clicked.connect(self.on_manager_clicked) + libload_btn.clicked.connect(self.on_libload_clicked) + duplicate_with_inputs_btn.clicked.connect( + self.on_duplicate_with_inputs_clicked + ) + set_resolution_btn.clicked.connect(self.on_set_resolution_clicked) + set_framerange_btn.clicked.connect(self.on_set_framerange_clicked) + + self._callbacks = [] + self.register_callback("taskChanged", self.on_task_changed) + self.on_task_changed() + + # Force close current process if Fusion is closed + self._pulse = FusionPulse(parent=self) + self._pulse.start() + + # Detect Fusion events as OpenPype events + self._event_handler = FusionEventHandler(parent=self) + self._event_handler.start() + + def on_task_changed(self): + # Update current context label + label = get_current_asset_name() + self.asset_label.setText(label) + + def register_callback(self, name, fn): + # Create a wrapper callback that we only store + # for as long as we want it to persist as callback + def _callback(*args): + fn() + + self._callbacks.append(_callback) + register_event_callback(name, _callback) + + def deregister_all_callbacks(self): + self._callbacks[:] = [] + + def on_workfile_clicked(self): + host_tools.show_workfiles() + + def on_create_clicked(self): + host_tools.show_publisher(tab="create") + + def on_publish_clicked(self): + host_tools.show_publisher(tab="publish") + + def on_load_clicked(self): + host_tools.show_loader(use_context=True) + + def on_manager_clicked(self): + host_tools.show_scene_inventory() + + def on_libload_clicked(self): + host_tools.show_library_loader() + + def on_duplicate_with_inputs_clicked(self): + duplicate_with_inputs.duplicate_with_input_connections() + + def on_set_resolution_clicked(self): + set_asset_resolution() + + def on_set_framerange_clicked(self): + set_asset_framerange() + + +def launch_openpype_menu(): + app = get_qt_app() + + pype_menu = OpenPypeMenu() + + stylesheet = load_stylesheet() + pype_menu.setStyleSheet(stylesheet) + + pype_menu.show() + self.menu = pype_menu + + result = app.exec_() + print("Shutting down..") + sys.exit(result) diff --git a/client/ayon_core/hosts/fusion/api/pipeline.py b/client/ayon_core/hosts/fusion/api/pipeline.py new file mode 100644 index 0000000000..7c480704a5 --- /dev/null +++ b/client/ayon_core/hosts/fusion/api/pipeline.py @@ -0,0 +1,399 @@ +""" +Basic avalon integration +""" +import os +import sys +import logging +import contextlib + +import pyblish.api +from qtpy import QtCore + +from ayon_core.lib import ( + Logger, + register_event_callback, + emit_event +) +from ayon_core.pipeline import ( + register_loader_plugin_path, + register_creator_plugin_path, + register_inventory_action_path, + AVALON_CONTAINER_ID, +) +from 
ayon_core.pipeline.load import any_outdated_containers +from ayon_core.hosts.fusion import FUSION_HOST_DIR +from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost +from ayon_core.tools.utils import host_tools + + +from .lib import ( + get_current_comp, + comp_lock_and_undo_chunk, + validate_comp_prefs +) + +log = Logger.get_logger(__name__) + +PLUGINS_DIR = os.path.join(FUSION_HOST_DIR, "plugins") + +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + + +class FusionLogHandler(logging.Handler): + # Keep a reference to fusion's Print function (Remote Object) + _print = None + + @property + def print(self): + if self._print is not None: + # Use cached + return self._print + + _print = getattr(sys.modules["__main__"], "fusion").Print + if _print is None: + # Backwards compatibility: Print method on Fusion instance was + # added around Fusion 17.4 and wasn't available on PyRemote Object + # before + _print = get_current_comp().Print + self._print = _print + return _print + + def emit(self, record): + entry = self.format(record) + self.print(entry) + + +class FusionHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "fusion" + + def install(self): + """Install fusion-specific functionality of OpenPype. + + This is where you install menus and register families, data + and loaders into fusion. + + It is called automatically when installing via + `ayon_core.pipeline.install_host(ayon_core.hosts.fusion.api)` + + See the Maya equivalent for inspiration on how to implement this. + + """ + # Remove all handlers associated with the root logger object, because + # that one always logs as "warnings" incorrectly. 
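+        # After clearing them, the FusionLogHandler attached below becomes
+        # the only root handler, so all Python logging is routed to the
+        # active comp through Fusion's Print function.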
+ for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + + # Attach default logging handler that prints to active comp + logger = logging.getLogger() + formatter = logging.Formatter(fmt="%(message)s\n") + handler = FusionLogHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging.DEBUG) + + pyblish.api.register_host("fusion") + pyblish.api.register_plugin_path(PUBLISH_PATH) + log.info("Registering Fusion plug-ins..") + + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) + + # Register events + register_event_callback("open", on_after_open) + register_event_callback("save", on_save) + register_event_callback("new", on_new) + + # region workfile io api + def has_unsaved_changes(self): + comp = get_current_comp() + return comp.GetAttrs()["COMPB_Modified"] + + def get_workfile_extensions(self): + return [".comp"] + + def save_workfile(self, dst_path=None): + comp = get_current_comp() + comp.Save(dst_path) + + def open_workfile(self, filepath): + # Hack to get fusion, see + # ayon_core.hosts.fusion.api.pipeline.get_current_comp() + fusion = getattr(sys.modules["__main__"], "fusion", None) + + return fusion.LoadComp(filepath) + + def get_current_workfile(self): + comp = get_current_comp() + current_filepath = comp.GetAttrs()["COMPS_FileName"] + if not current_filepath: + return None + + return current_filepath + + def work_root(self, session): + work_dir = session["AVALON_WORKDIR"] + scene_dir = session.get("AVALON_SCENEDIR") + if scene_dir: + return os.path.join(work_dir, scene_dir) + else: + return work_dir + # endregion + + @contextlib.contextmanager + def maintained_selection(self): + from .lib import maintained_selection + return maintained_selection() + + def get_containers(self): + return ls() + + def update_context_data(self, data, changes): + comp = get_current_comp() + comp.SetData("openpype", data) + + def get_context_data(self): + comp = get_current_comp() + return comp.GetData("openpype") or {} + + +def on_new(event): + comp = event["Rets"]["comp"] + validate_comp_prefs(comp, force_repair=True) + + +def on_save(event): + comp = event["sender"] + validate_comp_prefs(comp) + + +def on_after_open(event): + comp = event["sender"] + validate_comp_prefs(comp) + + if any_outdated_containers(): + log.warning("Scene has outdated content.") + + # Find OpenPype menu to attach to + from . 
import menu + + def _on_show_scene_inventory(): + # ensure that comp is active + frame = comp.CurrentFrame + if not frame: + print("Comp is closed, skipping show scene inventory") + return + frame.ActivateFrame() # raise comp window + host_tools.show_scene_inventory() + + from ayon_core.tools.utils import SimplePopup + from ayon_core.style import load_stylesheet + dialog = SimplePopup(parent=menu.menu) + dialog.setWindowTitle("Fusion comp has outdated content") + dialog.set_message("There are outdated containers in " + "your Fusion comp.") + dialog.on_clicked.connect(_on_show_scene_inventory) + dialog.show() + dialog.raise_() + dialog.activateWindow() + dialog.setStyleSheet(load_stylesheet()) + + +def ls(): + """List containers from active Fusion scene + + This is the host-equivalent of api.ls(), but instead of listing + assets on disk, it lists assets already loaded in Fusion; once loaded + they are called 'containers' + + Yields: + dict: container + + """ + + comp = get_current_comp() + tools = comp.GetToolList(False).values() + + for tool in tools: + container = parse_container(tool) + if container: + yield container + + +def imprint_container(tool, + name, + namespace, + context, + loader=None): + """Imprint a Loader with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + tool (object): The node in Fusion to imprint as container, usually a + Loader. + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + context (dict): Asset information + loader (str, optional): Name of loader used to produce this container. + + Returns: + None + + """ + + data = [ + ("schema", "openpype:container-2.0"), + ("id", AVALON_CONTAINER_ID), + ("name", str(name)), + ("namespace", str(namespace)), + ("loader", str(loader)), + ("representation", str(context["representation"]["_id"])), + ] + + for key, value in data: + tool.SetData("avalon.{}".format(key), value) + + +def parse_container(tool): + """Returns imprinted container data of a tool + + This reads the imprinted data from `imprint_container`. + + """ + + data = tool.GetData('avalon') + if not isinstance(data, dict): + return + + # If not all required data return the empty container + required = ['schema', 'id', 'name', + 'namespace', 'loader', 'representation'] + if not all(key in data for key in required): + return + + container = {key: data[key] for key in required} + + # Store the tool's name + container["objectName"] = tool.Name + + # Store reference to the tool object + container["_tool"] = tool + + return container + + +class FusionEventThread(QtCore.QThread): + """QThread which will periodically ping Fusion app for any events. 
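+
+    Polled events are re-emitted on the `on_event` Qt signal as plain
+    dictionaries, so consumers can connect to the signal from the main
+    thread.
+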
+    The fusion.UIManager must be set up to be notified of events before
+    they'll be reported by this thread, for example:
+        fusion.UIManager.AddNotify("Comp_Save", None)
+
+    """
+
+    on_event = QtCore.Signal(dict)
+
+    def run(self):
+
+        app = getattr(sys.modules["__main__"], "app", None)
+        if app is None:
+            # No Fusion app found
+            return
+
+        # As optimization store the GetEvent method directly because every
+        # getattr of UIManager.GetEvent tries to resolve the Remote Function
+        # through the PyRemoteObject
+        get_event = app.UIManager.GetEvent
+        delay = int(os.environ.get("AYON_FUSION_CALLBACK_INTERVAL", 1000))
+        while True:
+            if self.isInterruptionRequested():
+                return
+
+            # Process all events that have been queued up until now
+            while True:
+                event = get_event(False)
+                if not event:
+                    break
+                self.on_event.emit(event)
+
+            # Wait some time before processing events again
+            # to not keep blocking the UI
+            self.msleep(delay)
+
+
+class FusionEventHandler(QtCore.QObject):
+    """Emits OpenPype events based on Fusion events captured in a QThread.
+
+    This will emit the following OpenPype events based on Fusion actions:
+        save: Comp_Save, Comp_SaveAs
+        open: Comp_Opened
+        new: Comp_New
+
+    To use this you can attach it to your Qt UI so it runs in the background.
+    E.g.
+        >>> handler = FusionEventHandler(parent=window)
+        >>> handler.start()
+
+
+    """
+    ACTION_IDS = [
+        "Comp_Save",
+        "Comp_SaveAs",
+        "Comp_New",
+        "Comp_Opened"
+    ]
+
+    def __init__(self, parent=None):
+        super(FusionEventHandler, self).__init__(parent=parent)
+
+        # Set up Fusion event callbacks
+        fusion = getattr(sys.modules["__main__"], "fusion", None)
+        ui = fusion.UIManager
+
+        # Add notifications for the ones we want to listen to
+        notifiers = []
+        for action_id in self.ACTION_IDS:
+            notifier = ui.AddNotify(action_id, None)
+            notifiers.append(notifier)
+
+        # TODO: Not entirely sure whether these must be kept to avoid
+        # garbage collection
+        self._notifiers = notifiers
+
+        self._event_thread = FusionEventThread(parent=self)
+        self._event_thread.on_event.connect(self._on_event)
+
+    def start(self):
+        self._event_thread.start()
+
+    def stop(self):
+        # QThread has no `stop()` method; request an interruption so
+        # `run()` can exit its polling loop cleanly
+        self._event_thread.requestInterruption()
+
+    def _on_event(self, event):
+        """Handle Fusion events to emit OpenPype events"""
+        if not event:
+            return
+
+        what = event["what"]
+
+        # Comp Save
+        if what in {"Comp_Save", "Comp_SaveAs"}:
+            if not event["Rets"].get("success"):
+                # If the Save action is cancelled it will still emit an
+                # event but with "success": False so we ignore those cases
+                return
+            # Comp was saved
+            emit_event("save", data=event)
+            return
+
+        # Comp New
+        elif what in {"Comp_New"}:
+            emit_event("new", data=event)
+
+        # Comp Opened
+        elif what in {"Comp_Opened"}:
+            emit_event("open", data=event)
diff --git a/client/ayon_core/hosts/fusion/api/plugin.py b/client/ayon_core/hosts/fusion/api/plugin.py
new file mode 100644
index 0000000000..12a29d2986
--- /dev/null
+++ b/client/ayon_core/hosts/fusion/api/plugin.py
@@ -0,0 +1,221 @@
+from copy import deepcopy
+import os
+
+from ayon_core.hosts.fusion.api import (
+    get_current_comp,
+    comp_lock_and_undo_chunk,
+)
+
+from ayon_core.lib import (
+    BoolDef,
+    EnumDef,
+)
+from ayon_core.pipeline import (
+    legacy_io,
+    Creator,
+    CreatedInstance
+)
+
+
+class GenericCreateSaver(Creator):
+    default_variants = ["Main", "Mask"]
+    description = "Fusion Saver to generate image sequence"
+    icon = "fa5.eye"
+
+    instance_attributes = [
+        "reviewable"
+    ]
+
+    settings_category = "fusion"
+
+    image_format = "exr"
+
+    # TODO: This should be renamed
together with Nuke so it is aligned + temp_rendering_path_template = ( + "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}") + + def create(self, subset_name, instance_data, pre_create_data): + self.pass_pre_attributes_to_instance(instance_data, pre_create_data) + + instance = CreatedInstance( + family=self.family, + subset_name=subset_name, + data=instance_data, + creator=self, + ) + data = instance.data_to_store() + comp = get_current_comp() + with comp_lock_and_undo_chunk(comp): + args = (-32768, -32768) # Magical position numbers + saver = comp.AddTool("Saver", *args) + + self._update_tool_with_data(saver, data=data) + + # Register the CreatedInstance + self._imprint(saver, data) + + # Insert the transient data + instance.transient_data["tool"] = saver + + self._add_instance_to_context(instance) + + return instance + + def collect_instances(self): + comp = get_current_comp() + tools = comp.GetToolList(False, "Saver").values() + for tool in tools: + data = self.get_managed_tool_data(tool) + if not data: + continue + + # Add instance + created_instance = CreatedInstance.from_existing(data, self) + + # Collect transient data + created_instance.transient_data["tool"] = tool + + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + new_data = created_inst.data_to_store() + tool = created_inst.transient_data["tool"] + self._update_tool_with_data(tool, new_data) + self._imprint(tool, new_data) + + def remove_instances(self, instances): + for instance in instances: + # Remove the tool from the scene + + tool = instance.transient_data["tool"] + if tool: + tool.Delete() + + # Remove the collected CreatedInstance to remove from UI directly + self._remove_instance_from_context(instance) + + def _imprint(self, tool, data): + # Save all data in a "openpype.{key}" = value data + + # Instance id is the tool's name so we don't need to imprint as data + data.pop("instance_id", None) + + active = data.pop("active", None) + if active is not None: + # Use active value to set the passthrough state + tool.SetAttrs({"TOOLB_PassThrough": not active}) + + for key, value in data.items(): + tool.SetData(f"openpype.{key}", value) + + def _update_tool_with_data(self, tool, data): + """Update tool node name and output path based on subset data""" + if "subset" not in data: + return + + original_subset = tool.GetData("openpype.subset") + original_format = tool.GetData( + "openpype.creator_attributes.image_format" + ) + + subset = data["subset"] + if ( + original_subset != subset + or original_format != data["creator_attributes"]["image_format"] + ): + self._configure_saver_tool(data, tool, subset) + + def _configure_saver_tool(self, data, tool, subset): + formatting_data = deepcopy(data) + + # get frame padding from anatomy templates + frame_padding = self.project_anatomy.templates["frame_padding"] + + # get output format + ext = data["creator_attributes"]["image_format"] + + # Subset change detected + workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"]) + formatting_data.update({ + "workdir": workdir, + "frame": "0" * frame_padding, + "ext": ext, + "product": { + "name": formatting_data["subset"], + "type": formatting_data["family"], + }, + }) + + # build file path to render + filepath = self.temp_rendering_path_template.format(**formatting_data) + + comp = get_current_comp() + tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath)) + + # Rename tool + if tool.Name != subset: + print(f"Renaming {tool.Name} 
-> {subset}") + tool.SetAttrs({"TOOLS_Name": subset}) + + def get_managed_tool_data(self, tool): + """Return data of the tool if it matches creator identifier""" + data = tool.GetData("openpype") + if not isinstance(data, dict): + return + + required = { + "id": "pyblish.avalon.instance", + "creator_identifier": self.identifier, + } + for key, value in required.items(): + if key not in data or data[key] != value: + return + + # Get active state from the actual tool state + attrs = tool.GetAttrs() + passthrough = attrs["TOOLB_PassThrough"] + data["active"] = not passthrough + + # Override publisher's UUID generation because tool names are + # already unique in Fusion in a comp + data["instance_id"] = tool.Name + + return data + + def get_instance_attr_defs(self): + """Settings for publish page""" + return self.get_pre_create_attr_defs() + + def pass_pre_attributes_to_instance(self, instance_data, pre_create_data): + creator_attrs = instance_data["creator_attributes"] = {} + for pass_key in pre_create_data.keys(): + creator_attrs[pass_key] = pre_create_data[pass_key] + + def _get_render_target_enum(self): + rendering_targets = { + "local": "Local machine rendering", + "frames": "Use existing frames", + } + if "farm_rendering" in self.instance_attributes: + rendering_targets["farm"] = "Farm rendering" + + return EnumDef( + "render_target", items=rendering_targets, label="Render target" + ) + + def _get_reviewable_bool(self): + return BoolDef( + "review", + default=("reviewable" in self.instance_attributes), + label="Review", + ) + + def _get_image_format_enum(self): + image_format_options = ["exr", "tga", "tif", "png", "jpg"] + return EnumDef( + "image_format", + items=image_format_options, + default=self.image_format, + label="Output Image Format", + ) diff --git a/openpype/hosts/fusion/api/pulse.py b/client/ayon_core/hosts/fusion/api/pulse.py similarity index 96% rename from openpype/hosts/fusion/api/pulse.py rename to client/ayon_core/hosts/fusion/api/pulse.py index 762f05ba7e..7128b7e1ff 100644 --- a/openpype/hosts/fusion/api/pulse.py +++ b/client/ayon_core/hosts/fusion/api/pulse.py @@ -14,7 +14,7 @@ def run(self): app = getattr(sys.modules["__main__"], "app", None) # Interval in milliseconds - interval = os.environ.get("OPENPYPE_FUSION_PULSE_INTERVAL", 1000) + interval = os.environ.get("AYON_FUSION_PULSE_INTERVAL", 1000) while True: if self.isInterruptionRequested(): diff --git a/openpype/hosts/fusion/deploy/MenuScripts/README.md b/client/ayon_core/hosts/fusion/deploy/MenuScripts/README.md similarity index 100% rename from openpype/hosts/fusion/deploy/MenuScripts/README.md rename to client/ayon_core/hosts/fusion/deploy/MenuScripts/README.md diff --git a/openpype/hosts/fusion/deploy/MenuScripts/install_pyside2.py b/client/ayon_core/hosts/fusion/deploy/MenuScripts/install_pyside2.py similarity index 100% rename from openpype/hosts/fusion/deploy/MenuScripts/install_pyside2.py rename to client/ayon_core/hosts/fusion/deploy/MenuScripts/install_pyside2.py diff --git a/openpype/hosts/fusion/deploy/MenuScripts/launch_menu.py b/client/ayon_core/hosts/fusion/deploy/MenuScripts/launch_menu.py similarity index 81% rename from openpype/hosts/fusion/deploy/MenuScripts/launch_menu.py rename to client/ayon_core/hosts/fusion/deploy/MenuScripts/launch_menu.py index 1c58ee50e4..23b02b1b69 100644 --- a/openpype/hosts/fusion/deploy/MenuScripts/launch_menu.py +++ b/client/ayon_core/hosts/fusion/deploy/MenuScripts/launch_menu.py @@ -5,8 +5,8 @@ # hack to handle discrepancy between distributed libraries and 
Python 3.6 # mostly because wrong version of urllib3 # TODO remove when not necessary - from openpype import PACKAGE_DIR - FUSION_HOST_DIR = os.path.join(PACKAGE_DIR, "hosts", "fusion") + from ayon_core import AYON_CORE_ROOT + FUSION_HOST_DIR = os.path.join(AYON_CORE_ROOT, "hosts", "fusion") vendor_path = os.path.join(FUSION_HOST_DIR, "vendor") if vendor_path not in sys.path: @@ -14,8 +14,8 @@ print(f"Added vendorized libraries from {vendor_path}") -from openpype.lib import Logger -from openpype.pipeline import ( +from ayon_core.lib import Logger +from ayon_core.pipeline import ( install_host, registered_host, ) @@ -26,8 +26,8 @@ def main(env): # However the contents of that folder can conflict with Qt library dlls # so we make sure to move out of it to avoid DLL Load Failed errors. os.chdir("..") - from openpype.hosts.fusion.api import FusionHost - from openpype.hosts.fusion.api import menu + from ayon_core.hosts.fusion.api import FusionHost + from ayon_core.hosts.fusion.api import menu # activate resolve from pype install_host(FusionHost()) diff --git a/openpype/hosts/fusion/deploy/ayon/Config/menu.fu b/client/ayon_core/hosts/fusion/deploy/ayon/Config/menu.fu similarity index 100% rename from openpype/hosts/fusion/deploy/ayon/Config/menu.fu rename to client/ayon_core/hosts/fusion/deploy/ayon/Config/menu.fu diff --git a/client/ayon_core/hosts/fusion/deploy/ayon/fusion_shared.prefs b/client/ayon_core/hosts/fusion/deploy/ayon/fusion_shared.prefs new file mode 100644 index 0000000000..59b0f82bad --- /dev/null +++ b/client/ayon_core/hosts/fusion/deploy/ayon/fusion_shared.prefs @@ -0,0 +1,19 @@ +{ +Locked = true, +Global = { + Paths = { + Map = { + ["AYON:"] = "$(AYON_FUSION_ROOT)/deploy/ayon", + ["Config:"] = "UserPaths:Config;AYON:Config", + ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts", + }, + }, + Script = { + PythonVersion = 3, + Python3Forced = true + }, + UserInterface = { + Language = "en_US" + }, + }, +} diff --git a/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py b/client/ayon_core/hosts/fusion/hooks/pre_fusion_profile_hook.py similarity index 94% rename from openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py rename to client/ayon_core/hosts/fusion/hooks/pre_fusion_profile_hook.py index 59053ba62a..f63aaa1eb4 100644 --- a/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py +++ b/client/ayon_core/hosts/fusion/hooks/pre_fusion_profile_hook.py @@ -2,13 +2,12 @@ import shutil import platform from pathlib import Path -from openpype import AYON_SERVER_ENABLED -from openpype.hosts.fusion import ( +from ayon_core.hosts.fusion import ( FUSION_HOST_DIR, FUSION_VERSIONS_DICT, get_fusion_version, ) -from openpype.lib.applications import ( +from ayon_core.lib.applications import ( PreLaunchHook, LaunchTypes, ApplicationLaunchFailed, @@ -163,12 +162,8 @@ def execute(self): # to define where it can read custom scripts and tools from master_prefs_variable = f"FUSION{profile_version}_MasterPrefs" - if AYON_SERVER_ENABLED: - master_prefs = Path( - FUSION_HOST_DIR, "deploy", "ayon", "fusion_shared.prefs") - else: - master_prefs = Path( - FUSION_HOST_DIR, "deploy", "openpype", "fusion_shared.prefs") + master_prefs = Path( + FUSION_HOST_DIR, "deploy", "ayon", "fusion_shared.prefs") self.log.info(f"Setting {master_prefs_variable}: {master_prefs}") self.launch_context.env[master_prefs_variable] = str(master_prefs) diff --git a/openpype/hosts/fusion/hooks/pre_fusion_setup.py b/client/ayon_core/hosts/fusion/hooks/pre_fusion_setup.py similarity index 91% rename from 
openpype/hosts/fusion/hooks/pre_fusion_setup.py rename to client/ayon_core/hosts/fusion/hooks/pre_fusion_setup.py index 3da8968727..7cfa9d0a26 100644 --- a/openpype/hosts/fusion/hooks/pre_fusion_setup.py +++ b/client/ayon_core/hosts/fusion/hooks/pre_fusion_setup.py @@ -1,10 +1,10 @@ import os -from openpype.lib.applications import ( +from ayon_core.lib.applications import ( PreLaunchHook, LaunchTypes, ApplicationLaunchFailed, ) -from openpype.hosts.fusion import ( +from ayon_core.hosts.fusion import ( FUSION_HOST_DIR, FUSION_VERSIONS_DICT, get_fusion_version, @@ -67,5 +67,5 @@ def execute(self): # for hook installing PySide2 self.data["fusion_python3_home"] = py3_dir - self.log.info(f"Setting OPENPYPE_FUSION: {FUSION_HOST_DIR}") - self.launch_context.env["OPENPYPE_FUSION"] = FUSION_HOST_DIR + self.log.info(f"Setting AYON_FUSION_ROOT: {FUSION_HOST_DIR}") + self.launch_context.env["AYON_FUSION_ROOT"] = FUSION_HOST_DIR diff --git a/client/ayon_core/hosts/fusion/hooks/pre_pyside_install.py b/client/ayon_core/hosts/fusion/hooks/pre_pyside_install.py new file mode 100644 index 0000000000..a9db39e24e --- /dev/null +++ b/client/ayon_core/hosts/fusion/hooks/pre_pyside_install.py @@ -0,0 +1,186 @@ +import os +import subprocess +import platform +import uuid + +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes + + +class InstallPySideToFusion(PreLaunchHook): + """Automatically installs Qt binding to fusion's python packages. + + Check if fusion has installed PySide2 and will try to install if not. + + For pipeline implementation is required to have Qt binding installed in + fusion's python packages. + """ + + app_groups = {"fusion"} + order = 2 + launch_types = {LaunchTypes.local} + + def execute(self): + # Prelaunch hook is not crucial + try: + settings = self.data["project_settings"][self.host_name] + if not settings["hooks"]["InstallPySideToFusion"]["enabled"]: + return + self.inner_execute() + except Exception: + self.log.warning( + "Processing of {} crashed.".format(self.__class__.__name__), + exc_info=True + ) + + def inner_execute(self): + self.log.debug("Check for PySide2 installation.") + + fusion_python3_home = self.data.get("fusion_python3_home") + if not fusion_python3_home: + self.log.warning("'fusion_python3_home' was not provided. " + "Installation of PySide2 not possible") + return + + if platform.system().lower() == "windows": + exe_filenames = ["python.exe"] + else: + exe_filenames = ["python3", "python"] + + for exe_filename in exe_filenames: + python_executable = os.path.join(fusion_python3_home, exe_filename) + if os.path.exists(python_executable): + break + + if not os.path.exists(python_executable): + self.log.warning( + "Couldn't find python executable for fusion. {}".format( + python_executable + ) + ) + return + + # Check if PySide2 is installed and skip if yes + if self._is_pyside_installed(python_executable): + self.log.debug("Fusion has already installed PySide2.") + return + + self.log.debug("Installing PySide2.") + # Install PySide2 in fusion's python + if self._windows_require_permissions( + os.path.dirname(python_executable)): + result = self._install_pyside_windows(python_executable) + else: + result = self._install_pyside(python_executable) + + if result: + self.log.info("Successfully installed PySide2 module to fusion.") + else: + self.log.warning("Failed to install PySide2 module to fusion.") + + def _install_pyside_windows(self, python_executable): + """Install PySide2 python module to fusion's python. 
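+
+        What happens below, in short: `python -m pip install
+        --ignore-installed PySide2` is executed through `ShellExecuteEx`
+        with the `runas` verb, so Windows shows a UAC elevation prompt
+        before pip runs.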
+
+        Installation requires administration rights; that is why the
+        "pywin32" module is used to execute the command and ask for
+        administration rights.
+        """
+        try:
+            import win32api
+            import win32con
+            import win32process
+            import win32event
+            import pywintypes
+            from win32comext.shell.shell import ShellExecuteEx
+            from win32comext.shell import shellcon
+        except Exception:
+            self.log.warning("Couldn't import \"pywin32\" modules")
+            return False
+
+        try:
+            # Parameters
+            # - run pip as a module with "-m pip"; "--ignore-installed"
+            #   forces the install into fusion's site-packages and makes
+            #   sure it is binary compatible
+            parameters = "-m pip install --ignore-installed PySide2"
+
+            # Execute command and ask for administrator's rights
+            process_info = ShellExecuteEx(
+                nShow=win32con.SW_SHOWNORMAL,
+                fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
+                lpVerb="runas",
+                lpFile=python_executable,
+                lpParameters=parameters,
+                lpDirectory=os.path.dirname(python_executable)
+            )
+            process_handle = process_info["hProcess"]
+            win32event.WaitForSingleObject(process_handle,
+                                           win32event.INFINITE)
+            returncode = win32process.GetExitCodeProcess(process_handle)
+            return returncode == 0
+        except pywintypes.error:
+            return False
+
+    def _install_pyside(self, python_executable):
+        """Install PySide2 python module to fusion's python."""
+        try:
+            # Parameters
+            # - run pip as a module with "-m pip"; "--ignore-installed"
+            #   forces the install into fusion's site-packages and makes
+            #   sure it is binary compatible
+            env = dict(os.environ)
+            # Use pop() so a missing PYTHONPATH does not raise KeyError
+            env.pop("PYTHONPATH", None)
+            args = [
+                python_executable,
+                "-m",
+                "pip",
+                "install",
+                "--ignore-installed",
+                "PySide2",
+            ]
+            process = subprocess.Popen(
+                args, stdout=subprocess.PIPE, universal_newlines=True,
+                env=env
+            )
+            process.communicate()
+            return process.returncode == 0
+        except PermissionError:
+            self.log.warning(
+                "Permission denied with command: "
+                "\"{}\".".format(" ".join(args))
+            )
+        except OSError as error:
+            self.log.warning(f"OS error has occurred: \"{error}\".")
+        except subprocess.SubprocessError:
+            pass
+
+    def _is_pyside_installed(self, python_executable):
+        """Check if a Qt binding is importable from fusion's python."""
+        args = [python_executable, "-c", "from qtpy import QtWidgets"]
+        process = subprocess.Popen(args,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE)
+        _, stderr = process.communicate()
+        stderr = stderr.decode()
+        if stderr:
+            return False
+        return True
+
+    def _windows_require_permissions(self, dirpath):
+        if platform.system().lower() != "windows":
+            return False
+
+        try:
+            # Attempt to create a temporary file in the folder
+            temp_file_path = os.path.join(dirpath, uuid.uuid4().hex)
+            with open(temp_file_path, "w"):
+                pass
+            os.remove(temp_file_path)  # Clean up temporary file
+            return False
+
+        except PermissionError:
+            return True
+
+        except BaseException as exc:
+            print(("Failed to determine if the folder requires permissions. "
+ "Unexpected error: {}").format(exc)) + return False diff --git a/openpype/hosts/fusion/plugins/create/create_image_saver.py b/client/ayon_core/hosts/fusion/plugins/create/create_image_saver.py similarity index 91% rename from openpype/hosts/fusion/plugins/create/create_image_saver.py rename to client/ayon_core/hosts/fusion/plugins/create/create_image_saver.py index 490228d488..856d86cff6 100644 --- a/openpype/hosts/fusion/plugins/create/create_image_saver.py +++ b/client/ayon_core/hosts/fusion/plugins/create/create_image_saver.py @@ -1,7 +1,7 @@ -from openpype.lib import NumberDef +from ayon_core.lib import NumberDef -from openpype.hosts.fusion.api.plugin import GenericCreateSaver -from openpype.hosts.fusion.api import get_current_comp +from ayon_core.hosts.fusion.api.plugin import GenericCreateSaver +from ayon_core.hosts.fusion.api import get_current_comp class CreateImageSaver(GenericCreateSaver): diff --git a/openpype/hosts/fusion/plugins/create/create_saver.py b/client/ayon_core/hosts/fusion/plugins/create/create_saver.py similarity index 95% rename from openpype/hosts/fusion/plugins/create/create_saver.py rename to client/ayon_core/hosts/fusion/plugins/create/create_saver.py index 3a8ffe890b..1a0dad7060 100644 --- a/openpype/hosts/fusion/plugins/create/create_saver.py +++ b/client/ayon_core/hosts/fusion/plugins/create/create_saver.py @@ -1,6 +1,6 @@ -from openpype.lib import EnumDef +from ayon_core.lib import EnumDef -from openpype.hosts.fusion.api.plugin import GenericCreateSaver +from ayon_core.hosts.fusion.api.plugin import GenericCreateSaver class CreateSaver(GenericCreateSaver): diff --git a/client/ayon_core/hosts/fusion/plugins/create/create_workfile.py b/client/ayon_core/hosts/fusion/plugins/create/create_workfile.py new file mode 100644 index 0000000000..08d39b0145 --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/create/create_workfile.py @@ -0,0 +1,109 @@ +from ayon_core.hosts.fusion.api import ( + get_current_comp +) +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import ( + AutoCreator, + CreatedInstance, +) + + +class FusionWorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + label = "Workfile" + icon = "fa5.file" + + default_variant = "Main" + + create_allow_context_change = False + + data_key = "openpype_workfile" + + def collect_instances(self): + + comp = get_current_comp() + data = comp.GetData(self.data_key) + if not data: + return + + instance = CreatedInstance( + family=self.family, + subset_name=data["subset"], + data=data, + creator=self + ) + instance.transient_data["comp"] = comp + + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + comp = created_inst.transient_data["comp"] + if not hasattr(comp, "SetData"): + # Comp is not alive anymore, likely closed by the user + self.log.error("Workfile comp not found for existing instance." 
+ " Comp might have been closed in the meantime.") + continue + + # Imprint data into the comp + data = created_inst.data_to_store() + comp.SetData(self.data_key, data) + + def create(self, options=None): + + comp = get_current_comp() + if not comp: + self.log.error("Unable to find current comp") + return + + existing_instance = None + for instance in self.create_context.instances: + if instance.family == self.family: + existing_instance = instance + break + + project_name = self.create_context.get_current_project_name() + asset_name = self.create_context.get_current_asset_name() + task_name = self.create_context.get_current_task_name() + host_name = self.create_context.host_name + + if existing_instance is None: + existing_instance_asset = None + else: + existing_instance_asset = existing_instance["folderPath"] + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, task_name, asset_doc, + project_name, host_name + ) + data = { + "folderPath": asset_name, + "task": task_name, + "variant": self.default_variant, + } + data.update(self.get_dynamic_data( + self.default_variant, task_name, asset_doc, + project_name, host_name, None + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + new_instance.transient_data["comp"] = comp + self._add_instance_to_context(new_instance) + + elif ( + existing_instance_asset != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, task_name, asset_doc, + project_name, host_name + ) + existing_instance["folderPath"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/client/ayon_core/hosts/fusion/plugins/inventory/select_containers.py b/client/ayon_core/hosts/fusion/plugins/inventory/select_containers.py new file mode 100644 index 0000000000..167cd3be1f --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/inventory/select_containers.py @@ -0,0 +1,27 @@ +from ayon_core.pipeline import InventoryAction + + +class FusionSelectContainers(InventoryAction): + + label = "Select Containers" + icon = "mouse-pointer" + color = "#d8d8d8" + + def process(self, containers): + from ayon_core.hosts.fusion.api import ( + get_current_comp, + comp_lock_and_undo_chunk + ) + + tools = [i["_tool"] for i in containers] + + comp = get_current_comp() + flow = comp.CurrentFrame.FlowView + + with comp_lock_and_undo_chunk(comp, self.label): + # Clear selection + flow.Select() + + # Select tool + for tool in tools: + flow.Select(tool) diff --git a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py b/client/ayon_core/hosts/fusion/plugins/inventory/set_tool_color.py similarity index 93% rename from openpype/hosts/fusion/plugins/inventory/set_tool_color.py rename to client/ayon_core/hosts/fusion/plugins/inventory/set_tool_color.py index a057ad1e89..7167cf0fc5 100644 --- a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py +++ b/client/ayon_core/hosts/fusion/plugins/inventory/set_tool_color.py @@ -1,8 +1,8 @@ from qtpy import QtGui, QtWidgets -from openpype.pipeline import InventoryAction -from openpype import style -from openpype.hosts.fusion.api import ( +from ayon_core.pipeline import InventoryAction +from ayon_core import style +from ayon_core.hosts.fusion.api import ( get_current_comp, comp_lock_and_undo_chunk ) diff --git 
a/client/ayon_core/hosts/fusion/plugins/load/actions.py b/client/ayon_core/hosts/fusion/plugins/load/actions.py new file mode 100644 index 0000000000..f67878bcff --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/load/actions.py @@ -0,0 +1,80 @@ +"""A module containing generic loader actions that will display in the Loader. + +""" + +from ayon_core.pipeline import load + + +class FusionSetFrameRangeLoader(load.LoaderPlugin): + """Set frame range excluding pre- and post-handles""" + + families = ["animation", + "camera", + "imagesequence", + "render", + "yeticache", + "pointcache", + "render"] + representations = ["*"] + extensions = {"*"} + + label = "Set frame range" + order = 11 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + from ayon_core.hosts.fusion.api import lib + + version = context['version'] + version_data = version.get("data", {}) + + start = version_data.get("frameStart", None) + end = version_data.get("frameEnd", None) + + if start is None or end is None: + print("Skipping setting frame range because start or " + "end frame data is missing..") + return + + lib.update_frame_range(start, end) + + +class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin): + """Set frame range including pre- and post-handles""" + + families = ["animation", + "camera", + "imagesequence", + "render", + "yeticache", + "pointcache", + "render"] + representations = ["*"] + + label = "Set frame range (with handles)" + order = 12 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + from ayon_core.hosts.fusion.api import lib + + version = context['version'] + version_data = version.get("data", {}) + + start = version_data.get("frameStart", None) + end = version_data.get("frameEnd", None) + + if start is None or end is None: + print("Skipping setting frame range because start or " + "end frame data is missing..") + return + + # Include handles + start -= version_data.get("handleStart", 0) + end += version_data.get("handleEnd", 0) + + lib.update_frame_range(start, end) diff --git a/client/ayon_core/hosts/fusion/plugins/load/load_alembic.py b/client/ayon_core/hosts/fusion/plugins/load/load_alembic.py new file mode 100644 index 0000000000..0bc7ffd180 --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/load/load_alembic.py @@ -0,0 +1,71 @@ +from ayon_core.pipeline import ( + load, + get_representation_path, +) +from ayon_core.hosts.fusion.api import ( + imprint_container, + get_current_comp, + comp_lock_and_undo_chunk +) + + +class FusionLoadAlembicMesh(load.LoaderPlugin): + """Load Alembic mesh into Fusion""" + + families = ["pointcache", "model"] + representations = ["*"] + extensions = {"abc"} + + label = "Load alembic mesh" + order = -10 + icon = "code-fork" + color = "orange" + + tool_type = "SurfaceAlembicMesh" + + def load(self, context, name, namespace, data): + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context['asset']['name'] + + # Create the Loader with the filename path set + comp = get_current_comp() + with comp_lock_and_undo_chunk(comp, "Create tool"): + + path = self.filepath_from_context(context) + + args = (-32768, -32768) + tool = comp.AddTool(self.tool_type, *args) + tool["Filename"] = path + + imprint_container(tool, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + 
"""Update Alembic path""" + + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + path = get_representation_path(representation) + + with comp_lock_and_undo_chunk(comp, "Update tool"): + tool["Filename"] = path + + # Update the imprinted representation + tool.SetData("avalon.representation", str(representation["_id"])) + + def remove(self, container): + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + with comp_lock_and_undo_chunk(comp, "Remove tool"): + tool.Delete() diff --git a/client/ayon_core/hosts/fusion/plugins/load/load_fbx.py b/client/ayon_core/hosts/fusion/plugins/load/load_fbx.py new file mode 100644 index 0000000000..3751d7cc39 --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/load/load_fbx.py @@ -0,0 +1,86 @@ +from ayon_core.pipeline import ( + load, + get_representation_path, +) +from ayon_core.hosts.fusion.api import ( + imprint_container, + get_current_comp, + comp_lock_and_undo_chunk, +) + + +class FusionLoadFBXMesh(load.LoaderPlugin): + """Load FBX mesh into Fusion""" + + families = ["*"] + representations = ["*"] + extensions = { + "3ds", + "amc", + "aoa", + "asf", + "bvh", + "c3d", + "dae", + "dxf", + "fbx", + "htr", + "mcd", + "obj", + "trc", + } + + label = "Load FBX mesh" + order = -10 + icon = "code-fork" + color = "orange" + + tool_type = "SurfaceFBXMesh" + + def load(self, context, name, namespace, data): + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context["asset"]["name"] + + # Create the Loader with the filename path set + comp = get_current_comp() + with comp_lock_and_undo_chunk(comp, "Create tool"): + path = self.filepath_from_context(context) + + args = (-32768, -32768) + tool = comp.AddTool(self.tool_type, *args) + tool["ImportFile"] = path + + imprint_container( + tool, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + ) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """Update path""" + + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + path = get_representation_path(representation) + + with comp_lock_and_undo_chunk(comp, "Update tool"): + tool["ImportFile"] = path + + # Update the imprinted representation + tool.SetData("avalon.representation", str(representation["_id"])) + + def remove(self, container): + tool = container["_tool"] + assert tool.ID == self.tool_type, f"Must be {self.tool_type}" + comp = tool.Comp() + + with comp_lock_and_undo_chunk(comp, "Remove tool"): + tool.Delete() diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/client/ayon_core/hosts/fusion/plugins/load/load_sequence.py similarity index 97% rename from openpype/hosts/fusion/plugins/load/load_sequence.py rename to client/ayon_core/hosts/fusion/plugins/load/load_sequence.py index 4401af97eb..5c183f5159 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/client/ayon_core/hosts/fusion/plugins/load/load_sequence.py @@ -1,13 +1,13 @@ import contextlib -import openpype.pipeline.load as load -from openpype.pipeline.load import get_representation_context -from openpype.hosts.fusion.api import ( +import ayon_core.pipeline.load as load +from ayon_core.pipeline.load import get_representation_context +from ayon_core.hosts.fusion.api import ( imprint_container, get_current_comp, 
 comp_lock_and_undo_chunk,
 )
-from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
+from ayon_core.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
 
 comp = get_current_comp()
diff --git a/openpype/hosts/fusion/plugins/load/load_usd.py b/client/ayon_core/hosts/fusion/plugins/load/load_usd.py
similarity index 94%
rename from openpype/hosts/fusion/plugins/load/load_usd.py
rename to client/ayon_core/hosts/fusion/plugins/load/load_usd.py
index 4f1813a646..9c61894d66 100644
--- a/openpype/hosts/fusion/plugins/load/load_usd.py
+++ b/client/ayon_core/hosts/fusion/plugins/load/load_usd.py
@@ -1,13 +1,13 @@
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     load,
     get_representation_path,
 )
-from openpype.hosts.fusion.api import (
+from ayon_core.hosts.fusion.api import (
     imprint_container,
     get_current_comp,
     comp_lock_and_undo_chunk
 )
-from openpype.hosts.fusion.api.lib import get_fusion_module
+from ayon_core.hosts.fusion.api.lib import get_fusion_module
 
 
 class FusionLoadUSD(load.LoaderPlugin):
diff --git a/client/ayon_core/hosts/fusion/plugins/load/load_workfile.py b/client/ayon_core/hosts/fusion/plugins/load/load_workfile.py
new file mode 100644
index 0000000000..d50fded502
--- /dev/null
+++ b/client/ayon_core/hosts/fusion/plugins/load/load_workfile.py
@@ -0,0 +1,33 @@
+"""Import workfiles into your current comp.
+As all imported nodes are free-floating and will probably be changed,
+there is no update or reload function added for this plugin.
+"""
+
+from ayon_core.pipeline import load
+
+from ayon_core.hosts.fusion.api import (
+    get_current_comp,
+    get_bmd_library,
+)
+
+
+class FusionLoadWorkfile(load.LoaderPlugin):
+    """Load the content of a workfile into Fusion"""
+
+    families = ["workfile"]
+    representations = ["*"]
+    extensions = {"comp"}
+
+    label = "Load Workfile"
+    order = -10
+    icon = "code-fork"
+    color = "orange"
+
+    def load(self, context, name, namespace, data):
+        # Get the needed elements
+        bmd = get_bmd_library()
+        comp = get_current_comp()
+        path = self.filepath_from_context(context)
+
+        # Paste the content of the file into the current comp
+        comp.Paste(bmd.readfile(path))
diff --git a/openpype/hosts/fusion/plugins/publish/collect_comp.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_comp.py
similarity index 91%
rename from openpype/hosts/fusion/plugins/publish/collect_comp.py
rename to client/ayon_core/hosts/fusion/plugins/publish/collect_comp.py
index d1c49790fa..591c460d5a 100644
--- a/openpype/hosts/fusion/plugins/publish/collect_comp.py
+++ b/client/ayon_core/hosts/fusion/plugins/publish/collect_comp.py
@@ -1,6 +1,6 @@
 import pyblish.api
 
-from openpype.hosts.fusion.api import get_current_comp
+from ayon_core.hosts.fusion.api import get_current_comp
 
 
 class CollectCurrentCompFusion(pyblish.api.ContextPlugin):
diff --git a/openpype/hosts/fusion/plugins/publish/collect_comp_frame_range.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_comp_frame_range.py
similarity index 100%
rename from openpype/hosts/fusion/plugins/publish/collect_comp_frame_range.py
rename to client/ayon_core/hosts/fusion/plugins/publish/collect_comp_frame_range.py
diff --git a/client/ayon_core/hosts/fusion/plugins/publish/collect_inputs.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_inputs.py
new file mode 100644
index 0000000000..002c0a5672
--- /dev/null
+++ b/client/ayon_core/hosts/fusion/plugins/publish/collect_inputs.py
@@ -0,0 +1,116 @@
+import pyblish.api
+
+from ayon_core.pipeline import registered_host
+
+
+def
collect_input_containers(tools):
+    """Collect containers that contain any of the tools in `tools`.
+
+    This will return any loaded Avalon container that contains at least one
+    of the given tools. As such, that container is an input for the publish:
+    its member tools are used in the render.
+
+    Returns:
+        list: Input Avalon containers.
+
+    """
+
+    # Lookup by tool name (tool names are unique within a Fusion comp)
+    lookup = frozenset([tool.Name for tool in tools])
+
+    containers = []
+    host = registered_host()
+    for container in host.ls():
+
+        name = container["_tool"].Name
+
+        # We currently assume no "groups" as containers but just single tools
+        # like a single "Loader" operator. As such we just check whether the
+        # Loader is part of the processing queue.
+        if name in lookup:
+            containers.append(container)
+
+    return containers
+
+
+def iter_upstream(tool):
+    """Yield all upstream input tools for the given tool.
+
+    Yields:
+        tool: The input tools.
+
+    """
+
+    def get_connected_input_tools(tool):
+        """Helper function that returns connected input tools for a tool."""
+        inputs = []
+
+        # Filter to input types that can have sensible upstream
+        # connections. We ignore plain "Number" inputs since there can be
+        # many to iterate, slowing things down quite a bit - and in
+        # practice they don't have upstream connections.
+        VALID_INPUT_TYPES = ['Image', 'Particles', 'Mask', 'DataType3D']
+        for type_ in VALID_INPUT_TYPES:
+            for input_ in tool.GetInputList(type_).values():
+                output = input_.GetConnectedOutput()
+                if output:
+                    input_tool = output.GetTool()
+                    inputs.append(input_tool)
+
+        return inputs
+
+    # Initialize the processing queue with the tool's own inputs
+    queue = get_connected_input_tools(tool)
+
+    # We keep track of which tool names we have processed so far, to ensure
+    # we don't process the same hierarchy again. We are not pushing the tool
+    # objects themselves into the set as that doesn't correctly recognize the
+    # same tool. Since tool names are unique within a Fusion comp we rely on
+    # that instead.
+    collected = set(tool.Name for tool in queue)
+
+    # Traverse upstream references for all tools and yield them as we
+    # process the queue.
+    while queue:
+        upstream_tool = queue.pop()
+        yield upstream_tool
+
+        # Find upstream tools that are not collected yet.
+        upstream_inputs = get_connected_input_tools(upstream_tool)
+        upstream_inputs = [t for t in upstream_inputs if
+                           t.Name not in collected]
+
+        queue.extend(upstream_inputs)
+        collected.update(tool.Name for tool in upstream_inputs)
+
+
+class CollectUpstreamInputs(pyblish.api.InstancePlugin):
+    """Collect source input containers used for this publish.
+
+    This will include `inputs` data of which loaded publishes were used in
+    the generation of this publish. This leaves an upstream trace to what
+    was used as input.
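+
+    For example, a Saver rendering a comp that reads two loaded image
+    sequences will get the representations of those two loads collected
+    as its input representations.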
+
+    """
+
+    label = "Collect Inputs"
+    order = pyblish.api.CollectorOrder + 0.2
+    hosts = ["fusion"]
+    families = ["render", "image"]
+
+    def process(self, instance):
+
+        # Get all upstream tools and include the instance's own tool
+        if not any(instance[:]):
+            self.log.debug("No tool found in instance, skipping..")
+            return
+
+        tool = instance[0]
+        nodes = list(iter_upstream(tool))
+        nodes.append(tool)
+
+        # Collect containers for the given set of tools
+        containers = collect_input_containers(nodes)
+
+        inputs = [c["representation"] for c in containers]
+        instance.data["inputRepresentations"] = inputs
+        self.log.debug("Collected inputs: %s" % inputs)
diff --git a/openpype/hosts/fusion/plugins/publish/collect_instances.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_instances.py
similarity index 100%
rename from openpype/hosts/fusion/plugins/publish/collect_instances.py
rename to client/ayon_core/hosts/fusion/plugins/publish/collect_instances.py
diff --git a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py
new file mode 100644
index 0000000000..f8870da1c5
--- /dev/null
+++ b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py
@@ -0,0 +1,210 @@
+import os
+import attr
+import pyblish.api
+
+from ayon_core.pipeline import publish
+from ayon_core.pipeline.publish import RenderInstance
+from ayon_core.hosts.fusion.api.lib import get_frame_path
+
+
+@attr.s
+class FusionRenderInstance(RenderInstance):
+    # extends the generic RenderInstance; the composition is needed as well
+    fps = attr.ib(default=None)
+    projectEntity = attr.ib(default=None)
+    stagingDir = attr.ib(default=None)
+    app_version = attr.ib(default=None)
+    tool = attr.ib(default=None)
+    workfileComp = attr.ib(default=None)
+    # use a factory so instances do not share one mutable default dict
+    publish_attributes = attr.ib(factory=dict)
+    frameStartHandle = attr.ib(default=None)
+    frameEndHandle = attr.ib(default=None)
+
+
+class CollectFusionRender(
+    publish.AbstractCollectRender,
+    publish.ColormanagedPyblishPluginMixin
+):
+
+    order = pyblish.api.CollectorOrder + 0.09
+    label = "Collect Fusion Render"
+    hosts = ["fusion"]
+
+    def get_instances(self, context):
+
+        comp = context.data.get("currentComp")
+        comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat")
+        aspect_x = comp_frame_format_prefs["AspectX"]
+        aspect_y = comp_frame_format_prefs["AspectY"]
+
+        instances = []
+        instances_to_remove = []
+
+        current_file = context.data["currentFile"]
+        version = context.data["version"]
+
+        project_entity = context.data["projectEntity"]
+
+        for inst in context:
+            if not inst.data.get("active", True):
+                continue
+
+            family = inst.data["family"]
+            if family not in ["render", "image"]:
+                continue
+
+            task_name = context.data["task"]
+            tool = inst.data["transientData"]["tool"]
+
+            instance_families = inst.data.get("families", [])
+            subset_name = inst.data["subset"]
+            instance = FusionRenderInstance(
+                family=family,
+                tool=tool,
+                workfileComp=comp,
+                families=instance_families,
+                version=version,
+                time="",
+                source=current_file,
+                label=inst.data["label"],
+                subset=subset_name,
+                asset=inst.data["asset"],
+                task=task_name,
+                attachTo=False,
+                setMembers='',
+                publish=True,
+                name=subset_name,
+                resolutionWidth=comp_frame_format_prefs.get("Width"),
+                resolutionHeight=comp_frame_format_prefs.get("Height"),
+                pixelAspect=aspect_x / aspect_y,
+                tileRendering=False,
+                tilesX=0,
+                tilesY=0,
+                review="review" in instance_families,
+                frameStart=inst.data["frameStart"],
+                frameEnd=inst.data["frameEnd"],
+                handleStart=inst.data["handleStart"],
+
                handleEnd=inst.data["handleEnd"],
+                frameStartHandle=inst.data["frameStartHandle"],
+                frameEndHandle=inst.data["frameEndHandle"],
+                frameStep=1,
+                fps=comp_frame_format_prefs.get("Rate"),
+                app_version=comp.GetApp().Version,
+                publish_attributes=inst.data.get("publish_attributes", {})
+            )
+
+            render_target = inst.data["creator_attributes"]["render_target"]
+
+            # Add render target family
+            render_target_family = f"render.{render_target}"
+            if render_target_family not in instance.families:
+                instance.families.append(render_target_family)
+
+            # Add render target specific data
+            if render_target in {"local", "frames"}:
+                instance.projectEntity = project_entity
+
+            if render_target == "farm":
+                fam = "render.farm"
+                if fam not in instance.families:
+                    instance.families.append(fam)
+                instance.farm = True  # to skip integrate
+                if "review" in instance.families:
+                    # to skip ExtractReview locally
+                    instance.families.remove("review")
+
+            # add the new instance to the list and remove the original
+            # instance since it is not needed anymore
+            instances.append(instance)
+            instances_to_remove.append(inst)
+
+        for instance in instances_to_remove:
+            context.remove(instance)
+
+        return instances
+
+    def post_collecting_action(self):
+        for instance in self._context:
+            if "render.frames" in instance.data.get("families", []):
+                # add representation data to the instance
+                self._update_for_frames(instance)
+
+    def get_expected_files(self, render_instance):
+        """Return the rendered files that should be created by Deadline.
+
+        These are not published directly; they are the source for the
+        later 'submit_publish_job'.
+
+        Args:
+            render_instance (RenderInstance): Instance to pull the anatomy
+                and the path parts from.
+
+        Returns:
+            list: Absolute paths to the rendered files.
+        """
+        start = render_instance.frameStart - render_instance.handleStart
+        end = render_instance.frameEnd + render_instance.handleEnd
+
+        comp = render_instance.workfileComp
+        path = comp.MapPath(
+            render_instance.tool["Clip"][
+                render_instance.workfileComp.TIME_UNDEFINED
+            ]
+        )
+        output_dir = os.path.dirname(path)
+        render_instance.outputDir = output_dir
+
+        basename = os.path.basename(path)
+
+        head, padding, ext = get_frame_path(basename)
+
+        expected_files = []
+        for frame in range(start, end + 1):
+            expected_files.append(
+                os.path.join(
+                    output_dir,
+                    f"{head}{str(frame).zfill(padding)}{ext}"
+                )
+            )
+
+        return expected_files
+
+    def _update_for_frames(self, instance):
+        """Update an instance of the ``render.frames`` family.
+
+        Adds representation data to the instance and sets colorspaceData
+        on the representation based on file rules.
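+
+        For example, frames 1001-1003 of a ``beauty.%04d.exr`` output would
+        get a representation roughly like (values illustrative)::
+
+            {"name": "exr", "ext": "exr", "frameStart": "1001",
+             "files": ["beauty.1001.exr", ...], "stagingDir": "..."}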
+ """ + + expected_files = instance.data["expectedFiles"] + + start = instance.data["frameStart"] - instance.data["handleStart"] + + path = expected_files[0] + basename = os.path.basename(path) + staging_dir = os.path.dirname(path) + _, padding, ext = get_frame_path(basename) + + repre = { + "name": ext[1:], + "ext": ext[1:], + "frameStart": f"%0{padding}d" % start, + "files": [os.path.basename(f) for f in expected_files], + "stagingDir": staging_dir, + } + + self.set_representation_colorspace( + representation=repre, + context=instance.context, + ) + + # review representation + if instance.data.get("review", False): + repre["tags"] = ["review"] + + # add the repre to the instance + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(repre) + + return instance diff --git a/openpype/hosts/fusion/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_workfile.py similarity index 100% rename from openpype/hosts/fusion/plugins/publish/collect_workfile.py rename to client/ayon_core/hosts/fusion/plugins/publish/collect_workfile.py diff --git a/client/ayon_core/hosts/fusion/plugins/publish/extract_render_local.py b/client/ayon_core/hosts/fusion/plugins/publish/extract_render_local.py new file mode 100644 index 0000000000..eea232ac29 --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/publish/extract_render_local.py @@ -0,0 +1,207 @@ +import os +import logging +import contextlib +import collections +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.fusion.api import comp_lock_and_undo_chunk +from ayon_core.hosts.fusion.api.lib import get_frame_path, maintained_comp_range + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def enabled_savers(comp, savers): + """Enable only the `savers` in Comp during the context. + + Any Saver tool in the passed composition that is not in the savers list + will be set to passthrough during the context. + + Args: + comp (object): Fusion composition object. + savers (list): List of Saver tool objects. 
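+
+    Example (illustrative usage):
+        >>> with enabled_savers(comp, savers_to_render):
+        ...     comp.Render({"Start": 1001, "End": 1100, "Wait": True})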
+ + """ + passthrough_key = "TOOLB_PassThrough" + original_states = {} + enabled_saver_names = {saver.Name for saver in savers} + + all_savers = comp.GetToolList(False, "Saver").values() + savers_by_name = {saver.Name: saver for saver in all_savers} + + try: + for saver in all_savers: + original_state = saver.GetAttrs()[passthrough_key] + original_states[saver.Name] = original_state + + # The passthrough state we want to set (passthrough != enabled) + state = saver.Name not in enabled_saver_names + if state != original_state: + saver.SetAttrs({passthrough_key: state}) + yield + finally: + for saver_name, original_state in original_states.items(): + saver = savers_by_name[saver_name] + saver.SetAttrs({"TOOLB_PassThrough": original_state}) + + +class FusionRenderLocal( + pyblish.api.InstancePlugin, + publish.ColormanagedPyblishPluginMixin +): + """Render the current Fusion composition locally.""" + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Render Local" + hosts = ["fusion"] + families = ["render.local"] + + is_rendered_key = "_fusionrenderlocal_has_rendered" + + def process(self, instance): + + # Start render + result = self.render(instance) + if result is False: + raise RuntimeError(f"Comp render failed for {instance}") + + self._add_representation(instance) + + # Log render status + self.log.info( + "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format( + nm=instance.data["name"], + ast=instance.data["asset"], + tsk=instance.data["task"], + ) + ) + + def render(self, instance): + """Render instance. + + We try to render the minimal amount of times by combining the instances + that have a matching frame range in one Fusion render. Then for the + batch of instances we store whether the render succeeded or failed. + + """ + + if self.is_rendered_key in instance.data: + # This instance was already processed in batch with another + # instance, so we just return the render result directly + self.log.debug(f"Instance {instance} was already rendered") + return instance.data[self.is_rendered_key] + + instances_by_frame_range = self.get_render_instances_by_frame_range( + instance.context + ) + + # Render matching batch of instances that share the same frame range + frame_range = self.get_instance_render_frame_range(instance) + render_instances = instances_by_frame_range[frame_range] + + # We initialize render state false to indicate it wasn't successful + # yet to keep track of whether Fusion succeeded. 
This is for cases + # where an error below this might cause the comp render result not + # to be stored for the instances of this batch + for render_instance in render_instances: + render_instance.data[self.is_rendered_key] = False + + savers_to_render = [inst.data["tool"] for inst in render_instances] + current_comp = instance.context.data["currentComp"] + frame_start, frame_end = frame_range + + self.log.info( + f"Starting Fusion render frame range {frame_start}-{frame_end}" + ) + saver_names = ", ".join(saver.Name for saver in savers_to_render) + self.log.info(f"Rendering tools: {saver_names}") + + with comp_lock_and_undo_chunk(current_comp): + with maintained_comp_range(current_comp): + with enabled_savers(current_comp, savers_to_render): + result = current_comp.Render( + { + "Start": frame_start, + "End": frame_end, + "Wait": True, + } + ) + + # Store the render state for all the rendered instances + for render_instance in render_instances: + render_instance.data[self.is_rendered_key] = bool(result) + + return result + + def _add_representation(self, instance): + """Add representation to instance""" + + expected_files = instance.data["expectedFiles"] + + start = instance.data["frameStart"] - instance.data["handleStart"] + + path = expected_files[0] + _, padding, ext = get_frame_path(path) + + staging_dir = os.path.dirname(path) + + files = [os.path.basename(f) for f in expected_files] + if len(expected_files) == 1: + files = files[0] + + repre = { + "name": ext[1:], + "ext": ext[1:], + "frameStart": f"%0{padding}d" % start, + "files": files, + "stagingDir": staging_dir, + } + + self.set_representation_colorspace( + representation=repre, + context=instance.context, + ) + + # review representation + if instance.data.get("review", False): + repre["tags"] = ["review"] + + # add the repre to the instance + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(repre) + + return instance + + def get_render_instances_by_frame_range(self, context): + """Return enabled render.local instances grouped by their frame range. + + Arguments: + context (pyblish.Context): The pyblish context + + Returns: + dict: (start, end): instances mapping + + """ + + instances_to_render = [ + instance for instance in context if + # Only active instances + instance.data.get("publish", True) and + # Only render.local instances + "render.local" in instance.data.get("families", []) + ] + + # Instances by frame ranges + instances_by_frame_range = collections.defaultdict(list) + for instance in instances_to_render: + start, end = self.get_instance_render_frame_range(instance) + instances_by_frame_range[(start, end)].append(instance) + + return dict(instances_by_frame_range) + + def get_instance_render_frame_range(self, instance): + start = instance.data["frameStartHandle"] + end = instance.data["frameEndHandle"] + return start, end diff --git a/client/ayon_core/hosts/fusion/plugins/publish/increment_current_file.py b/client/ayon_core/hosts/fusion/plugins/publish/increment_current_file.py new file mode 100644 index 0000000000..bcff27b988 --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/publish/increment_current_file.py @@ -0,0 +1,44 @@ +import pyblish.api + +from ayon_core.pipeline import OptionalPyblishPluginMixin +from ayon_core.pipeline import KnownPublishError + + +class FusionIncrementCurrentFile( + pyblish.api.ContextPlugin, OptionalPyblishPluginMixin +): + """Increment the current file. 
+ + Saves the current file with an increased version number. + + """ + + label = "Increment workfile version" + order = pyblish.api.IntegratorOrder + 9.0 + hosts = ["fusion"] + optional = True + + def process(self, context): + if not self.is_active(context.data): + return + + from ayon_core.lib import version_up + from ayon_core.pipeline.publish import get_errored_plugins_from_context + + errored_plugins = get_errored_plugins_from_context(context) + if any( + plugin.__name__ == "FusionSubmitDeadline" + for plugin in errored_plugins + ): + raise KnownPublishError( + "Skipping incrementing current file because " + "submission to render farm failed." + ) + + comp = context.data.get("currentComp") + assert comp, "Must have comp" + + current_filepath = context.data["currentFile"] + new_filepath = version_up(current_filepath) + + comp.Save(new_filepath) diff --git a/openpype/hosts/fusion/plugins/publish/save_scene.py b/client/ayon_core/hosts/fusion/plugins/publish/save_scene.py similarity index 100% rename from openpype/hosts/fusion/plugins/publish/save_scene.py rename to client/ayon_core/hosts/fusion/plugins/publish/save_scene.py diff --git a/openpype/hosts/fusion/plugins/publish/validate_background_depth.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_background_depth.py similarity index 93% rename from openpype/hosts/fusion/plugins/publish/validate_background_depth.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_background_depth.py index e268f8adec..d588748cfa 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_background_depth.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_background_depth.py @@ -1,12 +1,12 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( publish, OptionalPyblishPluginMixin, PublishValidationError, ) -from openpype.hosts.fusion.api.action import SelectInvalidAction +from ayon_core.hosts.fusion.api.action import SelectInvalidAction class ValidateBackgroundDepth( diff --git a/openpype/hosts/fusion/plugins/publish/validate_comp_saved.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_comp_saved.py similarity index 94% rename from openpype/hosts/fusion/plugins/publish/validate_comp_saved.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_comp_saved.py index 6e6d10e09a..ba56c40b65 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_comp_saved.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_comp_saved.py @@ -1,7 +1,7 @@ import os import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateFusionCompSaved(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_create_folder_checked.py similarity index 87% rename from openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_create_folder_checked.py index d5c618af58..13ea85b48c 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_create_folder_checked.py @@ -1,9 +1,9 @@ import pyblish.api -from openpype.pipeline.publish import RepairAction -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline.publish import RepairAction +from ayon_core.pipeline import PublishValidationError -from 
openpype.hosts.fusion.api.action import SelectInvalidAction +from ayon_core.hosts.fusion.api.action import SelectInvalidAction class ValidateCreateFolderChecked(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_expected_frames_existence.py similarity index 91% rename from openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_expected_frames_existence.py index 3f84f59678..83d1feaefd 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_expected_frames_existence.py @@ -1,10 +1,10 @@ import os import pyblish.api -from openpype.pipeline.publish import RepairAction -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline.publish import RepairAction +from ayon_core.pipeline import PublishValidationError -from openpype.hosts.fusion.api.action import SelectInvalidAction +from ayon_core.hosts.fusion.api.action import SelectInvalidAction class ValidateLocalFramesExistence(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_filename_has_extension.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_filename_has_extension.py similarity index 90% rename from openpype/hosts/fusion/plugins/publish/validate_filename_has_extension.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_filename_has_extension.py index 38cd578ff2..17b1aa47c8 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_filename_has_extension.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_filename_has_extension.py @@ -1,9 +1,9 @@ import os import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError -from openpype.hosts.fusion.api.action import SelectInvalidAction +from ayon_core.hosts.fusion.api.action import SelectInvalidAction class ValidateFilenameHasExtension(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_image_frame.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_image_frame.py similarity index 94% rename from openpype/hosts/fusion/plugins/publish/validate_image_frame.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_image_frame.py index 734203f31c..70e5ed9279 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_image_frame.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_image_frame.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateImageFrame(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_frame_range.py similarity index 96% rename from openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_instance_frame_range.py index edf219e752..0f7ef1862d 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_frame_range.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError 
class ValidateInstanceFrameRange(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_saver_has_input.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_saver_has_input.py similarity index 88% rename from openpype/hosts/fusion/plugins/publish/validate_saver_has_input.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_saver_has_input.py index 0103e990fb..a8977e4747 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_saver_has_input.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_saver_has_input.py @@ -1,7 +1,7 @@ import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError -from openpype.hosts.fusion.api.action import SelectInvalidAction +from ayon_core.hosts.fusion.api.action import SelectInvalidAction class ValidateSaverHasInput(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_saver_passthrough.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_saver_passthrough.py similarity index 93% rename from openpype/hosts/fusion/plugins/publish/validate_saver_passthrough.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_saver_passthrough.py index 6019bee93a..acafe3308f 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_saver_passthrough.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_saver_passthrough.py @@ -1,7 +1,7 @@ import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError -from openpype.hosts.fusion.api.action import SelectInvalidAction +from ayon_core.hosts.fusion.api.action import SelectInvalidAction class ValidateSaverPassthrough(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/fusion/plugins/publish/validate_saver_resolution.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_saver_resolution.py similarity index 96% rename from openpype/hosts/fusion/plugins/publish/validate_saver_resolution.py rename to client/ayon_core/hosts/fusion/plugins/publish/validate_saver_resolution.py index b28af3409d..af8d4f41fa 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_saver_resolution.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_saver_resolution.py @@ -1,11 +1,11 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin, ) -from openpype.hosts.fusion.api.action import SelectInvalidAction -from openpype.hosts.fusion.api import comp_lock_and_undo_chunk +from ayon_core.hosts.fusion.api.action import SelectInvalidAction +from ayon_core.hosts.fusion.api import comp_lock_and_undo_chunk class ValidateSaverResolution( diff --git a/client/ayon_core/hosts/fusion/plugins/publish/validate_unique_subsets.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_unique_subsets.py new file mode 100644 index 0000000000..619b52077e --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_unique_subsets.py @@ -0,0 +1,55 @@ +from collections import defaultdict + +import pyblish.api +from ayon_core.pipeline import PublishValidationError + +from ayon_core.hosts.fusion.api.action import SelectInvalidAction + + +class ValidateUniqueSubsets(pyblish.api.ContextPlugin): + """Ensure all instances have a unique subset name""" + + order = pyblish.api.ValidatorOrder + label = "Validate Unique Subsets" + families = ["render", "image"] + hosts = ["fusion"] + actions = 
[SelectInvalidAction] + + @classmethod + def get_invalid(cls, context): + + # Collect instances per subset per asset + instances_per_subset_asset = defaultdict(lambda: defaultdict(list)) + for instance in context: + asset = instance.data.get("asset", context.data.get("asset")) + subset = instance.data.get("subset", context.data.get("subset")) + instances_per_subset_asset[asset][subset].append(instance) + + # Find which asset + subset combination has more than one instance + # Those are considered invalid because they'd integrate to the same + # destination. + invalid = [] + for asset, instances_per_subset in instances_per_subset_asset.items(): + for subset, instances in instances_per_subset.items(): + if len(instances) > 1: + cls.log.warning( + "{asset} > {subset} used by more than " + "one instance: {instances}".format( + asset=asset, + subset=subset, + instances=instances + ) + ) + invalid.extend(instances) + + # Return tools for the invalid instances so they can be selected + invalid = [instance.data["tool"] for instance in invalid] + + return invalid + + def process(self, context): + invalid = self.get_invalid(context) + if invalid: + raise PublishValidationError("Multiple instances are set to " + "the same asset > subset.", + title=self.label) diff --git a/openpype/hosts/flame/startup/openpype_babypublisher/modules/__init__.py b/client/ayon_core/hosts/fusion/scripts/__init__.py similarity index 100% rename from openpype/hosts/flame/startup/openpype_babypublisher/modules/__init__.py rename to client/ayon_core/hosts/fusion/scripts/__init__.py diff --git a/openpype/hosts/fusion/scripts/duplicate_with_inputs.py b/client/ayon_core/hosts/fusion/scripts/duplicate_with_inputs.py similarity index 97% rename from openpype/hosts/fusion/scripts/duplicate_with_inputs.py rename to client/ayon_core/hosts/fusion/scripts/duplicate_with_inputs.py index 21da6c24d8..727fd335ea 100644 --- a/openpype/hosts/fusion/scripts/duplicate_with_inputs.py +++ b/client/ayon_core/hosts/fusion/scripts/duplicate_with_inputs.py @@ -1,4 +1,4 @@ -from openpype.hosts.fusion.api import ( +from ayon_core.hosts.fusion.api import ( comp_lock_and_undo_chunk, get_current_comp ) diff --git a/openpype/hosts/fusion/vendor/attr/__init__.py b/client/ayon_core/hosts/fusion/vendor/attr/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/__init__.py rename to client/ayon_core/hosts/fusion/vendor/attr/__init__.py diff --git a/openpype/hosts/fusion/vendor/attr/__init__.pyi b/client/ayon_core/hosts/fusion/vendor/attr/__init__.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/__init__.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/__init__.pyi diff --git a/openpype/hosts/fusion/vendor/attr/_cmp.py b/client/ayon_core/hosts/fusion/vendor/attr/_cmp.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_cmp.py rename to client/ayon_core/hosts/fusion/vendor/attr/_cmp.py diff --git a/openpype/hosts/fusion/vendor/attr/_cmp.pyi b/client/ayon_core/hosts/fusion/vendor/attr/_cmp.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_cmp.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/_cmp.pyi diff --git a/openpype/hosts/fusion/vendor/attr/_compat.py b/client/ayon_core/hosts/fusion/vendor/attr/_compat.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_compat.py rename to client/ayon_core/hosts/fusion/vendor/attr/_compat.py diff --git a/openpype/hosts/fusion/vendor/attr/_config.py 
b/client/ayon_core/hosts/fusion/vendor/attr/_config.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_config.py rename to client/ayon_core/hosts/fusion/vendor/attr/_config.py diff --git a/openpype/hosts/fusion/vendor/attr/_funcs.py b/client/ayon_core/hosts/fusion/vendor/attr/_funcs.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_funcs.py rename to client/ayon_core/hosts/fusion/vendor/attr/_funcs.py diff --git a/openpype/hosts/fusion/vendor/attr/_make.py b/client/ayon_core/hosts/fusion/vendor/attr/_make.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_make.py rename to client/ayon_core/hosts/fusion/vendor/attr/_make.py diff --git a/openpype/hosts/fusion/vendor/attr/_next_gen.py b/client/ayon_core/hosts/fusion/vendor/attr/_next_gen.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_next_gen.py rename to client/ayon_core/hosts/fusion/vendor/attr/_next_gen.py diff --git a/openpype/hosts/fusion/vendor/attr/_version_info.py b/client/ayon_core/hosts/fusion/vendor/attr/_version_info.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_version_info.py rename to client/ayon_core/hosts/fusion/vendor/attr/_version_info.py diff --git a/openpype/hosts/fusion/vendor/attr/_version_info.pyi b/client/ayon_core/hosts/fusion/vendor/attr/_version_info.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/_version_info.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/_version_info.pyi diff --git a/openpype/hosts/fusion/vendor/attr/converters.py b/client/ayon_core/hosts/fusion/vendor/attr/converters.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/converters.py rename to client/ayon_core/hosts/fusion/vendor/attr/converters.py diff --git a/openpype/hosts/fusion/vendor/attr/converters.pyi b/client/ayon_core/hosts/fusion/vendor/attr/converters.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/converters.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/converters.pyi diff --git a/openpype/hosts/fusion/vendor/attr/exceptions.py b/client/ayon_core/hosts/fusion/vendor/attr/exceptions.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/exceptions.py rename to client/ayon_core/hosts/fusion/vendor/attr/exceptions.py diff --git a/openpype/hosts/fusion/vendor/attr/exceptions.pyi b/client/ayon_core/hosts/fusion/vendor/attr/exceptions.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/exceptions.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/exceptions.pyi diff --git a/openpype/hosts/fusion/vendor/attr/filters.py b/client/ayon_core/hosts/fusion/vendor/attr/filters.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/filters.py rename to client/ayon_core/hosts/fusion/vendor/attr/filters.py diff --git a/openpype/hosts/fusion/vendor/attr/filters.pyi b/client/ayon_core/hosts/fusion/vendor/attr/filters.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/filters.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/filters.pyi diff --git a/openpype/hosts/fusion/vendor/attr/py.typed b/client/ayon_core/hosts/fusion/vendor/attr/py.typed similarity index 100% rename from openpype/hosts/fusion/vendor/attr/py.typed rename to client/ayon_core/hosts/fusion/vendor/attr/py.typed diff --git a/openpype/hosts/fusion/vendor/attr/setters.py b/client/ayon_core/hosts/fusion/vendor/attr/setters.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/setters.py 
rename to client/ayon_core/hosts/fusion/vendor/attr/setters.py diff --git a/openpype/hosts/fusion/vendor/attr/setters.pyi b/client/ayon_core/hosts/fusion/vendor/attr/setters.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/setters.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/setters.pyi diff --git a/openpype/hosts/fusion/vendor/attr/validators.py b/client/ayon_core/hosts/fusion/vendor/attr/validators.py similarity index 100% rename from openpype/hosts/fusion/vendor/attr/validators.py rename to client/ayon_core/hosts/fusion/vendor/attr/validators.py diff --git a/openpype/hosts/fusion/vendor/attr/validators.pyi b/client/ayon_core/hosts/fusion/vendor/attr/validators.pyi similarity index 100% rename from openpype/hosts/fusion/vendor/attr/validators.pyi rename to client/ayon_core/hosts/fusion/vendor/attr/validators.pyi diff --git a/openpype/hosts/fusion/vendor/urllib3/__init__.py b/client/ayon_core/hosts/fusion/vendor/urllib3/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/__init__.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/__init__.py diff --git a/openpype/hosts/fusion/vendor/urllib3/_collections.py b/client/ayon_core/hosts/fusion/vendor/urllib3/_collections.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/_collections.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/_collections.py diff --git a/openpype/hosts/fusion/vendor/urllib3/_version.py b/client/ayon_core/hosts/fusion/vendor/urllib3/_version.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/_version.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/_version.py diff --git a/openpype/hosts/fusion/vendor/urllib3/connection.py b/client/ayon_core/hosts/fusion/vendor/urllib3/connection.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/connection.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/connection.py diff --git a/openpype/hosts/fusion/vendor/urllib3/connectionpool.py b/client/ayon_core/hosts/fusion/vendor/urllib3/connectionpool.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/connectionpool.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/connectionpool.py diff --git a/openpype/hosts/fusion/scripts/__init__.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/__init__.py similarity index 100% rename from openpype/hosts/fusion/scripts/__init__.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/__init__.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/_appengine_environ.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_appengine_environ.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/_appengine_environ.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_appengine_environ.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/__init__.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_securetransport/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/__init__.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_securetransport/__init__.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/_securetransport/bindings.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_securetransport/bindings.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/_securetransport/bindings.py rename to 
client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_securetransport/bindings.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/_securetransport/low_level.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_securetransport/low_level.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/_securetransport/low_level.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/_securetransport/low_level.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/appengine.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/appengine.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/appengine.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/appengine.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/ntlmpool.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/ntlmpool.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/ntlmpool.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/ntlmpool.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/pyopenssl.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/pyopenssl.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/pyopenssl.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/pyopenssl.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/securetransport.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/securetransport.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/securetransport.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/securetransport.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/socks.py b/client/ayon_core/hosts/fusion/vendor/urllib3/contrib/socks.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/socks.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/contrib/socks.py diff --git a/openpype/hosts/fusion/vendor/urllib3/exceptions.py b/client/ayon_core/hosts/fusion/vendor/urllib3/exceptions.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/exceptions.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/exceptions.py diff --git a/openpype/hosts/fusion/vendor/urllib3/fields.py b/client/ayon_core/hosts/fusion/vendor/urllib3/fields.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/fields.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/fields.py diff --git a/openpype/hosts/fusion/vendor/urllib3/filepost.py b/client/ayon_core/hosts/fusion/vendor/urllib3/filepost.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/filepost.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/filepost.py diff --git a/openpype/hosts/fusion/vendor/urllib3/packages/__init__.py b/client/ayon_core/hosts/fusion/vendor/urllib3/packages/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/packages/__init__.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/packages/__init__.py diff --git a/openpype/hosts/fusion/vendor/urllib3/contrib/_securetransport/__init__.py b/client/ayon_core/hosts/fusion/vendor/urllib3/packages/backports/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/contrib/_securetransport/__init__.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/packages/backports/__init__.py diff --git 
a/openpype/hosts/fusion/vendor/urllib3/packages/backports/makefile.py b/client/ayon_core/hosts/fusion/vendor/urllib3/packages/backports/makefile.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/packages/backports/makefile.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/packages/backports/makefile.py diff --git a/openpype/hosts/fusion/vendor/urllib3/packages/six.py b/client/ayon_core/hosts/fusion/vendor/urllib3/packages/six.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/packages/six.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/packages/six.py diff --git a/openpype/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/__init__.py b/client/ayon_core/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/__init__.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/__init__.py diff --git a/openpype/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/client/ayon_core/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/_implementation.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/_implementation.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/packages/ssl_match_hostname/_implementation.py diff --git a/openpype/hosts/fusion/vendor/urllib3/poolmanager.py b/client/ayon_core/hosts/fusion/vendor/urllib3/poolmanager.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/poolmanager.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/poolmanager.py diff --git a/openpype/hosts/fusion/vendor/urllib3/request.py b/client/ayon_core/hosts/fusion/vendor/urllib3/request.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/request.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/request.py diff --git a/openpype/hosts/fusion/vendor/urllib3/response.py b/client/ayon_core/hosts/fusion/vendor/urllib3/response.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/response.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/response.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/__init__.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/__init__.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/__init__.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/connection.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/connection.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/connection.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/connection.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/proxy.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/proxy.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/proxy.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/proxy.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/queue.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/queue.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/queue.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/queue.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/request.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/request.py similarity index 100% rename from 
openpype/hosts/fusion/vendor/urllib3/util/request.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/request.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/response.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/response.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/response.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/response.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/retry.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/retry.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/retry.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/retry.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/ssl_.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/ssl_.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/ssl_.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/ssl_.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/ssltransport.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/ssltransport.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/ssltransport.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/ssltransport.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/timeout.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/timeout.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/timeout.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/timeout.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/url.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/url.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/url.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/url.py diff --git a/openpype/hosts/fusion/vendor/urllib3/util/wait.py b/client/ayon_core/hosts/fusion/vendor/urllib3/util/wait.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/util/wait.py rename to client/ayon_core/hosts/fusion/vendor/urllib3/util/wait.py diff --git a/openpype/hosts/harmony/__init__.py b/client/ayon_core/hosts/harmony/__init__.py similarity index 100% rename from openpype/hosts/harmony/__init__.py rename to client/ayon_core/hosts/harmony/__init__.py diff --git a/client/ayon_core/hosts/harmony/addon.py b/client/ayon_core/hosts/harmony/addon.py new file mode 100644 index 0000000000..172a1f104f --- /dev/null +++ b/client/ayon_core/hosts/harmony/addon.py @@ -0,0 +1,23 @@ +import os +from ayon_core.modules import OpenPypeModule, IHostAddon + +HARMONY_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class HarmonyAddon(OpenPypeModule, IHostAddon): + name = "harmony" + host_name = "harmony" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + openharmony_path = os.path.join( + HARMONY_HOST_DIR, "vendor", "OpenHarmony" + ) + # TODO check if is already set? What to do if is already set? 
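+        # One defensive option (illustrative, not current behavior) would
+        # be to keep any pre-existing value, e.g.:
+        #     env.setdefault("LIB_OPENHARMONY_PATH", openharmony_path)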
+        env["LIB_OPENHARMONY_PATH"] = openharmony_path
+
+    def get_workfile_extensions(self):
+        return [".zip"]
diff --git a/client/ayon_core/hosts/harmony/api/README.md b/client/ayon_core/hosts/harmony/api/README.md
new file mode 100644
index 0000000000..cdc17b2285
--- /dev/null
+++ b/client/ayon_core/hosts/harmony/api/README.md
@@ -0,0 +1,655 @@
+# Harmony Integration
+
+## Setup
+
+The easiest way to set up Toon Boom Harmony is to use the built-in launch:
+
+```
+python -c "import ayon_core.hosts.harmony.api as harmony;harmony.launch('path/to/harmony/executable')"
+```
+
+Communication with Harmony happens through a server/client relationship, where the server lives in the Python process and the client in the Harmony process. Messages between Python and Harmony are required to be dictionaries, which are serialized to strings:
+```
++------------+
+|            |
+|   Python   |
+|  Process   |
+|            |
+| +--------+ |
+| |        | |
+| |  Main  | |
+| | Thread | |
+| |        | |
+| +----^---+ |
+|     ||     |
+|     ||     |
+| +---v----+ |      +---------+
+| |        | |      |         |
+| | Server +-------> Harmony |
+| | Thread <-------+ Process |
+| |        | |      |         |
+| +--------+ |      +---------+
++------------+
+```
+
+The server and client now use a stricter protocol to handle communication; this is necessary to have precise control over the data passed between them. Each message is prepended with 6 bytes:
+```
+| A | H | 0x00 | 0x00 | 0x00 | 0x00 | ...
+```
+The first two bytes are *magic* bytes standing for **A**valon **H**armony. The next four bytes hold the length of the message `...`, encoded as a 32-bit unsigned integer. This way we know how many bytes to read from the socket, whether we still need more, and whether we received multiple messages to parse. (A minimal Python sketch of this framing appears later in this document.)
+
+
+## Usage
+
+The integration creates an `Openpype` menu entry where all related tools are located.
+
+**NOTE: Menu creation can be temperamental. The best way is to launch Harmony and do nothing else until Harmony is fully launched.**
+
+### Work files
+
+Because Harmony projects are directories, this integration uses `.zip` as the work file extension. Internally the project directories are stored under `[User]/.avalon/harmony`. Whenever the user saves the `.xstage` file, the integration zips up the project directory and moves it to the Avalon project path. Zipping and moving happen in the background.
+
+### Show Workfiles on launch
+
+You can show the Workfiles app when Harmony launches by setting the environment variable `AVALON_HARMONY_WORKFILES_ON_LAUNCH=1`.
+
+## Developing
+
+### Low level messaging
+To send from Python to Harmony you can use the exposed method:
+```python
+import ayon_core.hosts.harmony.api as harmony
+from uuid import uuid4
+
+signature = str(uuid4()).replace("-", "_")
+func = """function %s_hello(person)
+{
+    return ("Hello " + person + "!");
+}
+%s_hello
+""" % (signature, signature)
+print(harmony.send({"function": func, "args": ["Python"]})["result"])
+```
+**NOTE:** It's important to declare the function at the end of the function string. You can have multiple functions within your function string, but the one declared at the end is what gets executed. Also note that the same `signature` is used for both the declaration and the trailing reference; using two separate `uuid4()` calls would declare one name and call another.
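+
+This naming pattern can be wrapped in a small helper so every call gets a unique, valid identifier automatically. The sketch below is illustrative only; `send_function` is not part of the integration's API:
+
+```python
+import ayon_core.hosts.harmony.api as harmony
+from uuid import uuid4
+
+
+def send_function(body, args=None):
+    """Send a uniquely named JS function to Harmony and return its result.
+
+    ``body`` is everything after the function name, e.g. ``(person) {...}``.
+    """
+    # Prefix with "fn_" so the identifier can never start with a digit.
+    name = "fn_" + str(uuid4()).replace("-", "_")
+    func = "function {n}{b}\n{n}\n".format(n=name, b=body)
+    return harmony.send({"function": func, "args": args or []})["result"]
+
+
+print(send_function("(person) { return ('Hello ' + person + '!'); }", ["Python"]))
+```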
+
+To send a function with multiple arguments, it is best to declare the arguments within the function:
+```python
+import ayon_core.hosts.harmony.api as harmony
+from uuid import uuid4
+
+signature = str(uuid4()).replace("-", "_")
+func = """function %s_hello(args)
+{
+  var greeting = args[0];
+  var person = args[1];
+  return (greeting + " " + person + "!");
+}
+%s_hello
+""" % (signature, signature)
+print(harmony.send({"function": func, "args": ["Hello", "Python"]})["result"])
+```
+
+### Caution
+
+When naming your functions, be aware that they are executed in the global scope. They can potentially clash with Harmony's own function and object names.
+For example, `func` is an existing Harmony object. If you name your function `func`, it will overwrite Harmony's in the global scope, causing
+erratic behavior of Harmony. OpenPype therefore prefixes these function names with a [UUID4](https://docs.python.org/3/library/uuid.html), making the chance of such a clash minimal.
+See the examples above for how that works; the result is a function named like `38dfcef0_a6d7_4064_8069_51fe99ab276e_hello()`.
+You can find the list of Harmony objects and functions in the Harmony documentation.
+
+### Higher level (recommended)
+
+Instead of sending functions directly to Harmony, it is more efficient and safer to add your code to `js/PypeHarmony.js` or to use the `{"script": "..."}` method.
+
+#### Extending PypeHarmony.js
+
+Add your function to `PypeHarmony.js`. For example:
+
+```javascript
+PypeHarmony.myAwesomeFunction = function() {
+   someCoolStuff();
+};
+```
+Then you can call that JavaScript code from Python like:
+
+```python
+import ayon_core.hosts.harmony.api as harmony
+
+harmony.send({"function": "PypeHarmony.myAwesomeFunction"})
+
+```
+
+#### Using the script method
+
+You can also pass whole scripts into Harmony and call their functions later as needed.
+
+For example, say you have a bunch of JavaScript files:
+
+```javascript
+/* Master.js */
+
+var Master = {
+    Foo: {},
+    Boo: {}
+};
+
+/* FileA.js */
+var Foo = function() {};
+
+Foo.prototype.A = function() {
+    someAStuff();
+}
+
+// This will construct object Foo and add it to the Master namespace.
+Master.Foo = new Foo();
+
+/* FileB.js */
+var Boo = function() {};
+
+Boo.prototype.B = function() {
+    someBStuff();
+}
+
+// This will construct object Boo and add it to the Master namespace.
+Master.Boo = new Boo();
+```
+
+Now in Python, just read all those files and send them to Harmony.
+
+```python
+from pathlib import Path
+import ayon_core.hosts.harmony.api as harmony
+
+path_to_js = Path('/path/to/my/js')
+script_to_send = ""
+
+for file in path_to_js.iterdir():
+    if file.suffix == ".js":
+        script_to_send += file.read_text()
+
+harmony.send({"script": script_to_send})
+
+# and use your code in Harmony
+harmony.send({"function": "Master.Boo.B"})
+
+```
+
+### Scene Save
+Instead of sending a request to Harmony with `scene.saveAll`, please use:
+```python
+import ayon_core.hosts.harmony.api as harmony
+harmony.save_scene()
+```
+
+<details>
+  <summary>Click to expand for details on scene save.</summary>
+
+  Because OpenPype tools do not deal well with a folder representing a single entity, as a Harmony scene is, this integration uses zip files to encapsulate the Harmony scene folders. Saving the scene in Harmony via the menu or CTRL+S will not produce a zip file; only saving from the Workfiles tool will. The reason is that
+  the zipping process can take some time, during which the user cannot be blocked from saving again: if the `.xstage` file changed while being zipped, the resulting zip
+  archive would be corrupted.
+</details>
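+
+The zipping itself is implemented by `zip_and_move()` in `lib.py`; a condensed version (error handling trimmed) is shown here to highlight the integrity check that guards against exactly that corruption:
+
+```python
+import os
+import shutil
+import zipfile
+
+
+def zip_and_move(source, destination):
+    """Zip the scene directory and move the archive to `destination`."""
+    os.chdir(os.path.dirname(source))
+    shutil.make_archive(os.path.basename(source), "zip", source)
+    archive = os.path.basename(source) + ".zip"
+    # testzip() returns the name of the first corrupt member, or None when
+    # the archive is intact -- catching saves that raced the zipping.
+    with zipfile.ZipFile(archive) as zipped:
+        if zipped.testzip() is not None:
+            raise Exception("File archive is corrupted.")
+    shutil.move(archive, destination)
+```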
+
+### Plugin Examples
+These plugins were made with the [polly config](https://github.com/mindbender-studio/config).
+
+#### Creator Plugin
+```python
+import ayon_core.hosts.harmony.api as harmony
+
+
+class CreateComposite(harmony.Creator):
+    """Composite node for publish."""
+
+    name = "compositeDefault"
+    label = "Composite"
+    family = "mindbender.template"
+
+    def __init__(self, *args, **kwargs):
+        super(CreateComposite, self).__init__(*args, **kwargs)
+```
+
+The creator plugin can be configured to use other node types. For example, here is a write node creator:
+```python
+import ayon_core.hosts.harmony.api as harmony
+from uuid import uuid4
+
+
+class CreateRender(harmony.Creator):
+    """Composite node for publishing renders."""
+
+    name = "writeDefault"
+    label = "Write"
+    family = "mindbender.imagesequence"
+    node_type = "WRITE"
+
+    def __init__(self, *args, **kwargs):
+        super(CreateRender, self).__init__(*args, **kwargs)
+
+    def setup_node(self, node):
+        signature = str(uuid4()).replace("-", "_")
+        func = """function %s_func(args)
+        {
+            node.setTextAttr(args[0], "DRAWING_TYPE", 1, "PNG4");
+        }
+        %s_func
+        """ % (signature, signature)
+        harmony.send(
+            {"function": func, "args": [node]}
+        )
+```
+
+#### Collector Plugin
+```python
+import pyblish.api
+import ayon_core.hosts.harmony.api as harmony
+
+
+class CollectInstances(pyblish.api.ContextPlugin):
+    """Gather instances by node metadata.
+
+    This collector takes into account assets that are associated with
+    a composite node and marked with a unique identifier.
+
+    Identifier:
+        id (str): "pyblish.avalon.instance"
+    """
+
+    label = "Instances"
+    order = pyblish.api.CollectorOrder
+    hosts = ["harmony"]
+
+    def process(self, context):
+        nodes = harmony.send(
+            {"function": "node.getNodes", "args": [["COMPOSITE"]]}
+        )["result"]
+
+        for node in nodes:
+            data = harmony.read(node)
+
+            # Skip non-tagged nodes.
+            if not data:
+                continue
+
+            # Skip containers.
+            if "container" in data["id"]:
+                continue
+
+            instance = context.create_instance(node.split("/")[-1])
+            instance.append(node)
+            instance.data.update(data)
+
+            # Produce diagnostic message for any graphical
+            # user interface interested in visualising it.
+            self.log.info("Found: \"%s\"" % instance.data["name"])
+```
+
+#### Extractor Plugin
+```python
+import os
+from uuid import uuid4
+
+import pyblish.api
+import ayon_core.hosts.harmony.api as harmony
+
+import clique
+
+
+class ExtractImage(pyblish.api.InstancePlugin):
+    """Produce a flattened image file from instance.
+
+    This plug-in only takes into account the nodes connected to the composite.
+    """
+
+    label = "Extract Image Sequence"
+    order = pyblish.api.ExtractorOrder
+    hosts = ["harmony"]
+    families = ["mindbender.imagesequence"]
+
+    def process(self, instance):
+        project_path = harmony.send(
+            {"function": "scene.currentProjectPath"}
+        )["result"]
+
+        # Store reference for integration
+        if "files" not in instance.data:
+            instance.data["files"] = list()
+
+        # Store display source node for later.
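+        # The Display node acts as the render target: whatever currently
+        # feeds it is unlinked (and remembered) so the instance's composite
+        # can be linked in its place for rendering, then restored afterwards.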
+ display_node = "Top/Display" + signature = str(uuid4()).replace("-", "_") + func = """function %s_func(display_node) + { + var source_node = null; + if (node.isLinked(display_node, 0)) + { + source_node = node.srcNode(display_node, 0); + node.unlink(display_node, 0); + } + return source_node + } + %s_func + """ % (signature, signature) + display_source_node = harmony.send( + {"function": func, "args": [display_node]} + )["result"] + + # Perform extraction + path = os.path.join( + os.path.normpath( + project_path + ).replace("\\", "/"), + instance.data["name"] + ) + if not os.path.exists(path): + os.makedirs(path) + + render_func = """function frameReady(frame, celImage) + {{ + var path = "{path}/{filename}" + frame + ".png"; + celImage.imageFileAs(path, "", "PNG4"); + }} + function %s_func(composite_node) + {{ + node.link(composite_node, 0, "{display_node}", 0); + render.frameReady.connect(frameReady); + render.setRenderDisplay("{display_node}"); + render.renderSceneAll(); + render.frameReady.disconnect(frameReady); + }} + %s_func + """ % (signature, signature) + restore_func = """function %s_func(args) + { + var display_node = args[0]; + var display_source_node = args[1]; + if (node.isLinked(display_node, 0)) + { + node.unlink(display_node, 0); + } + node.link(display_source_node, 0, display_node, 0); + } + %s_func + """ % (signature, signature) + + with harmony.maintained_selection(): + self.log.info("Extracting %s" % str(list(instance))) + + harmony.send( + { + "function": render_func.format( + path=path.replace("\\", "/"), + filename=os.path.basename(path), + display_node=display_node + ), + "args": [instance[0]] + } + ) + + # Restore display. + if display_source_node: + harmony.send( + { + "function": restore_func, + "args": [display_node, display_source_node] + } + ) + + files = os.listdir(path) + collections, remainder = clique.assemble(files, minimum_items=1) + assert not remainder, ( + "There shouldn't have been a remainder for '%s': " + "%s" % (instance[0], remainder) + ) + assert len(collections) == 1, ( + "There should only be one image sequence in {}. 
Found: {}".format( + path, len(collections) + ) + ) + + data = { + "subset": collections[0].head, + "isSeries": True, + "stagingDir": path, + "files": list(collections[0]), + } + instance.data.update(data) + + self.log.info("Extracted {instance} to {path}".format(**locals())) +``` + +#### Loader Plugin +```python +import os + +import ayon_core.hosts.harmony.api as harmony + +signature = str(uuid4()).replace("-", "_") +copy_files = """function copyFile(srcFilename, dstFilename) +{ + var srcFile = new PermanentFile(srcFilename); + var dstFile = new PermanentFile(dstFilename); + srcFile.copy(dstFile); +} +""" + +import_files = """function %s_import_files() +{ + var PNGTransparencyMode = 0; // Premultiplied with Black + var TGATransparencyMode = 0; // Premultiplied with Black + var SGITransparencyMode = 0; // Premultiplied with Black + var LayeredPSDTransparencyMode = 1; // Straight + var FlatPSDTransparencyMode = 2; // Premultiplied with White + + function getUniqueColumnName( column_prefix ) + { + var suffix = 0; + // finds if unique name for a column + var column_name = column_prefix; + while(suffix < 2000) + { + if(!column.type(column_name)) + break; + + suffix = suffix + 1; + column_name = column_prefix + "_" + suffix; + } + return column_name; + } + + function import_files(args) + { + var root = args[0]; + var files = args[1]; + var name = args[2]; + var start_frame = args[3]; + + var vectorFormat = null; + var extension = null; + var filename = files[0]; + + var pos = filename.lastIndexOf("."); + if( pos < 0 ) + return null; + + extension = filename.substr(pos+1).toLowerCase(); + + if(extension == "jpeg") + extension = "jpg"; + if(extension == "tvg") + { + vectorFormat = "TVG" + extension ="SCAN"; // element.add() will use this. + } + + var elemId = element.add( + name, + "BW", + scene.numberOfUnitsZ(), + extension.toUpperCase(), + vectorFormat + ); + if (elemId == -1) + { + // hum, unknown file type most likely -- let's skip it. + return null; // no read to add. + } + + var uniqueColumnName = getUniqueColumnName(name); + column.add(uniqueColumnName , "DRAWING"); + column.setElementIdOfDrawing(uniqueColumnName, elemId); + + var read = node.add(root, name, "READ", 0, 0, 0); + var transparencyAttr = node.getAttr( + read, frame.current(), "READ_TRANSPARENCY" + ); + var opacityAttr = node.getAttr(read, frame.current(), "OPACITY"); + transparencyAttr.setValue(true); + opacityAttr.setValue(true); + + var alignmentAttr = node.getAttr(read, frame.current(), "ALIGNMENT_RULE"); + alignmentAttr.setValue("ASIS"); + + var transparencyModeAttr = node.getAttr( + read, frame.current(), "applyMatteToColor" + ); + if (extension == "png") + transparencyModeAttr.setValue(PNGTransparencyMode); + if (extension == "tga") + transparencyModeAttr.setValue(TGATransparencyMode); + if (extension == "sgi") + transparencyModeAttr.setValue(SGITransparencyMode); + if (extension == "psd") + transparencyModeAttr.setValue(FlatPSDTransparencyMode); + + node.linkAttr(read, "DRAWING.ELEMENT", uniqueColumnName); + + // Create a drawing for each file. + for( var i =0; i <= files.length - 1; ++i) + { + timing = start_frame + i + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, timing, true); + // Get the actual path, in tmp folder. 
+ var drawingFilePath = Drawing.filename(elemId, timing.toString()); + copyFile( files[i], drawingFilePath ); + + column.setEntry(uniqueColumnName, 1, timing, timing.toString()); + } + return read; + } + import_files(); +} +%s_import_files +""" % (signature, signature) + +replace_files = """function %s_replace_files(args) +{ + var files = args[0]; + var _node = args[1]; + var start_frame = args[2]; + + var _column = node.linkedColumn(_node, "DRAWING.ELEMENT"); + + // Delete existing drawings. + var timings = column.getDrawingTimings(_column); + for( var i =0; i <= timings.length - 1; ++i) + { + column.deleteDrawingAt(_column, parseInt(timings[i])); + } + + // Create new drawings. + for( var i =0; i <= files.length - 1; ++i) + { + timing = start_frame + i + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(node.getElementId(_node), timing, true); + // Get the actual path, in tmp folder. + var drawingFilePath = Drawing.filename( + node.getElementId(_node), timing.toString() + ); + copyFile( files[i], drawingFilePath ); + + column.setEntry(_column, 1, timing, timing.toString()); + } +} +%s_replace_files +""" % (signature, signature) + + +class ImageSequenceLoader(load.LoaderPlugin): + """Load images + Stores the imported asset in a container named after the asset. + """ + families = ["mindbender.imagesequence"] + representations = ["*"] + + def load(self, context, name=None, namespace=None, data=None): + files = [] + for f in context["version"]["data"]["files"]: + files.append( + os.path.join( + context["version"]["data"]["stagingDir"], f + ).replace("\\", "/") + ) + + read_node = harmony.send( + { + "function": copy_files + import_files, + "args": ["Top", files, context["version"]["data"]["subset"], 1] + } + )["result"] + + self[:] = [read_node] + + return harmony.containerise( + name, + namespace, + read_node, + context, + self.__class__.__name__ + ) + + def update(self, container, representation): + node = container.pop("node") + + project_name = get_current_project_name() + version = get_version_by_id(project_name, representation["parent"]) + files = [] + for f in version["data"]["files"]: + files.append( + os.path.join( + version["data"]["stagingDir"], f + ).replace("\\", "/") + ) + + harmony.send( + { + "function": copy_files + replace_files, + "args": [files, node, 1] + } + ) + + harmony.imprint( + node, {"representation": str(representation["_id"])} + ) + + def remove(self, container): + node = container.pop("node") + signature = str(uuid4()).replace("-", "_") + func = """function %s_deleteNode(_node) + { + node.deleteNode(_node, true, true); + } + %_deleteNode + """ % (signature, signature) + harmony.send( + {"function": func, "args": [node]} + ) + + def switch(self, container, representation): + self.update(container, representation) +``` + +## Resources +- https://github.com/diegogarciahuerta/tk-harmony +- https://github.com/cfourney/OpenHarmony +- [Toon Boom Discord](https://discord.gg/syAjy4H) +- [Toon Boom TD](https://discord.gg/yAjyQtZ) diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/client/ayon_core/hosts/harmony/api/TB_sceneOpened.js similarity index 96% rename from openpype/hosts/harmony/api/TB_sceneOpened.js rename to client/ayon_core/hosts/harmony/api/TB_sceneOpened.js index 48daf094dd..1fb0d295e7 100644 --- a/openpype/hosts/harmony/api/TB_sceneOpened.js +++ b/client/ayon_core/hosts/harmony/api/TB_sceneOpened.js @@ -276,7 +276,7 @@ function Client() { app.avalonClient.send( { - 'module': 'openpype.lib', + 'module': 
'ayon_core.lib', 'method': 'emit_event', 'args': ['application.launched'] }, false); @@ -370,7 +370,7 @@ function start() { app.avalonMenu = null; for (var i = 0 ; i < actions.length; i++) { - label = System.getenv('AVALON_LABEL'); + label = System.getenv('AYON_MENU_LABEL'); if (actions[i].text == label) { app.avalonMenu = true; } @@ -378,7 +378,7 @@ function start() { var menu = null; if (app.avalonMenu == null) { - menu = menuBar.addMenu(System.getenv('AVALON_LABEL')); + menu = menuBar.addMenu(System.getenv('AYON_MENU_LABEL')); } // menu = menuBar.addMenu('Avalon'); @@ -387,7 +387,7 @@ function start() { */ self.onCreator = function() { app.avalonClient.send({ - 'module': 'openpype.hosts.harmony.api.lib', + 'module': 'ayon_core.hosts.harmony.api.lib', 'method': 'show', 'args': ['creator'] }, false); @@ -402,7 +402,7 @@ function start() { */ self.onWorkfiles = function() { app.avalonClient.send({ - 'module': 'openpype.hosts.harmony.api.lib', + 'module': 'ayon_core.hosts.harmony.api.lib', 'method': 'show', 'args': ['workfiles'] }, false); @@ -417,7 +417,7 @@ function start() { */ self.onLoad = function() { app.avalonClient.send({ - 'module': 'openpype.hosts.harmony.api.lib', + 'module': 'ayon_core.hosts.harmony.api.lib', 'method': 'show', 'args': ['loader'] }, false); @@ -433,7 +433,7 @@ function start() { */ self.onPublish = function() { app.avalonClient.send({ - 'module': 'openpype.hosts.harmony.api.lib', + 'module': 'ayon_core.hosts.harmony.api.lib', 'method': 'show', 'args': ['publish'] }, false); @@ -449,7 +449,7 @@ function start() { */ self.onManage = function() { app.avalonClient.send({ - 'module': 'openpype.hosts.harmony.api.lib', + 'module': 'ayon_core.hosts.harmony.api.lib', 'method': 'show', 'args': ['sceneinventory'] }, false); @@ -465,7 +465,7 @@ function start() { */ self.onSubsetManage = function() { app.avalonClient.send({ - 'module': 'openpype.hosts.harmony.api.lib', + 'module': 'ayon_core.hosts.harmony.api.lib', 'method': 'show', 'args': ['subsetmanager'] }, false); @@ -482,7 +482,7 @@ function start() { self.onSetSceneSettings = function() { app.avalonClient.send( { - "module": "openpype.hosts.harmony.api", + "module": "ayon_core.hosts.harmony.api", "method": "ensure_scene_settings", "args": [] }, @@ -500,7 +500,7 @@ function start() { */ self.onExperimentalTools = function() { app.avalonClient.send({ - 'module': 'openpype.hosts.harmony.api.lib', + 'module': 'ayon_core.hosts.harmony.api.lib', 'method': 'show', 'args': ['experimental_tools'] }, false); @@ -550,7 +550,7 @@ function ensureSceneSettings() { var app = QCoreApplication.instance(); app.avalonClient.send( { - "module": "openpype.hosts.harmony.api", + "module": "ayon_core.hosts.harmony.api", "method": "ensure_scene_settings", "args": [] }, diff --git a/openpype/hosts/harmony/api/__init__.py b/client/ayon_core/hosts/harmony/api/__init__.py similarity index 100% rename from openpype/hosts/harmony/api/__init__.py rename to client/ayon_core/hosts/harmony/api/__init__.py diff --git a/openpype/hosts/harmony/api/js/.eslintrc.json b/client/ayon_core/hosts/harmony/api/js/.eslintrc.json similarity index 100% rename from openpype/hosts/harmony/api/js/.eslintrc.json rename to client/ayon_core/hosts/harmony/api/js/.eslintrc.json diff --git a/openpype/hosts/harmony/api/js/AvalonHarmony.js b/client/ayon_core/hosts/harmony/api/js/AvalonHarmony.js similarity index 100% rename from openpype/hosts/harmony/api/js/AvalonHarmony.js rename to client/ayon_core/hosts/harmony/api/js/AvalonHarmony.js diff --git 
a/openpype/hosts/harmony/api/js/package.json b/client/ayon_core/hosts/harmony/api/js/package.json similarity index 100% rename from openpype/hosts/harmony/api/js/package.json rename to client/ayon_core/hosts/harmony/api/js/package.json diff --git a/client/ayon_core/hosts/harmony/api/lib.py b/client/ayon_core/hosts/harmony/api/lib.py new file mode 100644 index 0000000000..782134c343 --- /dev/null +++ b/client/ayon_core/hosts/harmony/api/lib.py @@ -0,0 +1,625 @@ +# -*- coding: utf-8 -*- +"""Utility functions used for Avalon - Harmony integration.""" +import subprocess +import threading +import os +import random +import zipfile +import sys +import filecmp +import shutil +import logging +import contextlib +import json +import signal +import time +from uuid import uuid4 +from qtpy import QtWidgets, QtCore, QtGui +import collections + +from .server import Server + +from ayon_core.tools.stdout_broker.app import StdOutBroker +from ayon_core.tools.utils import host_tools +from ayon_core import style +from ayon_core.lib.applications import get_non_python_host_kwargs + +# Setup logging. +log = logging.getLogger(__name__) +log.setLevel(logging.DEBUG) + + +class ProcessContext: + server = None + pid = None + process = None + application_path = None + callback_queue = collections.deque() + workfile_path = None + port = None + stdout_broker = None + workfile_tool = None + + @classmethod + def execute_in_main_thread(cls, func_to_call_from_main_thread): + cls.callback_queue.append(func_to_call_from_main_thread) + + @classmethod + def main_thread_listen(cls): + if cls.callback_queue: + callback = cls.callback_queue.popleft() + callback() + if cls.process is not None and cls.process.poll() is not None: + log.info("Server is not running, closing") + ProcessContext.stdout_broker.stop() + QtWidgets.QApplication.quit() + + +def signature(postfix="func") -> str: + """Return random ECMA6 compatible function name. + + Args: + postfix (str): name to append to random string. + Returns: + str: random function name. + + """ + return "f{}_{}".format(str(uuid4()).replace("-", "_"), postfix) + + +class _ZipFile(zipfile.ZipFile): + """Extended check for windows invalid characters.""" + + # this is extending default zipfile table for few invalid characters + # that can come from Mac + _windows_illegal_characters = ":<>|\"?*\r\n\x00" + _windows_illegal_name_trans_table = str.maketrans( + _windows_illegal_characters, + "_" * len(_windows_illegal_characters) + ) + + +def main(*subprocess_args): + # coloring in StdOutBroker + os.environ["AYON_LOG_NO_COLORS"] = "0" + app = QtWidgets.QApplication([]) + app.setQuitOnLastWindowClosed(False) + icon = QtGui.QIcon(style.get_app_icon_path()) + app.setWindowIcon(icon) + + ProcessContext.stdout_broker = StdOutBroker('harmony') + ProcessContext.stdout_broker.start() + launch(*subprocess_args) + + loop_timer = QtCore.QTimer() + loop_timer.setInterval(20) + + loop_timer.timeout.connect(ProcessContext.main_thread_listen) + loop_timer.start() + + sys.exit(app.exec_()) + + +def setup_startup_scripts(): + """Manages installation of avalon's TB_sceneOpened.js for Harmony launch. + + If a studio already has defined "TOONBOOM_GLOBAL_SCRIPT_LOCATION", copies + the TB_sceneOpened.js to that location if the file is different. + Otherwise, will set the env var to point to the avalon/harmony folder. 
+ + Admins should be aware that this will overwrite TB_sceneOpened in the + "TOONBOOM_GLOBAL_SCRIPT_LOCATION", and that if they want to have additional + logic, they will need to one of the following: + * Create a Harmony package to manage startup logic + * Use TB_sceneOpenedUI.js instead to manage startup logic + * Add their startup logic to avalon/harmony/TB_sceneOpened.js + """ + avalon_dcc_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), + "api") + startup_js = "TB_sceneOpened.js" + + if os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"): + + avalon_harmony_startup = os.path.join(avalon_dcc_dir, startup_js) + + env_harmony_startup = os.path.join( + os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"), startup_js) + + if not filecmp.cmp(avalon_harmony_startup, env_harmony_startup): + try: + shutil.copy(avalon_harmony_startup, env_harmony_startup) + except Exception as e: + log.error(e) + log.warning( + "Failed to copy {0} to {1}! " + "Defaulting to Avalon TOONBOOM_GLOBAL_SCRIPT_LOCATION." + .format(avalon_harmony_startup, env_harmony_startup)) + + os.environ["TOONBOOM_GLOBAL_SCRIPT_LOCATION"] = avalon_dcc_dir + else: + os.environ["TOONBOOM_GLOBAL_SCRIPT_LOCATION"] = avalon_dcc_dir + + +def check_libs(): + """Check if `OpenHarmony`_ is available. + + Avalon expects either path in `LIB_OPENHARMONY_PATH` or `openHarmony.js` + present in `TOONBOOM_GLOBAL_SCRIPT_LOCATION`. + + Throws: + RuntimeError: If openHarmony is not found. + + .. _OpenHarmony: + https://github.com/cfourney/OpenHarmony + + """ + if not os.getenv("LIB_OPENHARMONY_PATH"): + + if os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"): + if os.path.exists( + os.path.join( + os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"), + "openHarmony.js")): + + os.environ["LIB_OPENHARMONY_PATH"] = \ + os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION") + return + + else: + log.error(("Cannot find OpenHarmony library. " + "Please set path to it in LIB_OPENHARMONY_PATH " + "environment variable.")) + raise RuntimeError("Missing OpenHarmony library.") + + +def launch(application_path, *args): + """Set Harmony for launch. + + Launches Harmony and the server, then starts listening on the main thread + for callbacks from the server. This is to have Qt applications run in the + main thread. + + Args: + application_path (str): Path to Harmony. + + """ + from ayon_core.pipeline import install_host + from ayon_core.hosts.harmony import api as harmony + + install_host(harmony) + + ProcessContext.port = random.randrange(49152, 65535) + os.environ["AVALON_HARMONY_PORT"] = str(ProcessContext.port) + ProcessContext.application_path = application_path + + # Launch Harmony. 
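+    # Make sure the startup scripts and the OpenHarmony library are in place
+    # before the Harmony process is spawned by launch_zip_file().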
+ setup_startup_scripts() + check_libs() + + if not os.environ.get("AVALON_HARMONY_WORKFILES_ON_LAUNCH", False): + open_empty_workfile() + return + + ProcessContext.workfile_tool = host_tools.get_tool_by_name("workfiles") + host_tools.show_workfiles(save=False) + ProcessContext.execute_in_main_thread(check_workfiles_tool) + + +def check_workfiles_tool(): + if ProcessContext.workfile_tool.isVisible(): + ProcessContext.execute_in_main_thread(check_workfiles_tool) + elif not ProcessContext.workfile_path: + open_empty_workfile() + + +def open_empty_workfile(): + zip_file = os.path.join(os.path.dirname(__file__), "temp.zip") + temp_path = get_local_harmony_path(zip_file) + if os.path.exists(temp_path): + log.info(f"removing existing {temp_path}") + try: + shutil.rmtree(temp_path) + except Exception as e: + log.critical(f"cannot clear {temp_path}") + raise Exception(f"cannot clear {temp_path}") from e + + launch_zip_file(zip_file) + + +def get_local_harmony_path(filepath): + """From the provided path get the equivalent local Harmony path.""" + basename = os.path.splitext(os.path.basename(filepath))[0] + harmony_path = os.path.join(os.path.expanduser("~"), ".avalon", "harmony") + return os.path.join(harmony_path, basename) + + +def launch_zip_file(filepath): + """Launch a Harmony application instance with the provided zip file. + + Args: + filepath (str): Path to file. + """ + print(f"Localizing {filepath}") + + temp_path = get_local_harmony_path(filepath) + scene_name = os.path.basename(temp_path) + if os.path.exists(os.path.join(temp_path, scene_name)): + # unzipped with duplicated scene_name + temp_path = os.path.join(temp_path, scene_name) + + scene_path = os.path.join( + temp_path, scene_name + ".xstage" + ) + + unzip = False + if os.path.exists(scene_path): + # Check remote scene is newer than local. + if os.path.getmtime(scene_path) < os.path.getmtime(filepath): + try: + shutil.rmtree(temp_path) + except Exception as e: + log.error(e) + raise Exception("Cannot delete working folder") from e + unzip = True + else: + unzip = True + + if unzip: + with _ZipFile(filepath, "r") as zip_ref: + zip_ref.extractall(temp_path) + + if os.path.exists(os.path.join(temp_path, scene_name)): + # unzipped with duplicated scene_name + temp_path = os.path.join(temp_path, scene_name) + + # Close existing scene. + if ProcessContext.pid: + os.kill(ProcessContext.pid, signal.SIGTERM) + + # Stop server. + if ProcessContext.server: + ProcessContext.server.stop() + + # Launch Avalon server. + ProcessContext.server = Server(ProcessContext.port) + ProcessContext.server.start() + # thread = threading.Thread(target=self.server.start) + # thread.daemon = True + # thread.start() + + # Save workfile path for later. 
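+    # on_file_changed() later zips the scene folder back to this path
+    # whenever the .xstage file is saved.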
+ ProcessContext.workfile_path = filepath + + # find any xstage files is directory, prefer the one with the same name + # as directory (plus extension) + xstage_files = [] + for _, _, files in os.walk(temp_path): + for file in files: + if os.path.splitext(file)[1] == ".xstage": + xstage_files.append(file) + + if not os.path.basename("temp.zip"): + if not xstage_files: + ProcessContext.server.stop() + print("no xstage file was found") + return + + # try to use first available + scene_path = os.path.join( + temp_path, xstage_files[0] + ) + + # prefer the one named as zip file + zip_based_name = "{}.xstage".format( + os.path.splitext(os.path.basename(filepath))[0]) + + if zip_based_name in xstage_files: + scene_path = os.path.join( + temp_path, zip_based_name + ) + + if not os.path.exists(scene_path): + print("error: cannot determine scene file {}".format(scene_path)) + ProcessContext.server.stop() + return + + print("Launching {}".format(scene_path)) + kwargs = get_non_python_host_kwargs({}, False) + process = subprocess.Popen( + [ProcessContext.application_path, scene_path], + **kwargs + ) + ProcessContext.pid = process.pid + ProcessContext.process = process + ProcessContext.stdout_broker.host_connected() + + +def on_file_changed(path, threaded=True): + """Threaded zipping and move of the project directory. + + This method is called when the `.xstage` file is changed. + """ + log.debug("File changed: " + path) + + if ProcessContext.workfile_path is None: + return + + if threaded: + thread = threading.Thread( + target=zip_and_move, + args=(os.path.dirname(path), ProcessContext.workfile_path) + ) + thread.start() + else: + zip_and_move(os.path.dirname(path), ProcessContext.workfile_path) + + +def zip_and_move(source, destination): + """Zip a directory and move to `destination`. + + Args: + source (str): Directory to zip and move to destination. + destination (str): Destination file path to zip file. + + """ + os.chdir(os.path.dirname(source)) + shutil.make_archive(os.path.basename(source), "zip", source) + with _ZipFile(os.path.basename(source) + ".zip") as zr: + if zr.testzip() is not None: + raise Exception("File archive is corrupted.") + shutil.move(os.path.basename(source) + ".zip", destination) + log.debug(f"Saved '{source}' to '{destination}'") + + +def show(tool_name): + """Call show on "module_name". + + This allows to make a QApplication ahead of time and always "exec_" to + prevent crashing. + + Args: + module_name (str): Name of module to call "show" on. + + """ + # Requests often get doubled up when showing tools, so we wait a second for + # requests to be received properly. + time.sleep(1) + + kwargs = {} + if tool_name == "loader": + kwargs["use_context"] = True + + ProcessContext.execute_in_main_thread( + lambda: host_tools.show_tool_by_name(tool_name, **kwargs) + ) + + # Required return statement. + return "nothing" + + +def get_scene_data(): + try: + return send( + { + "function": "AvalonHarmony.getSceneData" + })["result"] + except json.decoder.JSONDecodeError: + # Means no scene metadata has been made before. + return {} + except KeyError: + # Means no existing scene metadata has been made. + return {} + + +def set_scene_data(data): + """Write scene data to metadata. + + Args: + data (dict): Data to write. + + """ + # Write scene data. + send( + { + "function": "AvalonHarmony.setSceneData", + "args": data + }) + + +def read(node_id): + """Read object metadata in to a dictionary. + + Args: + node_id (str): Path to node or id of object. 
+ + Returns: + dict + """ + scene_data = get_scene_data() + if node_id in scene_data: + return scene_data[node_id] + + return {} + + +def remove(node_id): + """ + Remove node data from scene metadata. + + Args: + node_id (str): full name (eg. 'Top/renderAnimation') + """ + data = get_scene_data() + del data[node_id] + set_scene_data(data) + + +def delete_node(node): + """ Physically delete node from scene. """ + send( + { + "function": "AvalonHarmony.deleteNode", + "args": node + } + ) + + +def imprint(node_id, data, remove=False): + """Write `data` to the `node` as json. + + Arguments: + node_id (str): Path to node or id of object. + data (dict): Dictionary of key/value pairs. + remove (bool): Removes the data from the scene. + + Example: + >>> from ayon_core.hosts.harmony.api import lib + >>> node = "Top/Display" + >>> data = {"str": "something", "int": 1, "float": 0.32, "bool": True} + >>> lib.imprint(layer, data) + """ + scene_data = get_scene_data() + + if remove and (node_id in scene_data): + scene_data.pop(node_id, None) + else: + if node_id in scene_data: + scene_data[node_id].update(data) + else: + scene_data[node_id] = data + + set_scene_data(scene_data) + + +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context.""" + + selected_nodes = send( + { + "function": "AvalonHarmony.getSelectedNodes" + })["result"] + + try: + yield selected_nodes + finally: + selected_nodes = send( + { + "function": "AvalonHarmony.selectNodes", + "args": selected_nodes + } + ) + + +def send(request): + """Public method for sending requests to Harmony.""" + return ProcessContext.server.send(request) + + +def select_nodes(nodes): + """ Selects nodes in Node View """ + _ = send( + { + "function": "AvalonHarmony.selectNodes", + "args": nodes + } + ) + + +@contextlib.contextmanager +def maintained_nodes_state(nodes): + """Maintain nodes states during context.""" + # Collect current state. + states = send( + { + "function": "AvalonHarmony.areEnabled", "args": nodes + })["result"] + + # Disable all nodes. + send( + { + "function": "AvalonHarmony.disableNodes", "args": nodes + }) + + try: + yield + finally: + send( + { + "function": "AvalonHarmony.setState", + "args": [nodes, states] + }) + + +def save_scene(): + """Save the Harmony scene safely. + + The built-in (to Avalon) background zip and moving of the Harmony scene + folder, interfers with server/client communication by sending two requests + at the same time. This only happens when sending "scene.saveAll()". This + method prevents this double request and safely saves the scene. + + """ + # Need to turn off the background watcher else the communication with + # the server gets spammed with two requests at the same time. + scene_path = send( + {"function": "AvalonHarmony.saveScene"})["result"] + + # Manually update the remote file. + on_file_changed(scene_path, threaded=False) + + # Re-enable the background watcher. 
+ send({"function": "AvalonHarmony.enableFileWather"}) + + +def save_scene_as(filepath): + """Save Harmony scene as `filepath`.""" + scene_dir = os.path.dirname(filepath) + destination = os.path.join( + os.path.dirname(ProcessContext.workfile_path), + os.path.splitext(os.path.basename(filepath))[0] + ".zip" + ) + + if os.path.exists(scene_dir): + try: + shutil.rmtree(scene_dir) + except Exception as e: + log.error(f"Cannot remove {scene_dir}") + raise Exception(f"Cannot remove {scene_dir}") from e + + send( + {"function": "scene.saveAs", "args": [scene_dir]} + )["result"] + + zip_and_move(scene_dir, destination) + + ProcessContext.workfile_path = destination + + send( + {"function": "AvalonHarmony.addPathToWatcher", "args": filepath} + ) + + +def find_node_by_name(name, node_type): + """Find node by its name. + + Args: + name (str): Name of the Node. (without part before '/') + node_type (str): Type of the Node. + 'READ' - for loaded data with Loaders (background) + 'GROUP' - for loaded data with Loaders (templates) + 'WRITE' - render nodes + + Returns: + str: FQ Node name. + + """ + nodes = send( + {"function": "node.getNodes", "args": [[node_type]]} + )["result"] + for node in nodes: + node_name = node.split("/")[-1] + if name == node_name: + return node + + return None diff --git a/client/ayon_core/hosts/harmony/api/pipeline.py b/client/ayon_core/hosts/harmony/api/pipeline.py new file mode 100644 index 0000000000..863053dddc --- /dev/null +++ b/client/ayon_core/hosts/harmony/api/pipeline.py @@ -0,0 +1,345 @@ +import os +from pathlib import Path +import logging + +import pyblish.api + +from ayon_core.lib import register_event_callback +from ayon_core.pipeline import ( + register_loader_plugin_path, + register_creator_plugin_path, + deregister_loader_plugin_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, +) +from ayon_core.pipeline.load import get_outdated_containers +from ayon_core.pipeline.context_tools import get_current_project_asset + +from ayon_core.hosts.harmony import HARMONY_HOST_DIR +import ayon_core.hosts.harmony.api as harmony + + +log = logging.getLogger("ayon_core.hosts.harmony") + +PLUGINS_DIR = os.path.join(HARMONY_HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + + +def set_scene_settings(settings): + """Set correct scene settings in Harmony. + + Args: + settings (dict): Scene settings. + + Returns: + dict: Dictionary of settings to set. + + """ + harmony.send( + {"function": "PypeHarmony.setSceneSettings", "args": settings}) + + +def get_asset_settings(): + """Get settings on current asset from database. + + Returns: + dict: Scene data. 
+ + """ + + asset_doc = get_current_project_asset() + asset_data = asset_doc["data"] + fps = asset_data.get("fps") + frame_start = asset_data.get("frameStart") + frame_end = asset_data.get("frameEnd") + handle_start = asset_data.get("handleStart") + handle_end = asset_data.get("handleEnd") + resolution_width = asset_data.get("resolutionWidth") + resolution_height = asset_data.get("resolutionHeight") + entity_type = asset_data.get("entityType") + + scene_data = { + "fps": fps, + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "entityType": entity_type + } + + return scene_data + + +def ensure_scene_settings(): + """Validate if Harmony scene has valid settings.""" + settings = get_asset_settings() + + invalid_settings = [] + valid_settings = {} + for key, value in settings.items(): + if value is None: + invalid_settings.append(key) + else: + valid_settings[key] = value + + # Warn about missing attributes. + if invalid_settings: + msg = "Missing attributes:" + for item in invalid_settings: + msg += f"\n{item}" + + harmony.send( + {"function": "PypeHarmony.message", "args": msg}) + + set_scene_settings(valid_settings) + + +def check_inventory(): + """Check is scene contains outdated containers. + + If it does it will colorize outdated nodes and display warning message + in Harmony. + """ + + outdated_containers = get_outdated_containers() + if not outdated_containers: + return + + # Colour nodes. + outdated_nodes = [] + for container in outdated_containers: + if container["loader"] == "ImageSequenceLoader": + outdated_nodes.append( + harmony.find_node_by_name(container["name"], "READ") + ) + harmony.send({"function": "PypeHarmony.setColor", "args": outdated_nodes}) + + # Warn about outdated containers. + msg = "There are outdated containers in the scene." + harmony.send({"function": "PypeHarmony.message", "args": msg}) + + +def application_launch(event): + """Event that is executed after Harmony is launched.""" + # fills AYON_HARMONY_JS + pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js" + pype_harmony_js = pype_harmony_path.read_text() + + # go through js/creators, loaders and publish folders and load all scripts + script = "" + for item in ["creators", "loaders", "publish"]: + dir_to_scan = Path(__file__).parent.parent / "js" / item + for child in dir_to_scan.iterdir(): + script += child.read_text() + + # send scripts to Harmony + harmony.send({"script": pype_harmony_js}) + harmony.send({"script": script}) + inject_avalon_js() + + # ensure_scene_settings() + check_inventory() + + +def export_template(backdrops, nodes, filepath): + """Export Template to file. + + Args: + backdrops (list): List of backdrops to export. + nodes (list): List of nodes to export. + filepath (str): Path where to save Template. + + """ + harmony.send({ + "function": "PypeHarmony.exportTemplate", + "args": [ + backdrops, + nodes, + os.path.basename(filepath), + os.path.dirname(filepath) + ] + }) + + +def install(): + """Install Pype as host config.""" + print("Installing Pype config ...") + + pyblish.api.register_host("harmony") + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + log.info(PUBLISH_PATH) + + # Register callbacks. 
+ pyblish.api.register_callback( + "instanceToggled", on_pyblish_instance_toggled + ) + + register_event_callback("application.launched", application_launch) + + +def uninstall(): + pyblish.api.deregister_plugin_path(PUBLISH_PATH) + deregister_loader_plugin_path(LOAD_PATH) + deregister_creator_plugin_path(CREATE_PATH) + + +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node enabling on instance toggles.""" + node = None + if instance.data.get("setMembers"): + node = instance.data["setMembers"][0] + + if node: + harmony.send( + { + "function": "PypeHarmony.toggleInstance", + "args": [node, new_value] + } + ) + + +def inject_avalon_js(): + """Inject AvalonHarmony.js into Harmony.""" + avalon_harmony_js = Path(__file__).parent.joinpath("js/AvalonHarmony.js") + script = avalon_harmony_js.read_text() + # send AvalonHarmony.js to Harmony + harmony.send({"script": script}) + + +def ls(): + """Yields containers from Harmony scene. + + This is the host-equivalent of api.ls(), but instead of listing + assets on disk, it lists assets already loaded in Harmony; once loaded + they are called 'containers'. + + Yields: + dict: container + """ + objects = harmony.get_scene_data() or {} + for _, data in objects.items(): + # Skip non-tagged objects. + if not data: + continue + + # Filter to only containers. + if "container" not in data.get("id"): + continue + + if not data.get("objectName"): # backward compatibility + data["objectName"] = data["name"] + yield data + + +def list_instances(remove_orphaned=True): + """ + List all created instances from current workfile which + will be published. + + Pulls from File > File Info + + For SubsetManager, by default it check if instance has matching node + in the scene, if not, instance gets deleted from metadata. + + Returns: + (list) of dictionaries matching instances format + """ + objects = harmony.get_scene_data() or {} + instances = [] + for key, data in objects.items(): + # Skip non-tagged objects. + if not data: + continue + + # Filter out containers. + if "container" in data.get("id"): + continue + + data['uuid'] = key + + if remove_orphaned: + node_name = key.split("/")[-1] + located_node = harmony.find_node_by_name(node_name, 'WRITE') + if not located_node: + print("Removing orphaned instance {}".format(key)) + harmony.remove(key) + continue + + instances.append(data) + + return instances + + +def remove_instance(instance): + """ + Remove instance from current workfile metadata and from scene! + + Updates metadata of current file in File > File Info and removes + icon highlight on group layer. + + For SubsetManager + + Args: + instance (dict): instance representation from subsetmanager model + """ + node = instance.get("uuid") + harmony.remove(node) + harmony.delete_node(node) + + +def select_instance(instance): + """ + Select instance in Node View + + Args: + instance (dict): instance representation from subsetmanager model + """ + harmony.select_nodes([instance.get("uuid")]) + + +def containerise(name, + namespace, + node, + context, + loader=None, + suffix=None, + nodes=None): + """Imprint node with metadata. + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + name (str): Name of resulting assembly. + namespace (str): Namespace under which to host container. + node (str): Node to containerise. + context (dict): Asset information. + loader (str, optional): Name of loader used to produce this container. + suffix (str, optional): Suffix of container, defaults to `_CON`. 
+ + Returns: + container (str): Path of container assembly. + """ + if not nodes: + nodes = [] + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace, + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + "nodes": nodes + } + + harmony.imprint(node, data) + + return node diff --git a/client/ayon_core/hosts/harmony/api/plugin.py b/client/ayon_core/hosts/harmony/api/plugin.py new file mode 100644 index 0000000000..29ebdb2b8d --- /dev/null +++ b/client/ayon_core/hosts/harmony/api/plugin.py @@ -0,0 +1,70 @@ +from ayon_core.pipeline import LegacyCreator +import ayon_core.hosts.harmony.api as harmony + + +class Creator(LegacyCreator): + """Creator plugin to create instances in Harmony. + + By default a Composite node is created to support any number of nodes in + an instance, but any node type is supported. + If the selection is used, the selected nodes will be connected to the + created node. + """ + + defaults = ["Main"] + node_type = "COMPOSITE" + + def setup_node(self, node): + """Prepare node as container. + + Args: + node (str): Path to node. + """ + harmony.send( + { + "function": "AvalonHarmony.setupNodeForCreator", + "args": node + } + ) + + def process(self): + """Plugin entry point.""" + existing_node_names = harmony.send( + { + "function": "AvalonHarmony.getNodesNamesByType", + "args": self.node_type + })["result"] + + # Dont allow instances with the same name. + msg = "Instance with name \"{}\" already exists.".format(self.name) + for name in existing_node_names: + if self.name.lower() == name.lower(): + harmony.send( + { + "function": "AvalonHarmony.message", "args": msg + } + ) + return False + + with harmony.maintained_selection() as selection: + node = None + + if (self.options or {}).get("useSelection") and selection: + node = harmony.send( + { + "function": "AvalonHarmony.createContainer", + "args": [self.name, self.node_type, selection[-1]] + } + )["result"] + else: + node = harmony.send( + { + "function": "AvalonHarmony.createContainer", + "args": [self.name, self.node_type] + } + )["result"] + + harmony.imprint(node, self.data) + self.setup_node(node) + + return node diff --git a/openpype/hosts/harmony/api/server.py b/client/ayon_core/hosts/harmony/api/server.py similarity index 100% rename from openpype/hosts/harmony/api/server.py rename to client/ayon_core/hosts/harmony/api/server.py diff --git a/openpype/hosts/harmony/api/temp.zip b/client/ayon_core/hosts/harmony/api/temp.zip similarity index 100% rename from openpype/hosts/harmony/api/temp.zip rename to client/ayon_core/hosts/harmony/api/temp.zip diff --git a/openpype/hosts/harmony/api/workio.py b/client/ayon_core/hosts/harmony/api/workio.py similarity index 100% rename from openpype/hosts/harmony/api/workio.py rename to client/ayon_core/hosts/harmony/api/workio.py diff --git a/openpype/hosts/harmony/js/.eslintrc.json b/client/ayon_core/hosts/harmony/js/.eslintrc.json similarity index 100% rename from openpype/hosts/harmony/js/.eslintrc.json rename to client/ayon_core/hosts/harmony/js/.eslintrc.json diff --git a/openpype/hosts/harmony/js/PypeHarmony.js b/client/ayon_core/hosts/harmony/js/PypeHarmony.js similarity index 100% rename from openpype/hosts/harmony/js/PypeHarmony.js rename to client/ayon_core/hosts/harmony/js/PypeHarmony.js diff --git a/openpype/hosts/harmony/js/README.md b/client/ayon_core/hosts/harmony/js/README.md similarity index 100% rename from openpype/hosts/harmony/js/README.md rename to 
client/ayon_core/hosts/harmony/js/README.md diff --git a/openpype/hosts/harmony/js/creators/CreateRender.js b/client/ayon_core/hosts/harmony/js/creators/CreateRender.js similarity index 86% rename from openpype/hosts/harmony/js/creators/CreateRender.js rename to client/ayon_core/hosts/harmony/js/creators/CreateRender.js index 92ec6dfd2f..1a2b606eb9 100644 --- a/openpype/hosts/harmony/js/creators/CreateRender.js +++ b/client/ayon_core/hosts/harmony/js/creators/CreateRender.js @@ -6,8 +6,8 @@ // check if PypeHarmony is defined and if not, load it. if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } diff --git a/openpype/hosts/harmony/js/loaders/ImageSequenceLoader.js b/client/ayon_core/hosts/harmony/js/loaders/ImageSequenceLoader.js similarity index 98% rename from openpype/hosts/harmony/js/loaders/ImageSequenceLoader.js rename to client/ayon_core/hosts/harmony/js/loaders/ImageSequenceLoader.js index cf8a9a29ca..25afeff214 100644 --- a/openpype/hosts/harmony/js/loaders/ImageSequenceLoader.js +++ b/client/ayon_core/hosts/harmony/js/loaders/ImageSequenceLoader.js @@ -5,8 +5,8 @@ // check if PypeHarmony is defined and if not, load it. if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } if (typeof $ === 'undefined'){ diff --git a/openpype/hosts/harmony/js/loaders/TemplateLoader.js b/client/ayon_core/hosts/harmony/js/loaders/TemplateLoader.js similarity index 97% rename from openpype/hosts/harmony/js/loaders/TemplateLoader.js rename to client/ayon_core/hosts/harmony/js/loaders/TemplateLoader.js index 1df04c8282..06ef1671ea 100644 --- a/openpype/hosts/harmony/js/loaders/TemplateLoader.js +++ b/client/ayon_core/hosts/harmony/js/loaders/TemplateLoader.js @@ -6,8 +6,8 @@ // check if PypeHarmony is defined and if not, load it. if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } if (typeof $ === 'undefined'){ diff --git a/openpype/hosts/harmony/js/package.json b/client/ayon_core/hosts/harmony/js/package.json similarity index 100% rename from openpype/hosts/harmony/js/package.json rename to client/ayon_core/hosts/harmony/js/package.json diff --git a/openpype/hosts/harmony/js/publish/CollectCurrentFile.js b/client/ayon_core/hosts/harmony/js/publish/CollectCurrentFile.js similarity index 84% rename from openpype/hosts/harmony/js/publish/CollectCurrentFile.js rename to client/ayon_core/hosts/harmony/js/publish/CollectCurrentFile.js index 2eeb7fb764..b9863d4b29 100644 --- a/openpype/hosts/harmony/js/publish/CollectCurrentFile.js +++ b/client/ayon_core/hosts/harmony/js/publish/CollectCurrentFile.js @@ -6,8 +6,8 @@ // check if PypeHarmony is defined and if not, load it. 
if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } diff --git a/openpype/hosts/harmony/js/publish/CollectFarmRender.js b/client/ayon_core/hosts/harmony/js/publish/CollectFarmRender.js similarity index 90% rename from openpype/hosts/harmony/js/publish/CollectFarmRender.js rename to client/ayon_core/hosts/harmony/js/publish/CollectFarmRender.js index 759dc5ce5d..3d9f69ebde 100644 --- a/openpype/hosts/harmony/js/publish/CollectFarmRender.js +++ b/client/ayon_core/hosts/harmony/js/publish/CollectFarmRender.js @@ -6,8 +6,8 @@ // check if PypeHarmony is defined and if not, load it. if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } diff --git a/openpype/hosts/harmony/js/publish/CollectPalettes.js b/client/ayon_core/hosts/harmony/js/publish/CollectPalettes.js similarity index 86% rename from openpype/hosts/harmony/js/publish/CollectPalettes.js rename to client/ayon_core/hosts/harmony/js/publish/CollectPalettes.js index afb0ad854a..0b119c3118 100644 --- a/openpype/hosts/harmony/js/publish/CollectPalettes.js +++ b/client/ayon_core/hosts/harmony/js/publish/CollectPalettes.js @@ -6,8 +6,8 @@ // check if PypeHarmony is defined and if not, load it. if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } diff --git a/openpype/hosts/harmony/js/publish/ExtractPalette.js b/client/ayon_core/hosts/harmony/js/publish/ExtractPalette.js similarity index 88% rename from openpype/hosts/harmony/js/publish/ExtractPalette.js rename to client/ayon_core/hosts/harmony/js/publish/ExtractPalette.js index c4765354c4..fd96de518d 100644 --- a/openpype/hosts/harmony/js/publish/ExtractPalette.js +++ b/client/ayon_core/hosts/harmony/js/publish/ExtractPalette.js @@ -6,8 +6,8 @@ // check if PypeHarmony is defined and if not, load it. if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } /** diff --git a/openpype/hosts/harmony/js/publish/ExtractTemplate.js b/client/ayon_core/hosts/harmony/js/publish/ExtractTemplate.js similarity index 91% rename from openpype/hosts/harmony/js/publish/ExtractTemplate.js rename to client/ayon_core/hosts/harmony/js/publish/ExtractTemplate.js index 4676e1ff68..3ddad39117 100644 --- a/openpype/hosts/harmony/js/publish/ExtractTemplate.js +++ b/client/ayon_core/hosts/harmony/js/publish/ExtractTemplate.js @@ -6,8 +6,8 @@ // check if PypeHarmony is defined and if not, load it. 
if (typeof PypeHarmony === 'undefined') { - var OPENPYPE_HARMONY_JS = System.getenv('OPENPYPE_HARMONY_JS') + '/PypeHarmony.js'; - include(OPENPYPE_HARMONY_JS.replace(/\\/g, "/")); + var AYON_HARMONY_JS = System.getenv('AYON_HARMONY_JS') + '/PypeHarmony.js'; + include(AYON_HARMONY_JS.replace(/\\/g, "/")); } diff --git a/openpype/hosts/fusion/vendor/urllib3/packages/backports/__init__.py b/client/ayon_core/hosts/harmony/plugins/__init__.py similarity index 100% rename from openpype/hosts/fusion/vendor/urllib3/packages/backports/__init__.py rename to client/ayon_core/hosts/harmony/plugins/__init__.py diff --git a/openpype/hosts/harmony/plugins/create/create_farm_render.py b/client/ayon_core/hosts/harmony/plugins/create/create_farm_render.py similarity index 90% rename from openpype/hosts/harmony/plugins/create/create_farm_render.py rename to client/ayon_core/hosts/harmony/plugins/create/create_farm_render.py index c62a90a9aa..6b19764181 100644 --- a/openpype/hosts/harmony/plugins/create/create_farm_render.py +++ b/client/ayon_core/hosts/harmony/plugins/create/create_farm_render.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Create Composite node for render on farm.""" -import openpype.hosts.harmony.api as harmony -from openpype.hosts.harmony.api import plugin +import ayon_core.hosts.harmony.api as harmony +from ayon_core.hosts.harmony.api import plugin class CreateFarmRender(plugin.Creator): diff --git a/client/ayon_core/hosts/harmony/plugins/create/create_render.py b/client/ayon_core/hosts/harmony/plugins/create/create_render.py new file mode 100644 index 0000000000..0a2cd33551 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/create/create_render.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +"""Create render node.""" +import ayon_core.hosts.harmony.api as harmony +from ayon_core.hosts.harmony.api import plugin + + +class CreateRender(plugin.Creator): + """Composite node for publishing renders.""" + + name = "renderDefault" + label = "Render" + family = "render" + node_type = "WRITE" + + def __init__(self, *args, **kwargs): + """Constructor.""" + super(CreateRender, self).__init__(*args, **kwargs) + + def setup_node(self, node): + """Set render node.""" + self_name = self.__class__.__name__ + path = "render/{0}/{0}.".format(node.split("/")[-1]) + harmony.send( + { + "function": f"PypeHarmony.Creators.{self_name}.create", + "args": [node, path] + }) diff --git a/openpype/hosts/harmony/plugins/create/create_template.py b/client/ayon_core/hosts/harmony/plugins/create/create_template.py similarity index 85% rename from openpype/hosts/harmony/plugins/create/create_template.py rename to client/ayon_core/hosts/harmony/plugins/create/create_template.py index 534179b190..4f3fd85f00 100644 --- a/openpype/hosts/harmony/plugins/create/create_template.py +++ b/client/ayon_core/hosts/harmony/plugins/create/create_template.py @@ -1,4 +1,4 @@ -from openpype.hosts.harmony.api import plugin +from ayon_core.hosts.harmony.api import plugin class CreateTemplate(plugin.Creator): diff --git a/client/ayon_core/hosts/harmony/plugins/load/load_audio.py b/client/ayon_core/hosts/harmony/plugins/load/load_audio.py new file mode 100644 index 0000000000..14389166d7 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/load/load_audio.py @@ -0,0 +1,62 @@ +from ayon_core.pipeline import ( + load, + get_representation_path, +) +import ayon_core.hosts.harmony.api as harmony + +sig = harmony.signature() +func = """ +function getUniqueColumnName( column_prefix ) +{ + var suffix = 0; + // finds if unique name for a 
column + var column_name = column_prefix; + while(suffix < 2000) + { + if(!column.type(column_name)) + break; + + suffix = suffix + 1; + column_name = column_prefix + "_" + suffix; + } + return column_name; +} + +function %s(args) +{ + var uniqueColumnName = getUniqueColumnName(args[0]); + column.add(uniqueColumnName , "SOUND"); + column.importSound(uniqueColumnName, 1, args[1]); +} +%s +""" % (sig, sig) + + +class ImportAudioLoader(load.LoaderPlugin): + """Import audio.""" + + families = ["shot", "audio"] + representations = ["wav"] + label = "Import Audio" + + def load(self, context, name=None, namespace=None, data=None): + wav_file = get_representation_path(context["representation"]) + harmony.send( + {"function": func, "args": [context["subset"]["name"], wav_file]} + ) + + subset_name = context["subset"]["name"] + + return harmony.containerise( + subset_name, + namespace, + subset_name, + context, + self.__class__.__name__ + ) + + def update(self, container, representation): + pass + + def remove(self, container): + pass diff --git a/client/ayon_core/hosts/harmony/plugins/load/load_background.py b/client/ayon_core/hosts/harmony/plugins/load/load_background.py new file mode 100644 index 0000000000..1c61cfa7a4 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/load/load_background.py @@ -0,0 +1,373 @@ +import os +import json + +from ayon_core.pipeline import ( + load, + get_representation_path, +) +from ayon_core.pipeline.context_tools import is_representation_from_latest +import ayon_core.hosts.harmony.api as harmony + + +copy_files = """function copyFile(srcFilename, dstFilename) +{ + var srcFile = new PermanentFile(srcFilename); + var dstFile = new PermanentFile(dstFilename); + srcFile.copy(dstFile); +} +""" + +import_files = """var PNGTransparencyMode = 1; //Premultiplied with Black +var TGATransparencyMode = 0; //Premultiplied with Black +var SGITransparencyMode = 0; //Premultiplied with Black +var LayeredPSDTransparencyMode = 1; //Straight +var FlatPSDTransparencyMode = 2; //Premultiplied with White + +function getUniqueColumnName( column_prefix ) +{ + var suffix = 0; + // finds if unique name for a column + var column_name = column_prefix; + while(suffix < 2000) + { + if(!column.type(column_name)) + break; + + suffix = suffix + 1; + column_name = column_prefix + "_" + suffix; + } + return column_name; +} + +function import_files(args) +{ + var root = args[0]; + var files = args[1]; + var name = args[2]; + var start_frame = args[3]; + + var vectorFormat = null; + var extension = null; + var filename = files[0]; + + var pos = filename.lastIndexOf("."); + if( pos < 0 ) + return null; + + extension = filename.substr(pos+1).toLowerCase(); + + if(extension == "jpeg") + extension = "jpg"; + if(extension == "tvg") + { + vectorFormat = "TVG" + extension ="SCAN"; // element.add() will use this. + } + + var elemId = element.add( + name, + "BW", + scene.numberOfUnitsZ(), + extension.toUpperCase(), + vectorFormat + ); + if (elemId == -1) + { + // hum, unknown file type most likely -- let's skip it. + return null; // no read to add. 
+ } + + var uniqueColumnName = getUniqueColumnName(name); + column.add(uniqueColumnName , "DRAWING"); + column.setElementIdOfDrawing(uniqueColumnName, elemId); + + var read = node.add(root, name, "READ", 0, 0, 0); + var transparencyAttr = node.getAttr( + read, frame.current(), "READ_TRANSPARENCY" + ); + var opacityAttr = node.getAttr(read, frame.current(), "OPACITY"); + transparencyAttr.setValue(true); + opacityAttr.setValue(true); + + var alignmentAttr = node.getAttr(read, frame.current(), "ALIGNMENT_RULE"); + alignmentAttr.setValue("ASIS"); + + var transparencyModeAttr = node.getAttr( + read, frame.current(), "applyMatteToColor" + ); + if (extension == "png") + transparencyModeAttr.setValue(PNGTransparencyMode); + if (extension == "tga") + transparencyModeAttr.setValue(TGATransparencyMode); + if (extension == "sgi") + transparencyModeAttr.setValue(SGITransparencyMode); + if (extension == "psd") + transparencyModeAttr.setValue(FlatPSDTransparencyMode); + if (extension == "jpg") + transparencyModeAttr.setValue(LayeredPSDTransparencyMode); + + node.linkAttr(read, "DRAWING.ELEMENT", uniqueColumnName); + + if (files.length == 1) + { + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, 1, true); + // Get the actual path, in tmp folder. + var drawingFilePath = Drawing.filename(elemId, "1"); + copyFile(files[0], drawingFilePath); + // Expose the image for the entire frame range. + for( var i =0; i <= frame.numberOf() - 1; ++i) + { + timing = start_frame + i + column.setEntry(uniqueColumnName, 1, timing, "1"); + } + } else { + // Create a drawing for each file. + for( var i =0; i <= files.length - 1; ++i) + { + timing = start_frame + i + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, timing, true); + // Get the actual path, in tmp folder. + var drawingFilePath = Drawing.filename(elemId, timing.toString()); + copyFile( files[i], drawingFilePath ); + + column.setEntry(uniqueColumnName, 1, timing, timing.toString()); + } + } + + var green_color = new ColorRGBA(0, 255, 0, 255); + node.setColor(read, green_color); + + return read; +} +import_files +""" + +replace_files = """var PNGTransparencyMode = 1; //Premultiplied with Black +var TGATransparencyMode = 0; //Premultiplied with Black +var SGITransparencyMode = 0; //Premultiplied with Black +var LayeredPSDTransparencyMode = 1; //Straight +var FlatPSDTransparencyMode = 2; //Premultiplied with White + +function replace_files(args) +{ + var files = args[0]; + MessageLog.trace(files); + MessageLog.trace(files.length); + var _node = args[1]; + var start_frame = args[2]; + + var _column = node.linkedColumn(_node, "DRAWING.ELEMENT"); + var elemId = column.getElementIdOfDrawing(_column); + + // Delete existing drawings. 
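+    // The drawing column still holds timings from the previously loaded
+    // version; clearing them first ensures only the incoming files stay
+    // exposed on the column.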
+ var timings = column.getDrawingTimings(_column); + for( var i =0; i <= timings.length - 1; ++i) + { + column.deleteDrawingAt(_column, parseInt(timings[i])); + } + + + var filename = files[0]; + var pos = filename.lastIndexOf("."); + if( pos < 0 ) + return null; + var extension = filename.substr(pos+1).toLowerCase(); + + if(extension == "jpeg") + extension = "jpg"; + + var transparencyModeAttr = node.getAttr( + _node, frame.current(), "applyMatteToColor" + ); + if (extension == "png") + transparencyModeAttr.setValue(PNGTransparencyMode); + if (extension == "tga") + transparencyModeAttr.setValue(TGATransparencyMode); + if (extension == "sgi") + transparencyModeAttr.setValue(SGITransparencyMode); + if (extension == "psd") + transparencyModeAttr.setValue(FlatPSDTransparencyMode); + if (extension == "jpg") + transparencyModeAttr.setValue(LayeredPSDTransparencyMode); + + if (files.length == 1) + { + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, 1, true); + // Get the actual path, in tmp folder. + var drawingFilePath = Drawing.filename(elemId, "1"); + copyFile(files[0], drawingFilePath); + MessageLog.trace(files[0]); + MessageLog.trace(drawingFilePath); + // Expose the image for the entire frame range. + for( var i =0; i <= frame.numberOf() - 1; ++i) + { + timing = start_frame + i + column.setEntry(_column, 1, timing, "1"); + } + } else { + // Create a drawing for each file. + for( var i =0; i <= files.length - 1; ++i) + { + timing = start_frame + i + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, timing, true); + // Get the actual path, in tmp folder. + var drawingFilePath = Drawing.filename(elemId, timing.toString()); + copyFile( files[i], drawingFilePath ); + + column.setEntry(_column, 1, timing, timing.toString()); + } + } + + var green_color = new ColorRGBA(0, 255, 0, 255); + node.setColor(_node, green_color); +} +replace_files +""" + + +class BackgroundLoader(load.LoaderPlugin): + """Load images + Stores the imported asset in a container named after the asset. 
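+
+    The JSON representation is expected to list layers as "children"
+    entries (optionally nested one level) that carry a "filename" key,
+    e.g. (layer names here are illustrative only)::
+
+        {"children": [{"filename": "BG_L010.png"},
+                      {"children": [{"filename": "BG_L020.png"}]}]}
+
+    Each such file is imported as one READ node under "Top".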
+ """ + families = ["background"] + representations = ["json"] + + def load(self, context, name=None, namespace=None, data=None): + + path = self.filepath_from_context(context) + with open(path) as json_file: + data = json.load(json_file) + + layers = list() + + for child in data['children']: + if child.get("filename"): + layers.append(child["filename"]) + else: + for layer in child['children']: + if layer.get("filename"): + layers.append(layer["filename"]) + + bg_folder = os.path.dirname(path) + + subset_name = context["subset"]["name"] + # read_node_name += "_{}".format(uuid.uuid4()) + container_nodes = [] + + for layer in sorted(layers): + file_to_import = [ + os.path.join(bg_folder, layer).replace("\\", "/") + ] + + read_node = harmony.send( + { + "function": copy_files + import_files, + "args": ["Top", file_to_import, layer, 1] + } + )["result"] + container_nodes.append(read_node) + + return harmony.containerise( + subset_name, + namespace, + subset_name, + context, + self.__class__.__name__, + nodes=container_nodes + ) + + def update(self, container, representation): + path = get_representation_path(representation) + with open(path) as json_file: + data = json.load(json_file) + + layers = list() + + for child in data['children']: + if child.get("filename"): + print(child["filename"]) + layers.append(child["filename"]) + else: + for layer in child['children']: + if layer.get("filename"): + print(layer["filename"]) + layers.append(layer["filename"]) + + bg_folder = os.path.dirname(path) + + print(container) + + is_latest = is_representation_from_latest(representation) + for layer in sorted(layers): + file_to_import = [ + os.path.join(bg_folder, layer).replace("\\", "/") + ] + print(20 * "#") + print(f"FILE TO REPLACE: {file_to_import}") + print(f"LAYER: {layer}") + node = harmony.find_node_by_name(layer, "READ") + print(f"{node}") + + if node in container['nodes']: + harmony.send( + { + "function": copy_files + replace_files, + "args": [file_to_import, node, 1] + } + ) + else: + read_node = harmony.send( + { + "function": copy_files + import_files, + "args": ["Top", file_to_import, layer, 1] + } + )["result"] + container['nodes'].append(read_node) + + # Colour node. 
+ sig = harmony.signature("set_color") + func = """function %s(args){ + for( var i =0; i <= args[0].length - 1; ++i) + { + var red_color = new ColorRGBA(255, 0, 0, 255); + var green_color = new ColorRGBA(0, 255, 0, 255); + if (args[1] == "red"){ + node.setColor(args[0], red_color); + } + if (args[1] == "green"){ + node.setColor(args[0], green_color); + } + } + } + %s + """ % (sig, sig) + if is_latest: + harmony.send({"function": func, "args": [node, "green"]}) + else: + harmony.send({"function": func, "args": [node, "red"]}) + + harmony.imprint( + container['name'], {"representation": str(representation["_id"]), + "nodes": container['nodes']} + ) + + def remove(self, container): + for node in container.get("nodes"): + + func = """function deleteNode(_node) + { + node.deleteNode(_node, true, true); + } + deleteNode + """ + harmony.send( + {"function": func, "args": [node]} + ) + harmony.imprint(container['name'], {}, remove=True) + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/client/ayon_core/hosts/harmony/plugins/load/load_imagesequence.py similarity index 96% rename from openpype/hosts/harmony/plugins/load/load_imagesequence.py rename to client/ayon_core/hosts/harmony/plugins/load/load_imagesequence.py index 754f82e5d5..4d87272de8 100644 --- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py +++ b/client/ayon_core/hosts/harmony/plugins/load/load_imagesequence.py @@ -6,12 +6,12 @@ import clique -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.pipeline.context_tools import is_representation_from_latest -import openpype.hosts.harmony.api as harmony +from ayon_core.pipeline.context_tools import is_representation_from_latest +import ayon_core.hosts.harmony.api as harmony class ImageSequenceLoader(load.LoaderPlugin): diff --git a/openpype/hosts/harmony/plugins/load/load_palette.py b/client/ayon_core/hosts/harmony/plugins/load/load_palette.py similarity index 95% rename from openpype/hosts/harmony/plugins/load/load_palette.py rename to client/ayon_core/hosts/harmony/plugins/load/load_palette.py index 1da3e61e1b..aa5894e026 100644 --- a/openpype/hosts/harmony/plugins/load/load_palette.py +++ b/client/ayon_core/hosts/harmony/plugins/load/load_palette.py @@ -1,11 +1,11 @@ import os import shutil -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -import openpype.hosts.harmony.api as harmony +import ayon_core.hosts.harmony.api as harmony class ImportPaletteLoader(load.LoaderPlugin): diff --git a/openpype/hosts/harmony/plugins/load/load_template.py b/client/ayon_core/hosts/harmony/plugins/load/load_template.py similarity index 96% rename from openpype/hosts/harmony/plugins/load/load_template.py rename to client/ayon_core/hosts/harmony/plugins/load/load_template.py index a78a1bf1ec..d26f148c09 100644 --- a/openpype/hosts/harmony/plugins/load/load_template.py +++ b/client/ayon_core/hosts/harmony/plugins/load/load_template.py @@ -6,12 +6,12 @@ import shutil import uuid -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.pipeline.context_tools import is_representation_from_latest -import openpype.hosts.harmony.api as harmony +from ayon_core.pipeline.context_tools import is_representation_from_latest +import ayon_core.hosts.harmony.api as harmony class TemplateLoader(load.LoaderPlugin): diff --git 
a/openpype/hosts/harmony/plugins/load/load_template_workfile.py b/client/ayon_core/hosts/harmony/plugins/load/load_template_workfile.py similarity index 95% rename from openpype/hosts/harmony/plugins/load/load_template_workfile.py rename to client/ayon_core/hosts/harmony/plugins/load/load_template_workfile.py index 2b84a43b35..0ea46f8f67 100644 --- a/openpype/hosts/harmony/plugins/load/load_template_workfile.py +++ b/client/ayon_core/hosts/harmony/plugins/load/load_template_workfile.py @@ -3,11 +3,11 @@ import os import shutil -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -import openpype.hosts.harmony.api as harmony +import ayon_core.hosts.harmony.api as harmony class ImportTemplateLoader(load.LoaderPlugin): diff --git a/openpype/hosts/harmony/plugins/publish/collect_audio.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_audio.py similarity index 100% rename from openpype/hosts/harmony/plugins/publish/collect_audio.py rename to client/ayon_core/hosts/harmony/plugins/publish/collect_audio.py diff --git a/client/ayon_core/hosts/harmony/plugins/publish/collect_current_file.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_current_file.py new file mode 100644 index 0000000000..ebe123eacc --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_current_file.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +"""Collect information about current file.""" +import os + +import pyblish.api +import ayon_core.hosts.harmony.api as harmony + + +class CollectCurrentFile(pyblish.api.ContextPlugin): + """Inject the current working file into context.""" + + order = pyblish.api.CollectorOrder - 0.5 + label = "Current File" + hosts = ["harmony"] + + def process(self, context): + """Inject the current working file.""" + self_name = self.__class__.__name__ + + current_file = harmony.send( + {"function": f"PypeHarmony.Publish.{self_name}.collect"})["result"] + context.data["currentFile"] = os.path.normpath(current_file) diff --git a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py similarity index 97% rename from openpype/hosts/harmony/plugins/publish/collect_farm_render.py rename to client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py index af825c052a..faeff7bddd 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py @@ -4,10 +4,10 @@ import attr -from openpype.lib import get_formatted_current_time -from openpype.pipeline import publish -from openpype.pipeline.publish import RenderInstance -import openpype.hosts.harmony.api as harmony +from ayon_core.lib import get_formatted_current_time +from ayon_core.pipeline import publish +from ayon_core.pipeline.publish import RenderInstance +import ayon_core.hosts.harmony.api as harmony @attr.s diff --git a/client/ayon_core/hosts/harmony/plugins/publish/collect_instances.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_instances.py new file mode 100644 index 0000000000..3eb689aff6 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_instances.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +"""Collect instances in Harmony.""" +import json + +import pyblish.api +import ayon_core.hosts.harmony.api as harmony + + +class CollectInstances(pyblish.api.ContextPlugin): + """Gather instances by nodes metadata. 
+ + This collector takes into account assets that are associated with + a composite node and marked with a unique identifier. + + Identifier: + id (str): "pyblish.avalon.instance" + """ + + label = "Instances" + order = pyblish.api.CollectorOrder + hosts = ["harmony"] + families_mapping = { + "render": ["review", "ftrack"], + "harmony.template": [], + "palette": ["palette", "ftrack"] + } + + pair_media = True + + def process(self, context): + """Plugin entry point. + + Args: + context (:class:`pyblish.api.Context`): Context data. + + """ + nodes = harmony.send( + {"function": "node.subNodes", "args": ["Top"]} + )["result"] + + for node in nodes: + data = harmony.read(node) + + # Skip non-tagged nodes. + if not data: + continue + + # Skip containers. + if "container" in data["id"]: + continue + + # skip render farm family as it is collected separately + if data["family"] == "renderFarm": + continue + + instance = context.create_instance(node.split("/")[-1]) + instance.data.update(data) + instance.data["setMembers"] = [node] + instance.data["publish"] = harmony.send( + {"function": "node.getEnable", "args": [node]} + )["result"] + instance.data["families"] = self.families_mapping[data["family"]] + + # If set in plugin, pair the scene Version in ftrack with + # thumbnails and review media. + if (self.pair_media and instance.data["family"] == "scene"): + context.data["scene_instance"] = instance + + # Produce diagnostic message for any graphical + # user interface interested in visualising it. + self.log.info( + "Found: \"{0}\": \n{1}".format( + instance.data["name"], json.dumps(instance.data, indent=4) + ) + ) diff --git a/openpype/hosts/harmony/plugins/publish/collect_palettes.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_palettes.py similarity index 97% rename from openpype/hosts/harmony/plugins/publish/collect_palettes.py rename to client/ayon_core/hosts/harmony/plugins/publish/collect_palettes.py index e19057e302..9343fab86d 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_palettes.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_palettes.py @@ -4,7 +4,7 @@ import re import pyblish.api -import openpype.hosts.harmony.api as harmony +import ayon_core.hosts.harmony.api as harmony class CollectPalettes(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/harmony/plugins/publish/collect_scene.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_scene.py similarity index 98% rename from openpype/hosts/harmony/plugins/publish/collect_scene.py rename to client/ayon_core/hosts/harmony/plugins/publish/collect_scene.py index 3e65edf7d6..a60e44b69b 100644 --- a/openpype/hosts/harmony/plugins/publish/collect_scene.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_scene.py @@ -3,7 +3,7 @@ import os import pyblish.api -import openpype.hosts.harmony.api as harmony +import ayon_core.hosts.harmony.api as harmony class CollectScene(pyblish.api.ContextPlugin): diff --git a/client/ayon_core/hosts/harmony/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_workfile.py new file mode 100644 index 0000000000..4be2a0fc26 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_workfile.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +"""Collect current workfile from Harmony.""" +import os +import pyblish.api + +from ayon_core.pipeline.create import get_subset_name + + +class CollectWorkfile(pyblish.api.ContextPlugin): + """Collect current script for publish.""" + + order = 
pyblish.api.CollectorOrder + 0.1 + label = "Collect Workfile" + hosts = ["harmony"] + + def process(self, context): + """Plugin entry point.""" + family = "workfile" + basename = os.path.basename(context.data["currentFile"]) + subset = get_subset_name( + family, + "", + context.data["anatomyData"]["task"]["name"], + context.data["assetEntity"], + context.data["anatomyData"]["project"]["name"], + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] + ) + + # Create instance + instance = context.create_instance(subset) + instance.data.update({ + "subset": subset, + "label": basename, + "name": basename, + "family": family, + "families": [family], + "representations": [], + "asset": context.data["asset"] + }) diff --git a/openpype/hosts/harmony/plugins/publish/extract_palette.py b/client/ayon_core/hosts/harmony/plugins/publish/extract_palette.py similarity index 98% rename from openpype/hosts/harmony/plugins/publish/extract_palette.py rename to client/ayon_core/hosts/harmony/plugins/publish/extract_palette.py index 69c6e098ff..9f12c78d9f 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_palette.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/extract_palette.py @@ -5,8 +5,8 @@ from PIL import Image, ImageDraw, ImageFont -import openpype.hosts.harmony.api as harmony -from openpype.pipeline import publish +import ayon_core.hosts.harmony.api as harmony +from ayon_core.pipeline import publish class ExtractPalette(publish.Extractor): diff --git a/openpype/hosts/harmony/plugins/publish/extract_render.py b/client/ayon_core/hosts/harmony/plugins/publish/extract_render.py similarity index 97% rename from openpype/hosts/harmony/plugins/publish/extract_render.py rename to client/ayon_core/hosts/harmony/plugins/publish/extract_render.py index 96a375716b..391661a118 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_render.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/extract_render.py @@ -3,8 +3,8 @@ import subprocess import pyblish.api -import openpype.hosts.harmony.api as harmony -import openpype.lib +import ayon_core.hosts.harmony.api as harmony +import ayon_core.lib import clique @@ -94,7 +94,7 @@ def process(self, instance): # Generate thumbnail. 
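        # One frame of the rendered sequence is handed to ffmpeg and saved
        # next to the render as thumbnail.png.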
thumbnail_path = os.path.join(path, "thumbnail.png") - args = openpype.lib.get_ffmpeg_tool_args( + args = ayon_core.lib.get_ffmpeg_tool_args( "ffmpeg", "-y", "-i", os.path.join(path, list(collections[0])[0]), diff --git a/client/ayon_core/hosts/harmony/plugins/publish/extract_save_scene.py b/client/ayon_core/hosts/harmony/plugins/publish/extract_save_scene.py new file mode 100644 index 0000000000..1be74ba3a4 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/publish/extract_save_scene.py @@ -0,0 +1,13 @@ +import pyblish.api +import ayon_core.hosts.harmony.api as harmony + + +class ExtractSaveScene(pyblish.api.ContextPlugin): + """Save scene for extraction.""" + + label = "Extract Save Scene" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["harmony"] + + def process(self, context): + harmony.save_scene() diff --git a/openpype/hosts/harmony/plugins/publish/extract_template.py b/client/ayon_core/hosts/harmony/plugins/publish/extract_template.py similarity index 97% rename from openpype/hosts/harmony/plugins/publish/extract_template.py rename to client/ayon_core/hosts/harmony/plugins/publish/extract_template.py index e75459fe1e..c481a34454 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_template.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/extract_template.py @@ -3,8 +3,8 @@ import os import shutil -from openpype.pipeline import publish -import openpype.hosts.harmony.api as harmony +from ayon_core.pipeline import publish +import ayon_core.hosts.harmony.api as harmony class ExtractTemplate(publish.Extractor): diff --git a/client/ayon_core/hosts/harmony/plugins/publish/extract_workfile.py b/client/ayon_core/hosts/harmony/plugins/publish/extract_workfile.py new file mode 100644 index 0000000000..3081a57157 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/publish/extract_workfile.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +"""Extract work file.""" +import os +import shutil +from zipfile import ZipFile + +from ayon_core.pipeline import publish + + +class ExtractWorkfile(publish.Extractor): + """Extract and zip complete workfile folder into zip.""" + + label = "Extract Workfile" + hosts = ["harmony"] + families = ["workfile"] + + def process(self, instance): + """Plugin entry point.""" + staging_dir = self.staging_dir(instance) + filepath = os.path.join(staging_dir, "{}.tpl".format(instance.name)) + src = os.path.dirname(instance.context.data["currentFile"]) + self.log.info("Copying to {}".format(filepath)) + shutil.copytree(src, filepath) + + # Prep representation. 
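+        # The copied .tpl folder is zipped in the staging directory and the
+        # archive is verified with testzip() before it becomes the single
+        # "tpl" representation.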
+ os.chdir(staging_dir) + shutil.make_archive( + f"{instance.name}", + "zip", + os.path.join(staging_dir, f"{instance.name}.tpl") + ) + # Check if archive is ok + with ZipFile(os.path.basename(f"{instance.name}.zip")) as zr: + if zr.testzip() is not None: + raise Exception("File archive is corrupted.") + + representation = { + "name": "tpl", + "ext": "zip", + "files": f"{instance.name}.zip", + "stagingDir": staging_dir + } + instance.data["representations"] = [representation] diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_audio.xml b/client/ayon_core/hosts/harmony/plugins/publish/help/validate_audio.xml similarity index 100% rename from openpype/hosts/harmony/plugins/publish/help/validate_audio.xml rename to client/ayon_core/hosts/harmony/plugins/publish/help/validate_audio.xml diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_instances.xml b/client/ayon_core/hosts/harmony/plugins/publish/help/validate_instances.xml similarity index 100% rename from openpype/hosts/harmony/plugins/publish/help/validate_instances.xml rename to client/ayon_core/hosts/harmony/plugins/publish/help/validate_instances.xml diff --git a/openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml b/client/ayon_core/hosts/harmony/plugins/publish/help/validate_scene_settings.xml similarity index 100% rename from openpype/hosts/harmony/plugins/publish/help/validate_scene_settings.xml rename to client/ayon_core/hosts/harmony/plugins/publish/help/validate_scene_settings.xml diff --git a/client/ayon_core/hosts/harmony/plugins/publish/increment_workfile.py b/client/ayon_core/hosts/harmony/plugins/publish/increment_workfile.py new file mode 100644 index 0000000000..16332a5283 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/publish/increment_workfile.py @@ -0,0 +1,37 @@ +import os + +import pyblish.api +from ayon_core.pipeline.publish import get_errored_plugins_from_context +from ayon_core.lib import version_up +import ayon_core.hosts.harmony.api as harmony + + +class IncrementWorkfile(pyblish.api.InstancePlugin): + """Increment the current workfile. + + Saves the current scene with an increased version number. + """ + + label = "Increment Workfile" + order = pyblish.api.IntegratorOrder + 9.0 + hosts = ["harmony"] + families = ["workfile"] + optional = True + + def process(self, instance): + errored_plugins = get_errored_plugins_from_context(instance.context) + if errored_plugins: + raise RuntimeError( + "Skipping incrementing current file because publishing failed." 
+ ) + + scene_dir = version_up( + os.path.dirname(instance.context.data["currentFile"]) + ) + scene_path = os.path.join( + scene_dir, os.path.basename(scene_dir) + ".xstage" + ) + + harmony.save_scene_as(scene_path) + + self.log.info("Incremented workfile to: {}".format(scene_path)) diff --git a/openpype/hosts/harmony/plugins/publish/validate_audio.py b/client/ayon_core/hosts/harmony/plugins/publish/validate_audio.py similarity index 92% rename from openpype/hosts/harmony/plugins/publish/validate_audio.py rename to client/ayon_core/hosts/harmony/plugins/publish/validate_audio.py index e9b8609803..808734a061 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_audio.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/validate_audio.py @@ -2,9 +2,9 @@ import pyblish.api -import openpype.hosts.harmony.api as harmony +import ayon_core.hosts.harmony.api as harmony -from openpype.pipeline import PublishXmlValidationError +from ayon_core.pipeline import PublishXmlValidationError class ValidateAudio(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/harmony/plugins/publish/validate_instances.py b/client/ayon_core/hosts/harmony/plugins/publish/validate_instances.py similarity index 92% rename from openpype/hosts/harmony/plugins/publish/validate_instances.py rename to client/ayon_core/hosts/harmony/plugins/publish/validate_instances.py index 7183de6048..a57a863d6f 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_instances.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/validate_instances.py @@ -1,8 +1,8 @@ import pyblish.api -import openpype.hosts.harmony.api as harmony -from openpype.pipeline import get_current_asset_name -from openpype.pipeline.publish import ( +import ayon_core.hosts.harmony.api as harmony +from ayon_core.pipeline import get_current_asset_name +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishXmlValidationError, ) diff --git a/client/ayon_core/hosts/harmony/plugins/publish/validate_scene_settings.py b/client/ayon_core/hosts/harmony/plugins/publish/validate_scene_settings.py new file mode 100644 index 0000000000..0cf96e70b0 --- /dev/null +++ b/client/ayon_core/hosts/harmony/plugins/publish/validate_scene_settings.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +"""Validate scene settings.""" +import os +import json +import re + +import pyblish.api + +import ayon_core.hosts.harmony.api as harmony +from ayon_core.pipeline import PublishXmlValidationError + + +class ValidateSceneSettingsRepair(pyblish.api.Action): + """Repair the instance.""" + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + """Repair action entry point.""" + expected = harmony.get_asset_settings() + asset_settings = _update_frames(dict.copy(expected)) + asset_settings["frameStart"] = 1 + asset_settings["frameEnd"] = asset_settings["frameEnd"] + \ + asset_settings["handleEnd"] + harmony.set_scene_settings(asset_settings) + if not os.path.exists(context.data["scenePath"]): + self.log.info("correcting scene name") + scene_dir = os.path.dirname(context.data["currentFile"]) + scene_path = os.path.join( + scene_dir, os.path.basename(scene_dir) + ".xstage" + ) + harmony.save_scene_as(scene_path) + + +class ValidateSceneSettings(pyblish.api.InstancePlugin): + """Ensure the scene settings are in sync with database.""" + + order = pyblish.api.ValidatorOrder + label = "Validate Scene Settings" + families = ["workfile"] + hosts = ["harmony"] + actions = [ValidateSceneSettingsRepair] + optional = True + + # skip frameEnd 
check if asset contains any of: + frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"] # regex + + # skip resolution check if Task name matches any of regex patterns + skip_resolution_check = ["render", "Render"] # regex + + # skip frameStart, frameEnd check if Task name matches any of regex patt. + skip_timelines_check = [] # regex + + def process(self, instance): + """Plugin entry point.""" + + # TODO 'get_asset_settings' could expect asset document as argument + # which is available on 'context.data["assetEntity"]' + # - the same approach can be used in 'ValidateSceneSettingsRepair' + expected_settings = harmony.get_asset_settings() + self.log.info("scene settings from DB:{}".format(expected_settings)) + expected_settings.pop("entityType") # not useful for the validation + + expected_settings = _update_frames(dict.copy(expected_settings)) + expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\ + expected_settings["handleEnd"] + + task_name = instance.context.data["task"] + + if (any(re.search(pattern, task_name) + for pattern in self.skip_resolution_check)): + self.log.info("Skipping resolution check because of " + "task name and pattern {}".format( + self.skip_resolution_check)) + expected_settings.pop("resolutionWidth") + expected_settings.pop("resolutionHeight") + + if (any(re.search(pattern, os.getenv('AVALON_TASK')) + for pattern in self.skip_timelines_check)): + self.log.info("Skipping frames check because of " + "task name and pattern {}".format( + self.skip_timelines_check)) + expected_settings.pop('frameStart', None) + expected_settings.pop('frameEnd', None) + expected_settings.pop('frameStartHandle', None) + expected_settings.pop('frameEndHandle', None) + + asset_name = instance.context.data['anatomyData']['asset'] + if any(re.search(pattern, asset_name) + for pattern in self.frame_check_filter): + self.log.info("Skipping frames check because of " + "task name and pattern {}".format( + self.frame_check_filter)) + expected_settings.pop('frameStart', None) + expected_settings.pop('frameEnd', None) + expected_settings.pop('frameStartHandle', None) + expected_settings.pop('frameEndHandle', None) + + # handle case where ftrack uses only two decimal places + # 23.976023976023978 vs. 23.98 + fps = instance.context.data.get("frameRate") + if isinstance(instance.context.data.get("frameRate"), float): + fps = float( + "{:.2f}".format(instance.context.data.get("frameRate"))) + + self.log.debug("filtered settings: {}".format(expected_settings)) + + current_settings = { + "fps": fps, + "frameStart": instance.context.data["frameStart"], + "frameEnd": instance.context.data["frameEnd"], + "handleStart": instance.context.data.get("handleStart"), + "handleEnd": instance.context.data.get("handleEnd"), + "frameStartHandle": instance.context.data.get("frameStartHandle"), + "frameEndHandle": instance.context.data.get("frameEndHandle"), + "resolutionWidth": instance.context.data.get("resolutionWidth"), + "resolutionHeight": instance.context.data.get("resolutionHeight"), + } + self.log.debug("current scene settings {}".format(current_settings)) + + invalid_settings = [] + invalid_keys = set() + for key, value in expected_settings.items(): + if value != current_settings[key]: + invalid_settings.append( + "{} expected: {} found: {}".format(key, value, + current_settings[key])) + invalid_keys.add(key) + + if ((expected_settings["handleStart"] + or expected_settings["handleEnd"]) + and invalid_settings): + msg = "Handles included in calculation. 
Remove handles in DB " +\
+                  "or extend frame range in timeline."
+            # 'invalid_settings' holds plain strings, so append the hint
+            # as another entry instead of indexing into the last one.
+            invalid_settings.append(msg)
+
+        msg = "Found invalid settings:\n{}".format(
+            json.dumps(invalid_settings, sort_keys=True, indent=4)
+        )
+
+        if invalid_settings:
+            invalid_keys_str = ",".join(invalid_keys)
+            break_str = "<br/>"
" + invalid_setting_str = "Found invalid settings:
{}".\ + format(break_str.join(invalid_settings)) + + formatting_data = { + "invalid_setting_str": invalid_setting_str, + "invalid_keys_str": invalid_keys_str + } + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + scene_url = instance.context.data.get("scenePath") + if not os.path.exists(scene_url): + msg = "Scene file {} not found (saved under wrong name)".format( + scene_url + ) + formatting_data = { + "scene_url": scene_url + } + raise PublishXmlValidationError(self, msg, key="file_not_found", + formatting_data=formatting_data) + + +def _update_frames(expected_settings): + """ + Calculate proper frame range including handles set in DB. + + Harmony requires rendering from 1, so frame range is always moved + to 1. + Args: + expected_settings (dict): pulled from DB + + Returns: + modified expected_setting (dict) + """ + frames_count = expected_settings["frameEnd"] - \ + expected_settings["frameStart"] + 1 + + expected_settings["frameStart"] = 1.0 + expected_settings["handleStart"] + expected_settings["frameEnd"] = \ + expected_settings["frameStart"] + frames_count - 1 + return expected_settings diff --git a/openpype/hosts/harmony/vendor/.eslintrc.json b/client/ayon_core/hosts/harmony/vendor/.eslintrc.json similarity index 100% rename from openpype/hosts/harmony/vendor/.eslintrc.json rename to client/ayon_core/hosts/harmony/vendor/.eslintrc.json diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/.gitattributes b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/.gitattributes similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/.gitattributes rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/.gitattributes diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/.gitignore b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/.gitignore similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/.gitignore rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/.gitignore diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/Install.bat b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/Install.bat similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/Install.bat rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/Install.bat diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/LICENSE b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/LICENSE similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/LICENSE rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/LICENSE diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/README.md b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/README.md similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/README.md rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/README.md diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/build_doc.bat b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/build_doc.bat similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/build_doc.bat rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/build_doc.bat diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/documentation.json b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/documentation.json similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/documentation.json rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/documentation.json diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/install.sh 
b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/install.sh similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/install.sh rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/install.sh diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/oH_DOM.jpg b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/oH_DOM.jpg similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/oH_DOM.jpg rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/oH_DOM.jpg diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js diff --git 
a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js 
b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeAttributes.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeAttributes.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeAttributes.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeAttributes.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_palette.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_palette.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_palette.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_palette.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_path.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_path.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_path.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_path.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferencedoc.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferencedoc.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferencedoc.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferencedoc.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferences.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferences.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferences.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_preferences.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_scene.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_scene.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_scene.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_scene.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_threading.js 
b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_threading.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_threading.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_threading.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_timeline.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_timeline.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_timeline.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_timeline.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_tool.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_tool.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_tool.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_tool.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_toolInstall.ui b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_toolInstall.ui similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_toolInstall.ui rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_toolInstall.ui diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_install.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony_install.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_install.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony_install.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/package.json b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/package.json similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/package.json rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/package.json diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/reference/Reference_view_currentToolManager().txt b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/reference/Reference_view_currentToolManager().txt similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/reference/Reference_view_currentToolManager().txt rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/reference/Reference_view_currentToolManager().txt diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/tbpackage.json b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/tbpackage.json similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/tbpackage.json rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/tbpackage.json diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/INSTALL b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/INSTALL similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/INSTALL rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/INSTALL diff --git 
a/openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/README b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/README similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/README rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/README diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_anim_tools.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_anim_tools.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_anim_tools.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_anim_tools.js diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_basic_backdropPicker.ui b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_basic_backdropPicker.ui similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_basic_backdropPicker.ui rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_basic_backdropPicker.ui diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_rigging_tools.js b/client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_rigging_tools.js similarity index 100% rename from openpype/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_rigging_tools.js rename to client/ayon_core/hosts/harmony/vendor/OpenHarmony/tools/OpenHarmony_basic/openHarmony_rigging_tools.js diff --git a/openpype/hosts/hiero/__init__.py b/client/ayon_core/hosts/hiero/__init__.py similarity index 100% rename from openpype/hosts/hiero/__init__.py rename to client/ayon_core/hosts/hiero/__init__.py diff --git a/client/ayon_core/hosts/hiero/addon.py b/client/ayon_core/hosts/hiero/addon.py new file mode 100644 index 0000000000..447700e2e1 --- /dev/null +++ b/client/ayon_core/hosts/hiero/addon.py @@ -0,0 +1,67 @@ +import os +import platform +from ayon_core.modules import OpenPypeModule, IHostAddon + +HIERO_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class HieroAddon(OpenPypeModule, IHostAddon): + name = "hiero" + host_name = "hiero" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to HIERO_PLUGIN_PATH + new_hiero_paths = [ + os.path.join(HIERO_ROOT_DIR, "api", "startup") + ] + old_hiero_path = env.get("HIERO_PLUGIN_PATH") or "" + for path in old_hiero_path.split(os.pathsep): + if not path: + continue + + norm_path = os.path.normpath(path) + if norm_path not in new_hiero_paths: + new_hiero_paths.append(norm_path) + + env["HIERO_PLUGIN_PATH"] = os.pathsep.join(new_hiero_paths) + # Remove auto screen scale factor for Qt + # - let Hiero decide it's value + env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) + # Remove tkinter library paths if are set + env.pop("TK_LIBRARY", None) + env.pop("TCL_LIBRARY", None) + + # Add vendor to PYTHONPATH + python_path = env["PYTHONPATH"] + python_path_parts = [] + if python_path: + python_path_parts = python_path.split(os.pathsep) + vendor_path = os.path.join(HIERO_ROOT_DIR, "vendor") + python_path_parts.insert(0, vendor_path) + env["PYTHONPATH"] = os.pathsep.join(python_path_parts) + + # Set default values if are not already set via settings + defaults = 
{
+            "LOGLEVEL": "DEBUG"
+        }
+        for key, value in defaults.items():
+            if not env.get(key):
+                env[key] = value
+
+        # Try to add QuickTime to PATH
+        quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem"
+        # platform.system() returns capitalized names ("Windows"), so
+        # compare case-insensitively.
+        if (platform.system().lower() == "windows"
+                and os.path.exists(quick_time_path)):
+            path_value = env.get("PATH") or ""
+            path_paths = [
+                path
+                for path in path_value.split(os.pathsep)
+                if path
+            ]
+            path_paths.append(quick_time_path)
+            env["PATH"] = os.pathsep.join(path_paths)
+
+    def get_workfile_extensions(self):
+        return [".hrox"]
diff --git a/openpype/hosts/hiero/api/__init__.py b/client/ayon_core/hosts/hiero/api/__init__.py
similarity index 100%
rename from openpype/hosts/hiero/api/__init__.py
rename to client/ayon_core/hosts/hiero/api/__init__.py
diff --git a/openpype/hosts/hiero/api/constants.py b/client/ayon_core/hosts/hiero/api/constants.py
similarity index 100%
rename from openpype/hosts/hiero/api/constants.py
rename to client/ayon_core/hosts/hiero/api/constants.py
diff --git a/client/ayon_core/hosts/hiero/api/events.py b/client/ayon_core/hosts/hiero/api/events.py
new file mode 100644
index 0000000000..0e509747d5
--- /dev/null
+++ b/client/ayon_core/hosts/hiero/api/events.py
@@ -0,0 +1,130 @@
+import os
+import hiero.core.events
+from ayon_core.lib import Logger, register_event_callback
+from .lib import (
+    sync_avalon_data_to_workfile,
+    launch_workfiles_app,
+    selection_changed_timeline,
+    before_project_save,
+)
+from .tags import add_tags_to_workfile
+from .menu import update_menu_task_label
+
+log = Logger.get_logger(__name__)
+
+
+def startupCompleted(event):
+    log.info("startup completed event...")
+    return
+
+
+def shutDown(event):
+    log.info("shut down event...")
+    return
+
+
+def beforeNewProjectCreated(event):
+    log.info("before new project created event...")
+    return
+
+
+def afterNewProjectCreated(event):
+    log.info("after new project created event...")
+    # sync avalon data to project properties
+    sync_avalon_data_to_workfile()
+
+    # add tags from preset
+    add_tags_to_workfile()
+
+    # Workfiles.
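+    # WORKFILES_STARTUP acts as a one-shot flag: when set to a non-zero
+    # value the Workfiles tool is opened once and the variable is reset so
+    # it does not trigger again for other projects in the same session.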
+ if int(os.environ.get("WORKFILES_STARTUP", "0")): + hiero.core.events.sendEvent("kStartWorkfiles", None) + # reset workfiles startup not to open any more in session + os.environ["WORKFILES_STARTUP"] = "0" + + +def beforeProjectLoad(event): + log.info("before project load event...") + return + + +def afterProjectLoad(event): + log.info("after project load event...") + # sync avalon data to project properties + sync_avalon_data_to_workfile() + + # add tags from preset + add_tags_to_workfile() + + +def beforeProjectClosed(event): + log.info("before project closed event...") + return + + +def afterProjectClosed(event): + log.info("after project closed event...") + return + + +def beforeProjectSaved(event): + log.info("before project saved event...") + return + + +def afterProjectSaved(event): + log.info("after project saved event...") + return + + +def register_hiero_events(): + log.info( + "Registering events for: kBeforeNewProjectCreated, " + "kAfterNewProjectCreated, kBeforeProjectLoad, kAfterProjectLoad, " + "kBeforeProjectSave, kAfterProjectSave, kBeforeProjectClose, " + "kAfterProjectClose, kShutdown, kStartup, kSelectionChanged" + ) + + # hiero.core.events.registerInterest( + # "kBeforeNewProjectCreated", beforeNewProjectCreated) + hiero.core.events.registerInterest( + "kAfterNewProjectCreated", afterNewProjectCreated) + + # hiero.core.events.registerInterest( + # "kBeforeProjectLoad", beforeProjectLoad) + hiero.core.events.registerInterest( + "kAfterProjectLoad", afterProjectLoad) + + hiero.core.events.registerInterest( + "kBeforeProjectSave", before_project_save) + # hiero.core.events.registerInterest( + # "kAfterProjectSave", afterProjectSaved) + # + # hiero.core.events.registerInterest( + # "kBeforeProjectClose", beforeProjectClosed) + # hiero.core.events.registerInterest( + # "kAfterProjectClose", afterProjectClosed) + # + # hiero.core.events.registerInterest("kShutdown", shutDown) + # hiero.core.events.registerInterest("kStartup", startupCompleted) + + # INFO: was disabled because it was slowing down timeline operations + # hiero.core.events.registerInterest( + # ("kSelectionChanged", "kTimeline"), selection_changed_timeline) + + # workfiles + try: + hiero.core.events.registerEventType("kStartWorkfiles") + hiero.core.events.registerInterest( + "kStartWorkfiles", launch_workfiles_app) + except RuntimeError: + pass + +def register_events(): + """ + Adding all callbacks. 
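+
+    Unlike register_hiero_events above, which hooks Hiero's native event
+    types, this registers callbacks on the ayon_core event system via
+    register_event_callback.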
+ """ + + # if task changed then change notext of hiero + register_event_callback("taskChanged", update_menu_task_label) + log.info("Installed event callback for 'taskChanged'..") diff --git a/openpype/hosts/hiero/api/launchforhiero.py b/client/ayon_core/hosts/hiero/api/launchforhiero.py similarity index 100% rename from openpype/hosts/hiero/api/launchforhiero.py rename to client/ayon_core/hosts/hiero/api/launchforhiero.py diff --git a/client/ayon_core/hosts/hiero/api/lib.py b/client/ayon_core/hosts/hiero/api/lib.py new file mode 100644 index 0000000000..24ff76d30b --- /dev/null +++ b/client/ayon_core/hosts/hiero/api/lib.py @@ -0,0 +1,1319 @@ +""" +Host specific functions where host api is connected +""" + +from copy import deepcopy +import os +import re +import platform +import functools +import warnings +import json +import ast +import secrets +import shutil +import hiero + +from qtpy import QtWidgets, QtCore +try: + from PySide import QtXml +except ImportError: + from PySide2 import QtXml + +from ayon_core.client import get_project +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import Anatomy, get_current_project_name +from ayon_core.pipeline.load import filter_containers +from ayon_core.lib import Logger +from . import tags +from .constants import ( + OPENPYPE_TAG_NAME, + DEFAULT_SEQUENCE_NAME, + DEFAULT_BIN_NAME +) +from ayon_core.pipeline.colorspace import ( + get_imageio_config +) + + +class _CTX: + has_been_setup = False + has_menu = False + parent_gui = None + + +class DeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", DeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=DeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +log = Logger.get_logger(__name__) + + +def flatten(list_): + for item_ in list_: + if isinstance(item_, (list, tuple)): + for sub_item in flatten(item_): + yield sub_item + else: + yield item_ + + +def get_current_project(remove_untitled=False): + projects = flatten(hiero.core.projects()) + if not remove_untitled: + return next(iter(projects)) + + # if remove_untitled + for proj in projects: + if "Untitled" in proj.name(): + proj.close() + else: + return proj + + +def get_current_sequence(name=None, new=False): + """ + Get current sequence in context of active project. 
+ + Args: + name (str)[optional]: name of sequence we want to return + new (bool)[optional]: if we want to create new one + + Returns: + hiero.core.Sequence: the sequence object + """ + sequence = None + project = get_current_project() + root_bin = project.clipsBin() + + if new: + # create new + name = name or DEFAULT_SEQUENCE_NAME + sequence = hiero.core.Sequence(name) + root_bin.addItem(hiero.core.BinItem(sequence)) + elif name: + # look for sequence by name + sequences = project.sequences() + for _sequence in sequences: + if _sequence.name() == name: + sequence = _sequence + if not sequence: + # if nothing found create new with input name + sequence = get_current_sequence(name, True) + else: + # if name is none and new is False then return current open sequence + sequence = hiero.ui.activeSequence() + + return sequence + + +def get_timeline_selection(): + active_sequence = hiero.ui.activeSequence() + timeline_editor = hiero.ui.getTimelineEditor(active_sequence) + return list(timeline_editor.selection()) + + +def get_current_track(sequence, name, audio=False): + """ + Get current track in context of active project. + + Creates new if none is found. + + Args: + sequence (hiero.core.Sequence): hiero sequene object + name (str): name of track we want to return + audio (bool)[optional]: switch to AudioTrack + + Returns: + hiero.core.Track: the track object + """ + tracks = sequence.videoTracks() + + if audio: + tracks = sequence.audioTracks() + + # get track by name + track = None + for _track in tracks: + if _track.name() == name: + track = _track + + if not track: + if not audio: + track = hiero.core.VideoTrack(name) + else: + track = hiero.core.AudioTrack(name) + + sequence.addTrack(track) + + return track + + +def get_track_items( + selection=False, + sequence_name=None, + track_item_name=None, + track_name=None, + track_type=None, + check_enabled=True, + check_locked=True, + check_tagged=False): + """Get all available current timeline track items. 
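+
+    When an explicit `selection` is passed, it is filtered first;
+    otherwise all tracks of the active (or named) sequence are scanned,
+    honoring the `check_*` switches.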
+ + Attribute: + selection (list)[optional]: list of selected track items + sequence_name (str)[optional]: return only clips from input sequence + track_item_name (str)[optional]: return only item with input name + track_name (str)[optional]: return only items from track name + track_type (str)[optional]: return only items of given type + (`audio` or `video`) default is `video` + check_enabled (bool)[optional]: ignore disabled if True + check_locked (bool)[optional]: ignore locked if True + + Return: + list or hiero.core.TrackItem: list of track items or single track item + """ + track_type = track_type or "video" + selection = selection or [] + return_list = [] + + # get selected track items or all in active sequence + if selection: + try: + for track_item in selection: + log.info("___ track_item: {}".format(track_item)) + # make sure only trackitems are selected + if not isinstance(track_item, hiero.core.TrackItem): + continue + + if _validate_all_atrributes( + track_item, + track_item_name, + track_name, + track_type, + check_enabled, + check_tagged + ): + log.info("___ valid trackitem: {}".format(track_item)) + return_list.append(track_item) + except AttributeError: + pass + + # collect all available active sequence track items + if not return_list: + sequence = get_current_sequence(name=sequence_name) + # get all available tracks from sequence + tracks = list(sequence.audioTracks()) + list(sequence.videoTracks()) + # loop all tracks + for track in tracks: + if check_locked and track.isLocked(): + continue + if check_enabled and not track.isEnabled(): + continue + # and all items in track + for track_item in track.items(): + # make sure no subtrackitem is also track items + if not isinstance(track_item, hiero.core.TrackItem): + continue + + if _validate_all_atrributes( + track_item, + track_item_name, + track_name, + track_type, + check_enabled, + check_tagged + ): + return_list.append(track_item) + + return return_list + + +def _validate_all_atrributes( + track_item, + track_item_name, + track_name, + track_type, + check_enabled, + check_tagged +): + def _validate_correct_name_track_item(): + if track_item_name and track_item_name in track_item.name(): + return True + elif not track_item_name: + return True + + def _validate_tagged_track_item(): + if check_tagged and track_item.tags(): + return True + elif not check_tagged: + return True + + def _validate_enabled_track_item(): + if check_enabled and track_item.isEnabled(): + return True + elif not check_enabled: + return True + + def _validate_parent_track_item(): + if track_name and track_name in track_item.parent().name(): + # filter only items fitting input track name + return True + elif not track_name: + # or add all if no track_name was defined + return True + + def _validate_type_track_item(): + if track_type == "video" and isinstance( + track_item.parent(), hiero.core.VideoTrack): + # only video track items are allowed + return True + elif track_type == "audio" and isinstance( + track_item.parent(), hiero.core.AudioTrack): + # only audio track items are allowed + return True + + # check if track item is enabled + return all([ + _validate_enabled_track_item(), + _validate_type_track_item(), + _validate_tagged_track_item(), + _validate_parent_track_item(), + _validate_correct_name_track_item() + ]) + + +def get_track_item_tags(track_item): + """ + Get track item tags excluded openpype tag + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + 
returning_tag_data = [] + # get all tags from track item + _tags = track_item.tags() + if not _tags: + return [] + + # collect all tags which are not openpype tag + returning_tag_data.extend( + tag for tag in _tags + if tag.name() != OPENPYPE_TAG_NAME + ) + + return returning_tag_data + + +def _get_tag_unique_hash(): + # sourcery skip: avoid-builtin-shadow + return secrets.token_hex(nbytes=4) + + +def set_track_openpype_tag(track, data=None): + """ + Set openpype track tag to input track object. + + Attributes: + track (hiero.core.VideoTrack): hiero object + + Returns: + hiero.core.Tag + """ + data = data or {} + + # basic Tag's attribute + tag_data = { + "editable": "0", + "note": "OpenPype data container", + "icon": "openpype_icon.png", + "metadata": dict(data.items()) + } + # get available pype tag if any + _tag = get_track_openpype_tag(track) + + if _tag: + # it not tag then create one + tag = tags.update_tag(_tag, tag_data) + else: + # if pype tag available then update with input data + tag = tags.create_tag( + "{}_{}".format( + OPENPYPE_TAG_NAME, + _get_tag_unique_hash() + ), + tag_data + ) + # add it to the input track item + track.addTag(tag) + + return tag + + +def get_track_openpype_tag(track): + """ + Get pype track item tag created by creator or loader plugin. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + # get all tags from track item + _tags = track.tags() + if not _tags: + return None + for tag in _tags: + # return only correct tag defined by global name + if OPENPYPE_TAG_NAME in tag.name(): + return tag + + +def get_track_openpype_data(track, container_name=None): + """ + Get track's openpype tag data. + + Attributes: + trackItem (hiero.core.VideoTrack): hiero object + + Returns: + dict: data found on pype tag + """ + return_data = {} + # get pype data tag from track item + tag = get_track_openpype_tag(track) + + if not tag: + return None + + # get tag metadata attribute + tag_data = deepcopy(dict(tag.metadata())) + + for obj_name, obj_data in tag_data.items(): + obj_name = obj_name.replace("tag.", "") + + if obj_name in ["applieswhole", "note", "label"]: + continue + return_data[obj_name] = json.loads(obj_data) + + return ( + return_data[container_name] + if container_name + else return_data + ) + + +@deprecated("ayon_core.hosts.hiero.api.lib.get_trackitem_openpype_tag") +def get_track_item_pype_tag(track_item): + # backward compatibility alias + return get_trackitem_openpype_tag(track_item) + + +@deprecated("ayon_core.hosts.hiero.api.lib.set_trackitem_openpype_tag") +def set_track_item_pype_tag(track_item, data=None): + # backward compatibility alias + return set_trackitem_openpype_tag(track_item, data) + + +@deprecated("ayon_core.hosts.hiero.api.lib.get_trackitem_openpype_data") +def get_track_item_pype_data(track_item): + # backward compatibility alias + return get_trackitem_openpype_data(track_item) + + +def get_trackitem_openpype_tag(track_item): + """ + Get pype track item tag created by creator or loader plugin. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + # get all tags from track item + _tags = track_item.tags() + if not _tags: + return None + for tag in _tags: + # return only correct tag defined by global name + if OPENPYPE_TAG_NAME in tag.name(): + return tag + + +def set_trackitem_openpype_tag(track_item, data=None): + """ + Set openpype track tag to input track object. 
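+
+    Creates a new tag when none exists on the track item yet, otherwise
+    updates the existing tag with the input data.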
+ + Attributes: + track (hiero.core.VideoTrack): hiero object + + Returns: + hiero.core.Tag + """ + data = data or {} + + # basic Tag's attribute + tag_data = { + "editable": "0", + "note": "OpenPype data container", + "icon": "openpype_icon.png", + "metadata": dict(data.items()) + } + # get available pype tag if any + _tag = get_trackitem_openpype_tag(track_item) + if _tag: + # it not tag then create one + tag = tags.update_tag(_tag, tag_data) + else: + # if pype tag available then update with input data + tag = tags.create_tag( + "{}_{}".format( + OPENPYPE_TAG_NAME, + _get_tag_unique_hash() + ), + tag_data + ) + # add it to the input track item + track_item.addTag(tag) + + return tag + + +def get_trackitem_openpype_data(track_item): + """ + Get track item's pype tag data. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + dict: data found on pype tag + """ + data = {} + # get pype data tag from track item + tag = get_trackitem_openpype_tag(track_item) + + if not tag: + return None + + # get tag metadata attribute + tag_data = deepcopy(dict(tag.metadata())) + # convert tag metadata to normal keys names and values to correct types + for k, v in tag_data.items(): + key = k.replace("tag.", "") + + try: + # capture exceptions which are related to strings only + if re.match(r"^[\d]+$", v): + value = int(v) + elif re.match(r"^True$", v): + value = True + elif re.match(r"^False$", v): + value = False + elif re.match(r"^None$", v): + value = None + elif re.match(r"^[\w\d_]+$", v): + value = v + else: + value = ast.literal_eval(v) + except (ValueError, SyntaxError) as msg: + log.warning(msg) + value = v + + data[key] = value + + return data + + +def imprint(track_item, data=None): + """ + Adding `Avalon data` into a hiero track item tag. + + Also including publish attribute into tag. + + Arguments: + track_item (hiero.core.TrackItem): hiero track item object + data (dict): Any data which needst to be imprinted + + Examples: + data = { + 'asset': 'sq020sh0280', + 'family': 'render', + 'subset': 'subsetMain' + } + """ + data = data or {} + + tag = set_trackitem_openpype_tag(track_item, data) + + # add publish attribute + set_publish_attribute(tag, True) + + +def set_publish_attribute(tag, value): + """ Set Publish attribute in input Tag object + + Attribute: + tag (hiero.core.Tag): a tag object + value (bool): True or False + """ + tag_data = tag.metadata() + # set data to the publish attribute + tag_data.setValue("tag.publish", str(value)) + + +def get_publish_attribute(tag): + """ Get Publish attribute from input Tag object + + Attribute: + tag (hiero.core.Tag): a tag object + value (bool): True or False + """ + tag_data = tag.metadata() + # get data to the publish attribute + value = tag_data.value("tag.publish") + # return value converted to bool value. Atring is stored in tag. 
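+    # note: ast.literal_eval will raise ValueError if the tag was edited
+    # by hand and no longer contains a Python literal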
+ return ast.literal_eval(value) + + +def sync_avalon_data_to_workfile(): + # import session to get project dir + project_name = get_current_project_name() + + anatomy = Anatomy(project_name) + work_template = anatomy.templates["work"]["path"] + work_root = anatomy.root_value_for_template(work_template) + active_project_root = ( + os.path.join(work_root, project_name) + ).replace("\\", "/") + # getting project + project = get_current_project() + + if "Tag Presets" in project.name(): + return + + log.debug("Synchronizing Pype metadata to project: {}".format( + project.name())) + + # set project root with backward compatibility + try: + project.setProjectDirectory(active_project_root) + except Exception: + # old way of setting it + project.setProjectRoot(active_project_root) + + # get project data from avalon db + project_doc = get_project(project_name) + project_data = project_doc["data"] + + log.debug("project_data: {}".format(project_data)) + + # get format and fps property from avalon db on project + width = project_data["resolutionWidth"] + height = project_data["resolutionHeight"] + pixel_aspect = project_data["pixelAspect"] + fps = project_data['fps'] + format_name = project_data['code'] + + # create new format in hiero project + format = hiero.core.Format(width, height, pixel_aspect, format_name) + project.setOutputFormat(format) + + # set fps to hiero project + project.setFramerate(fps) + + # TODO: add auto colorspace set from project drop + log.info("Project property has been synchronised with Avalon db") + + +def launch_workfiles_app(event): + """ + Event for launching workfiles after hiero start + + Args: + event (obj): required but unused + """ + from . import launch_workfiles_app + launch_workfiles_app() + + +def setup(console=False, port=None, menu=True): + """Setup integration + + Registers Pyblish for Hiero plug-ins and appends an item to the File-menu + + Arguments: + console (bool): Display console with GUI + port (int, optional): Port from which to start looking for an + available port to connect with Pyblish QML, default + provided by Pyblish Integration. + menu (bool, optional): Display file menu in Hiero. + """ + + if _CTX.has_been_setup: + teardown() + + add_submission() + + if menu: + add_to_filemenu() + _CTX.has_menu = True + + _CTX.has_been_setup = True + log.debug("pyblish: Loaded successfully.") + + +def teardown(): + """Remove integration""" + if not _CTX.has_been_setup: + return + + if _CTX.has_menu: + remove_from_filemenu() + _CTX.has_menu = False + + _CTX.has_been_setup = False + log.debug("pyblish: Integration torn down successfully") + + +def remove_from_filemenu(): + raise NotImplementedError("Implement me please.") + + +def add_to_filemenu(): + PublishAction() + + +class PyblishSubmission(hiero.exporters.FnSubmission.Submission): + + def __init__(self): + hiero.exporters.FnSubmission.Submission.__init__(self) + + def addToQueue(self): + from . import publish + # Add submission to Hiero module for retrieval in plugins. 
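+        # Plugins can detect an Export-dialog driven publish by checking
+        # hasattr(hiero, "submission"); PublishAction.publish removes the
+        # attribute again so menu-driven publishes are not affected.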
+ hiero.submission = self + publish(hiero.ui.mainWindow()) + + +def add_submission(): + registry = hiero.core.taskRegistry + registry.addSubmission("Pyblish", PyblishSubmission) + + +class PublishAction(QtWidgets.QAction): + """ + Action with is showing as menu item + """ + + def __init__(self): + QtWidgets.QAction.__init__(self, "Publish", None) + self.triggered.connect(self.publish) + + for interest in ["kShowContextMenu/kTimeline", + "kShowContextMenukBin", + "kShowContextMenu/kSpreadsheet"]: + hiero.core.events.registerInterest(interest, self.eventHandler) + + self.setShortcut("Ctrl+Alt+P") + + def publish(self): + from . import publish + # Removing "submission" attribute from hiero module, to prevent tasks + # from getting picked up when not using the "Export" dialog. + if hasattr(hiero, "submission"): + del hiero.submission + publish(hiero.ui.mainWindow()) + + def eventHandler(self, event): + # Add the Menu to the right-click menu + event.menu.addAction(self) + + +# def CreateNukeWorkfile(nodes=None, +# nodes_effects=None, +# to_timeline=False, +# **kwargs): +# ''' Creating nuke workfile with particular version with given nodes +# Also it is creating timeline track items as precomps. +# +# Arguments: +# nodes(list of dict): each key in dict is knob order is important +# to_timeline(type): will build trackItem with metadata +# +# Returns: +# bool: True if done +# +# Raises: +# Exception: with traceback +# +# ''' +# import hiero.core +# from ayon_core.hosts.nuke.api.lib import ( +# BuildWorkfile, +# imprint +# ) +# +# # check if the file exists if does then Raise "File exists!" +# if os.path.exists(filepath): +# raise FileExistsError("File already exists: `{}`".format(filepath)) +# +# # if no representations matching then +# # Raise "no representations to be build" +# if len(representations) == 0: +# raise AttributeError("Missing list of `representations`") +# +# # check nodes input +# if len(nodes) == 0: +# log.warning("Missing list of `nodes`") +# +# # create temp nk file +# nuke_script = hiero.core.nuke.ScriptWriter() +# +# # create root node and save all metadata +# root_node = hiero.core.nuke.RootNode() +# +# anatomy = Anatomy(get_current_project_name()) +# work_template = anatomy.templates["work"]["path"] +# root_path = anatomy.root_value_for_template(work_template) +# +# nuke_script.addNode(root_node) +# +# script_builder = BuildWorkfile( +# root_node=root_node, +# root_path=root_path, +# nodes=nuke_script.getNodes(), +# **kwargs +# ) + + +def create_nuke_workfile_clips(nuke_workfiles, seq=None): + ''' + nuke_workfiles is list of dictionaries like: + [{ + 'path': 'P:/Jakub_testy_pipeline/test_v01.nk', + 'name': 'test', + 'handleStart': 15, # added asymetrically to handles + 'handleEnd': 10, # added asymetrically to handles + "clipIn": 16, + "frameStart": 991, + "frameEnd": 1023, + 'task': 'Comp-tracking', + 'work_dir': 'VFX_PR', + 'shot': '00010' + }] + ''' + + proj = hiero.core.projects()[-1] + root = proj.clipsBin() + + if not seq: + seq = hiero.core.Sequence('NewSequences') + root.addItem(hiero.core.BinItem(seq)) + # todo will need to define this better + # track = seq[1] # lazy example to get a destination# track + clips_lst = [] + for nk in nuke_workfiles: + task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']]) + bin = create_bin(task_path, proj) + + if nk['task'] not in seq.videoTracks(): + track = hiero.core.VideoTrack(nk['task']) + seq.addTrack(track) + else: + track = seq.tracks(nk['task']) + + # create clip media + media = hiero.core.MediaSource(nk['path']) + 
media_in = int(media.startTime() or 0) + media_duration = int(media.duration() or 0) + + handle_start = nk.get("handleStart") + handle_end = nk.get("handleEnd") + + if media_in: + source_in = media_in + handle_start + else: + source_in = nk["frameStart"] + handle_start + + if media_duration: + source_out = (media_in + media_duration - 1) - handle_end + else: + source_out = nk["frameEnd"] - handle_end + + source = hiero.core.Clip(media) + + name = os.path.basename(os.path.splitext(nk['path'])[0]) + split_name = split_by_client_version(name)[0] or name + + # add to bin as clip item + items_in_bin = [b.name() for b in bin.items()] + if split_name not in items_in_bin: + binItem = hiero.core.BinItem(source) + bin.addItem(binItem) + + new_source = [ + item for item in bin.items() if split_name in item.name() + ][0].items()[0].item() + + # add to track as clip item + trackItem = hiero.core.TrackItem( + split_name, hiero.core.TrackItem.kVideo) + trackItem.setSource(new_source) + trackItem.setSourceIn(source_in) + trackItem.setSourceOut(source_out) + trackItem.setTimelineIn(nk["clipIn"]) + trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) + track.addTrackItem(trackItem) + clips_lst.append(trackItem) + + return clips_lst + + +def create_bin(path=None, project=None): + ''' + Create bin in project. + If the path is "bin1/bin2/bin3" it will create whole depth + and return `bin3` + + ''' + # get the first loaded project + project = project or get_current_project() + + path = path or DEFAULT_BIN_NAME + + path = path.replace("\\", "/").split("/") + + root_bin = project.clipsBin() + + done_bin_lst = [] + for i, b in enumerate(path): + if i == 0 and len(path) > 1: + if b in [bin.name() for bin in root_bin.bins()]: + bin = [bin for bin in root_bin.bins() if b in bin.name()][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + root_bin.addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i >= 1 and i < len(path) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i == len(path) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + + return done_bin_lst[-1] + + +def split_by_client_version(string): + regex = r"[/_.]v\d+" + try: + matches = re.findall(regex, string, re.IGNORECASE) + return string.split(matches[0]) + except Exception as error: + log.error(error) + return None + + +def get_selected_track_items(sequence=None): + _sequence = sequence or get_current_sequence() + + # Getting selection + timeline_editor = hiero.ui.getTimelineEditor(_sequence) + return timeline_editor.selection() + + +def set_selected_track_items(track_items_list, sequence=None): + _sequence = sequence or get_current_sequence() + + # make sure only trackItems are in list selection + only_track_items = [ + i for i in track_items_list + if isinstance(i, hiero.core.TrackItem)] + + # Getting selection + timeline_editor = hiero.ui.getTimelineEditor(_sequence) + return timeline_editor.setSelection(only_track_items) + + +def _read_doc_from_path(path): + # reading QtXml.QDomDocument from HROX path + hrox_file = 
QtCore.QFile(path) + if not hrox_file.open(QtCore.QFile.ReadOnly): + raise RuntimeError("Failed to open file for reading") + doc = QtXml.QDomDocument() + doc.setContent(hrox_file) + hrox_file.close() + return doc + + +def _write_doc_to_path(doc, path): + # write QtXml.QDomDocument to path as HROX + hrox_file = QtCore.QFile(path) + if not hrox_file.open(QtCore.QFile.WriteOnly): + raise RuntimeError("Failed to open file for writing") + stream = QtCore.QTextStream(hrox_file) + doc.save(stream, 1) + hrox_file.close() + + +def _set_hrox_project_knobs(doc, **knobs): + # set attributes to Project Tag + proj_elem = doc.documentElement().firstChildElement("Project") + for k, v in knobs.items(): + if "ocioconfigpath" in k: + paths_to_format = v[platform.system().lower()] + for _path in paths_to_format: + v = _path.format(**os.environ) + if not os.path.exists(v): + continue + log.debug("Project colorspace knob `{}` was set to `{}`".format(k, v)) + if isinstance(v, dict): + continue + proj_elem.setAttribute(str(k), v) + + +def apply_colorspace_project(): + project_name = get_current_project_name() + # get path the the active projects + project = get_current_project(remove_untitled=True) + current_file = project.path() + + # close the active project + project.close() + + # get presets for hiero + imageio = get_project_settings(project_name)["hiero"]["imageio"] + presets = imageio.get("workfile") + + # backward compatibility layer + # TODO: remove this after some time + config_data = get_imageio_config( + project_name=get_current_project_name(), + host_name="hiero" + ) + + if config_data: + presets.update({ + "ocioConfigName": "custom" + }) + + # save the workfile as subversion "comment:_colorspaceChange" + split_current_file = os.path.splitext(current_file) + copy_current_file = current_file + + if "_colorspaceChange" not in current_file: + copy_current_file = ( + split_current_file[0] + + "_colorspaceChange" + + split_current_file[1] + ) + + try: + # duplicate the file so the changes are applied only to the copy + shutil.copyfile(current_file, copy_current_file) + except shutil.Error: + # in case the file already exists and it want to copy to the + # same filewe need to do this trick + # TEMP file name change + copy_current_file_tmp = copy_current_file + "_tmp" + # create TEMP file + shutil.copyfile(current_file, copy_current_file_tmp) + # remove original file + os.remove(current_file) + # copy TEMP back to original name + shutil.copyfile(copy_current_file_tmp, copy_current_file) + # remove the TEMP file as we dont need it + os.remove(copy_current_file_tmp) + + # use the code from below for changing xml hrox Attributes + presets.update({"name": os.path.basename(copy_current_file)}) + + # read HROX in as QDomSocument + doc = _read_doc_from_path(copy_current_file) + + # apply project colorspace properties + _set_hrox_project_knobs(doc, **presets) + + # write QDomSocument back as HROX + _write_doc_to_path(doc, copy_current_file) + + # open the file as current project + hiero.core.openProject(copy_current_file) + + +def apply_colorspace_clips(): + project_name = get_current_project_name() + project = get_current_project(remove_untitled=True) + clips = project.clips() + + # get presets for hiero + imageio = get_project_settings(project_name)["hiero"]["imageio"] + from pprint import pprint + + presets = imageio.get("regexInputs", {}).get("inputs", {}) + pprint(presets) + for clip in clips: + clip_media_source_path = clip.mediaSource().firstpath() + clip_name = clip.name() + clip_colorspace = 
clip.sourceMediaColourTransform() + + if "default" in clip_colorspace: + continue + + # check if any colorspace presets for read is matching + preset_clrsp = None + for k in presets: + if not bool(re.search(k["regex"], clip_media_source_path)): + continue + preset_clrsp = k["colorspace"] + + if preset_clrsp: + log.debug("Changing clip.path: {}".format(clip_media_source_path)) + log.info("Changing clip `{}` colorspace {} to {}".format( + clip_name, clip_colorspace, preset_clrsp)) + # set the found preset to the clip + clip.setSourceMediaColourTransform(preset_clrsp) + + # save project after all is changed + project.save() + + +def is_overlapping(ti_test, ti_original, strict=False): + covering_exp = ( + (ti_test.timelineIn() <= ti_original.timelineIn()) + and (ti_test.timelineOut() >= ti_original.timelineOut()) + ) + + if strict: + return covering_exp + + inside_exp = ( + (ti_test.timelineIn() >= ti_original.timelineIn()) + and (ti_test.timelineOut() <= ti_original.timelineOut()) + ) + overlaying_right_exp = ( + (ti_test.timelineIn() < ti_original.timelineOut()) + and (ti_test.timelineOut() >= ti_original.timelineOut()) + ) + overlaying_left_exp = ( + (ti_test.timelineOut() > ti_original.timelineIn()) + and (ti_test.timelineIn() <= ti_original.timelineIn()) + ) + + return any(( + covering_exp, + inside_exp, + overlaying_right_exp, + overlaying_left_exp + )) + + +def get_sequence_pattern_and_padding(file): + """ Return sequence pattern and padding from file + + Attributes: + file (string): basename form path + + Example: + Can find file.0001.ext, file.%02d.ext, file.####.ext + + Return: + string: any matching sequence pattern + int: padding of sequnce numbering + """ + foundall = re.findall( + r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file) + if not foundall: + return None, None + found = sorted(list(set(foundall[0])))[-1] + + padding = int( + re.findall(r"\d+", found)[-1]) if "%" in found else len(found) + return found, padding + + +def sync_clip_name_to_data_asset(track_items_list): + # loop through all selected clips + for track_item in track_items_list: + # ignore if parent track is locked or disabled + if track_item.parent().isLocked(): + continue + if not track_item.parent().isEnabled(): + continue + # ignore if the track item is disabled + if not track_item.isEnabled(): + continue + + # get name and data + ti_name = track_item.name() + data = get_trackitem_openpype_data(track_item) + + # ignore if no data on the clip or not publish instance + if not data: + continue + if data.get("id") != "pyblish.avalon.instance": + continue + + # fix data if wrong name + if data["asset"] != ti_name: + data["asset"] = ti_name + # remove the original tag + tag = get_trackitem_openpype_tag(track_item) + track_item.removeTag(tag) + # create new tag with updated data + set_trackitem_openpype_tag(track_item, data) + print("asset was changed in clip: {}".format(ti_name)) + + +def set_track_color(track_item, color): + track_item.source().binItem().setColor(color) + + +def check_inventory_versions(track_items=None): + """ + Actual version color identifier of Loaded containers + + Check all track items and filter only + Loader nodes for its version. It will get all versions from database + and check if the node is having actual version. If not then it will color + it to red. + """ + from . 
import parse_container + + track_items = track_items or get_track_items() + # presets + clip_color_last = "green" + clip_color = "red" + + containers = [] + # Find all containers and collect it's node and representation ids + for track_item in track_items: + container = parse_container(track_item) + if container: + containers.append(container) + + # Skip if nothing was found + if not containers: + return + + project_name = get_current_project_name() + filter_result = filter_containers(containers, project_name) + for container in filter_result.latest: + set_track_color(container["_item"], clip_color_last) + + for container in filter_result.outdated: + set_track_color(container["_item"], clip_color) + + +def selection_changed_timeline(event): + """Callback on timeline to check if asset in data is the same as clip name. + + Args: + event (hiero.core.Event): timeline event + """ + timeline_editor = event.sender + selection = timeline_editor.selection() + + track_items = get_track_items( + selection=selection, + track_type="video", + check_enabled=True, + check_locked=True, + check_tagged=True + ) + + # run checking function + sync_clip_name_to_data_asset(track_items) + + +def before_project_save(event): + track_items = get_track_items( + track_type="video", + check_enabled=True, + check_locked=True, + check_tagged=True + ) + + # run checking function + sync_clip_name_to_data_asset(track_items) + + # also mark old versions of loaded containers + check_inventory_versions(track_items) + + +def get_main_window(): + """Acquire Nuke's main window""" + if _CTX.parent_gui is None: + top_widgets = QtWidgets.QApplication.topLevelWidgets() + name = "Foundry::UI::DockMainWindow" + main_window = next(widget for widget in top_widgets if + widget.inherits("QMainWindow") and + widget.metaObject().className() == name) + _CTX.parent_gui = main_window + return _CTX.parent_gui diff --git a/client/ayon_core/hosts/hiero/api/menu.py b/client/ayon_core/hosts/hiero/api/menu.py new file mode 100644 index 0000000000..ba0cbdd120 --- /dev/null +++ b/client/ayon_core/hosts/hiero/api/menu.py @@ -0,0 +1,175 @@ +import os +import sys + +import hiero.core +from hiero.ui import findMenuAction + +from qtpy import QtGui + +from ayon_core.lib import Logger, is_dev_mode_enabled +from ayon_core.tools.utils import host_tools +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( + get_current_project_name, + get_current_asset_name, + get_current_task_name +) + +from . import tags + +log = Logger.get_logger(__name__) + +self = sys.modules[__name__] +self._change_context_menu = None + + +def get_context_label(): + return "{}, {}".format( + get_current_asset_name(), + get_current_task_name() + ) + + +def update_menu_task_label(): + """Update the task label in Avalon menu to current session""" + + object_name = self._change_context_menu + found_menu = findMenuAction(object_name) + + if not found_menu: + log.warning("Can't find menuItem: {}".format(object_name)) + return + + label = get_context_label() + + menu = found_menu.menu() + self._change_context_menu = label + menu.setTitle(label) + + +def menu_install(): + """ + Installing menu into Hiero + + """ + + from . 
import ( + publish, launch_workfiles_app, reload_config, + apply_colorspace_project, apply_colorspace_clips + ) + from .lib import get_main_window + + main_window = get_main_window() + + # here is the best place to add menu + + menu_name = os.environ['AYON_MENU_LABEL'] + + context_label = get_context_label() + + self._change_context_menu = context_label + + try: + check_made_menu = findMenuAction(menu_name) + except Exception: + check_made_menu = None + + if not check_made_menu: + # Grab Hiero's MenuBar + menu = hiero.ui.menuBar().addMenu(menu_name) + else: + menu = check_made_menu.menu() + + context_label_action = menu.addAction(context_label) + context_label_action.setEnabled(False) + + menu.addSeparator() + + workfiles_action = menu.addAction("Work Files...") + workfiles_action.setIcon(QtGui.QIcon("icons:Position.png")) + workfiles_action.triggered.connect(launch_workfiles_app) + + default_tags_action = menu.addAction("Create Default Tags") + default_tags_action.setIcon(QtGui.QIcon("icons:Position.png")) + default_tags_action.triggered.connect(tags.add_tags_to_workfile) + + menu.addSeparator() + + creator_action = menu.addAction("Create...") + creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) + creator_action.triggered.connect( + lambda: host_tools.show_creator(parent=main_window) + ) + + publish_action = menu.addAction("Publish...") + publish_action.setIcon(QtGui.QIcon("icons:Output.png")) + publish_action.triggered.connect( + lambda *args: publish(hiero.ui.mainWindow()) + ) + + loader_action = menu.addAction("Load...") + loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) + loader_action.triggered.connect( + lambda: host_tools.show_loader(parent=main_window) + ) + + sceneinventory_action = menu.addAction("Manage...") + sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) + sceneinventory_action.triggered.connect( + lambda: host_tools.show_scene_inventory(parent=main_window) + ) + + library_action = menu.addAction("Library...") + library_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) + library_action.triggered.connect( + lambda: host_tools.show_library_loader(parent=main_window) + ) + + if is_dev_mode_enabled(): + menu.addSeparator() + reload_action = menu.addAction("Reload pipeline") + reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) + reload_action.triggered.connect(reload_config) + + menu.addSeparator() + apply_colorspace_p_action = menu.addAction("Apply Colorspace Project") + apply_colorspace_p_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) + apply_colorspace_p_action.triggered.connect(apply_colorspace_project) + + apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips") + apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) + apply_colorspace_c_action.triggered.connect(apply_colorspace_clips) + + menu.addSeparator() + + exeprimental_action = menu.addAction("Experimental tools...") + exeprimental_action.triggered.connect( + lambda: host_tools.show_experimental_tools_dialog(parent=main_window) + ) + + +def add_scripts_menu(): + try: + from . import launchforhiero + except ImportError: + + log.warning( + "Skipping studio.menu install, because " + "'scriptsmenu' module seems unavailable." 
+ ) + return + + # load configuration of custom menu + project_settings = get_project_settings(get_current_project_name()) + config = project_settings["hiero"]["scriptsmenu"]["definition"] + _menu = project_settings["hiero"]["scriptsmenu"]["name"] + + if not config: + log.warning("Skipping studio menu, no definition found.") + return + + # run the launcher for Hiero menu + studio_menu = launchforhiero.main(title=_menu.title()) + + # apply configuration + studio_menu.build_from_configuration(studio_menu, config) diff --git a/openpype/hosts/harmony/plugins/__init__.py b/client/ayon_core/hosts/hiero/api/otio/__init__.py similarity index 100% rename from openpype/hosts/harmony/plugins/__init__.py rename to client/ayon_core/hosts/hiero/api/otio/__init__.py diff --git a/openpype/hosts/hiero/api/otio/hiero_export.py b/client/ayon_core/hosts/hiero/api/otio/hiero_export.py similarity index 100% rename from openpype/hosts/hiero/api/otio/hiero_export.py rename to client/ayon_core/hosts/hiero/api/otio/hiero_export.py diff --git a/openpype/hosts/hiero/api/otio/hiero_import.py b/client/ayon_core/hosts/hiero/api/otio/hiero_import.py similarity index 100% rename from openpype/hosts/hiero/api/otio/hiero_import.py rename to client/ayon_core/hosts/hiero/api/otio/hiero_import.py diff --git a/openpype/hosts/hiero/api/otio/utils.py b/client/ayon_core/hosts/hiero/api/otio/utils.py similarity index 100% rename from openpype/hosts/hiero/api/otio/utils.py rename to client/ayon_core/hosts/hiero/api/otio/utils.py diff --git a/client/ayon_core/hosts/hiero/api/pipeline.py b/client/ayon_core/hosts/hiero/api/pipeline.py new file mode 100644 index 0000000000..1897628678 --- /dev/null +++ b/client/ayon_core/hosts/hiero/api/pipeline.py @@ -0,0 +1,336 @@ +""" +Basic avalon integration +""" +from copy import deepcopy +import os +import contextlib +from collections import OrderedDict + +from pyblish import api as pyblish +from ayon_core.lib import Logger +from ayon_core.pipeline import ( + schema, + register_creator_plugin_path, + register_loader_plugin_path, + deregister_creator_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) +from ayon_core.tools.utils import host_tools +from . 
import lib, menu, events +import hiero + +log = Logger.get_logger(__name__) + +# plugin paths +API_DIR = os.path.dirname(os.path.abspath(__file__)) +HOST_DIR = os.path.dirname(API_DIR) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish").replace("\\", "/") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load").replace("\\", "/") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create").replace("\\", "/") + +AVALON_CONTAINERS = ":AVALON_CONTAINERS" + + +def install(): + """Installing Hiero integration.""" + + # adding all events + events.register_events() + + log.info("Registering Hiero plug-ins..") + pyblish.register_host("hiero") + pyblish.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + + # register callback for switching publishable + pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) + + # install menu + menu.menu_install() + menu.add_scripts_menu() + + # register hiero events + events.register_hiero_events() + + +def uninstall(): + """ + Uninstalling Hiero integration for avalon + + """ + log.info("Deregistering Hiero plug-ins..") + pyblish.deregister_host("hiero") + pyblish.deregister_plugin_path(PUBLISH_PATH) + deregister_loader_plugin_path(LOAD_PATH) + deregister_creator_plugin_path(CREATE_PATH) + + # register callback for switching publishable + pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) + + +def containerise(track_item, + name, + namespace, + context, + loader=None, + data=None): + """Bundle Hiero's object into an assembly and imprint it with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + track_item (hiero.core.TrackItem): object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + context (dict): Asset information + loader (str, optional): Name of node used to produce this container. + + Returns: + track_item (hiero.core.TrackItem): containerised object + + """ + + data_imprint = OrderedDict({ + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + }) + + if data: + for k, v in data.items(): + data_imprint.update({k: v}) + + log.debug("_ data_imprint: {}".format(data_imprint)) + lib.set_trackitem_openpype_tag(track_item, data_imprint) + + return track_item + + +def ls(): + """List available containers. + + This function is used by the Container Manager in Nuke. You'll + need to implement a for-loop that then *yields* one Container at + a time. + + See the `container.json` schema for details on how it should look, + and the Maya equivalent, which is in `avalon.maya.pipeline` + """ + + # get all track items from current timeline + all_items = lib.get_track_items() + + # append all video tracks + for track in lib.get_current_sequence(): + if type(track) != hiero.core.VideoTrack: + continue + all_items.append(track) + + for item in all_items: + container_data = parse_container(item) + + if isinstance(container_data, list): + for _c in container_data: + yield _c + elif container_data: + yield container_data + + +def parse_container(item, validate=True): + """Return container data from track_item's pype tag. + + Args: + item (hiero.core.TrackItem or hiero.core.VideoTrack): + A containerised track item. 
+ validate (bool)[optional]: validating with avalon scheme + + Returns: + dict: The container schema data for input containerized track item. + + """ + def data_to_container(item, data): + if ( + not data + or data.get("id") != "pyblish.avalon.container" + ): + return + + if validate and data and data.get("schema"): + schema.validate(data) + + if not isinstance(data, dict): + return + + # If not all required data return the empty container + required = ['schema', 'id', 'name', + 'namespace', 'loader', 'representation'] + + if any(key not in data for key in required): + return + + container = {key: data[key] for key in required} + + container["objectName"] = item.name() + + # Store reference to the node object + container["_item"] = item + + return container + + # convert tag metadata to normal keys names + if type(item) == hiero.core.VideoTrack: + return_list = [] + _data = lib.get_track_openpype_data(item) + + if not _data: + return + # convert the data to list and validate them + for _, obj_data in _data.items(): + container = data_to_container(item, obj_data) + return_list.append(container) + return return_list + else: + _data = lib.get_trackitem_openpype_data(item) + return data_to_container(item, _data) + + +def _update_container_data(container, data): + for key in container: + try: + container[key] = data[key] + except KeyError: + pass + return container + + +def update_container(item, data=None): + """Update container data to input track_item or track's + openpype tag. + + Args: + item (hiero.core.TrackItem or hiero.core.VideoTrack): + A containerised track item. + data (dict)[optional]: dictionery with data to be updated + + Returns: + bool: True if container was updated correctly + + """ + + data = data or {} + data = deepcopy(data) + + if type(item) == hiero.core.VideoTrack: + # form object data for test + object_name = data["objectName"] + + # get all available containers + containers = lib.get_track_openpype_data(item) + container = lib.get_track_openpype_data(item, object_name) + + containers = deepcopy(containers) + container = deepcopy(container) + + # update data in container + updated_container = _update_container_data(container, data) + # merge updated container back to containers + containers.update({object_name: updated_container}) + + return bool(lib.set_track_openpype_tag(item, containers)) + else: + container = lib.get_trackitem_openpype_data(item) + updated_container = _update_container_data(container, data) + + log.info("Updating container: `{}`".format(item.name())) + return bool(lib.set_trackitem_openpype_tag(item, updated_container)) + + +def launch_workfiles_app(*args): + ''' Wrapping function for workfiles launcher ''' + from .lib import get_main_window + + main_window = get_main_window() + # show workfile gui + host_tools.show_workfiles(parent=main_window) + + +def publish(parent): + """Shorthand to publish from within host""" + return host_tools.show_publish(parent) + + +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context + + Example: + >>> with maintained_selection(): + ... for track_item in track_items: + ... 
< do some stuff > + """ + from .lib import ( + set_selected_track_items, + get_selected_track_items + ) + previous_selection = get_selected_track_items() + reset_selection() + try: + # do the operation + yield + finally: + reset_selection() + set_selected_track_items(previous_selection) + + +def reset_selection(): + """Deselect all selected nodes + """ + from .lib import set_selected_track_items + set_selected_track_items([]) + + +def reload_config(): + """Attempt to reload pipeline at run-time. + + CAUTION: This is primarily for development and debugging purposes. + + """ + import importlib + + for module in ( + "ayon_core.hosts.hiero.lib", + "ayon_core.hosts.hiero.menu", + "ayon_core.hosts.hiero.tags" + ): + log.info("Reloading module: {}...".format(module)) + try: + module = importlib.import_module(module) + import imp + imp.reload(module) + except Exception as e: + log.warning("Cannot reload module: {}".format(e)) + importlib.reload(module) + + +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node passthrough states on instance toggles.""" + + log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( + instance, old_value, new_value)) + + from ayon_core.hosts.hiero.api import ( + get_trackitem_openpype_tag, + set_publish_attribute + ) + + # Whether instances should be passthrough based on new value + track_item = instance.data["item"] + tag = get_trackitem_openpype_tag(track_item) + set_publish_attribute(tag, new_value) diff --git a/client/ayon_core/hosts/hiero/api/plugin.py b/client/ayon_core/hosts/hiero/api/plugin.py new file mode 100644 index 0000000000..574865488f --- /dev/null +++ b/client/ayon_core/hosts/hiero/api/plugin.py @@ -0,0 +1,955 @@ +import os +from pprint import pformat +import re +from copy import deepcopy + +import hiero + +from qtpy import QtWidgets, QtCore +import qargparse + +from ayon_core.settings import get_current_project_settings +from ayon_core.lib import Logger +from ayon_core.pipeline import LoaderPlugin, LegacyCreator +from ayon_core.pipeline.load import get_representation_path_from_context +from . 
import lib + +log = Logger.get_logger(__name__) + + +def load_stylesheet(): + path = os.path.join(os.path.dirname(__file__), "style.css") + if not os.path.exists(path): + log.warning("Unable to load stylesheet, file not found in resources") + return "" + + with open(path, "r") as file_stream: + stylesheet = file_stream.read() + return stylesheet + + +class CreatorWidget(QtWidgets.QDialog): + + # output items + items = {} + + def __init__(self, name, info, ui_inputs, parent=None): + super(CreatorWidget, self).__init__(parent) + + self.setObjectName(name) + + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.CustomizeWindowHint + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowStaysOnTopHint + ) + self.setWindowTitle(name or "Pype Creator Input") + self.resize(500, 700) + + # Where inputs and labels are set + self.content_widget = [QtWidgets.QWidget(self)] + top_layout = QtWidgets.QFormLayout(self.content_widget[0]) + top_layout.setObjectName("ContentLayout") + top_layout.addWidget(Spacer(5, self)) + + # first add widget tag line + top_layout.addWidget(QtWidgets.QLabel(info)) + + # main dynamic layout + self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAsNeeded) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOn) + self.scroll_area.setHorizontalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOff) + self.scroll_area.setWidgetResizable(True) + + self.content_widget.append(self.scroll_area) + + scroll_widget = QtWidgets.QWidget(self) + in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) + self.content_layout = [in_scroll_area] + + # add preset data into input widget layout + self.items = self.populate_widgets(ui_inputs) + self.scroll_area.setWidget(scroll_widget) + + # Confirmation buttons + btns_widget = QtWidgets.QWidget(self) + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + + cancel_btn = QtWidgets.QPushButton("Cancel") + btns_layout.addWidget(cancel_btn) + + ok_btn = QtWidgets.QPushButton("Ok") + btns_layout.addWidget(ok_btn) + + # Main layout of the dialog + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(10, 10, 10, 10) + main_layout.setSpacing(0) + + # adding content widget + for w in self.content_widget: + main_layout.addWidget(w) + + main_layout.addWidget(btns_widget) + + ok_btn.clicked.connect(self._on_ok_clicked) + cancel_btn.clicked.connect(self._on_cancel_clicked) + + stylesheet = load_stylesheet() + self.setStyleSheet(stylesheet) + + def _on_ok_clicked(self): + self.result = self.value(self.items) + self.close() + + def _on_cancel_clicked(self): + self.result = None + self.close() + + def value(self, data, new_data=None): + new_data = new_data or dict() + for k, v in data.items(): + new_data[k] = { + "target": None, + "value": None + } + if v["type"] == "dict": + new_data[k]["target"] = v["target"] + new_data[k]["value"] = self.value(v["value"]) + if v["type"] == "section": + new_data.pop(k) + new_data = self.value(v["value"], new_data) + elif getattr(v["value"], "currentText", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].currentText() + elif getattr(v["value"], "isChecked", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].isChecked() + elif getattr(v["value"], "value", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].value() + elif getattr(v["value"], "text", None): + new_data[k]["target"] = v["target"] 
+ new_data[k]["value"] = v["value"].text() + + return new_data + + def camel_case_split(self, text): + matches = re.finditer( + '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) + return " ".join([str(m.group(0)).capitalize() for m in matches]) + + def create_row(self, layout, type, text, **kwargs): + value_keys = ["setText", "setCheckState", "setValue", "setChecked"] + + # get type attribute from qwidgets + attr = getattr(QtWidgets, type) + + # convert label text to normal capitalized text with spaces + label_text = self.camel_case_split(text) + + # assign the new text to label widget + label = QtWidgets.QLabel(label_text) + label.setObjectName("LineLabel") + + # create attribute name text strip of spaces + attr_name = text.replace(" ", "") + + # create attribute and assign default values + setattr( + self, + attr_name, + attr(parent=self)) + + # assign the created attribute to variable + item = getattr(self, attr_name) + + # set attributes to item which are not values + for func, val in kwargs.items(): + if func in value_keys: + continue + + if getattr(item, func): + log.debug("Setting {} to {}".format(func, val)) + func_attr = getattr(item, func) + if isinstance(val, tuple): + func_attr(*val) + else: + func_attr(val) + + # set values to item + for value_item in value_keys: + if value_item not in kwargs: + continue + if getattr(item, value_item): + getattr(item, value_item)(kwargs[value_item]) + + # add to layout + layout.addRow(label, item) + + return item + + def populate_widgets(self, data, content_layout=None): + """ + Populate widget from input dict. + + Each plugin has its own set of widget rows defined in dictionary + each row values should have following keys: `type`, `target`, + `label`, `order`, `value` and optionally also `toolTip`. 
+ + Args: + data (dict): widget rows or organized groups defined + by types `dict` or `section` + content_layout (QtWidgets.QFormLayout)[optional]: used when nesting + + Returns: + dict: redefined data dict updated with created widgets + + """ + + content_layout = content_layout or self.content_layout[-1] + # fix order of process by defined order value + ordered_keys = list(data.keys()) + for k, v in data.items(): + try: + # try removing a key from index which should + # be filled with new + ordered_keys.pop(v["order"]) + except IndexError: + pass + # add key into correct order + ordered_keys.insert(v["order"], k) + + # process ordered + for k in ordered_keys: + v = data[k] + tool_tip = v.get("toolTip", "") + if v["type"] == "dict": + # adding spacer between sections + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) + + # adding nested layout with label + self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + + nested_content_layout = QtWidgets.QFormLayout( + self.content_layout[-1]) + nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) + + # add nested key as label + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + if v["type"] == "section": + # adding spacer between sections + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) + + # adding nested layout with label + self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + + nested_content_layout = QtWidgets.QFormLayout( + self.content_layout[-1]) + nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) + + # add nested key as label + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + elif v["type"] == "QLineEdit": + data[k]["value"] = self.create_row( + content_layout, "QLineEdit", v["label"], + setText=v["value"], setToolTip=tool_tip) + elif v["type"] == "QComboBox": + data[k]["value"] = self.create_row( + content_layout, "QComboBox", v["label"], + addItems=v["value"], setToolTip=tool_tip) + elif v["type"] == "QCheckBox": + data[k]["value"] = self.create_row( + content_layout, "QCheckBox", v["label"], + setChecked=v["value"], setToolTip=tool_tip) + elif v["type"] == "QSpinBox": + data[k]["value"] = self.create_row( + content_layout, "QSpinBox", v["label"], + setValue=v["value"], + setDisplayIntegerBase=10000, + setRange=(0, 99999), setMinimum=0, + setMaximum=100000, setToolTip=tool_tip) + + return data + + +class Spacer(QtWidgets.QWidget): + def __init__(self, height, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + + self.setFixedHeight(height) + + real_spacer = QtWidgets.QWidget(self) + real_spacer.setObjectName("Spacer") + real_spacer.setFixedHeight(height) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(real_spacer) + + self.setLayout(layout) + + 
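+# Illustrative sketch only: the name ``_EXAMPLE_UI_INPUTS`` is hypothetical
+# and not consumed by the pipeline. It shows the minimal shape of the
+# ``ui_inputs`` dict that ``CreatorWidget.populate_widgets`` expects. Every
+# row needs ``type``, ``target``, ``label``, ``order`` and ``value``;
+# ``toolTip`` is optional.
+_EXAMPLE_UI_INPUTS = {
+    "clipRename": {
+        "type": "QCheckBox",    # Qt widget class created for this row
+        "target": "ui",         # recorded so callers can route the value
+        "label": "clipRename",  # shown as "Clip Rename" via camel_case_split
+        "order": 0,             # row position within the form layout
+        "value": False,         # default, applied through ``setChecked``
+        "toolTip": "Rename selected clips on the fly",
+    },
+}
+# Typical use (sketch): widget = CreatorWidget("Example", "info",
+# _EXAMPLE_UI_INPUTS); widget.exec_(); ``widget.result`` then maps each key
+# to its ``target`` and the value the user entered.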
+class SequenceLoader(LoaderPlugin):
+    """A basic SequenceLoader for Hiero
+
+    This implements the basic behavior for a loader to inherit from:
+    it containerizes the reference and implements the `remove` and
+    `update` logic.
+
+    """
+
+    options = [
+        qargparse.Boolean(
+            "handles",
+            label="Include handles",
+            default=0,
+            help="Load with handles or without?"
+        ),
+        qargparse.Choice(
+            "load_to",
+            label="Where to load clips",
+            items=[
+                "Current timeline",
+                "New timeline"
+            ],
+            default="Current timeline",
+            help="Where do you want clips to be loaded?"
+        ),
+        qargparse.Choice(
+            "load_how",
+            label="How to load clips",
+            items=[
+                "Original timing",
+                "Sequentially in order"
+            ],
+            default="Original timing",
+            help="Would you like to place it at original timing?"
+        )
+    ]
+
+    def load(
+        self,
+        context,
+        name=None,
+        namespace=None,
+        options=None
+    ):
+        pass
+
+    def update(self, container, representation):
+        """Update an existing `container`
+        """
+        pass
+
+    def remove(self, container):
+        """Remove an existing `container`
+        """
+        pass
+
+
+class ClipLoader:
+
+    active_bin = None
+    data = dict()
+
+    def __init__(self, cls, context, path, **options):
+        """ Initialize object
+
+        Arguments:
+            cls (avalon.api.Loader): plugin object
+            context (dict): loader plugin context
+            options (dict)[optional]: possible keys:
+                projectBinPath: "path/to/binItem"
+
+        """
+        self.__dict__.update(cls.__dict__)
+        self.context = context
+        self.active_project = lib.get_current_project()
+        self.fname = path
+
+        # try to get value from options or evaluate key value for `handles`
+        self.with_handles = options.get("handles") or bool(
+            options.get("handles") is True)
+        # try to get value from options or evaluate key value for `load_how`
+        self.sequencial_load = options.get("sequentially") or bool(
+            "Sequentially in order" in options.get("load_how", ""))
+        # try to get value from options or evaluate key value for `load_to`
+        self.new_sequence = options.get("newSequence") or bool(
+            "New timeline" in options.get("load_to", ""))
+        self.clip_name_template = options.get(
+            "clipNameTemplate") or "{asset}_{subset}_{representation}"
+        assert self._populate_data(), (
+            "Cannot load selected data, look into the database "
+            "or call your supervisor")
+
+        # inject asset data to representation dict
+        self._get_asset_data()
+        log.info("__init__ self.data: `{}`".format(pformat(self.data)))
+        log.info("__init__ options: `{}`".format(pformat(options)))
+
+        # add active components to class
+        if self.new_sequence:
+            if options.get("sequence"):
+                # if multiselection is set then use options sequence
+                self.active_sequence = options["sequence"]
+            else:
+                # create new sequence
+                self.active_sequence = lib.get_current_sequence(new=True)
+                self.active_sequence.setFramerate(
+                    hiero.core.TimeBase.fromString(
+                        str(self.data["assetData"]["fps"])))
+        else:
+            self.active_sequence = lib.get_current_sequence()
+
+        if options.get("track"):
+            # if multiselection is set then use options track
+            self.active_track = options["track"]
+        else:
+            self.active_track = lib.get_current_track(
+                self.active_sequence, self.data["track_name"])
+
+    def _populate_data(self):
+        """ Gets context and converts it to self.data
+        data structure:
+            {
+                "name": "assetName_subsetName_representationName",
+                "path": "path/to/file/created/by/get_repr..",
+                "binPath": "projectBinPath",
+            }
+        """
+        # create name
+        repr = self.context["representation"]
+        repr_cntx = repr["context"]
+        asset = str(repr_cntx["asset"])
+        subset = str(repr_cntx["subset"])
+        representation = str(repr_cntx["representation"])
+        self.data["clip_name"] = self.clip_name_template.format(**repr_cntx)
+        self.data["track_name"] = "_".join([subset, representation])
+        self.data["versionData"] = self.context["version"]["data"]
+        # gets file path
+        file = get_representation_path_from_context(self.context)
+        if not file:
+            repr_id = repr["_id"]
+            log.warning(
+                "Representation id `{}` is failing to load".format(repr_id))
+            return None
+        self.data["path"] = file.replace("\\", "/")
+
+        # convert to hashed path
+        if repr_cntx.get("frame"):
+            self._fix_path_hashes()
+
+        # solve project bin structure path
+        hierarchy = str("/".join((
+            "Loader",
+            repr_cntx["hierarchy"].replace("\\", "/"),
+            asset
+        )))
+
+        self.data["binPath"] = hierarchy
+
+        return True
+
+    def _fix_path_hashes(self):
+        """ Convert the file path to hash padding where it is needed
+        """
+        file = self.data["path"]
+        if "#" not in file:
+            frame = self.context["representation"]["context"].get("frame")
+            padding = len(frame)
+            file = file.replace(frame, "#" * padding)
+        self.data["path"] = file
+
+    def _get_asset_data(self):
+        """ Get all available asset data
+
+        join the asset document's `data` key into the representation data
+
+        """
+
+        asset_doc = self.context["asset"]
+        self.data["assetData"] = asset_doc["data"]
+
+    def _make_track_item(self, source_bin_item, audio=False):
+        """ Create a track item from the source bin item. """
+
+        clip = source_bin_item.activeItem()
+
+        # add to track as clip item
+        if not audio:
+            track_item = hiero.core.TrackItem(
+                self.data["clip_name"], hiero.core.TrackItem.kVideo)
+        else:
+            track_item = hiero.core.TrackItem(
+                self.data["clip_name"], hiero.core.TrackItem.kAudio)
+
+        track_item.setSource(clip)
+        track_item.setSourceIn(self.handle_start)
+        track_item.setTimelineIn(self.timeline_in)
+        track_item.setSourceOut(self.media_duration - self.handle_end)
+        track_item.setTimelineOut(self.timeline_out)
+        track_item.setPlaybackSpeed(1)
+        self.active_track.addTrackItem(track_item)
+
+        return track_item
+
+    def load(self):
+        # create project bin for the media to be imported into
+        self.active_bin = lib.create_bin(self.data["binPath"])
+
+        # create mediaItem in active project bin
+        # create clip media
+        self.media = hiero.core.MediaSource(self.data["path"])
+        self.media_duration = int(self.media.duration())
+
+        # get handles
+        self.handle_start = self.data["versionData"].get("handleStart")
+        self.handle_end = self.data["versionData"].get("handleEnd")
+        if self.handle_start is None:
+            self.handle_start = self.data["assetData"]["handleStart"]
+        if self.handle_end is None:
+            self.handle_end = self.data["assetData"]["handleEnd"]
+
+        self.handle_start = int(self.handle_start)
+        self.handle_end = int(self.handle_end)
+
+        if self.sequencial_load:
+            last_track_item = lib.get_track_items(
+                sequence_name=self.active_sequence.name(),
+                track_name=self.active_track.name()
+            )
+            if len(last_track_item) == 0:
+                last_timeline_out = 0
+            else:
+                last_track_item = last_track_item[-1]
+                last_timeline_out = int(last_track_item.timelineOut()) + 1
+            self.timeline_in = last_timeline_out
+            self.timeline_out = last_timeline_out + int(
+                self.data["assetData"]["clipOut"]
+                - self.data["assetData"]["clipIn"])
+        else:
+            self.timeline_in = int(self.data["assetData"]["clipIn"])
+            self.timeline_out = int(self.data["assetData"]["clipOut"])
+
+        log.debug("__ self.timeline_in: {}".format(self.timeline_in))
+        log.debug("__ self.timeline_out: {}".format(self.timeline_out))
+
+        # check if slate is included
+        slate_on = "slate" in \
+            self.context["version"]["data"]["families"]
+        log.debug("__ slate_on: {}".format(slate_on))
+
+        # if slate is on then remove the slate frame from beginning
+        if slate_on:
+            self.media_duration -= 1
+            self.handle_start += 1
+
+        # create Clip from Media
+        clip = hiero.core.Clip(self.media)
+        clip.setName(self.data["clip_name"])
+
+        # add Clip to bin if not there yet
+        if self.data["clip_name"] not in [
+                b.name() for b in self.active_bin.items()]:
+            bin_item = hiero.core.BinItem(clip)
+            self.active_bin.addItem(bin_item)
+
+        # just make sure the clip is created
+        # there were some cases where Hiero was not creating it
+        source_bin_item = None
+        for item in self.active_bin.items():
+            if self.data["clip_name"] == item.name():
+                source_bin_item = item
+        if not source_bin_item:
+            log.warning("Problem with created Source clip: `{}`".format(
+                self.data["clip_name"]))
+
+        # include handles
+        if self.with_handles:
+            self.timeline_in -= self.handle_start
+            self.timeline_out += self.handle_end
+            self.handle_start = 0
+            self.handle_end = 0
+
+        # make track item from source in bin as item
+        track_item = self._make_track_item(source_bin_item)
+
+        log.info("Loading clips: `{}`".format(self.data["clip_name"]))
+        return track_item
+
+
+class Creator(LegacyCreator):
+    """Creator class wrapper
+    """
+    clip_color = "Purple"
+    rename_index = None
+
+    def __init__(self, *args, **kwargs):
+        super(Creator, self).__init__(*args, **kwargs)
+        import ayon_core.hosts.hiero.api as phiero
+        self.presets = get_current_project_settings()[
+            "hiero"]["create"].get(self.__class__.__name__, {})
+
+        # adding basic current context hiero objects
+        self.project = phiero.get_current_project()
+        self.sequence = phiero.get_current_sequence()
+
+        if (self.options or {}).get("useSelection"):
+            timeline_selection = phiero.get_timeline_selection()
+            self.selected = phiero.get_track_items(
+                selection=timeline_selection
+            )
+        else:
+            self.selected = phiero.get_track_items()
+
+        self.widget = CreatorWidget
+
+
+class PublishClip:
+    """
+    Convert a track item to a publishable instance
+
+    Args:
+        track_item (hiero.core.TrackItem): hiero track item object
+        kwargs (optional): additional data needed for rename=True (presets)
+
+    Returns:
+        hiero.core.TrackItem: hiero track item object with pype tag
+    """
+    vertical_clip_match = {}
+    tag_data = {}
+    types = {
+        "shot": "shot",
+        "folder": "folder",
+        "episode": "episode",
+        "sequence": "sequence",
+        "track": "sequence",
+    }
+
+    # parents search pattern
+    parents_search_pattern = r"\{([a-z]*?)\}"
+
+    # default templates for non-ui use
+    rename_default = False
+    hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
+    clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
+    subset_name_default = ""
+    review_track_default = "< none >"
+    subset_family_default = "plate"
+    count_from_default = 10
+    count_steps_default = 10
+    vertical_sync_default = False
+    driving_layer_default = ""
+
+    def __init__(self, cls, track_item, **kwargs):
+        # populate input cls attribute onto self.[attr]
+        self.__dict__.update(cls.__dict__)
+
+        # get main parent objects
+        self.track_item = track_item
+        sequence_name = lib.get_current_sequence().name()
+        self.sequence_name = str(sequence_name).replace(" ", "_")
+
+        # track item (clip) main attributes
+        self.ti_name = track_item.name()
+        self.ti_index = int(track_item.eventNumber())
+
+        # get track name and index
+        track_name = track_item.parent().name()
+        self.track_name = str(track_name).replace(" ", "_")
+        self.track_index =
int(track_item.parent().trackIndex()) + + # adding tag.family into tag + if kwargs.get("avalon"): + self.tag_data.update(kwargs["avalon"]) + + # add publish attribute to tag data + self.tag_data.update({"publish": True}) + + # adding ui inputs if any + self.ui_inputs = kwargs.get("ui_inputs", {}) + + # populate default data before we get other attributes + self._populate_track_item_default_data() + + # use all populated default data to create all important attributes + self._populate_attributes() + + # create parents with correct types + self._create_parents() + + def convert(self): + # solve track item data and add them to tag data + tag_hierarchy_data = self._convert_to_tag_data() + + self.tag_data.update(tag_hierarchy_data) + + # if track name is in review track name and also if driving track name + # is not in review track name: skip tag creation + if (self.track_name in self.review_layer) and ( + self.driving_layer not in self.review_layer): + return + + # deal with clip name + new_name = self.tag_data.pop("newClipName") + + if self.rename: + # rename track item + self.track_item.setName(new_name) + self.tag_data["asset_name"] = new_name + else: + self.tag_data["asset_name"] = self.ti_name + self.tag_data["hierarchyData"]["shot"] = self.ti_name + + # AYON unique identifier + folder_path = "/{}/{}".format( + tag_hierarchy_data["hierarchy"], + self.tag_data["asset_name"] + ) + self.tag_data["folderPath"] = folder_path + if self.tag_data["heroTrack"] and self.review_layer: + self.tag_data.update({"reviewTrack": self.review_layer}) + else: + self.tag_data.update({"reviewTrack": None}) + + # TODO: remove debug print + log.debug("___ self.tag_data: {}".format( + pformat(self.tag_data) + )) + + # create pype tag on track_item and add data + lib.imprint(self.track_item, self.tag_data) + + return self.track_item + + def _populate_track_item_default_data(self): + """ Populate default formatting data from track item. """ + + self.track_item_default_data = { + "_folder_": "shots", + "_sequence_": self.sequence_name, + "_track_": self.track_name, + "_clip_": self.ti_name, + "_trackIndex_": self.track_index, + "_clipIndex_": self.ti_index + } + + def _populate_attributes(self): + """ Populate main object attributes. 
""" + # track item frame range and parent track name for vertical sync check + self.clip_in = int(self.track_item.timelineIn()) + self.clip_out = int(self.track_item.timelineOut()) + + # define ui inputs if non gui mode was used + self.shot_num = self.ti_index + log.debug( + "____ self.shot_num: {}".format(self.shot_num)) + + # ui_inputs data or default values if gui was not used + self.rename = self.ui_inputs.get( + "clipRename", {}).get("value") or self.rename_default + self.clip_name = self.ui_inputs.get( + "clipName", {}).get("value") or self.clip_name_default + self.hierarchy = self.ui_inputs.get( + "hierarchy", {}).get("value") or self.hierarchy_default + self.hierarchy_data = self.ui_inputs.get( + "hierarchyData", {}).get("value") or \ + self.track_item_default_data.copy() + self.count_from = self.ui_inputs.get( + "countFrom", {}).get("value") or self.count_from_default + self.count_steps = self.ui_inputs.get( + "countSteps", {}).get("value") or self.count_steps_default + self.subset_name = self.ui_inputs.get( + "subsetName", {}).get("value") or self.subset_name_default + self.subset_family = self.ui_inputs.get( + "subsetFamily", {}).get("value") or self.subset_family_default + self.vertical_sync = self.ui_inputs.get( + "vSyncOn", {}).get("value") or self.vertical_sync_default + self.driving_layer = self.ui_inputs.get( + "vSyncTrack", {}).get("value") or self.driving_layer_default + self.review_track = self.ui_inputs.get( + "reviewTrack", {}).get("value") or self.review_track_default + self.audio = self.ui_inputs.get( + "audio", {}).get("value") or False + + # build subset name from layer name + if self.subset_name == "": + self.subset_name = self.track_name + + # create subset for publishing + self.subset = self.subset_family + self.subset_name.capitalize() + + def _replace_hash_to_expression(self, name, text): + """ Replace hash with number in correct padding. """ + _spl = text.split("#") + _len = (len(_spl) - 1) + _repl = "{{{0}:0>{1}}}".format(name, _len) + return text.replace(("#" * _len), _repl) + + + def _convert_to_tag_data(self): + """ Convert internal data to tag data. 
+
+        Populating the tag data into internal variable self.tag_data
+        """
+        # define vertical sync attributes
+        hero_track = True
+        self.review_layer = ""
+        if self.vertical_sync:
+            # check if track name is not in driving layer
+            if self.track_name not in self.driving_layer:
+                # if it is not, this clip is not on the hero track
+                hero_track = False
+
+        # increasing steps by index of rename iteration
+        self.count_steps *= self.rename_index
+
+        hierarchy_formatting_data = {}
+        hierarchy_data = deepcopy(self.hierarchy_data)
+        _data = self.track_item_default_data.copy()
+        if self.ui_inputs:
+            # adding tag metadata from ui
+            for _k, _v in self.ui_inputs.items():
+                if _v["target"] == "tag":
+                    self.tag_data[_k] = _v["value"]
+
+            # driving layer is set as positive match
+            if hero_track or self.vertical_sync:
+                # mark review layer
+                if self.review_track and (
+                        self.review_track not in self.review_track_default):
+                    # if review layer is defined and not the same as default
+                    self.review_layer = self.review_track
+                # calculate the shot number
+                if self.rename_index == 0:
+                    self.shot_num = self.count_from
+                else:
+                    self.shot_num = self.count_from + self.count_steps
+
+            # clip name sequence number
+            _data.update({"shot": self.shot_num})
+
+            # solve # in text to pythonic expression
+            for _k, _v in hierarchy_data.items():
+                if "#" not in _v["value"]:
+                    continue
+                hierarchy_data[
+                    _k]["value"] = self._replace_hash_to_expression(
+                        _k, _v["value"])
+
+            # fill up pythonic expressions in hierarchy data
+            for k, _v in hierarchy_data.items():
+                hierarchy_formatting_data[k] = _v["value"].format(**_data)
+        else:
+            # if no gui mode then just pass default data
+            hierarchy_formatting_data = hierarchy_data
+
+        tag_hierarchy_data = self._solve_tag_hierarchy_data(
+            hierarchy_formatting_data
+        )
+
+        tag_hierarchy_data.update({"heroTrack": True})
+        if hero_track and self.vertical_sync:
+            self.vertical_clip_match.update({
+                (self.clip_in, self.clip_out): tag_hierarchy_data
+            })
+
+        if not hero_track and self.vertical_sync:
+            # driving layer is set as negative match
+            for (_in, _out), hero_data in self.vertical_clip_match.items():
+                hero_data.update({"heroTrack": False})
+                if _in == self.clip_in and _out == self.clip_out:
+                    data_subset = hero_data["subset"]
+                    # add track index in case of duplicate names in hero data
+                    if self.subset in data_subset:
+                        hero_data["subset"] = self.subset + str(
+                            self.track_index)
+                    # if track name and subset name are the same, keep subset
+                    if self.subset_name == self.track_name:
+                        hero_data["subset"] = self.subset
+                    # assign data to return hierarchy data to tag
+                    tag_hierarchy_data = hero_data
+
+        # add data to return data dict
+        return tag_hierarchy_data
+
+    def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
+        """ Solve tag data from hierarchy data and templates. """
+        # fill up clip name and hierarchy keys
+        hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data)
+        clip_name_filled = self.clip_name.format(**hierarchy_formatting_data)
+
+        # remove shot from hierarchy data: it is not needed anymore
+        hierarchy_formatting_data.pop("shot")
+
+        return {
+            "newClipName": clip_name_filled,
+            "hierarchy": hierarchy_filled,
+            "parents": self.parents,
+            "hierarchyData": hierarchy_formatting_data,
+            "subset": self.subset,
+            "family": self.subset_family,
+            "families": [self.data["family"]]
+        }
+
+    def _convert_to_entity(self, type, template):
+        """ Converting input key to key with type.
""" + # convert to entity type + entity_type = self.types.get(type, None) + + assert entity_type, "Missing entity type for `{}`".format( + type + ) + + # first collect formatting data to use for formatting template + formatting_data = {} + for _k, _v in self.hierarchy_data.items(): + value = _v["value"].format( + **self.track_item_default_data) + formatting_data[_k] = value + + return { + "entity_type": entity_type, + "entity_name": template.format( + **formatting_data + ) + } + + def _create_parents(self): + """ Create parents and return it in list. """ + self.parents = [] + + pattern = re.compile(self.parents_search_pattern) + + par_split = [(pattern.findall(t).pop(), t) + for t in self.hierarchy.split("/")] + + for type, template in par_split: + parent = self._convert_to_entity(type, template) + self.parents.append(parent) diff --git a/openpype/hosts/hiero/api/startup/HieroPlayer/PlayerPresets.hrox b/client/ayon_core/hosts/hiero/api/startup/HieroPlayer/PlayerPresets.hrox similarity index 100% rename from openpype/hosts/hiero/api/startup/HieroPlayer/PlayerPresets.hrox rename to client/ayon_core/hosts/hiero/api/startup/HieroPlayer/PlayerPresets.hrox diff --git a/openpype/hosts/hiero/api/startup/Icons/1_add_handles_end.png b/client/ayon_core/hosts/hiero/api/startup/Icons/1_add_handles_end.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/1_add_handles_end.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/1_add_handles_end.png diff --git a/openpype/hosts/hiero/api/startup/Icons/2_add_handles.png b/client/ayon_core/hosts/hiero/api/startup/Icons/2_add_handles.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/2_add_handles.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/2_add_handles.png diff --git a/openpype/hosts/hiero/api/startup/Icons/3D.png b/client/ayon_core/hosts/hiero/api/startup/Icons/3D.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/3D.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/3D.png diff --git a/openpype/hosts/hiero/api/startup/Icons/3_add_handles_start.png b/client/ayon_core/hosts/hiero/api/startup/Icons/3_add_handles_start.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/3_add_handles_start.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/3_add_handles_start.png diff --git a/openpype/hosts/hiero/api/startup/Icons/4_2D.png b/client/ayon_core/hosts/hiero/api/startup/Icons/4_2D.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/4_2D.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/4_2D.png diff --git a/openpype/hosts/hiero/api/startup/Icons/edit.png b/client/ayon_core/hosts/hiero/api/startup/Icons/edit.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/edit.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/edit.png diff --git a/openpype/hosts/hiero/api/startup/Icons/fusion.png b/client/ayon_core/hosts/hiero/api/startup/Icons/fusion.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/fusion.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/fusion.png diff --git a/openpype/hosts/hiero/api/startup/Icons/hierarchy.png b/client/ayon_core/hosts/hiero/api/startup/Icons/hierarchy.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/hierarchy.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/hierarchy.png diff --git a/openpype/hosts/hiero/api/startup/Icons/houdini.png 
b/client/ayon_core/hosts/hiero/api/startup/Icons/houdini.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/houdini.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/houdini.png diff --git a/openpype/hosts/hiero/api/startup/Icons/layers.psd b/client/ayon_core/hosts/hiero/api/startup/Icons/layers.psd similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/layers.psd rename to client/ayon_core/hosts/hiero/api/startup/Icons/layers.psd diff --git a/openpype/hosts/hiero/api/startup/Icons/lense.png b/client/ayon_core/hosts/hiero/api/startup/Icons/lense.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/lense.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/lense.png diff --git a/openpype/hosts/hiero/api/startup/Icons/lense1.png b/client/ayon_core/hosts/hiero/api/startup/Icons/lense1.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/lense1.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/lense1.png diff --git a/openpype/hosts/hiero/api/startup/Icons/maya.png b/client/ayon_core/hosts/hiero/api/startup/Icons/maya.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/maya.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/maya.png diff --git a/openpype/hosts/hiero/api/startup/Icons/nuke.png b/client/ayon_core/hosts/hiero/api/startup/Icons/nuke.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/nuke.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/nuke.png diff --git a/openpype/hosts/hiero/api/startup/Icons/pype_icon.png b/client/ayon_core/hosts/hiero/api/startup/Icons/pype_icon.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/pype_icon.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/pype_icon.png diff --git a/openpype/hosts/hiero/api/startup/Icons/resolution.png b/client/ayon_core/hosts/hiero/api/startup/Icons/resolution.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/resolution.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/resolution.png diff --git a/openpype/hosts/hiero/api/startup/Icons/resolution.psd b/client/ayon_core/hosts/hiero/api/startup/Icons/resolution.psd similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/resolution.psd rename to client/ayon_core/hosts/hiero/api/startup/Icons/resolution.psd diff --git a/openpype/hosts/hiero/api/startup/Icons/retiming.png b/client/ayon_core/hosts/hiero/api/startup/Icons/retiming.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/retiming.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/retiming.png diff --git a/openpype/hosts/hiero/api/startup/Icons/retiming.psd b/client/ayon_core/hosts/hiero/api/startup/Icons/retiming.psd similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/retiming.psd rename to client/ayon_core/hosts/hiero/api/startup/Icons/retiming.psd diff --git a/openpype/hosts/hiero/api/startup/Icons/review.png b/client/ayon_core/hosts/hiero/api/startup/Icons/review.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/review.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/review.png diff --git a/openpype/hosts/hiero/api/startup/Icons/review.psd b/client/ayon_core/hosts/hiero/api/startup/Icons/review.psd similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/review.psd rename to client/ayon_core/hosts/hiero/api/startup/Icons/review.psd 
diff --git a/openpype/hosts/hiero/api/startup/Icons/volume.png b/client/ayon_core/hosts/hiero/api/startup/Icons/volume.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/volume.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/volume.png diff --git a/openpype/hosts/hiero/api/startup/Icons/z_layer_bg.png b/client/ayon_core/hosts/hiero/api/startup/Icons/z_layer_bg.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/z_layer_bg.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/z_layer_bg.png diff --git a/openpype/hosts/hiero/api/startup/Icons/z_layer_fg.png b/client/ayon_core/hosts/hiero/api/startup/Icons/z_layer_fg.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/z_layer_fg.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/z_layer_fg.png diff --git a/openpype/hosts/hiero/api/startup/Icons/z_layer_main.png b/client/ayon_core/hosts/hiero/api/startup/Icons/z_layer_main.png similarity index 100% rename from openpype/hosts/hiero/api/startup/Icons/z_layer_main.png rename to client/ayon_core/hosts/hiero/api/startup/Icons/z_layer_main.png diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/SpreadsheetExport.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/SpreadsheetExport.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/Startup/SpreadsheetExport.py rename to client/ayon_core/hosts/hiero/api/startup/Python/Startup/SpreadsheetExport.py diff --git a/client/ayon_core/hosts/hiero/api/startup/Python/Startup/Startup.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/Startup.py new file mode 100644 index 0000000000..cffab8067c --- /dev/null +++ b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/Startup.py @@ -0,0 +1,19 @@ +import traceback + +# activate hiero from pype +from ayon_core.pipeline import install_host +import ayon_core.hosts.hiero.api as phiero +install_host(phiero) + +try: + __import__("ayon_core.hosts.hiero.api") + __import__("pyblish") + +except ImportError as e: + print(traceback.format_exc()) + print("pyblish: Could not load integration: %s " % e) + +else: + # Setup integration + import ayon_core.hosts.hiero.api as phiero + phiero.lib.setup() diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportTask.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportTask.py similarity index 97% rename from openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportTask.py rename to client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportTask.py index e4ce2fe827..bd5048a832 100644 --- a/openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportTask.py +++ b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportTask.py @@ -8,7 +8,7 @@ from hiero.core import util import opentimelineio as otio -from openpype.hosts.hiero.api.otio import hiero_export +from ayon_core.hosts.hiero.api.otio import hiero_export class OTIOExportTask(hiero.core.TaskBase): diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportUI.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportUI.py similarity index 97% rename from openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportUI.py rename to client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportUI.py index af5593e484..25aa8bb0cf 100644 --- 
a/openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportUI.py +++ b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/OTIOExportUI.py @@ -22,7 +22,7 @@ FormLayout = QFormLayout # lint:ok -from openpype.hosts.hiero.api.otio import hiero_export +from ayon_core.hosts.hiero.api.otio import hiero_export class OTIOExportUI(hiero.ui.TaskUIBase): def __init__(self, preset): diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/__init__.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/__init__.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/Startup/otioexporter/__init__.py rename to client/ayon_core/hosts/hiero/api/startup/Python/Startup/otioexporter/__init__.py diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/project_helpers.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/project_helpers.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/Startup/project_helpers.py rename to client/ayon_core/hosts/hiero/api/startup/Python/Startup/project_helpers.py diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/selection_tracker.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/selection_tracker.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/Startup/selection_tracker.py rename to client/ayon_core/hosts/hiero/api/startup/Python/Startup/selection_tracker.py diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/setFrameRate.py b/client/ayon_core/hosts/hiero/api/startup/Python/Startup/setFrameRate.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/Startup/setFrameRate.py rename to client/ayon_core/hosts/hiero/api/startup/Python/Startup/setFrameRate.py diff --git a/openpype/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py b/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py rename to client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py diff --git a/openpype/hosts/hiero/api/startup/Python/StartupUI/Purge.py b/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/Purge.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/StartupUI/Purge.py rename to client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/Purge.py diff --git a/openpype/hosts/hiero/api/startup/Python/StartupUI/nukeStyleKeyboardShortcuts.py b/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/nukeStyleKeyboardShortcuts.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/StartupUI/nukeStyleKeyboardShortcuts.py rename to client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/nukeStyleKeyboardShortcuts.py diff --git a/openpype/hosts/hiero/api/startup/Python/StartupUI/otioimporter/OTIOImport.py b/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/otioimporter/OTIOImport.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/StartupUI/otioimporter/OTIOImport.py rename to client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/otioimporter/OTIOImport.py diff --git a/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/otioimporter/__init__.py b/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/otioimporter/__init__.py new file mode 100644 index 0000000000..29507db975 --- /dev/null +++ 
b/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/otioimporter/__init__.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+__author__ = "Daniel Flehner Heen"
+__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]
+
+import hiero.ui
+import hiero.core
+
+import PySide2.QtWidgets as qw
+
+from ayon_core.hosts.hiero.api.otio.hiero_import import load_otio
+
+
+class OTIOProjectSelect(qw.QDialog):
+
+    def __init__(self, projects, *args, **kwargs):
+        super(OTIOProjectSelect, self).__init__(*args, **kwargs)
+        self.setWindowTitle("Please select active project")
+        self.layout = qw.QVBoxLayout()
+
+        self.label = qw.QLabel(
+            "Unable to determine which project to import sequence to.\n"
+            "Please select one."
+        )
+        self.layout.addWidget(self.label)
+
+        self.projects = qw.QComboBox()
+        self.projects.addItems(map(lambda p: p.name(), projects))
+        self.layout.addWidget(self.projects)
+
+        QBtn = qw.QDialogButtonBox.Ok | qw.QDialogButtonBox.Cancel
+        self.buttonBox = qw.QDialogButtonBox(QBtn)
+        self.buttonBox.accepted.connect(self.accept)
+        self.buttonBox.rejected.connect(self.reject)
+
+        self.layout.addWidget(self.buttonBox)
+        self.setLayout(self.layout)
+
+
+def get_sequence(view):
+    sequence = None
+    if isinstance(view, hiero.ui.TimelineEditor):
+        sequence = view.sequence()
+
+    elif isinstance(view, hiero.ui.BinView):
+        for item in view.selection():
+            # skip bin items without an active item (fixed typo:
+            # the attribute checked here must match the call below)
+            if not hasattr(item, "activeItem"):
+                continue
+
+            if isinstance(item.activeItem(), hiero.core.Sequence):
+                sequence = item.activeItem()
+
+    return sequence
+
+
+def OTIO_menu_action(event):
+    # Menu actions
+    otio_import_action = hiero.ui.createMenuAction(
+        "Import OTIO...",
+        open_otio_file,
+        icon=None
+    )
+
+    otio_add_track_action = hiero.ui.createMenuAction(
+        "New Track(s) from OTIO...",
+        open_otio_file,
+        icon=None
+    )
+    otio_add_track_action.setEnabled(False)
+
+    hiero.ui.registerAction(otio_import_action)
+    hiero.ui.registerAction(otio_add_track_action)
+
+    view = hiero.ui.currentContextMenuView()
+
+    if view:
+        sequence = get_sequence(view)
+        if sequence:
+            otio_add_track_action.setEnabled(True)
+
+    for action in event.menu.actions():
+        if action.text() == "Import":
+            action.menu().addAction(otio_import_action)
+            action.menu().addAction(otio_add_track_action)
+
+        elif action.text() == "New Track":
+            action.menu().addAction(otio_add_track_action)
+
+
+def open_otio_file():
+    files = hiero.ui.openFileBrowser(
+        caption="Please select an OTIO file of choice",
+        pattern="*.otio",
+        requiredExtension=".otio"
+    )
+
+    selection = None
+    sequence = None
+
+    view = hiero.ui.currentContextMenuView()
+    if view:
+        sequence = get_sequence(view)
+        selection = view.selection()
+
+    if sequence:
+        project = sequence.project()
+
+    elif selection:
+        project = selection[0].project()
+
+    elif len(hiero.core.projects()) > 1:
+        dialog = OTIOProjectSelect(hiero.core.projects())
+        if dialog.exec_():
+            project = hiero.core.projects()[dialog.projects.currentIndex()]
+
+        else:
+            bar = hiero.ui.mainWindow().statusBar()
+            bar.showMessage(
+                "OTIO Import aborted by user",
+                timeout=3000
+            )
+            return
+
+    else:
+        project = hiero.core.projects()[-1]
+
+    for otio_file in files:
+        load_otio(otio_file, project, sequence)
+
+
+# HieroPlayer is quite limited and can't create transitions etc.
+if not hiero.core.isHieroPlayer(): + hiero.core.events.registerInterest( + "kShowContextMenu/kBin", + OTIO_menu_action + ) + hiero.core.events.registerInterest( + "kShowContextMenu/kTimeline", + OTIO_menu_action + ) diff --git a/openpype/hosts/hiero/api/startup/Python/StartupUI/setPosterFrame.py b/client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/setPosterFrame.py similarity index 100% rename from openpype/hosts/hiero/api/startup/Python/StartupUI/setPosterFrame.py rename to client/ayon_core/hosts/hiero/api/startup/Python/StartupUI/setPosterFrame.py diff --git a/openpype/hosts/hiero/api/startup/TaskPresets/10.5/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml b/client/ayon_core/hosts/hiero/api/startup/TaskPresets/10.5/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml similarity index 100% rename from openpype/hosts/hiero/api/startup/TaskPresets/10.5/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml rename to client/ayon_core/hosts/hiero/api/startup/TaskPresets/10.5/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml diff --git a/openpype/hosts/hiero/api/startup/TaskPresets/11.1/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml b/client/ayon_core/hosts/hiero/api/startup/TaskPresets/11.1/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml similarity index 100% rename from openpype/hosts/hiero/api/startup/TaskPresets/11.1/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml rename to client/ayon_core/hosts/hiero/api/startup/TaskPresets/11.1/Processors/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml diff --git a/openpype/hosts/hiero/api/startup/TaskPresets/11.2/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml b/client/ayon_core/hosts/hiero/api/startup/TaskPresets/11.2/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml similarity index 100% rename from openpype/hosts/hiero/api/startup/TaskPresets/11.2/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml rename to client/ayon_core/hosts/hiero/api/startup/TaskPresets/11.2/hiero.exporters.FnShotProcessor.ShotProcessor/pipeline.xml diff --git a/openpype/hosts/hiero/api/style.css b/client/ayon_core/hosts/hiero/api/style.css similarity index 100% rename from openpype/hosts/hiero/api/style.css rename to client/ayon_core/hosts/hiero/api/style.css diff --git a/openpype/hosts/hiero/api/tags.py b/client/ayon_core/hosts/hiero/api/tags.py similarity index 97% rename from openpype/hosts/hiero/api/tags.py rename to client/ayon_core/hosts/hiero/api/tags.py index 02d8205414..ad7c7e44a1 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/client/ayon_core/hosts/hiero/api/tags.py @@ -3,9 +3,9 @@ import os import hiero -from openpype.client import get_project, get_assets -from openpype.lib import Logger -from openpype.pipeline import get_current_project_name +from ayon_core.client import get_project, get_assets +from ayon_core.lib import Logger +from ayon_core.pipeline import get_current_project_name log = Logger.get_logger(__name__) diff --git a/client/ayon_core/hosts/hiero/api/workio.py b/client/ayon_core/hosts/hiero/api/workio.py new file mode 100644 index 0000000000..14d9439344 --- /dev/null +++ b/client/ayon_core/hosts/hiero/api/workio.py @@ -0,0 +1,73 @@ +import os +import hiero + +from ayon_core.lib import Logger + +log = Logger.get_logger(__name__) + + +def file_extensions(): + return [".hrox"] + + +def has_unsaved_changes(): + # There are no methods for querying unsaved changes to a 
project, so
+    # saving is always enforced. But we can at least check whether the
+    # currently open project has a file path at all.
+    project = hiero.core.projects()[-1]
+    return bool(project.path())
+
+
+def save_file(filepath):
+    file = os.path.basename(filepath)
+    project = hiero.core.projects()[-1]
+
+    if project:
+        log.info("Saving project: `{}` as '{}'".format(project.name(), file))
+        project.saveAs(filepath)
+    else:
+        log.info("Creating new project...")
+        project = hiero.core.newProject()
+        project.saveAs(filepath)
+
+
+def open_file(filepath):
+    """Manually fire the kBeforeProjectLoad event to work around a Hiero bug.
+
+    The Foundry has logged this bug as:
+        Bug 40413 - Python API - kBeforeProjectLoad event type is not
+        triggered when calling hiero.core.openProject()
+        (only triggered through UI).
+    It exists in all versions of Hiero through (at least) v1.9v1b12.
+
+    Once this bug is fixed, a version check will need to be added here to
+    prevent accidentally firing this event twice. The following
+    commented-out code is just an example and will need to be updated when
+    the bug is fixed to catch the correct versions.
+    """
+    # if (hiero.core.env['VersionMajor'] < 1 or
+    #         hiero.core.env['VersionMajor'] == 1 and
+    #         hiero.core.env['VersionMinor'] < 10):
+    hiero.core.events.sendEvent("kBeforeProjectLoad", None)
+
+    project = hiero.core.projects()[-1]
+
+    # open the new project file
+    hiero.core.openProject(filepath.replace(os.path.sep, "/"))
+
+    # close the previously open project
+    project.close()
+
+    return True
+
+
+def current_file():
+    current_file = hiero.core.projects()[-1].path()
+    if not current_file:
+        return None
+    return os.path.normpath(current_file)
+
+
+def work_root(session):
+    return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
diff --git a/client/ayon_core/hosts/hiero/plugins/create/create_shot_clip.py b/client/ayon_core/hosts/hiero/plugins/create/create_shot_clip.py
new file mode 100644
index 0000000000..ce84a9120e
--- /dev/null
+++ b/client/ayon_core/hosts/hiero/plugins/create/create_shot_clip.py
@@ -0,0 +1,262 @@
+from copy import deepcopy
+import ayon_core.hosts.hiero.api as phiero
+# from ayon_core.hosts.hiero.api import plugin, lib
+# reload(lib)
+# reload(plugin)
+# reload(phiero)
+
+
+class CreateShotClip(phiero.Creator):
+    """Publishable clip"""
+
+    label = "Create Publishable Clip"
+    family = "clip"
+    icon = "film"
+    defaults = ["Main"]
+
+    gui_tracks = [track.name()
+                  for track in phiero.get_current_sequence().videoTracks()]
+    gui_name = "Pype publish attributes creator"
+    gui_info = "Define sequential rename and fill hierarchy data."
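+    # Descriptive note (sketch): each leaf row below follows the widget-row
+    # schema consumed by ``CreatorWidget.populate_widgets``: ``type`` picks
+    # the Qt widget class (or ``dict``/``section`` for nested groups),
+    # ``target`` says whether the resulting value stays in the UI or is
+    # written to the created tag, and ``order`` fixes the row position.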
+    gui_inputs = {
+        "renameHierarchy": {
+            "type": "section",
+            "label": "Shot Hierarchy And Rename Settings",
+            "target": "ui",
+            "order": 0,
+            "value": {
+                "hierarchy": {
+                    "value": "{folder}/{sequence}",
+                    "type": "QLineEdit",
+                    "label": "Shot Parent Hierarchy",
+                    "target": "tag",
+                    "toolTip": "Parent folder for the shot root folder. Template is filled from the `Hierarchy Data` section",  # noqa
+                    "order": 0},
+                "clipRename": {
+                    "value": False,
+                    "type": "QCheckBox",
+                    "label": "Rename clips",
+                    "target": "ui",
+                    "toolTip": "Rename selected clips on the fly",  # noqa
+                    "order": 1},
+                "clipName": {
+                    "value": "{sequence}{shot}",
+                    "type": "QLineEdit",
+                    "label": "Clip Name Template",
+                    "target": "ui",
+                    "toolTip": "Template for creating shot names, used for renaming (turn `Rename clips` on)",  # noqa
+                    "order": 2},
+                "countFrom": {
+                    "value": 10,
+                    "type": "QSpinBox",
+                    "label": "Count sequence from",
+                    "target": "ui",
+                    "toolTip": "Set the number the sequence counting starts from",  # noqa
+                    "order": 3},
+                "countSteps": {
+                    "value": 10,
+                    "type": "QSpinBox",
+                    "label": "Stepping number",
+                    "target": "ui",
+                    "toolTip": "Number added with every new step",  # noqa
+                    "order": 4},
+            }
+        },
+        "hierarchyData": {
+            "type": "dict",
+            "label": "Shot Template Keywords",
+            "target": "tag",
+            "order": 1,
+            "value": {
+                "folder": {
+                    "value": "shots",
+                    "type": "QLineEdit",
+                    "label": "{folder}",
+                    "target": "tag",
+                    "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
+                    "order": 0},
+                "episode": {
+                    "value": "ep01",
+                    "type": "QLineEdit",
+                    "label": "{episode}",
+                    "target": "tag",
+                    "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
+                    "order": 1},
+                "sequence": {
+                    "value": "sq01",
+                    "type": "QLineEdit",
+                    "label": "{sequence}",
+                    "target": "tag",
+                    "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
+                    "order": 2},
+                "track": {
+                    "value": "{_track_}",
+                    "type": "QLineEdit",
+                    "label": "{track}",
+                    "target": "tag",
+                    "toolTip": "Name of track used in shot names.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
+                    "order": 3},
+                "shot": {
+                    "value": "sh###",
+                    "type": "QLineEdit",
+                    "label": "{shot}",
+                    "target": "tag",
+                    "toolTip": "Name of shot. `#` is converted to a padded number.\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
+                    "order": 4}
+            }
+        },
+        "verticalSync": {
+            "type": "section",
+            "label": "Vertical Synchronization Of Attributes",
+            "target": "ui",
+            "order": 2,
+            "value": {
+                "vSyncOn": {
+                    "value": True,
+                    "type": "QCheckBox",
+                    "label": "Enable Vertical Sync",
+                    "target": "ui",
+                    "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
+                    "order": 0},
+                "vSyncTrack": {
+                    "value": gui_tracks,  # noqa
+                    "type": "QComboBox",
+                    "label": "Hero track",
+                    "target": "ui",
+                    "toolTip": "Select the driving track which will be the hero for all others",  # noqa
+                    "order": 1}
+            }
+        },
+        "publishSettings": {
+            "type": "section",
+            "label": "Publish Settings",
+            "target": "ui",
+            "order": 3,
+            "value": {
+                "subsetName": {
+                    "value": ["", "main", "bg", "fg", "animatic"],
+                    "type": "QComboBox",
+                    "label": "Subset Name",
+                    "target": "ui",
+                    "toolTip": "Choose the subset name pattern; if the empty value is selected, the name of the track layer will be used",  # noqa
+                    "order": 0},
+                "subsetFamily": {
+                    "value": ["plate", "take"],
+                    "type": "QComboBox",
+                    "label": "Subset Family",
+                    "target": "ui",
+                    "toolTip": "What this subset will be used for",  # noqa
+                    "order": 1},
+                "reviewTrack": {
+                    "value": ["< none >"] + gui_tracks,
+                    "type": "QComboBox",
+                    "label": "Use Review Track",
+                    "target": "ui",
+                    "toolTip": "Generate preview videos on the fly; if `< none >` is selected, nothing will be generated.",  # noqa
+                    "order": 2},
+                "audio": {
+                    "value": False,
+                    "type": "QCheckBox",
+                    "label": "Include audio",
+                    "target": "tag",
+                    "toolTip": "Process subsets with corresponding audio",  # noqa
+                    "order": 3},
+                "sourceResolution": {
+                    "value": False,
+                    "type": "QCheckBox",
+                    "label": "Source resolution",
+                    "target": "tag",
+                    "toolTip": "Is the resolution taken from the timeline or the source?",  # noqa
+                    "order": 4},
+            }
+        },
+        "frameRangeAttr": {
+            "type": "section",
+            "label": "Shot Attributes",
+            "target": "ui",
+            "order": 4,
+            "value": {
+                "workfileFrameStart": {
+                    "value": 1001,
+                    "type": "QSpinBox",
+                    "label": "Workfiles Start Frame",
+                    "target": "tag",
+                    "toolTip": "Set the workfile starting frame number",  # noqa
+                    "order": 0
+                },
+                "handleStart": {
+                    "value": 0,
+                    "type": "QSpinBox",
+                    "label": "Handle Start",
+                    "target": "tag",
+                    "toolTip": "Handle at the start of the clip",  # noqa
+                    "order": 1
+                },
+                "handleEnd": {
+                    "value": 0,
+                    "type": "QSpinBox",
+                    "label": "Handle End",
+                    "target": "tag",
+                    "toolTip": "Handle at the end of the clip",  # noqa
+                    "order": 2
+                }
+            }
+        }
+    }
+
+    presets = None
+
+    def process(self):
+        # create copies of the class attributes that are modified
+        # during `process`
+        presets = deepcopy(self.presets)
+        gui_inputs = deepcopy(self.gui_inputs)
+
+        # get key pairs from presets and match them to the ui inputs
+        for k, v in gui_inputs.items():
+            if v["type"] in ("dict", "section"):
+                # nested dictionary (only one level of nesting is allowed
+                # for sections and dicts)
+                for _k, _v in v["value"].items():
+                    if presets.get(_k):
+                        gui_inputs[k][
+                            "value"][_k]["value"] = presets[_k]
+            if presets.get(k):
+                gui_inputs[k]["value"] = presets[k]
+
+        # open the widget for plugin inputs
+        widget = self.widget(self.gui_name, self.gui_info, gui_inputs)
+        widget.exec_()
+
+        if len(self.selected) < 1:
+            return
+
+        if not widget.result:
+            print("Operation aborted")
+            return
+
+        self.rename_add = 0
+
+        # get ui output for track name for vertical sync
+        v_sync_track = widget.result["vSyncTrack"]["value"]
+
+        # sort selected track items by the vertical sync track
+        sorted_selected_track_items = list()
+        unsorted_selected_track_items = list()
+        for _ti in self.selected:
+            if _ti.parent().name() in v_sync_track:
+                sorted_selected_track_items.append(_ti)
+            else:
+                unsorted_selected_track_items.append(_ti)
+
+        sorted_selected_track_items.extend(unsorted_selected_track_items)
+
+        kwargs = {
+            "ui_inputs": widget.result,
+            "avalon": self.data
+        }
+
+        for i, track_item in enumerate(sorted_selected_track_items):
+            self.rename_index = i
+
+            # convert the track item to a publishable instance
+            phiero.PublishClip(self, track_item, **kwargs).convert()
diff --git a/client/ayon_core/hosts/hiero/plugins/load/load_clip.py b/client/ayon_core/hosts/hiero/plugins/load/load_clip.py
new file mode 100644
index 0000000000..d77a28872f
--- /dev/null
+++ b/client/ayon_core/hosts/hiero/plugins/load/load_clip.py
@@ -0,0 +1,223 @@
+from ayon_core.client import (
+    get_version_by_id,
+    get_last_version_by_subset_id
+)
+from ayon_core.pipeline import (
+    get_representation_path,
+    get_current_project_name,
+)
+from ayon_core.lib.transcoding import (
+    VIDEO_EXTENSIONS,
+    IMAGE_EXTENSIONS
+)
+import ayon_core.hosts.hiero.api as phiero
+
+
+class LoadClip(phiero.SequenceLoader):
+    """Load a subset to the timeline as a clip
+
+    Places the clip on the timeline at its asset origin timings collected
+    during conforming to the project.
+    """
+
+    families = ["render2d", "source", "plate", "render", "review"]
+    representations = ["*"]
+    extensions = set(
+        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
+    )
+
+    label = "Load as clip"
+    order = -10
+    icon = "code-fork"
+    color = "orange"
+
+    # for loader multiselection
+    sequence = None
+    track = None
+
+    # presets
+    clip_color_last = "green"
+    clip_color = "red"
+
+    clip_name_template = "{asset}_{subset}_{representation}"
+
+    @classmethod
+    def apply_settings(cls, project_settings, system_settings):
+        plugin_type_settings = (
+            project_settings
+            .get("hiero", {})
+            .get("load", {})
+        )
+
+        if not plugin_type_settings:
+            return
+
+        plugin_name = cls.__name__
+
+        plugin_settings = None
+        # Look for plugin settings in host specific settings
+        if plugin_name in plugin_type_settings:
+            plugin_settings = plugin_type_settings[plugin_name]
+
+        if not plugin_settings:
+            return
+
+        print(">>> We have preset for {}".format(plugin_name))
+        for option, value in plugin_settings.items():
+            if option == "enabled" and value is False:
+                print("  - is disabled by preset")
+            elif option == "representations":
+                continue
+            else:
+                print("  - setting `{}`: `{}`".format(option, value))
+            setattr(cls, option, value)
+
+    def load(self, context, name, namespace, options):
+        # add clip name template to options
+        options.update({
+            "clipNameTemplate": self.clip_name_template
+        })
+        # in case loader uses multiselection
+        if self.track and self.sequence:
+            options.update({
+                "sequence": self.sequence,
+                "track": self.track,
+                "clipNameTemplate": self.clip_name_template
+            })
+
+        # load clip to timeline and get main variables
+        path = self.filepath_from_context(context)
+        track_item = phiero.ClipLoader(self, context, path, **options).load()
+        namespace = namespace or track_item.name()
+        version = context["version"]
+        version_data = version.get("data", {})
+        version_name = version.get("name", None)
+        colorspace = version_data.get("colorspace", None)
+        object_name = self.clip_name_template.format(
+            **context["representation"]["context"])
+
+        # set colorspace
+        if colorspace:
+            track_item.source().setSourceMediaColourTransform(colorspace)
+
+        # 
add additional metadata from the version to imprint Avalon knob + add_keys = [ + "frameStart", "frameEnd", "source", "author", + "fps", "handleStart", "handleEnd" + ] + + # move all version data keys to tag data + data_imprint = {} + for key in add_keys: + data_imprint.update({ + key: version_data.get(key, str(None)) + }) + + # add variables related to version context + data_imprint.update({ + "version": version_name, + "colorspace": colorspace, + "objectName": object_name + }) + + # update color of clip regarding the version order + self.set_item_color(track_item, version) + + # deal with multiselection + self.multiselection(track_item) + + self.log.info("Loader done: `{}`".format(name)) + + return phiero.containerise( + track_item, + name, namespace, context, + self.__class__.__name__, + data_imprint) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """ Updating previously loaded clips + """ + + # load clip to timeline and get main variables + name = container['name'] + namespace = container['namespace'] + track_item = phiero.get_track_items( + track_item_name=namespace).pop() + + project_name = get_current_project_name() + version_doc = get_version_by_id(project_name, representation["parent"]) + + version_data = version_doc.get("data", {}) + version_name = version_doc.get("name", None) + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + file = get_representation_path(representation).replace("\\", "/") + clip = track_item.source() + + # reconnect media to new path + clip.reconnectMedia(file) + + # set colorspace + if colorspace: + clip.setSourceMediaColourTransform(colorspace) + + # add additional metadata from the version to imprint Avalon knob + add_keys = [ + "frameStart", "frameEnd", "source", "author", + "fps", "handleStart", "handleEnd" + ] + + # move all version data keys to tag data + data_imprint = {} + for key in add_keys: + data_imprint.update({ + key: version_data.get(key, str(None)) + }) + + # add variables related to version context + data_imprint.update({ + "representation": str(representation["_id"]), + "version": version_name, + "colorspace": colorspace, + "objectName": object_name + }) + + # update color of clip regarding the version order + self.set_item_color(track_item, version_doc) + + return phiero.update_container(track_item, data_imprint) + + def remove(self, container): + """ Removing previously loaded clips + """ + # load clip to timeline and get main variables + namespace = container['namespace'] + track_item = phiero.get_track_items( + track_item_name=namespace).pop() + track = track_item.parent() + + # remove track item from track + track.removeItem(track_item) + + @classmethod + def multiselection(cls, track_item): + if not cls.track: + cls.track = track_item.parent() + cls.sequence = cls.track.parent() + + @classmethod + def set_item_color(cls, track_item, version_doc): + project_name = get_current_project_name() + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + clip = track_item.source() + # set clip colour + if version_doc["_id"] == last_version_doc["_id"]: + clip.binItem().setColor(cls.clip_color_last) + else: + clip.binItem().setColor(cls.clip_color) diff --git a/client/ayon_core/hosts/hiero/plugins/load/load_effects.py b/client/ayon_core/hosts/hiero/plugins/load/load_effects.py new file mode 100644 index 0000000000..809080e87e --- /dev/null +++ 
b/client/ayon_core/hosts/hiero/plugins/load/load_effects.py @@ -0,0 +1,310 @@ +import json +from collections import OrderedDict +import six + +from ayon_core.client import ( + get_version_by_id +) + +from ayon_core.pipeline import ( + AVALON_CONTAINER_ID, + load, + get_representation_path, + get_current_project_name +) +from ayon_core.hosts.hiero import api as phiero +from ayon_core.lib import Logger + + +class LoadEffects(load.LoaderPlugin): + """Loading colorspace soft effect exported from nukestudio""" + + families = ["effect"] + representations = ["*"] + extension = {"json"} + + label = "Load Effects" + order = 0 + icon = "cc" + color = "white" + + log = Logger.get_logger(__name__) + + def load(self, context, name, namespace, data): + """ + Loading function to get the soft effects to particular read node + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: containerised nuke node object + """ + active_sequence = phiero.get_current_sequence() + active_track = phiero.get_current_track( + active_sequence, "Loaded_{}".format(name)) + + # get main variables + namespace = namespace or context["asset"]["name"] + object_name = "{}_{}".format(name, namespace) + clip_in = context["asset"]["data"]["clipIn"] + clip_out = context["asset"]["data"]["clipOut"] + + data_imprint = { + "objectName": object_name, + "children_names": [] + } + + # getting file path + file = self.filepath_from_context(context) + file = file.replace("\\", "/") + + if self._shared_loading( + file, + active_track, + clip_in, + clip_out, + data_imprint + ): + self.containerise( + active_track, + name=name, + namespace=namespace, + object_name=object_name, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def _shared_loading( + self, + file, + active_track, + clip_in, + clip_out, + data_imprint, + update=False + ): + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).items()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f) + + used_subtracks = { + stitem.name(): stitem + for stitem in phiero.flatten(active_track.subTrackItems()) + } + + loaded = False + for index_order, (ef_name, ef_val) in enumerate(nodes_order.items()): + new_name = "{}_loaded".format(ef_name) + if new_name not in used_subtracks: + effect_track_item = active_track.createEffect( + effectType=ef_val["class"], + timelineIn=clip_in, + timelineOut=clip_out, + subTrackIndex=index_order + + ) + effect_track_item.setName(new_name) + else: + effect_track_item = used_subtracks[new_name] + + node = effect_track_item.node() + for knob_name, knob_value in ef_val["node"].items(): + if ( + not knob_value + or knob_name == "name" + ): + continue + + try: + # assume list means animation + # except 4 values could be RGBA or vector + if isinstance(knob_value, list) and len(knob_value) > 4: + node[knob_name].setAnimated() + for i, value in enumerate(knob_value): + if isinstance(value, list): + # list can have vector animation + for ci, cv in enumerate(value): + node[knob_name].setValueAt( + cv, + (clip_in + i), + ci + ) + else: + # list is single values + node[knob_name].setValueAt( + value, + (clip_in + i) + ) + else: + node[knob_name].setValue(knob_value) + except NameError: + self.log.warning("Knob: {} cannot be set".format( + knob_name)) + 
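+            # Editor's note: an illustrative sketch of the knob value
+            # layouts the loop above distinguishes (names and values are
+            # hypothetical). A flat list longer than four entries is
+            # treated as per-frame animation, nested lists inside it
+            # animate each channel separately, and four or fewer values
+            # are set directly:
+            #     "which": [0, 0, 1, 1, 0, 0]             # animated scalar
+            #     "translate": [[0, 0], [1, 0], [2, 1], [3, 1], [4, 2]]
+            #                                             # per-channel keys
+            #     "gain": [1.0, 1.0, 1.0, 1.0]            # static RGBA
+            #     "colorspace": "ACES - ACEScg"           # static value
+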
+            # register all loaded children
+            data_imprint["children_names"].append(new_name)
+
+            # make sure containerisation will happen
+            loaded = True
+
+        return loaded
+
+    def update(self, container, representation):
+        """Update previously loaded effects."""
+
+        active_track = container["_item"]
+        file = get_representation_path(representation).replace("\\", "/")
+
+        # get main variables
+        name = container['name']
+        namespace = container['namespace']
+
+        # get timeline in out data
+        project_name = get_current_project_name()
+        version_doc = get_version_by_id(project_name, representation["parent"])
+        version_data = version_doc["data"]
+        clip_in = version_data["clipIn"]
+        clip_out = version_data["clipOut"]
+
+        object_name = "{}_{}".format(name, namespace)
+
+        # Disable previously created nodes
+        used_subtracks = {
+            stitem.name(): stitem
+            for stitem in phiero.flatten(active_track.subTrackItems())
+        }
+        container = phiero.get_track_openpype_data(
+            active_track, object_name
+        )
+
+        loaded_subtrack_items = container["children_names"]
+        for loaded_stitem in loaded_subtrack_items:
+            if loaded_stitem not in used_subtracks:
+                continue
+            item_to_remove = used_subtracks.pop(loaded_stitem)
+            # TODO: find a way to erase nodes
+            self.log.debug(
+                "This node needs to be removed: {}".format(item_to_remove))
+
+        data_imprint = {
+            "objectName": object_name,
+            "name": name,
+            "representation": str(representation["_id"]),
+            "children_names": []
+        }
+
+        if self._shared_loading(
+            file,
+            active_track,
+            clip_in,
+            clip_out,
+            data_imprint,
+            update=True
+        ):
+            return phiero.update_container(active_track, data_imprint)
+
+    def reorder_nodes(self, data):
+        new_order = OrderedDict()
+        trackNums = [v["trackIndex"] for k, v in data.items()
+                     if isinstance(v, dict)]
+        subTrackNums = [v["subTrackIndex"] for k, v in data.items()
+                        if isinstance(v, dict)]
+
+        for trackIndex in range(
+                min(trackNums), max(trackNums) + 1):
+            for subTrackIndex in range(
+                    min(subTrackNums), max(subTrackNums) + 1):
+                item = self.get_item(data, trackIndex, subTrackIndex)
+                # only add non-empty items
+                if item:
+                    new_order.update(item)
+        return new_order
+
+    def get_item(self, data, trackIndex, subTrackIndex):
+        return {key: val for key, val in data.items()
+                if isinstance(val, dict)
+                if subTrackIndex == val["subTrackIndex"]
+                if trackIndex == val["trackIndex"]}
+
+    def byteify(self, input):
+        """Convert unicode strings to plain strings.
+
+        Recurses through dictionaries and lists.
+
+        Arguments:
+            input (dict/str): input
+
+        Returns:
+            dict: with fixed values and keys
+
+        """
+
+        if isinstance(input, dict):
+            return {self.byteify(key): self.byteify(value)
+                    for key, value in input.items()}
+        elif isinstance(input, list):
+            return [self.byteify(element) for element in input]
+        elif isinstance(input, six.text_type):
+            return str(input)
+        else:
+            return input
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+        pass
+
+    def containerise(
+        self,
+        track,
+        name,
+        namespace,
+        object_name,
+        context,
+        loader=None,
+        data=None
+    ):
+        """Bundle Hiero's object into an assembly and imprint it with metadata
+
+        Containerisation enables tracking of the version, author and origin
+        for loaded assets.
+ + Arguments: + track (hiero.core.VideoTrack): object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + object_name (str): name of container + context (dict): Asset information + loader (str, optional): Name of node used to produce this + container. + + Returns: + track_item (hiero.core.TrackItem): containerised object + + """ + + data_imprint = { + object_name: { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + } + } + + if data: + for k, v in data.items(): + data_imprint[object_name].update({k: v}) + + self.log.debug("_ data_imprint: {}".format(data_imprint)) + phiero.set_track_openpype_tag(track, data_imprint) diff --git a/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py b/client/ayon_core/hosts/hiero/plugins/publish/collect_clip_effects.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_clip_effects.py rename to client/ayon_core/hosts/hiero/plugins/publish/collect_clip_effects.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py b/client/ayon_core/hosts/hiero/plugins/publish/collect_frame_tag_instances.py similarity index 98% rename from openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py rename to client/ayon_core/hosts/hiero/plugins/publish/collect_frame_tag_instances.py index 79bf67b336..b981d89eef 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_frame_tag_instances.py +++ b/client/ayon_core/hosts/hiero/plugins/publish/collect_frame_tag_instances.py @@ -5,7 +5,7 @@ import pyblish.api -from openpype.client import get_asset_name_identifier +from ayon_core.client import get_asset_name_identifier class CollectFrameTagInstances(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py b/client/ayon_core/hosts/hiero/plugins/publish/collect_tag_tasks.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py rename to client/ayon_core/hosts/hiero/plugins/publish/collect_tag_tasks.py diff --git a/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py b/client/ayon_core/hosts/hiero/plugins/publish/extract_clip_effects.py similarity index 97% rename from openpype/hosts/hiero/plugins/publish/extract_clip_effects.py rename to client/ayon_core/hosts/hiero/plugins/publish/extract_clip_effects.py index 7fb381ff7e..afff41fc74 100644 --- a/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py +++ b/client/ayon_core/hosts/hiero/plugins/publish/extract_clip_effects.py @@ -1,9 +1,9 @@ -# from openpype import plugins +# from ayon_core import plugins import os import json import pyblish.api -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractClipEffects(publish.Extractor): diff --git a/openpype/hosts/hiero/plugins/publish/extract_frames.py b/client/ayon_core/hosts/hiero/plugins/publish/extract_frames.py similarity index 97% rename from openpype/hosts/hiero/plugins/publish/extract_frames.py rename to client/ayon_core/hosts/hiero/plugins/publish/extract_frames.py index 803c338766..9ea3134d4c 100644 --- a/openpype/hosts/hiero/plugins/publish/extract_frames.py +++ b/client/ayon_core/hosts/hiero/plugins/publish/extract_frames.py @@ -1,11 +1,11 @@ import os import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( get_oiio_tool_args, 
run_subprocess,
 )
-from openpype.pipeline import publish
+from ayon_core.pipeline import publish
 
 
 class ExtractFrames(publish.Extractor):
diff --git a/client/ayon_core/hosts/hiero/plugins/publish/extract_thumbnail.py b/client/ayon_core/hosts/hiero/plugins/publish/extract_thumbnail.py
new file mode 100644
index 0000000000..bcaf5308d9
--- /dev/null
+++ b/client/ayon_core/hosts/hiero/plugins/publish/extract_thumbnail.py
@@ -0,0 +1,60 @@
+import os
+import pyblish.api
+
+from ayon_core.pipeline import publish
+
+
+class ExtractThumbnail(publish.Extractor):
+    """
+    Extractor for track item's thumbnails
+    """
+
+    label = "Extract Thumbnail"
+    order = pyblish.api.ExtractorOrder
+    families = ["plate", "take"]
+    hosts = ["hiero"]
+
+    def process(self, instance):
+        # create representation data
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        staging_dir = self.staging_dir(instance)
+
+        self.create_thumbnail(staging_dir, instance)
+
+    def create_thumbnail(self, staging_dir, instance):
+        track_item = instance.data["item"]
+        track_item_name = track_item.name()
+
+        # frames
+        duration = track_item.sourceDuration()
+        frame_start = track_item.sourceIn()
+        self.log.debug(
+            "__ frame_start: `{}`, duration: `{}`".format(
+                frame_start, duration))
+
+        # get thumbnail frame from the middle
+        thumb_frame = int(frame_start + (duration / 2))
+
+        thumb_file = "{}thumbnail{}{}".format(
+            track_item_name, thumb_frame, ".png")
+        thumb_path = os.path.join(staging_dir, thumb_file)
+
+        track_item.thumbnail(thumb_frame, "colour").save(
+            thumb_path,
+            format='png'
+        )
+        self.log.debug(
+            "__ thumb_path: `{}`, frame: `{}`".format(
+                thumb_path, thumb_frame))
+
+        self.log.info("Thumbnail was generated to: {}".format(thumb_path))
+        thumb_representation = {
+            'files': thumb_file,
+            'stagingDir': staging_dir,
+            'name': "thumbnail",
+            'thumbnail': True,
+            'ext': "png"
+        }
+        instance.data["representations"].append(
+            thumb_representation)
diff --git a/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py b/client/ayon_core/hosts/hiero/plugins/publish/integrate_version_up_workfile.py
similarity index 93%
rename from openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py
rename to client/ayon_core/hosts/hiero/plugins/publish/integrate_version_up_workfile.py
index 6ccbe955f2..27a8bc2604 100644
--- a/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py
+++ b/client/ayon_core/hosts/hiero/plugins/publish/integrate_version_up_workfile.py
@@ -1,6 +1,6 @@
 from pyblish import api
 
-from openpype.lib import version_up
+from ayon_core.lib import version_up
 
 
 class IntegrateVersionUpWorkfile(api.ContextPlugin):
diff --git a/client/ayon_core/hosts/hiero/plugins/publish/precollect_instances.py b/client/ayon_core/hosts/hiero/plugins/publish/precollect_instances.py
new file mode 100644
index 0000000000..e41ca74320
--- /dev/null
+++ b/client/ayon_core/hosts/hiero/plugins/publish/precollect_instances.py
@@ -0,0 +1,422 @@
+import pyblish
+
+from ayon_core.pipeline.editorial import is_overlapping_otio_ranges
+
+from ayon_core.hosts.hiero import api as phiero
+from ayon_core.hosts.hiero.api.otio import hiero_export
+
+import hiero
+from pprint import pformat
+
+
+class PrecollectInstances(pyblish.api.ContextPlugin):
+    """Collect all selected track items."""
+
+    order = pyblish.api.CollectorOrder - 0.49
+    label = "Precollect Instances"
+    hosts = ["hiero"]
+
+    audio_track_items = []
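+
+    # Editor's note: a minimal sketch of the publish tag data this collector
+    # expects on a track item; keys are the ones read in process() below,
+    # values are hypothetical:
+    #     {
+    #         "id": "pyblish.avalon.instance",
+    #         "subset": "plateMain",
+    #         "families": ["clip"],
+    #         "publish": True,
+    #         "audio": False,
+    #         "handleStart": 10,
+    #         "handleEnd": 10,
+    #     }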
= context.data["otioTimeline"] + timeline_selection = phiero.get_timeline_selection() + selected_timeline_items = phiero.get_track_items( + selection=timeline_selection, + check_tagged=True, + check_enabled=True + ) + + # only return enabled track items + if not selected_timeline_items: + selected_timeline_items = phiero.get_track_items( + check_enabled=True, check_tagged=True) + + self.log.info( + "Processing enabled track items: {}".format( + selected_timeline_items)) + + # add all tracks subtreck effect items to context + all_tracks = hiero.ui.activeSequence().videoTracks() + tracks_effect_items = self.collect_sub_track_items(all_tracks) + context.data["tracksEffectItems"] = tracks_effect_items + + # process all sellected timeline track items + for track_item in selected_timeline_items: + data = {} + clip_name = track_item.name() + source_clip = track_item.source() + self.log.debug("clip_name: {}".format(clip_name)) + + # get openpype tag data + tag_data = phiero.get_trackitem_openpype_data(track_item) + self.log.debug("__ tag_data: {}".format(pformat(tag_data))) + + if not tag_data: + continue + + if tag_data.get("id") != "pyblish.avalon.instance": + continue + + # get clips subtracks and anotations + annotations = self.clip_annotations(source_clip) + subtracks = self.clip_subtrack(track_item) + self.log.debug("Annotations: {}".format(annotations)) + self.log.debug(">> Subtracks: {}".format(subtracks)) + + # solve handles length + tag_data["handleStart"] = min( + tag_data["handleStart"], int(track_item.handleInLength())) + tag_data["handleEnd"] = min( + tag_data["handleEnd"], int(track_item.handleOutLength())) + + # add audio to families + with_audio = False + if tag_data.pop("audio"): + with_audio = True + + # add tag data to instance data + data.update({ + k: v for k, v in tag_data.items() + if k not in ("id", "applieswhole", "label") + }) + + asset, asset_name = self._get_asset_data(tag_data) + + subset = tag_data["subset"] + + # insert family into families + families = [str(f) for f in tag_data["families"]] + + # form label + label = "{} -".format(asset) + if asset_name != clip_name: + label += " ({})".format(clip_name) + label += " {}".format(subset) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "asset": asset, + "asset_name": asset_name, + "item": track_item, + "families": families, + "publish": tag_data["publish"], + "fps": context.data["fps"], + + # clip's effect + "clipEffectItems": subtracks, + "clipAnnotations": annotations, + + # add all additional tags + "tags": phiero.get_track_item_tags(track_item), + "newAssetPublishing": True + }) + + # otio clip data + otio_data = self.get_otio_clip_instance_data(track_item) or {} + self.log.debug("__ otio_data: {}".format(pformat(otio_data))) + data.update(otio_data) + self.log.debug("__ data: {}".format(pformat(data))) + + # add resolution + self.get_resolution_to_data(data, context) + + # create instance + instance = context.create_instance(**data) + + # add colorspace data + instance.data.update({ + "versionData": { + "colorspace": track_item.sourceMediaColourTransform(), + } + }) + + # create shot instance for shot attributes create/update + self.create_shot_instance(context, **data) + + self.log.info("Creating instance: {}".format(instance)) + self.log.info( + "_ instance.data: {}".format(pformat(instance.data))) + + if not with_audio: + continue + + # create audio subset instance + self.create_audio_instance(context, **data) + + # add audioReview attribute to plate instance data + # if 
reviewTrack is on + if tag_data.get("reviewTrack") is not None: + instance.data["reviewAudio"] = True + + def get_resolution_to_data(self, data, context): + assert data.get("otioClip"), "Missing `otioClip` data" + + # solve source resolution option + if data.get("sourceResolution", None): + otio_clip_metadata = data[ + "otioClip"].media_reference.metadata + data.update({ + "resolutionWidth": otio_clip_metadata[ + "openpype.source.width"], + "resolutionHeight": otio_clip_metadata[ + "openpype.source.height"], + "pixelAspect": otio_clip_metadata[ + "openpype.source.pixelAspect"] + }) + else: + otio_tl_metadata = context.data["otioTimeline"].metadata + data.update({ + "resolutionWidth": otio_tl_metadata["openpype.timeline.width"], + "resolutionHeight": otio_tl_metadata[ + "openpype.timeline.height"], + "pixelAspect": otio_tl_metadata[ + "openpype.timeline.pixelAspect"] + }) + + def create_shot_instance(self, context, **data): + subset = "shotMain" + master_layer = data.get("heroTrack") + hierarchy_data = data.get("hierarchyData") + item = data.get("item") + clip_name = item.name() + + if not master_layer: + return + + if not hierarchy_data: + return + + asset = data["asset"] + asset_name = data["asset_name"] + + # insert family into families + family = "shot" + + # form label + label = "{} -".format(asset) + if asset_name != clip_name: + label += " ({}) ".format(clip_name) + label += " {}".format(subset) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "subset": subset, + "family": family, + "families": [] + }) + + instance = context.create_instance(**data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + + def _get_asset_data(self, data): + folder_path = data.pop("folderPath", None) + + if data.get("asset_name"): + asset_name = data["asset_name"] + else: + asset_name = data["asset"] + + # backward compatibility for clip tags + # which are missing folderPath key + # TODO remove this in future versions + if not folder_path: + hierarchy_path = data["hierarchy"] + folder_path = "/{}/{}".format( + hierarchy_path, + asset_name + ) + + return folder_path, asset_name + + def create_audio_instance(self, context, **data): + subset = "audioMain" + master_layer = data.get("heroTrack") + + if not master_layer: + return + + asset = data.get("asset") + item = data.get("item") + clip_name = item.name() + + # test if any audio clips + if not self.test_any_audio(item): + return + + asset = data["asset"] + asset_name = data["asset_name"] + + # insert family into families + family = "audio" + + # form label + label = "{} -".format(asset) + if asset_name != clip_name: + label += " ({}) ".format(clip_name) + label += " {}".format(subset) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "subset": subset, + "family": family, + "families": ["clip"] + }) + # remove review track attr if any + data.pop("reviewTrack") + + # create instance + instance = context.create_instance(**data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + + def test_any_audio(self, track_item): + # collect all audio tracks to class variable + if not self.audio_track_items: + for otio_clip in self.otio_timeline.each_clip(): + if otio_clip.parent().kind != "Audio": + continue + self.audio_track_items.append(otio_clip) + + # get track item timeline range + timeline_range = 
self.create_otio_time_range_from_timeline_item_data(
+            track_item)
+
+        # loop through audio track items and search for overlapping clip
+        for otio_audio in self.audio_track_items:
+            parent_range = otio_audio.range_in_parent()
+
+            # if any overlapping clip is found, return True
+            if is_overlapping_otio_ranges(
+                    parent_range, timeline_range, strict=False):
+                return True
+
+    def get_otio_clip_instance_data(self, track_item):
+        """
+        Return otio clip data for the given timeline track item.
+
+        Args:
+            track_item (hiero.core.TrackItem): track item to match against
+                the clips of the otio timeline collected on the context
+
+        Returns:
+            dict: otio clip object under the "otioClip" key, or None
+
+        """
+        ti_track_name = track_item.parent().name()
+        timeline_range = self.create_otio_time_range_from_timeline_item_data(
+            track_item)
+        for otio_clip in self.otio_timeline.each_clip():
+            track_name = otio_clip.parent().name
+            parent_range = otio_clip.range_in_parent()
+            if ti_track_name != track_name:
+                continue
+            if otio_clip.name != track_item.name():
+                continue
+            self.log.debug("__ parent_range: {}".format(parent_range))
+            self.log.debug("__ timeline_range: {}".format(timeline_range))
+            if is_overlapping_otio_ranges(
+                    parent_range, timeline_range, strict=True):
+
+                # add openpype tag data marker to otio_clip metadata
+                for marker in otio_clip.markers:
+                    if phiero.OPENPYPE_TAG_NAME in marker.name:
+                        otio_clip.metadata.update(marker.metadata)
+                return {"otioClip": otio_clip}
+
+        return None
+
+    @staticmethod
+    def create_otio_time_range_from_timeline_item_data(track_item):
+        timeline = phiero.get_current_sequence()
+        frame_start = int(track_item.timelineIn())
+        frame_duration = int(track_item.duration())
+        fps = timeline.framerate().toFloat()
+
+        return hiero_export.create_otio_time_range(
+            frame_start, frame_duration, fps)
+
+    def collect_sub_track_items(self, tracks):
+        """
+        Returns dictionary with track index as key and list of subtracks
+        """
+        # collect all subtrack items
+        sub_track_items = {}
+        for track in tracks:
+            items = track.items()
+
+            effect_items = track.subTrackItems()
+
+            # skip tracks without effect items; only tracks carrying
+            # effects are needed here
+            if not effect_items:
+                continue
+
+            # skip all disabled tracks
+            if not track.isEnabled():
+                continue
+
+            track_index = track.trackIndex()
+            _sub_track_items = phiero.flatten(effect_items)
+
+            _sub_track_items = list(_sub_track_items)
+            # continue only if any subtrack items are collected
+            if not _sub_track_items:
+                continue
+
+            enabled_sti = []
+            # loop all found subtrack items and check if they are enabled
+            for _sti in _sub_track_items:
+                # skip disabled items
+                if not _sti.isEnabled():
+                    continue
+                if isinstance(_sti, hiero.core.Annotation):
+                    continue
+                # collect the subtrack item
+                enabled_sti.append(_sti)
+
+            # continue only if any subtrack items are collected
+            if not enabled_sti:
+                continue
+
+            # add collection of subtrackitems to dict
+            sub_track_items[track_index] = enabled_sti
+
+        return sub_track_items
+
+    @staticmethod
+    def clip_annotations(clip):
+        """
+        Returns list of Clip's hiero.core.Annotation
+        """
+        annotations = []
+        subTrackItems = phiero.flatten(clip.subTrackItems())
+        annotations += [item for item in subTrackItems if isinstance(
+            item, hiero.core.Annotation)]
+        return annotations
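+
+    # Editor's note: a usage sketch for clip_annotations above and
+    # clip_subtrack below, assuming `track_item` is a selected
+    # hiero.core.TrackItem (hypothetical, for illustration only):
+    #     annotations = PrecollectInstances.clip_annotations(
+    #         track_item.source())
+    #     effects = PrecollectInstances.clip_subtrack(track_item)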
+
+    @staticmethod
+    def clip_subtrack(clip):
+        """
+        Returns list of Clip's hiero.core.SubTrackItem
+        """
+        subtracks = []
+        subTrackItems = phiero.flatten(clip.parent().subTrackItems())
+        for item in subTrackItems:
+            if "TimeWarp" in item.name():
+                continue
+            # avoid all annotations
+            if isinstance(item, hiero.core.Annotation):
+                continue
+            # avoid all disabled items
+            if not item.isEnabled():
+                continue
+            subtracks.append(item)
+        return subtracks
diff --git a/client/ayon_core/hosts/hiero/plugins/publish/precollect_workfile.py b/client/ayon_core/hosts/hiero/plugins/publish/precollect_workfile.py
new file mode 100644
index 0000000000..e9e2aae653
--- /dev/null
+++ b/client/ayon_core/hosts/hiero/plugins/publish/precollect_workfile.py
@@ -0,0 +1,108 @@
+import os
+import tempfile
+from pprint import pformat
+
+import pyblish.api
+from qtpy.QtGui import QPixmap
+
+import hiero.ui
+
+from ayon_core.hosts.hiero.api.otio import hiero_export
+
+
+class PrecollectWorkfile(pyblish.api.ContextPlugin):
+    """Inject the current working file into context"""
+
+    label = "Precollect Workfile"
+    order = pyblish.api.CollectorOrder - 0.491
+
+    def process(self, context):
+        asset = context.data["asset"]
+        asset_name = asset.split("/")[-1]
+
+        active_timeline = hiero.ui.activeSequence()
+        project = active_timeline.project()
+        fps = active_timeline.framerate().toFloat()
+
+        # adding otio timeline to context
+        otio_timeline = hiero_export.create_otio_timeline()
+
+        # get workfile thumbnail paths
+        tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
+        thumbnail_name = "workfile_thumbnail.png"
+        thumbnail_path = os.path.join(tmp_staging, thumbnail_name)
+
+        # search for all windows with name of actual sequence
+        _windows = [w for w in hiero.ui.windowManager().windows()
+                    if active_timeline.name() in w.windowTitle()]
+
+        # export window to thumb path
+        QPixmap.grabWidget(_windows[-1]).save(thumbnail_path, 'png')
+
+        # thumbnail
+        thumb_representation = {
+            'files': thumbnail_name,
+            'stagingDir': tmp_staging,
+            'name': "thumbnail",
+            'thumbnail': True,
+            'ext': "png"
+        }
+
+        # get workfile paths
+        current_file = project.path()
+        staging_dir, base_name = os.path.split(current_file)
+
+        # creating workfile representation
+        workfile_representation = {
+            'name': 'hrox',
+            'ext': 'hrox',
+            'files': base_name,
+            "stagingDir": staging_dir,
+        }
+        family = "workfile"
+        instance_data = {
+            "label": "{} - {}Main".format(
+                asset, family),
+            "name": "{}_{}".format(asset_name, family),
+            "asset": context.data["asset"],
+            # TODO use 'get_subset_name'
+            "subset": "{}{}Main".format(asset_name, family.capitalize()),
+            "item": project,
+            "family": family,
+            "families": [],
+            "representations": [workfile_representation, thumb_representation]
+        }
+
+        # create instance with workfile
+        instance = context.create_instance(**instance_data)
+
+        # update context with main project attributes
+        context_data = {
+            "activeProject": project,
+            "activeTimeline": active_timeline,
+            "otioTimeline": otio_timeline,
+            "currentFile": current_file,
+            "colorspace": self.get_colorspace(project),
+            "fps": fps
+        }
+        self.log.debug("__ context_data: {}".format(pformat(context_data)))
+        context.data.update(context_data)
+
+        self.log.info("Creating instance: {}".format(instance))
+        self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
+        self.log.debug("__ context_data: {}".format(pformat(context_data)))
+
+    def get_colorspace(self, project):
+        # get workfile's colorspace properties
+        return {
+            "useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),
+            "lutSetting16Bit": project.lutSetting16Bit(),
+            "lutSetting8Bit": project.lutSetting8Bit(),
+            "lutSettingFloat": project.lutSettingFloat(),
+            "lutSettingLog": project.lutSettingLog(),
+            "lutSettingViewer": project.lutSettingViewer(),
+            
"lutSettingWorkingSpace": project.lutSettingWorkingSpace(), + "lutUseOCIOForExport": project.lutUseOCIOForExport(), + "ocioConfigName": project.ocioConfigName(), + "ocioConfigPath": project.ocioConfigPath() + } diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py b/client/ayon_core/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py similarity index 96% rename from openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py rename to client/ayon_core/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py index 37370497a5..ca937f4aa2 100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py +++ b/client/ayon_core/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py @@ -1,6 +1,6 @@ from pyblish import api -from openpype.client import get_assets, get_asset_name_identifier +from ayon_core.client import get_assets, get_asset_name_identifier class CollectAssetBuilds(api.ContextPlugin): diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_comments.py b/client/ayon_core/hosts/hiero/plugins/publish_old_workflow/collect_tag_comments.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_comments.py rename to client/ayon_core/hosts/hiero/plugins/publish_old_workflow/collect_tag_comments.py diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py b/client/ayon_core/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py similarity index 98% rename from openpype/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py rename to client/ayon_core/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py index 2f65a8bd4f..297ffa8001 100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py +++ b/client/ayon_core/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py @@ -1,7 +1,7 @@ from pyblish import api import hiero import math -from openpype.hosts.hiero.api.otio.hiero_export import create_otio_time_range +from ayon_core.hosts.hiero.api.otio.hiero_export import create_otio_time_range class PrecollectRetime(api.InstancePlugin): """Calculate Retiming of selected track items.""" diff --git a/openpype/hosts/hiero/vendor/google/protobuf/__init__.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/__init__.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/__init__.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/__init__.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/any_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/any_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/any_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/any_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/api_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/api_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/api_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/api_pb2.py diff --git a/openpype/hosts/hiero/api/otio/__init__.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/compiler/__init__.py similarity index 100% rename from openpype/hosts/hiero/api/otio/__init__.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/compiler/__init__.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/compiler/plugin_pb2.py 
b/client/ayon_core/hosts/hiero/vendor/google/protobuf/compiler/plugin_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/compiler/plugin_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/compiler/plugin_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/descriptor.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor_database.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor_database.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/descriptor_database.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor_database.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/descriptor_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/descriptor_pool.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor_pool.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/descriptor_pool.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/descriptor_pool.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/duration_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/duration_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/duration_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/duration_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/empty_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/empty_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/empty_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/empty_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/field_mask_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/field_mask_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/field_mask_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/field_mask_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/compiler/__init__.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/__init__.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/compiler/__init__.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/__init__.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/_parameterized.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/_parameterized.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/_parameterized.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/_parameterized.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/api_implementation.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/api_implementation.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/api_implementation.py rename to 
client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/api_implementation.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/builder.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/builder.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/builder.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/builder.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/containers.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/containers.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/containers.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/containers.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/decoder.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/decoder.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/decoder.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/decoder.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/encoder.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/encoder.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/encoder.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/encoder.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/enum_type_wrapper.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/enum_type_wrapper.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/enum_type_wrapper.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/enum_type_wrapper.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/extension_dict.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/extension_dict.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/extension_dict.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/extension_dict.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/message_listener.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/message_listener.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/message_listener.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/message_listener.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/message_set_extensions_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/message_set_extensions_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/message_set_extensions_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/message_set_extensions_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py similarity index 100% rename from 
openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/more_extensions_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/more_extensions_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/more_extensions_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/more_messages_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/more_messages_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/more_messages_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/more_messages_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/no_package_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/no_package_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/no_package_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/no_package_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/python_message.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/python_message.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/python_message.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/python_message.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/type_checkers.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/type_checkers.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/type_checkers.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/type_checkers.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/well_known_types.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/well_known_types.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/well_known_types.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/well_known_types.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/wire_format.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/wire_format.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/wire_format.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/internal/wire_format.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/json_format.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/json_format.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/json_format.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/json_format.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/message.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/message.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/message.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/message.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/message_factory.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/message_factory.py similarity index 100% rename from 
openpype/hosts/hiero/vendor/google/protobuf/message_factory.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/message_factory.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/proto_builder.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/proto_builder.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/proto_builder.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/proto_builder.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/internal/__init__.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/pyext/__init__.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/internal/__init__.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/pyext/__init__.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/pyext/cpp_message.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/pyext/cpp_message.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/pyext/cpp_message.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/pyext/cpp_message.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/pyext/python_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/pyext/python_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/pyext/python_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/pyext/python_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/reflection.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/reflection.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/reflection.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/reflection.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/service.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/service.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/service.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/service.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/service_reflection.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/service_reflection.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/service_reflection.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/service_reflection.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/source_context_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/source_context_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/source_context_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/source_context_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/struct_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/struct_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/struct_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/struct_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/symbol_database.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/symbol_database.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/symbol_database.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/symbol_database.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/text_encoding.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/text_encoding.py similarity index 100% rename from 
openpype/hosts/hiero/vendor/google/protobuf/text_encoding.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/text_encoding.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/text_format.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/text_format.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/text_format.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/text_format.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/timestamp_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/timestamp_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/timestamp_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/timestamp_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/type_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/type_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/type_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/type_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/pyext/__init__.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/util/__init__.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/pyext/__init__.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/util/__init__.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/util/json_format_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/util/json_format_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/util/json_format_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/util/json_format_proto3_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/util/json_format_proto3_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/util/json_format_proto3_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/util/json_format_proto3_pb2.py diff --git a/openpype/hosts/hiero/vendor/google/protobuf/wrappers_pb2.py b/client/ayon_core/hosts/hiero/vendor/google/protobuf/wrappers_pb2.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/wrappers_pb2.py rename to client/ayon_core/hosts/hiero/vendor/google/protobuf/wrappers_pb2.py diff --git a/openpype/hosts/houdini/__init__.py b/client/ayon_core/hosts/houdini/__init__.py similarity index 100% rename from openpype/hosts/houdini/__init__.py rename to client/ayon_core/hosts/houdini/__init__.py diff --git a/client/ayon_core/hosts/houdini/addon.py b/client/ayon_core/hosts/houdini/addon.py new file mode 100644 index 0000000000..34d140db3c --- /dev/null +++ b/client/ayon_core/hosts/houdini/addon.py @@ -0,0 +1,54 @@ +import os +from ayon_core.modules import OpenPypeModule, IHostAddon + +HOUDINI_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class HoudiniAddon(OpenPypeModule, IHostAddon): + name = "houdini" + host_name = "houdini" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to HOUDINI_PATH and HOUDINI_MENU_PATH + startup_path = os.path.join(HOUDINI_HOST_DIR, "startup") + new_houdini_path = [startup_path] + new_houdini_menu_path = [startup_path] + + old_houdini_path = env.get("HOUDINI_PATH") or "" + old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or "" + + for path in 
old_houdini_path.split(os.pathsep):
+            if not path:
+                continue
+
+            norm_path = os.path.normpath(path)
+            if norm_path not in new_houdini_path:
+                new_houdini_path.append(norm_path)
+
+        for path in old_houdini_menu_path.split(os.pathsep):
+            if not path:
+                continue
+
+            norm_path = os.path.normpath(path)
+            if norm_path not in new_houdini_menu_path:
+                new_houdini_menu_path.append(norm_path)
+
+        # Append an ampersand, which Houdini expands to its own default
+        # search paths when resolving these variables
+        new_houdini_path.append("&")
+        new_houdini_menu_path.append("&")
+
+        env["HOUDINI_PATH"] = os.pathsep.join(new_houdini_path)
+        env["HOUDINI_MENU_PATH"] = os.pathsep.join(new_houdini_menu_path)
+
+    def get_launch_hook_paths(self, app):
+        if app.host_name != self.host_name:
+            return []
+        return [
+            os.path.join(HOUDINI_HOST_DIR, "hooks")
+        ]
+
+    def get_workfile_extensions(self):
+        return [".hip", ".hiplc", ".hipnc"]
diff --git a/openpype/hosts/houdini/api/__init__.py b/client/ayon_core/hosts/houdini/api/__init__.py
similarity index 100%
rename from openpype/hosts/houdini/api/__init__.py
rename to client/ayon_core/hosts/houdini/api/__init__.py
diff --git a/client/ayon_core/hosts/houdini/api/action.py b/client/ayon_core/hosts/houdini/api/action.py
new file mode 100644
index 0000000000..a14296950b
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/api/action.py
@@ -0,0 +1,83 @@
+import pyblish.api
+import hou
+
+from ayon_core.pipeline.publish import get_errored_instances_from_context
+
+
+class SelectInvalidAction(pyblish.api.Action):
+    """Select invalid nodes in Houdini when plug-in failed.
+
+    To retrieve the invalid nodes this assumes a static `get_invalid()`
+    method is available on the plugin.
+
+    """
+    label = "Select invalid"
+    on = "failed"  # This action is only available on a failed plug-in
+    icon = "search"  # Icon from Font Awesome
+
+    def process(self, context, plugin):
+
+        errored_instances = get_errored_instances_from_context(context,
+                                                               plugin=plugin)
+
+        # Get the invalid nodes for the plug-ins
+        self.log.info("Finding invalid nodes..")
+        invalid = list()
+        for instance in errored_instances:
+            invalid_nodes = plugin.get_invalid(instance)
+            if invalid_nodes:
+                if isinstance(invalid_nodes, (list, tuple)):
+                    invalid.extend(invalid_nodes)
+                else:
+                    self.log.warning("Plug-in returned invalid results, "
+                                     "but they are not selectable nodes.")
+
+        hou.clearAllSelected()
+        if invalid:
+            self.log.info("Selecting invalid nodes: {}".format(
+                ", ".join(node.path() for node in invalid)
+            ))
+            for node in invalid:
+                node.setSelected(True)
+                node.setCurrent(True)
+        else:
+            self.log.info("No invalid nodes found.")
+
+
+class SelectROPAction(pyblish.api.Action):
+    """Select ROP.
+
+    It's used to select the ROPs associated with the errored instances.
+ """ + + label = "Select ROP" + on = "failed" # This action is only available on a failed plug-in + icon = "mdi.cursor-default-click" + + def process(self, context, plugin): + errored_instances = get_errored_instances_from_context(context, plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding ROP nodes..") + rop_nodes = list() + for instance in errored_instances: + node_path = instance.data.get("instance_node") + if not node_path: + continue + + node = hou.node(node_path) + if not node: + continue + + rop_nodes.append(node) + + hou.clearAllSelected() + if rop_nodes: + self.log.info("Selecting ROP nodes: {}".format( + ", ".join(node.path() for node in rop_nodes) + )) + for node in rop_nodes: + node.setSelected(True) + node.setCurrent(True) + else: + self.log.info("No ROP nodes found.") diff --git a/client/ayon_core/hosts/houdini/api/colorspace.py b/client/ayon_core/hosts/houdini/api/colorspace.py new file mode 100644 index 0000000000..66581d6f20 --- /dev/null +++ b/client/ayon_core/hosts/houdini/api/colorspace.py @@ -0,0 +1,69 @@ +import attr +import hou +from ayon_core.hosts.houdini.api.lib import get_color_management_preferences +from ayon_core.pipeline.colorspace import get_display_view_colorspace_name + +@attr.s +class LayerMetadata(object): + """Data class for Render Layer metadata.""" + frameStart = attr.ib() + frameEnd = attr.ib() + + +@attr.s +class RenderProduct(object): + """Getting Colorspace as + Specific Render Product Parameter for submitting + publish job. + + """ + colorspace = attr.ib() # colorspace + view = attr.ib() + productName = attr.ib(default=None) + + +class ARenderProduct(object): + + def __init__(self): + """Constructor.""" + # Initialize + self.layer_data = self._get_layer_data() + self.layer_data.products = self.get_colorspace_data() + + def _get_layer_data(self): + return LayerMetadata( + frameStart=int(hou.playbar.frameRange()[0]), + frameEnd=int(hou.playbar.frameRange()[1]), + ) + + def get_colorspace_data(self): + """To be implemented by renderer class. + + This should return a list of RenderProducts. + + Returns: + list: List of RenderProduct + + """ + data = get_color_management_preferences() + colorspace_data = [ + RenderProduct( + colorspace=data["display"], + view=data["view"], + productName="" + ) + ] + return colorspace_data + + +def get_default_display_view_colorspace(): + """Returns the colorspace attribute of the default (display, view) pair. 
+ + It's used for 'ociocolorspace' parm in OpenGL Node.""" + + prefs = get_color_management_preferences() + return get_display_view_colorspace_name( + config_path=prefs["config"], + display=prefs["display"], + view=prefs["view"] + ) diff --git a/openpype/hosts/houdini/api/creator_node_shelves.py b/client/ayon_core/hosts/houdini/api/creator_node_shelves.py similarity index 94% rename from openpype/hosts/houdini/api/creator_node_shelves.py rename to client/ayon_core/hosts/houdini/api/creator_node_shelves.py index 14662dc419..567bb245db 100644 --- a/openpype/hosts/houdini/api/creator_node_shelves.py +++ b/client/ayon_core/hosts/houdini/api/creator_node_shelves.py @@ -12,10 +12,10 @@ import logging import os -from openpype.client import get_asset_by_name -from openpype.pipeline import registered_host -from openpype.pipeline.create import CreateContext -from openpype.resources import get_openpype_icon_filepath +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import registered_host +from ayon_core.pipeline.create import CreateContext +from ayon_core.resources import get_ayon_icon_filepath import hou import stateutils @@ -34,7 +34,7 @@ CREATE_SCRIPT = """ -from openpype.hosts.houdini.api.creator_node_shelves import create_interactive +from ayon_core.hosts.houdini.api.creator_node_shelves import create_interactive create_interactive("{identifier}", **kwargs) """ @@ -146,7 +146,7 @@ def install(): This function is re-entrant and can be called again to reinstall and update the node definitions. For example during development it can be useful to call it manually: - >>> from openpype.hosts.houdini.api.creator_node_shelves import install + >>> from ayon_core.hosts.houdini.api.creator_node_shelves import install >>> install() Returns: @@ -172,8 +172,8 @@ def install(): # and update the tools file if creator identifiers change os.remove(filepath) - icon = get_openpype_icon_filepath() - tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON" + icon = get_ayon_icon_filepath() + tab_menu_label = os.environ.get("AYON_MENU_LABEL") or "AYON" # Create context only to get creator plugins, so we don't reset and only # populate what we need to retrieve the list of creator plugins diff --git a/client/ayon_core/hosts/houdini/api/lib.py b/client/ayon_core/hosts/houdini/api/lib.py new file mode 100644 index 0000000000..7163aebdec --- /dev/null +++ b/client/ayon_core/hosts/houdini/api/lib.py @@ -0,0 +1,1055 @@ +# -*- coding: utf-8 -*- +import sys +import os +import errno +import re +import uuid +import logging +import json + +import six + +from ayon_core.lib import StringTemplate +from ayon_core.client import get_project, get_asset_by_name +from ayon_core.settings import get_current_project_settings +from ayon_core.pipeline import ( + Anatomy, + get_current_project_name, + get_current_asset_name, + registered_host, + get_current_context, + get_current_host_name, +) +from ayon_core.pipeline.create import CreateContext +from ayon_core.pipeline.template_data import get_template_data +from ayon_core.pipeline.context_tools import get_current_project_asset +from ayon_core.tools.utils import PopupUpdateKeys, SimplePopup +from ayon_core.tools.utils.host_tools import get_tool_by_name + +import hou + + +self = sys.modules[__name__] +self._parent = None +log = logging.getLogger(__name__) +JSON_PREFIX = "JSON:::" + + +def get_asset_fps(asset_doc=None): + """Return current asset fps.""" + + if asset_doc is None: + asset_doc = get_current_project_asset(fields=["data.fps"]) + return asset_doc["data"]["fps"] 
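+
+
+# Editor's note: a quick usage sketch for get_asset_fps (return values
+# are hypothetical):
+#     >>> get_asset_fps()           # fps from the current context asset
+#     25.0
+#     >>> get_asset_fps(asset_doc)  # or reuse an already fetched asset doc
+#     24.0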
+
+
+def set_id(node, unique_id, overwrite=False):
+    """Add an `id` parameter to the node with the given value."""
+    exists = node.parm("id")
+    if not exists:
+        imprint(node, {"id": unique_id})
+    elif overwrite:
+        node.parm("id").set(unique_id)
+
+
+def get_id(node):
+    """Get the `id` parameter of the given node.
+
+    Args:
+        node (hou.Node): The node to retrieve the parameter from.
+
+    Returns:
+        hou.Parm: The `id` parameter of the node, or None when the node
+            is None or has no such parameter.
+
+    """
+
+    if node is not None:
+        return node.parm("id")
+
+
+def generate_ids(nodes, asset_id=None):
+    """Returns new unique ids for the given nodes.
+
+    Note: This does not assign the new ids, it only generates the values.
+
+    To assign new ids using this method:
+    >>> nodes = ["a", "b", "c"]
+    >>> for node, id in generate_ids(nodes):
+    >>>     set_id(node, id)
+
+    To also override any existing values (and assign regenerated ids):
+    >>> nodes = ["a", "b", "c"]
+    >>> for node, id in generate_ids(nodes):
+    >>>     set_id(node, id, overwrite=True)
+
+    Args:
+        nodes (list): List of nodes.
+        asset_id (str or bson.ObjectId): The database id for the *asset* to
+            generate for. When None provided the current asset in the
+            active session is used.
+
+    Returns:
+        list: A list of (node, id) tuples.
+
+    """
+
+    if asset_id is None:
+        project_name = get_current_project_name()
+        asset_name = get_current_asset_name()
+        # Get the asset ID from the database for the asset of current context
+        asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
+
+        assert asset_doc, "No current asset found in Session"
+        asset_id = asset_doc['_id']
+
+    node_ids = []
+    for node in nodes:
+        _, uid = str(uuid.uuid4()).rsplit("-", 1)
+        unique_id = "{}:{}".format(asset_id, uid)
+        node_ids.append((node, unique_id))
+
+    return node_ids
+
+
+def get_id_required_nodes():
+    """Return nodes in /out that require an `id` to be set."""
+
+    valid_types = ["geometry"]
+    nodes = {n for n in hou.node("/out").children() if
+             n.type().name() in valid_types}
+
+    return list(nodes)
+
+
+def get_output_parameter(node):
+    """Return the render output parameter of the given node
+
+    Example:
+        >>> node = hou.node("/out").createNode("alembic")
+        >>> get_output_parameter(node)
+        <hou.Parm filename in /out/alembic1>
+
+    Notes:
+        The node type name (`node.type().name()`) is used for the lookup
+        to stay on par with the creators, because that is the same string
+        value the creators use, e.g.
+        `instance_data.update({"node_type": "alembic"})`.
+
+        ROP nodes in different network categories can share the same
+        output parameter name, which was taken into consideration as a
+        hint for future development.
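+
+        For example, a Mantra ROP ("ifd") resolves below to either
+        `soho_diskfile` or `vm_picture`, depending on whether its
+        "Disk File" output mode is enabled.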
+
+    Args:
+        node (hou.Node): Node instance.
+
+    Returns:
+        hou.Parm
+    """
+
+    node_type = node.type().name()
+
+    # Figure out which type of node is being rendered
+    if node_type in {"alembic", "rop_alembic"}:
+        return node.parm("filename")
+    elif node_type == "arnold":
+        if node.evalParm("ar_ass_export_enable"):
+            return node.parm("ar_ass_file")
+        return node.parm("ar_picture")
+    elif node_type in {
+        "geometry",
+        "rop_geometry",
+        "filmboxfbx",
+        "rop_fbx"
+    }:
+        return node.parm("sopoutput")
+    elif node_type == "comp":
+        return node.parm("copoutput")
+    elif node_type in {"karma", "opengl"}:
+        return node.parm("picture")
+    elif node_type == "ifd":  # Mantra
+        if node.evalParm("soho_outputmode"):
+            return node.parm("soho_diskfile")
+        return node.parm("vm_picture")
+    elif node_type == "Redshift_Proxy_Output":
+        return node.parm("RS_archive_file")
+    elif node_type == "Redshift_ROP":
+        return node.parm("RS_outputFileNamePrefix")
+    elif node_type in {"usd", "usd_rop", "usdexport"}:
+        return node.parm("lopoutput")
+    elif node_type in {"usdrender", "usdrender_rop"}:
+        return node.parm("outputimage")
+    elif node_type == "vray_renderer":
+        return node.parm("SettingsOutput_img_file_path")
+
+    raise TypeError("Node type '%s' not supported" % node_type)
+
+
+def set_scene_fps(fps):
+    hou.setFps(fps)
+
+
+# Valid FPS
+def validate_fps():
+    """Validate current scene FPS and show pop-up when it is incorrect
+
+    Returns:
+        bool
+
+    """
+
+    fps = get_asset_fps()
+    current_fps = hou.fps()  # returns float
+
+    if current_fps != fps:
+
+        # Find main window
+        parent = hou.ui.mainQtWindow()
+        if parent is not None:
+            dialog = PopupUpdateKeys(parent=parent)
+            dialog.setModal(True)
+            dialog.setWindowTitle("Houdini scene does not match project FPS")
+            dialog.set_message("Scene %i FPS does not match project %i FPS" %
+                               (current_fps, fps))
+            dialog.set_button_text("Fix")
+
+            # The `on_clicked_state` signal is the Fix button callback
+            dialog.on_clicked_state.connect(lambda: set_scene_fps(fps))
+
+            dialog.show()
+
+        return False
+
+    return True
+
+
+def create_remote_publish_node(force=True):
+    """Function to create a remote publish node in /out
+
+    This is a hacked "Shell" node that does *nothing* except for triggering
+    `colorbleed.lib.publish_remote()` as pre-render script.
+
+    All default attributes of the Shell node are hidden to the Artist to
+    avoid confusion.
+
+    Additionally some custom attributes are added that can be collected
+    by a Collector to set specific settings for the publish, e.g. whether
+    to separate the jobs per instance or process in one single job.
+
+    """
+
+    cmd = "import colorbleed.lib; colorbleed.lib.publish_remote()"
+
+    existing = hou.node("/out/REMOTE_PUBLISH")
+    if existing:
+        if force:
+            log.warning("Removing existing '/out/REMOTE_PUBLISH' node..")
+            existing.destroy()
+        else:
+            raise RuntimeError("Node already exists /out/REMOTE_PUBLISH. "
+                               "Please remove manually or set `force` to "
+                               "True.")
+
+    # Create the shell node
+    out = hou.node("/out")
+    node = out.createNode("shell", node_name="REMOTE_PUBLISH")
+    node.moveToGoodPosition()
+
+    # Set color to make it stand out (avalon/pyblish color)
+    node.setColor(hou.Color(0.439, 0.709, 0.933))
+
+    # Set the pre-render script
+    node.setParms({
+        "prerender": cmd,
+        "lprerender": "python"  # command language
+    })
+
+    # Lock the attributes to ensure artists won't easily mess things up.
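+    # Locking a parameter (hou.Parm.lock(True)) prevents its value from
+    # being changed until it is unlocked again, which keeps the
+    # pre-render publish hook below intact.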
+    node.parm("prerender").lock(True)
+    node.parm("lprerender").lock(True)
+
+    # Lock up the actual shell command
+    command_parm = node.parm("command")
+    command_parm.set("")
+    command_parm.lock(True)
+    shellexec_parm = node.parm("shellexec")
+    shellexec_parm.set(False)
+    shellexec_parm.lock(True)
+
+    # Get the node's parm template group so we can customize it
+    template = node.parmTemplateGroup()
+
+    # Hide default tabs
+    template.hideFolder("Shell", True)
+    template.hideFolder("Scripts", True)
+
+    # Hide default settings
+    template.hide("execute", True)
+    template.hide("renderdialog", True)
+    template.hide("trange", True)
+    template.hide("f", True)
+    template.hide("take", True)
+
+    # Add custom settings to this node.
+    parm_folder = hou.FolderParmTemplate("folder", "Submission Settings")
+
+    # Separate Jobs per Instance
+    parm = hou.ToggleParmTemplate(name="separateJobPerInstance",
+                                  label="Separate Job per Instance",
+                                  default_value=False)
+    parm_folder.addParmTemplate(parm)
+
+    # Add our custom Submission Settings folder
+    template.append(parm_folder)
+
+    # Apply template back to the node
+    node.setParmTemplateGroup(template)
+
+
+def render_rop(ropnode):
+    """Render ROP node utility for Publishing.
+
+    This renders a ROP node with the settings we want during Publishing.
+    """
+    # Print verbose when in batch mode without UI
+    verbose = not hou.isUIAvailable()
+
+    # Render
+    try:
+        ropnode.render(verbose=verbose,
+                       # Allow Deadline to capture completion percentage
+                       output_progress=verbose)
+    except hou.Error as exc:
+        # The hou.Error is not inherited from a Python Exception class,
+        # so we explicitly capture the houdini error, otherwise pyblish
+        # will remain hanging.
+        import traceback
+        traceback.print_exc()
+        raise RuntimeError("Render failed: {0}".format(exc))
+
+
+def imprint(node, data, update=False):
+    """Store attributes with value on a node
+
+    Depending on the type of attribute it creates the correct parameter
+    template. Houdini uses a template per type, see the docs for more
+    information.
+
+    http://www.sidefx.com/docs/houdini/hom/hou/ParmTemplate.html
+
+    Because of an update glitch where existing ParmTemplates on a node
+    cannot be overwritten using `setParmTemplates()` and
+    `parmTuplesInFolder()`, the update is done in a separate pass.
+
+    Args:
+        node(hou.Node): node object from Houdini
+        data(dict): collection of attributes and their value
+        update (bool, optional): flag if imprint should update
+            already existing data or leave them untouched and only
+            add new.
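+
+    Example (illustrative): imprinting instance metadata as spare
+    parameters on a ROP node:
+        >>> node = hou.node("/out").createNode("geometry")
+        >>> imprint(node, {"id": "pyblish.avalon.instance",
+        ...                "family": "pointcache"})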
+
+    Returns:
+        None
+
+    """
+    if not data:
+        return
+    if not node:
+        log.error("Node is not set, calling imprint on invalid data.")
+        return
+
+    current_parms = {p.name(): p for p in node.spareParms()}
+    update_parm_templates = []
+    new_parm_templates = []
+
+    for key, value in data.items():
+        if value is None:
+            continue
+
+        parm_template = get_template_from_value(key, value)
+
+        if key in current_parms:
+            if node.evalParm(key) == value:
+                continue
+            if not update:
+                log.debug(f"{key} already exists on {node}")
+            else:
+                log.debug(f"replacing {key}")
+                update_parm_templates.append(parm_template)
+            continue
+
+        new_parm_templates.append(parm_template)
+
+    if not new_parm_templates and not update_parm_templates:
+        return
+
+    parm_group = node.parmTemplateGroup()
+
+    # Add new parm templates
+    if new_parm_templates:
+        parm_folder = parm_group.findFolder("Extra")
+
+        # if folder doesn't exist yet, create one and append to it,
+        # else append to existing one
+        if not parm_folder:
+            parm_folder = hou.FolderParmTemplate("folder", "Extra")
+            parm_folder.setParmTemplates(new_parm_templates)
+            parm_group.append(parm_folder)
+        else:
+            # Add to parm template folder instance then replace with updated
+            # one in parm template group
+            for template in new_parm_templates:
+                parm_folder.addParmTemplate(template)
+            parm_group.replace(parm_folder.name(), parm_folder)
+
+    # Update existing parm templates
+    for parm_template in update_parm_templates:
+        parm_group.replace(parm_template.name(), parm_template)
+
+        # When replacing a parm with a parm of the same name it preserves its
+        # value if before the replacement the parm was not at the default,
+        # because it has a value override set. Since we're trying to update
+        # the parm by using the new value as `default` we enforce the parm is
+        # at default state
+        node.parm(parm_template.name()).revertToDefaults()
+
+    node.setParmTemplateGroup(parm_group)
+
+
+def lsattr(attr, value=None, root="/"):
+    """Return nodes that have `attr`.
+
+    When `value` is not None it will only return nodes matching that value
+    for the given attribute.
+
+    Args:
+        attr (str): Name of the attribute (hou.Parm)
+        value (object, Optional): The value to compare the attribute to.
+            When the default None is provided the value check is skipped.
+        root (str): The root path in Houdini to search in.
+
+    Returns:
+        list: Matching nodes that have attribute with value.
+
+    """
+    if value is None:
+        # Use allSubChildren() as allNodes() errors on nodes without
+        # permission to enter without a means to continue of querying
+        # the rest
+        nodes = hou.node(root).allSubChildren()
+        return [n for n in nodes if n.parm(attr)]
+    return lsattrs({attr: value})
+
+
+def lsattrs(attrs, root="/"):
+    """Return nodes matching `key` and `value`.
+
+    Args:
+        attrs (dict): collection of attribute: value
+        root (str): The root path in Houdini to search in.
+
+    Example:
+        >>> lsattrs({"id": "myId"})
+        ["myNode"]
+        >>> lsattr("id")
+        ["myNode", "myOtherNode"]
+
+    Returns:
+        list: Matching nodes that have attribute with value.
+ """ + + matches = set() + # Use allSubChildren() as allNodes() errors on nodes without + # permission to enter without a means to continue of querying + # the rest + nodes = hou.node(root).allSubChildren() + for node in nodes: + for attr in attrs: + if not node.parm(attr): + continue + elif node.evalParm(attr) != attrs[attr]: + continue + else: + matches.add(node) + + return list(matches) + + +def read(node): + """Read the container data in to a dict + + Args: + node(hou.Node): Houdini node + + Returns: + dict + + """ + # `spareParms` returns a tuple of hou.Parm objects + data = {} + if not node: + return data + for parameter in node.spareParms(): + value = parameter.eval() + # test if value is json encoded dict + if isinstance(value, six.string_types) and \ + value.startswith(JSON_PREFIX): + try: + value = json.loads(value[len(JSON_PREFIX):]) + except json.JSONDecodeError: + # not a json + pass + data[parameter.name()] = value + + return data + + +@contextmanager +def maintained_selection(): + """Maintain selection during context + Example: + >>> with maintained_selection(): + ... # Modify selection + ... node.setSelected(on=False, clear_all_selected=True) + >>> # Selection restored + """ + + previous_selection = hou.selectedNodes() + try: + yield + finally: + # Clear the selection + # todo: does hou.clearAllSelected() do the same? + for node in hou.selectedNodes(): + node.setSelected(on=False) + + if previous_selection: + for node in previous_selection: + node.setSelected(on=True) + + +def reset_framerange(): + """Set frame range and FPS to current asset""" + + # Get asset data + project_name = get_current_project_name() + asset_name = get_current_asset_name() + # Get the asset ID from the database for the asset of current context + asset_doc = get_asset_by_name(project_name, asset_name) + asset_data = asset_doc["data"] + + # Get FPS + fps = get_asset_fps(asset_doc) + + # Get Start and End Frames + frame_start = asset_data.get("frameStart") + frame_end = asset_data.get("frameEnd") + + if frame_start is None or frame_end is None: + log.warning("No edit information found for %s" % asset_name) + return + + handle_start = asset_data.get("handleStart", 0) + handle_end = asset_data.get("handleEnd", 0) + + frame_start -= int(handle_start) + frame_end += int(handle_end) + + # Set frame range and FPS + print("Setting scene FPS to {}".format(int(fps))) + set_scene_fps(fps) + hou.playbar.setFrameRange(frame_start, frame_end) + hou.playbar.setPlaybackRange(frame_start, frame_end) + hou.setFrame(frame_start) + + +def get_main_window(): + """Acquire Houdini's main window""" + if self._parent is None: + self._parent = hou.ui.mainQtWindow() + return self._parent + + +def get_template_from_value(key, value): + if isinstance(value, float): + parm = hou.FloatParmTemplate(name=key, + label=key, + num_components=1, + default_value=(value,)) + elif isinstance(value, bool): + parm = hou.ToggleParmTemplate(name=key, + label=key, + default_value=value) + elif isinstance(value, int): + parm = hou.IntParmTemplate(name=key, + label=key, + num_components=1, + default_value=(value,)) + elif isinstance(value, six.string_types): + parm = hou.StringParmTemplate(name=key, + label=key, + num_components=1, + default_value=(value,)) + elif isinstance(value, (dict, list, tuple)): + parm = hou.StringParmTemplate(name=key, + label=key, + num_components=1, + default_value=( + JSON_PREFIX + json.dumps(value),)) + else: + raise TypeError("Unsupported type: %r" % type(value)) + + return parm + + +def get_frame_data(node, 
log=None):
+    """Get the frame data: `frameStartHandle`, `frameEndHandle`
+    and `byFrameStep`.
+
+    This function uses Houdini node's `trange`, `f1`, `f2` and `f3`
+    parameters as the source of truth for the full inclusive frame
+    range to render, as such these are considered as the frame
+    range including the handles.
+
+    The non-inclusive frame start and frame end without handles
+    can be computed by subtracting the handles from the inclusive
+    frame range.
+
+    Args:
+        node (hou.Node): ROP node to retrieve frame range from,
+            the frame range is assumed to be the frame range
+            *including* the start and end handles.
+        log (logging.Logger, optional): Logger to log messages to.
+
+    Returns:
+        dict: frame data for `frameStartHandle`, `frameEndHandle`
+            and `byFrameStep`.
+
+    """
+
+    if log is None:
+        log = self.log
+
+    data = {}
+
+    if node.parm("trange") is None:
+        log.debug(
+            "Node has no 'trange' parameter: {}".format(node.path())
+        )
+        return data
+
+    if node.evalParm("trange") == 0:
+        data["frameStartHandle"] = hou.intFrame()
+        data["frameEndHandle"] = hou.intFrame()
+        data["byFrameStep"] = 1.0
+
+        log.info(
+            "Node '{}' has 'Render current frame' set.\n"
+            "Asset Handles are ignored.\n"
+            "frameStart and frameEnd are set to the "
+            "current frame.".format(node.path())
+        )
+    else:
+        data["frameStartHandle"] = int(node.evalParm("f1"))
+        data["frameEndHandle"] = int(node.evalParm("f2"))
+        data["byFrameStep"] = node.evalParm("f3")
+
+    return data
+
+
+def splitext(name, allowed_multidot_extensions):
+    # type: (str, list) -> tuple
+    """Split file name to name and extension.
+
+    Args:
+        name (str): File name to split.
+        allowed_multidot_extensions (list of str): List of allowed multidot
+            extensions.
+
+    Returns:
+        tuple: Name and extension.
+    """
+
+    for ext in allowed_multidot_extensions:
+        if name.endswith(ext):
+            return name[:-len(ext)], ext
+
+    return os.path.splitext(name)
+
+
+def get_top_referenced_parm(parm):
+    """Return the last parameter in a chain of parameter references."""
+
+    processed = set()  # disallow infinite loop
+    while True:
+        if parm.path() in processed:
+            raise RuntimeError("Parameter references result in cycle.")
+
+        processed.add(parm.path())
+
+        ref = parm.getReferencedParm()
+        if ref.path() == parm.path():
+            # It returns itself when it doesn't reference
+            # another parameter
+            return ref
+        else:
+            parm = ref
+
+
+def evalParmNoFrame(node, parm, pad_character="#"):
+    """Evaluate a parameter's raw string with `$F` tokens replaced by padding."""
+
+    parameter = node.parm(parm)
+    assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
+
+    # If the parameter has a parameter reference, then get that
+    # parameter instead as otherwise `unexpandedString()` fails.
+    parameter = get_top_referenced_parm(parameter)
+
+    # Substitute out the frame numbering with padded characters
+    try:
+        raw = parameter.unexpandedString()
+    except hou.Error as exc:
+        print("Failed: %s" % parameter)
+        raise RuntimeError(exc)
+
+    def replace(match):
+        padding = 1
+        n = match.group(2)
+        if n and int(n):
+            padding = int(n)
+        return pad_character * padding
+
+    expression = re.sub(r"(\$F([0-9]*))", replace, raw)
+
+    with hou.ScriptEvalContext(parameter):
+        return hou.expandStringAtFrame(expression, 0)
+
+
+def get_color_management_preferences():
+    """Get default OCIO preferences"""
+    return {
+        "config": hou.Color.ocio_configPath(),
+        "display": hou.Color.ocio_defaultDisplay(),
+        "view": hou.Color.ocio_defaultView()
+    }
+
+
+def get_obj_node_output(obj_node):
+    """Find output node.
+
+    If the node has any output node return the
+    output node with the minimum `outputidx`.
+    When no output is present return the node
+    with the display flag set. If no output node is
+    detected then None is returned.
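+
+    For example, a `geo` Object containing two output SOPs with
+    `outputidx` 0 and 1 resolves to the output with index 0.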
+
+    Arguments:
+        obj_node (hou.Node): The node to retrieve the output node for.
+
+    Returns:
+        Optional[hou.Node]: The child output node.
+
+    """
+
+    outputs = obj_node.subnetOutputs()
+    if not outputs:
+        return
+
+    elif len(outputs) == 1:
+        return outputs[0]
+
+    else:
+        return min(outputs,
+                   key=lambda node: node.evalParm('outputidx'))
+
+
+def get_output_children(output_node, include_sops=True):
+    """Recursively return a list of all output nodes
+    contained in this node including this node.
+
+    It works in a similar manner to output_node.allNodes().
+    """
+    out_list = [output_node]
+
+    if output_node.childTypeCategory() == hou.objNodeTypeCategory():
+        for child in output_node.children():
+            out_list += get_output_children(child, include_sops=include_sops)
+
+    elif include_sops and \
+            output_node.childTypeCategory() == hou.sopNodeTypeCategory():
+        out = get_obj_node_output(output_node)
+        if out:
+            out_list += [out]
+
+    return out_list
+
+
+def get_resolution_from_doc(doc):
+    """Get resolution from the given asset document."""
+
+    if not doc or "data" not in doc:
+        print("Given document is not valid: \"{}\"".format(str(doc)))
+        return None
+
+    resolution_width = doc["data"].get("resolutionWidth")
+    resolution_height = doc["data"].get("resolutionHeight")
+
+    # Make sure both width and height are set
+    if resolution_width is None or resolution_height is None:
+        print("No resolution information found for \"{}\"".format(
+            doc["name"]))
+        return None
+
+    return int(resolution_width), int(resolution_height)
+
+
+def set_camera_resolution(camera, asset_doc=None):
+    """Apply resolution to camera from the asset document of the publish."""
+
+    if not asset_doc:
+        asset_doc = get_current_project_asset()
+
+    resolution = get_resolution_from_doc(asset_doc)
+
+    if resolution:
+        print("Setting camera resolution: {} -> {}x{}".format(
+            camera.name(), resolution[0], resolution[1]
+        ))
+        camera.parm("resx").set(resolution[0])
+        camera.parm("resy").set(resolution[1])
+
+
+def get_camera_from_container(container):
+    """Get camera from container node."""
+
+    cameras = container.recursiveGlob(
+        "*",
+        filter=hou.nodeTypeFilter.ObjCamera,
+        include_subnets=False
+    )
+
+    assert len(cameras) == 1, "Camera instance must have only one camera"
+    return cameras[0]
+
+
+def get_current_context_template_data_with_asset_data():
+    """Prepare template data for the current context, extended with asset data.
+
+    TODOs:
+        Support both 'assetData' and 'folderData' in future.
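+
+    Returns:
+        dict: Template data of the current context, extended with the
+            anatomy roots under "root" and the asset document data under
+            "assetData" (including computed "frameStartHandle" and
+            "frameEndHandle" when frame and handle data are present).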
+    """
+
+    context = get_current_context()
+    project_name = context["project_name"]
+    asset_name = context["asset_name"]
+    task_name = context["task_name"]
+    host_name = get_current_host_name()
+
+    anatomy = Anatomy(project_name)
+    project_doc = get_project(project_name)
+    asset_doc = get_asset_by_name(project_name, asset_name)
+
+    # get context specific vars
+    asset_data = asset_doc["data"]
+
+    # compute `frameStartHandle` and `frameEndHandle`
+    frame_start = asset_data.get("frameStart")
+    frame_end = asset_data.get("frameEnd")
+    handle_start = asset_data.get("handleStart")
+    handle_end = asset_data.get("handleEnd")
+    if frame_start is not None and handle_start is not None:
+        asset_data["frameStartHandle"] = frame_start - handle_start
+
+    if frame_end is not None and handle_end is not None:
+        asset_data["frameEndHandle"] = frame_end + handle_end
+
+    template_data = get_template_data(
+        project_doc, asset_doc, task_name, host_name
+    )
+    template_data["root"] = anatomy.roots
+    template_data["assetData"] = asset_data
+
+    return template_data
+
+
+def get_context_var_changes():
+    """Get context var changes."""
+
+    houdini_vars_to_update = {}
+
+    project_settings = get_current_project_settings()
+    houdini_vars_settings = \
+        project_settings["houdini"]["general"]["update_houdini_var_context"]
+
+    if not houdini_vars_settings["enabled"]:
+        return houdini_vars_to_update
+
+    houdini_vars = houdini_vars_settings["houdini_vars"]
+
+    # No vars specified - nothing to do
+    if not houdini_vars:
+        return houdini_vars_to_update
+
+    # Get Template data
+    template_data = get_current_context_template_data_with_asset_data()
+
+    # Set Houdini Vars
+    for item in houdini_vars:
+        # For consistency reasons we always force all vars to be uppercase.
+        # Also remove any leading and trailing whitespace.
+        var = item["var"].strip().upper()
+
+        # get and resolve template in value
+        item_value = StringTemplate.format_template(
+            item["value"],
+            template_data
+        )
+
+        if var == "JOB" and item_value == "":
+            # sync $JOB to $HIP if $JOB is empty
+            item_value = os.environ["HIP"]
+
+        if item["is_directory"]:
+            item_value = item_value.replace("\\", "/")
+
+        current_value = hou.hscript("echo -n `${}`".format(var))[0]
+
+        if current_value != item_value:
+            houdini_vars_to_update[var] = (
+                current_value, item_value, item["is_directory"]
+            )
+
+    return houdini_vars_to_update
+
+
+def update_houdini_vars_context():
+    """Update asset context variables"""
+
+    for var, (_old, new, is_directory) in get_context_var_changes().items():
+        if is_directory:
+            try:
+                os.makedirs(new)
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    print(
+                        "Failed to create ${} dir. Maybe due to "
+                        "insufficient permissions.".format(var)
+                    )
+
+        hou.hscript("set {}={}".format(var, new))
+        os.environ[var] = new
+        print("Updated ${} to {}".format(var, new))
+
+
+def update_houdini_vars_context_dialog():
+    """Show pop-up to update asset context variables"""
+    update_vars = get_context_var_changes()
+    if not update_vars:
+        # Nothing to change
+        print("Nothing to change, Houdini vars are already up to date.")
+        return
+
+    message = "\n".join(
+        "${}: {} -> {}".format(var, old or "None", new or "None")
+        for var, (old, new, _is_directory) in update_vars.items()
+    )
+
+    # TODO: Use better UI!
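+    # The dialog is shown with `show()` so this call returns immediately;
+    # the variables are only updated when the user confirms with the
+    # "Fix" button, which triggers `update_houdini_vars_context`.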
+    parent = hou.ui.mainQtWindow()
+    dialog = SimplePopup(parent=parent)
+    dialog.setModal(True)
+    dialog.setWindowTitle("Houdini scene has outdated asset variables")
+    dialog.set_message(message)
+    dialog.set_button_text("Fix")
+
+    # The `on_clicked` signal is the Fix button callback
+    dialog.on_clicked.connect(update_houdini_vars_context)
+
+    dialog.show()
+
+
+def publisher_show_and_publish(comment=None):
+    """Open publisher window and trigger publishing action.
+
+    Args:
+        comment (Optional[str]): Comment to set in publisher window.
+    """
+
+    main_window = get_main_window()
+    publisher_window = get_tool_by_name(
+        tool_name="publisher",
+        parent=main_window,
+    )
+    publisher_window.show_and_publish(comment)
+
+
+def find_rop_input_dependencies(input_tuple):
+    """Recursively find all ROP node paths in an input dependency tuple.
+
+    Args:
+        input_tuple (tuple): Result of `hou.RopNode.inputDependencies()`,
+            which can be a nested tuple representing the input
+            dependencies of the ROP node, consisting of ROPs and the
+            frames that need to be rendered prior to rendering the ROP.
+
+    Returns:
+        list: The `RopNode.path()` entries found inside the input tuple.
+    """
+
+    out_list = []
+    if isinstance(input_tuple[0], hou.RopNode):
+        return input_tuple[0].path()
+
+    if isinstance(input_tuple[0], tuple):
+        for item in input_tuple:
+            out_list.append(find_rop_input_dependencies(item))
+
+    return out_list
+
+
+def self_publish():
+    """Self publish from ROP nodes.
+
+    Firstly, it gets the node and its dependencies.
+    Then, it deactivates all other ROPs.
+    And finally, it triggers the publishing action.
+    """
+
+    result, comment = hou.ui.readInput(
+        "Add Publish Comment",
+        buttons=("Publish", "Cancel"),
+        title="Publish comment",
+        close_choice=1
+    )
+
+    if result:
+        return
+
+    current_node = hou.node(".")
+    inputs_paths = find_rop_input_dependencies(
+        current_node.inputDependencies()
+    )
+    inputs_paths.append(current_node.path())
+
+    host = registered_host()
+    context = CreateContext(host, reset=True)
+
+    for instance in context.instances:
+        node_path = instance.data.get("instance_node")
+        instance["active"] = node_path and node_path in inputs_paths
+
+    context.save_changes()
+
+    publisher_show_and_publish(comment)
+
+
+def add_self_publish_button(node):
+    """Add a self publish button to the ROP node."""
+
+    label = os.environ.get("AYON_MENU_LABEL") or "AYON"
+
+    button_parm = hou.ButtonParmTemplate(
+        "ayon_self_publish",
+        "{} Publish".format(label),
+        script_callback="from ayon_core.hosts.houdini.api.lib import "
+                        "self_publish; self_publish()",
+        script_callback_language=hou.scriptLanguage.Python,
+        join_with_next=True
+    )
+
+    template = node.parmTemplateGroup()
+    template.insertBefore((0,), button_parm)
+    node.setParmTemplateGroup(template)
diff --git a/client/ayon_core/hosts/houdini/api/pipeline.py b/client/ayon_core/hosts/houdini/api/pipeline.py
new file mode 100644
index 0000000000..d93ea9acec
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/api/pipeline.py
@@ -0,0 +1,400 @@
+# -*- coding: utf-8 -*-
+"""Pipeline tools for AYON Houdini integration."""
+import os
+import sys
+import logging
+
+import hou  # noqa
+
+from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
+
+import pyblish.api
+
+from ayon_core.pipeline import (
+    register_creator_plugin_path,
+    register_loader_plugin_path,
+    register_inventory_action_path,
+    AVALON_CONTAINER_ID,
+)
+from ayon_core.pipeline.load import any_outdated_containers
+from ayon_core.hosts.houdini import HOUDINI_HOST_DIR
+from ayon_core.hosts.houdini.api import lib, shelves, creator_node_shelves
+
+from ayon_core.lib import (
+    register_event_callback,
+    emit_event,
+)
+
+
+log = logging.getLogger("ayon_core.hosts.houdini")
+
+AVALON_CONTAINERS = "/obj/AVALON_CONTAINERS"
+CONTEXT_CONTAINER = "/obj/OpenPypeContext"
+IS_HEADLESS = not hasattr(hou, "ui")
+
+PLUGINS_DIR = os.path.join(HOUDINI_HOST_DIR, "plugins")
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
+CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
+INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+
+
+class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
+    name = "houdini"
+
+    def __init__(self):
+        super(HoudiniHost, self).__init__()
+        self._op_events = {}
+        self._has_been_setup = False
+
+    def install(self):
+        pyblish.api.register_host("houdini")
+        pyblish.api.register_host("hython")
+        pyblish.api.register_host("hpython")
+
+        pyblish.api.register_plugin_path(PUBLISH_PATH)
+        register_loader_plugin_path(LOAD_PATH)
+        register_creator_plugin_path(CREATE_PATH)
+        register_inventory_action_path(INVENTORY_PATH)
+
+        log.info("Installing callbacks ... ")
+        # register_event_callback("init", on_init)
+        self._register_callbacks()
+        register_event_callback("before.save", before_save)
+        register_event_callback("save", on_save)
+        register_event_callback("open", on_open)
+        register_event_callback("new", on_new)
+
+        self._has_been_setup = True
+
+        # Set asset settings for the empty scene directly after launch of
+        # Houdini so it initializes into the correct scene FPS,
+        # Frame Range, etc.
+        # TODO: make sure this doesn't trigger when
+        #     opening with last workfile.
+        _set_context_settings()
+
+        if not IS_HEADLESS:
+            import hdefereval  # noqa, hdefereval is only available in ui mode
+            # Defer generation of shelves due to issue on Windows where shelf
+            # initialization during start up delays Houdini UI by minutes
+            # making it extremely slow to launch.
+            hdefereval.executeDeferred(shelves.generate_shelves)
+            hdefereval.executeDeferred(creator_node_shelves.install)
+
+    def workfile_has_unsaved_changes(self):
+        return hou.hipFile.hasUnsavedChanges()
+
+    def get_workfile_extensions(self):
+        return [".hip", ".hiplc", ".hipnc"]
+
+    def save_workfile(self, dst_path=None):
+        # Force forwards slashes to avoid segfault
+        if dst_path:
+            dst_path = dst_path.replace("\\", "/")
+        hou.hipFile.save(file_name=dst_path,
+                         save_to_recent_files=True)
+        return dst_path
+
+    def open_workfile(self, filepath):
+        # Force forwards slashes to avoid segfault
+        filepath = filepath.replace("\\", "/")
+
+        hou.hipFile.load(filepath,
+                         suppress_save_prompt=True,
+                         ignore_load_warnings=False)
+
+        return filepath
+
+    def get_current_workfile(self):
+        current_filepath = hou.hipFile.path()
+        if (os.path.basename(current_filepath) == "untitled.hip" and
+                not os.path.exists(current_filepath)):
+            # By default a new scene in houdini is saved in the current
+            # working directory as "untitled.hip" so we need to capture
+            # that and consider it 'not saved' when it's in that state.
+            return None
+
+        return current_filepath
+
+    def get_containers(self):
+        return ls()
+
+    def _register_callbacks(self):
+        for event in self._op_events.copy().values():
+            if event is None:
+                continue
+
+            try:
+                hou.hipFile.removeEventCallback(event)
+            except RuntimeError as e:
+                log.info(e)
+
+        self._op_events[on_file_event_callback] = hou.hipFile.addEventCallback(
+            on_file_event_callback
+        )
+
+    @staticmethod
+    def create_context_node():
+        """Helper for creating context holding node.
+
+        Returns:
+            hou.Node: context node
+
+        """
+        obj_network = hou.node("/obj")
+        op_ctx = obj_network.createNode("subnet",
+                                        node_name="OpenPypeContext",
+                                        run_init_scripts=False,
+                                        load_contents=False)
+
+        op_ctx.moveToGoodPosition()
+        op_ctx.setBuiltExplicitly(False)
+        op_ctx.setCreatorState("OpenPype")
+        op_ctx.setComment("OpenPype node to hold context metadata")
+        op_ctx.setColor(hou.Color((0.081, 0.798, 0.810)))
+        op_ctx.setDisplayFlag(False)
+        op_ctx.hide(True)
+        return op_ctx
+
+    def update_context_data(self, data, changes):
+        op_ctx = hou.node(CONTEXT_CONTAINER)
+        if not op_ctx:
+            op_ctx = self.create_context_node()
+
+        lib.imprint(op_ctx, data)
+
+    def get_context_data(self):
+        op_ctx = hou.node(CONTEXT_CONTAINER)
+        if not op_ctx:
+            op_ctx = self.create_context_node()
+        return lib.read(op_ctx)
+
+    def save_file(self, dst_path=None):
+        # Force forwards slashes to avoid segfault
+        dst_path = dst_path.replace("\\", "/")
+
+        hou.hipFile.save(file_name=dst_path,
+                         save_to_recent_files=True)
+
+
+def on_file_event_callback(event):
+    if event == hou.hipFileEventType.AfterLoad:
+        emit_event("open")
+    elif event == hou.hipFileEventType.AfterSave:
+        emit_event("save")
+    elif event == hou.hipFileEventType.BeforeSave:
+        emit_event("before.save")
+    elif event == hou.hipFileEventType.AfterClear:
+        emit_event("new")
+
+
+def containerise(name,
+                 namespace,
+                 nodes,
+                 context,
+                 loader=None,
+                 suffix=""):
+    """Bundle `nodes` into a subnet and imprint it with metadata
+
+    Containerisation enables tracking of version, author and origin
+    for loaded assets.
+
+    Arguments:
+        name (str): Name of resulting assembly
+        namespace (str): Namespace under which to host container
+        nodes (list): Long names of nodes to containerise
+        context (dict): Asset information
+        loader (str, optional): Name of loader used to produce this container.
+        suffix (str, optional): Suffix of container, defaults to `_CON`.
+
+    Returns:
+        hou.Node: The container subnet node.
+
+    """
+
+    # Ensure AVALON_CONTAINERS subnet exists
+    subnet = hou.node(AVALON_CONTAINERS)
+    if subnet is None:
+        obj_network = hou.node("/obj")
+        subnet = obj_network.createNode("subnet",
+                                        node_name="AVALON_CONTAINERS")
+
+    # Create proper container name
+    container_name = "{}_{}".format(name, suffix or "CON")
+    container = hou.node("/obj/{}".format(name))
+    container.setName(container_name, unique_name=True)
+
+    data = {
+        "schema": "openpype:container-2.0",
+        "id": AVALON_CONTAINER_ID,
+        "name": name,
+        "namespace": namespace,
+        "loader": str(loader),
+        "representation": str(context["representation"]["_id"]),
+    }
+
+    lib.imprint(container, data)
+
+    # "Parent" the container under the container network
+    hou.moveNodesTo([container], subnet)
+
+    subnet.node(container_name).moveToGoodPosition()
+
+    return container
+
+
+def parse_container(container):
+    """Return the container node's full container data.
+
+    Args:
+        container (hou.Node): The container node.
+
+    Returns:
+        dict: The container schema data for this container node.
+ + """ + data = lib.read(container) + + # Backwards compatibility pre-schemas for containers + data["schema"] = data.get("schema", "openpype:container-1.0") + + # Append transient data + data["objectName"] = container.path() + data["node"] = container + + return data + + +def ls(): + containers = [] + for identifier in (AVALON_CONTAINER_ID, + "pyblish.mindbender.container"): + containers += lib.lsattr("id", identifier) + + for container in sorted(containers, + # Hou 19+ Python 3 hou.ObjNode are not + # sortable due to not supporting greater + # than comparisons + key=lambda node: node.path()): + yield parse_container(container) + + +def before_save(): + return lib.validate_fps() + + +def on_save(): + + log.info("Running callback on save..") + + # update houdini vars + lib.update_houdini_vars_context_dialog() + + nodes = lib.get_id_required_nodes() + for node, new_id in lib.generate_ids(nodes): + lib.set_id(node, new_id, overwrite=False) + + +def _show_outdated_content_popup(): + # Get main window + parent = lib.get_main_window() + if parent is None: + log.info("Skipping outdated content pop-up " + "because Houdini window can't be found.") + return + + from ayon_core.tools.utils import SimplePopup + + # Show outdated pop-up + def _on_show_inventory(): + from ayon_core.tools.utils import host_tools + host_tools.show_scene_inventory(parent=parent) + + dialog = SimplePopup(parent=parent) + dialog.setWindowTitle("Houdini scene has outdated content") + dialog.set_message("There are outdated containers in " + "your Houdini scene.") + dialog.on_clicked.connect(_on_show_inventory) + dialog.show() + + +def on_open(): + + if not hou.isUIAvailable(): + log.debug("Batch mode detected, ignoring `on_open` callbacks..") + return + + log.info("Running callback on open..") + + # update houdini vars + lib.update_houdini_vars_context_dialog() + + # Validate FPS after update_task_from_path to + # ensure it is using correct FPS for the asset + lib.validate_fps() + + if any_outdated_containers(): + parent = lib.get_main_window() + if parent is None: + # When opening Houdini with last workfile on launch the UI hasn't + # initialized yet completely when the `on_open` callback triggers. + # We defer the dialog popup to wait for the UI to become available. + # We assume it will open because `hou.isUIAvailable()` returns True + import hdefereval + hdefereval.executeDeferred(_show_outdated_content_popup) + else: + _show_outdated_content_popup() + + log.warning("Scene has outdated content.") + + +def on_new(): + """Set project resolution and fps when create a new file""" + + if hou.hipFile.isLoadingHipFile(): + # This event also triggers when Houdini opens a file due to the + # new event being registered to 'afterClear'. As such we can skip + # 'new' logic if the user is opening a file anyway + log.debug("Skipping on new callback due to scene being opened.") + return + + log.info("Running callback on new..") + _set_context_settings() + + # It seems that the current frame always gets reset to frame 1 on + # new scene. 
So we enforce current frame to be at the start of the playbar
+    # with execute deferred
+    def _enforce_start_frame():
+        start = hou.playbar.playbackRange()[0]
+        hou.setFrame(start)
+
+    if hou.isUIAvailable():
+        import hdefereval
+        hdefereval.executeDeferred(_enforce_start_frame)
+    else:
+        # Run without execute deferred when no UI is available because
+        # without UI `hdefereval` is not available to import
+        _enforce_start_frame()
+
+
+def _set_context_settings():
+    """Apply the project settings from the project definition
+
+    Settings can be overwritten by an asset if the asset.data contains
+    any information regarding those settings.
+
+    Examples of settings:
+        fps
+        resolution
+        renderer
+
+    Returns:
+        None
+    """
+
+    lib.reset_framerange()
+    lib.update_houdini_vars_context()
diff --git a/client/ayon_core/hosts/houdini/api/plugin.py b/client/ayon_core/hosts/houdini/api/plugin.py
new file mode 100644
index 0000000000..e8f89bfbb4
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/api/plugin.py
@@ -0,0 +1,348 @@
+# -*- coding: utf-8 -*-
+"""Houdini specific Avalon/Pyblish plugin definitions."""
+import sys
+from abc import (
+    ABCMeta
+)
+import six
+import hou
+
+from ayon_core.pipeline import (
+    CreatorError,
+    LegacyCreator,
+    Creator as NewCreator,
+    CreatedInstance
+)
+from ayon_core.lib import BoolDef
+from .lib import imprint, read, lsattr, add_self_publish_button
+
+
+class OpenPypeCreatorError(CreatorError):
+    pass
+
+
+class Creator(LegacyCreator):
+    """Creator plugin to create instances in Houdini
+
+    To support the wide range of node types for render output (Alembic, VDB,
+    Mantra) the Creator needs a node type to create the correct instance.
+
+    By default, if none is given, `geometry` is used. An example of accepted
+    node types: geometry, alembic, ifd (mantra)
+
+    Please check the Houdini documentation for more node types.
+
+    Tip: to find the exact node type to create press the `i` left of the node
+    when hovering over a node. The information is visible under the name of
+    the node.
+
+    Deprecated:
+        This creator is deprecated and will be removed in future version.
+
+    """
+    defaults = ['Main']
+
+    def __init__(self, *args, **kwargs):
+        super(Creator, self).__init__(*args, **kwargs)
+        self.nodes = []
+
+    def process(self):
+        """This is the base functionality to create instances in Houdini
+
+        The selected nodes are stored in self to be used in an override
+        method. This is currently necessary in order to support the multiple
+        output types in Houdini which can only be rendered through their own
+        node.
+
+        Default node type if none is given is `geometry`.
+
+        It also makes it easier to apply custom settings per instance type.
+
+        Example of override method for Alembic:
+
+            def process(self):
+                instance = super(CreateEpicNode, self).process()
+                # Set parameters for Alembic node
+                instance.setParms(
+                    {"sop_path": "$HIP/%s.abc" % self.nodes[0]}
+                )
+
+        Returns:
+            hou.Node
+
+        """
+        try:
+            if (self.options or {}).get("useSelection"):
+                self.nodes = hou.selectedNodes()
+
+            # Get the node type and remove it from the data, not needed
+            node_type = self.data.pop("node_type", None)
+            if node_type is None:
+                node_type = "geometry"
+
+            # Get out node
+            out = hou.node("/out")
+            instance = out.createNode(node_type, node_name=self.name)
+            instance.moveToGoodPosition()
+
+            imprint(instance, self.data)
+
+            self._process(instance)
+
+        except hou.Error as er:
+            six.reraise(
+                OpenPypeCreatorError,
+                OpenPypeCreatorError("Creator error: {}".format(er)),
+                sys.exc_info()[2])
+
+
+class HoudiniCreatorBase(object):
+    @staticmethod
+    def cache_subsets(shared_data):
+        """Cache instances for Creators to shared data.
+
+        Create `houdini_cached_subsets` key when needed in shared data and
+        fill it with all collected instances from the scene under its
+        respective creator identifiers.
+
+        Create `houdini_cached_legacy_subsets` key for any legacy instances
+        detected in the scene as instances per family.
+
+        Args:
+            shared_data (Dict[str, Any]): Shared data.
+
+        Returns:
+            Dict[str, Any]: Shared data dictionary.
+
+        """
+        if shared_data.get("houdini_cached_subsets") is None:
+            cache = dict()
+            cache_legacy = dict()
+
+            for node in lsattr("id", "pyblish.avalon.instance"):
+
+                creator_identifier_parm = node.parm("creator_identifier")
+                if creator_identifier_parm:
+                    # creator instance
+                    creator_id = creator_identifier_parm.eval()
+                    cache.setdefault(creator_id, []).append(node)
+
+                else:
+                    # legacy instance
+                    family_parm = node.parm("family")
+                    if not family_parm:
+                        # must be a broken instance
+                        continue
+
+                    family = family_parm.eval()
+                    cache_legacy.setdefault(family, []).append(node)
+
+            shared_data["houdini_cached_subsets"] = cache
+            shared_data["houdini_cached_legacy_subsets"] = cache_legacy
+
+        return shared_data
+
+    @staticmethod
+    def create_instance_node(
+        asset_name, node_name, parent, node_type="geometry"
+    ):
+        # type: (str, str, str, str) -> hou.Node
+        """Create node representing instance.
+
+        Arguments:
+            asset_name (str): Asset name.
+            node_name (str): Name of the new node.
+            parent (str): Path of the parent node/network.
+            node_type (str, optional): Type of the node.
+
+        Returns:
+            hou.Node: Newly created instance node.
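+
+        Example:
+            >>> # e.g. an Alembic ROP for a point cache instance
+            >>> HoudiniCreatorBase.create_instance_node(
+            ...     "/characters/hero", "pointcacheMain", "/out", "alembic")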
+
+        """
+        parent_node = hou.node(parent)
+        instance_node = parent_node.createNode(
+            node_type, node_name=node_name)
+        instance_node.moveToGoodPosition()
+        return instance_node
+
+
+@six.add_metaclass(ABCMeta)
+class HoudiniCreator(NewCreator, HoudiniCreatorBase):
+    """Base class for most of the Houdini creator plugins."""
+    selected_nodes = []
+    settings_name = None
+    add_publish_button = False
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        try:
+            self.selected_nodes = []
+
+            if pre_create_data.get("use_selection"):
+                self.selected_nodes = hou.selectedNodes()
+
+            # Get the node type and remove it from the data, not needed
+            node_type = instance_data.pop("node_type", None)
+            if node_type is None:
+                node_type = "geometry"
+
+            asset_name = instance_data["folderPath"]
+
+            instance_node = self.create_instance_node(
+                asset_name, subset_name, "/out", node_type)
+
+            self.customize_node_look(instance_node)
+
+            instance_data["instance_node"] = instance_node.path()
+            instance_data["instance_id"] = instance_node.path()
+            instance = CreatedInstance(
+                self.family,
+                subset_name,
+                instance_data,
+                self)
+            self._add_instance_to_context(instance)
+            self.imprint(instance_node, instance.data_to_store())
+
+            if self.add_publish_button:
+                add_self_publish_button(instance_node)
+
+            return instance
+
+        except hou.Error as er:
+            six.reraise(
+                OpenPypeCreatorError,
+                OpenPypeCreatorError("Creator error: {}".format(er)),
+                sys.exc_info()[2])
+
+    def lock_parameters(self, node, parameters):
+        """Lock list of specified parameters on the node.
+
+        Args:
+            node (hou.Node): Houdini node to lock parameters on.
+            parameters (list of str): List of parameter names.
+
+        """
+        for name in parameters:
+            try:
+                parm = node.parm(name)
+                parm.lock(True)
+            except AttributeError:
+                self.log.debug("missing lock pattern {}".format(name))
+
+    def collect_instances(self):
+        # cache instances if missing
+        self.cache_subsets(self.collection_shared_data)
+        for instance in self.collection_shared_data[
+                "houdini_cached_subsets"].get(self.identifier, []):
+
+            node_data = read(instance)
+
+            # Node paths are always the full node path since that is unique
+            # Because it's the node's path it's not written into attributes
+            # but explicitly collected
+            node_path = instance.path()
+            node_data["instance_id"] = node_path
+            node_data["instance_node"] = node_path
+
+            created_instance = CreatedInstance.from_existing(
+                node_data, self
+            )
+            self._add_instance_to_context(created_instance)
+
+    def update_instances(self, update_list):
+        for created_inst, changes in update_list:
+            instance_node = hou.node(created_inst.get("instance_node"))
+            new_values = {
+                key: changes[key].new_value
+                for key in changes.changed_keys
+            }
+            # Update parm templates and values
+            self.imprint(
+                instance_node,
+                new_values,
+                update=True
+            )
+
+    def imprint(self, node, values, update=False):
+        # Never store instance node and instance id since that data comes
+        # from the node's path
+        values.pop("instance_node", None)
+        values.pop("instance_id", None)
+        imprint(node, values, update=update)
+
+    def remove_instances(self, instances):
+        """Remove specified instances from the scene.
+
+        This destroys the instance node itself; any data imprinted on it
+        is removed along with it.
+ + """ + for instance in instances: + instance_node = hou.node(instance.data.get("instance_node")) + if instance_node: + instance_node.destroy() + + self._remove_instance_from_context(instance) + + def get_pre_create_attr_defs(self): + return [ + BoolDef("use_selection", label="Use selection") + ] + + @staticmethod + def customize_node_look( + node, color=None, + shape="chevron_down"): + """Set custom look for instance nodes. + + Args: + node (hou.Node): Node to set look. + color (hou.Color, Optional): Color of the node. + shape (str, Optional): Shape name of the node. + + Returns: + None + + """ + if not color: + color = hou.Color((0.616, 0.871, 0.769)) + node.setUserData('nodeshape', shape) + node.setColor(color) + + def get_network_categories(self): + """Return in which network view type this creator should show. + + The node type categories returned here will be used to define where + the creator will show up in the TAB search for nodes in Houdini's + Network View. + + This can be overridden in inherited classes to define where that + particular Creator should be visible in the TAB search. + + Returns: + list: List of houdini node type categories + + """ + return [hou.ropNodeTypeCategory()] + + def apply_settings(self, project_settings): + """Method called on initialization of plugin to apply settings.""" + + # Apply General Settings + houdini_general_settings = project_settings["houdini"]["general"] + self.add_publish_button = houdini_general_settings.get( + "add_self_publish_button", False) + + # Apply Creator Settings + settings_name = self.settings_name + if settings_name is None: + settings_name = self.__class__.__name__ + + settings = project_settings["houdini"]["create"] + settings = settings.get(settings_name) + if settings is None: + self.log.debug( + "No settings found for {}".format(self.__class__.__name__) + ) + return + + for key, value in settings.items(): + setattr(self, key, value) diff --git a/openpype/hosts/houdini/api/shelves.py b/client/ayon_core/hosts/houdini/api/shelves.py similarity index 96% rename from openpype/hosts/houdini/api/shelves.py rename to client/ayon_core/hosts/houdini/api/shelves.py index 5093a90988..b0f5af839e 100644 --- a/openpype/hosts/houdini/api/shelves.py +++ b/client/ayon_core/hosts/houdini/api/shelves.py @@ -3,16 +3,16 @@ import logging import platform -from openpype.settings import get_project_settings -from openpype.pipeline import get_current_project_name +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import get_current_project_name -from openpype.lib import StringTemplate +from ayon_core.lib import StringTemplate import hou from .lib import get_current_context_template_data_with_asset_data -log = logging.getLogger("openpype.hosts.houdini.shelves") +log = logging.getLogger("ayon_core.hosts.houdini.shelves") def generate_shelves(): diff --git a/openpype/hosts/houdini/api/usd.py b/client/ayon_core/hosts/houdini/api/usd.py similarity index 96% rename from openpype/hosts/houdini/api/usd.py rename to client/ayon_core/hosts/houdini/api/usd.py index b935dfdf30..e900bc5fac 100644 --- a/openpype/hosts/houdini/api/usd.py +++ b/client/ayon_core/hosts/houdini/api/usd.py @@ -5,10 +5,10 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype import style -from openpype.client import get_asset_by_name -from openpype.pipeline import legacy_io -from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget +from ayon_core import style +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline 
import legacy_io, get_current_project_name
+from ayon_core.tools.utils.assets_widget import SingleSelectAssetsWidget
 
 from pxr import Sdf
 
@@ -47,7 +47,7 @@ def _on_show(self):
         select_id = None
         name = self._parm.eval()
         if name:
-            project_name = legacy_io.active_project()
+            project_name = get_current_project_name()
             db_asset = get_asset_by_name(project_name, name, fields=["_id"])
             if db_asset:
                 select_id = db_asset["_id"]
diff --git a/client/ayon_core/hosts/houdini/hooks/set_paths.py b/client/ayon_core/hosts/houdini/hooks/set_paths.py
new file mode 100644
index 0000000000..1f24a8dd7d
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/hooks/set_paths.py
@@ -0,0 +1,18 @@
+from ayon_core.lib.applications import PreLaunchHook, LaunchTypes
+
+
+class SetPath(PreLaunchHook):
+    """Set current dir to workdir.
+
+    Hook `GlobalHostDataHook` must be executed before this hook.
+    """
+    app_groups = {"houdini"}
+    launch_types = {LaunchTypes.local}
+
+    def execute(self):
+        workdir = self.launch_context.env.get("AVALON_WORKDIR", "")
+        if not workdir:
+            self.log.warning("BUG: Workdir is not filled.")
+            return
+
+        self.launch_context.kwargs["cwd"] = workdir
diff --git a/client/ayon_core/hosts/houdini/plugins/create/convert_legacy.py b/client/ayon_core/hosts/houdini/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..6e503fba6b
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/create/convert_legacy.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+"""Converter for legacy Houdini subsets."""
+from ayon_core.pipeline.create.creator_plugins import SubsetConvertorPlugin
+from ayon_core.hosts.houdini.api.lib import imprint
+
+
+class HoudiniLegacyConvertor(SubsetConvertorPlugin):
+    """Find and convert any legacy subsets in the scene.
+
+    This Converter will find all legacy subsets in the scene and will
+    transform them to the current system. Since the old subsets don't
+    retain any information about their original creators, the only mapping
+    we can do is based on their families.
+
+    Its limitation is that you can have multiple creators creating subsets
+    of the same family and there is no way to handle that. This code should
+    nevertheless cover all creators that came with OpenPype.
+
+    """
+    identifier = "io.openpype.creators.houdini.legacy"
+    family_to_id = {
+        "camera": "io.openpype.creators.houdini.camera",
+        "ass": "io.openpype.creators.houdini.ass",
+        "imagesequence": "io.openpype.creators.houdini.imagesequence",
+        "hda": "io.openpype.creators.houdini.hda",
+        "pointcache": "io.openpype.creators.houdini.pointcache",
+        "redshiftproxy": "io.openpype.creators.houdini.redshiftproxy",
+        "redshift_rop": "io.openpype.creators.houdini.redshift_rop",
+        "usd": "io.openpype.creators.houdini.usd",
+        "usdrender": "io.openpype.creators.houdini.usdrender",
+        "vdbcache": "io.openpype.creators.houdini.vdbcache"
+    }
+
+    def __init__(self, *args, **kwargs):
+        super(HoudiniLegacyConvertor, self).__init__(*args, **kwargs)
+        self.legacy_subsets = {}
+
+    def find_instances(self):
+        """Find legacy subsets in the scene.
+
+        Legacy subsets are the ones that don't have the
+        `creator_identifier` parameter on them.
+
+        This uses the cached entries created by
+        :py:meth:`~HoudiniCreatorBase.cache_subsets()`.
+
+        """
+        self.legacy_subsets = self.collection_shared_data.get(
+            "houdini_cached_legacy_subsets")
+        if not self.legacy_subsets:
+            return
+
+        subset_count = sum(
+            len(subsets) for subsets in self.legacy_subsets.values())
+        self.add_convertor_item("Found {} incompatible subset{}.".format(
+            subset_count, "s" if subset_count > 1 else ""))
+
+    def convert(self):
+        """Convert all legacy subsets to current.
+
+        It is enough to add `creator_identifier` and `instance_node`.
+
+        """
+        if not self.legacy_subsets:
+            return
+
+        for family, subsets in self.legacy_subsets.items():
+            if family in self.family_to_id:
+                for subset in subsets:
+                    data = {
+                        "creator_identifier": self.family_to_id[family],
+                        "instance_node": subset.path()
+                    }
+                    if family == "pointcache":
+                        data["families"] = ["abc"]
+                    self.log.info("Converting {} to {}".format(
+                        subset.path(), self.family_to_id[family]))
+                    imprint(subset, data)
diff --git a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py b/client/ayon_core/hosts/houdini/plugins/create/create_alembic_camera.py
similarity index 94%
rename from openpype/hosts/houdini/plugins/create/create_alembic_camera.py
rename to client/ayon_core/hosts/houdini/plugins/create/create_alembic_camera.py
index 8c8a5e9eed..b6661fe7e4 100644
--- a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_alembic_camera.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating alembic camera subsets."""
-from openpype.hosts.houdini.api import plugin
-from openpype.pipeline import CreatedInstance, CreatorError
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.pipeline import CreatedInstance, CreatorError
 
 import hou
 
diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_ass.py b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_ass.py
similarity index 96%
rename from openpype/hosts/houdini/plugins/create/create_arnold_ass.py
rename to client/ayon_core/hosts/houdini/plugins/create/create_arnold_ass.py
index 437a14c723..f60f5bc42f 100644
--- a/openpype/hosts/houdini/plugins/create/create_arnold_ass.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_ass.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating Arnold ASS files."""
-from openpype.hosts.houdini.api import plugin
-from openpype.lib import BoolDef
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.lib import BoolDef
 
 
 class CreateArnoldAss(plugin.HoudiniCreator):
diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py
similarity index 96%
rename from openpype/hosts/houdini/plugins/create/create_arnold_rop.py
rename to client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py
index 85c4f0f5ff..590a92f56f 100644
--- a/openpype/hosts/houdini/plugins/create/create_arnold_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py
@@ -1,5 +1,5 @@
-from openpype.hosts.houdini.api import plugin
-from openpype.lib import EnumDef, BoolDef
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.lib import EnumDef, BoolDef
 
 
 class CreateArnoldRop(plugin.HoudiniCreator):
diff --git a/openpype/hosts/houdini/plugins/create/create_bgeo.py b/client/ayon_core/hosts/houdini/plugins/create/create_bgeo.py
similarity index 95%
rename from openpype/hosts/houdini/plugins/create/create_bgeo.py
rename to
client/ayon_core/hosts/houdini/plugins/create/create_bgeo.py index 4140919202..135c889b3e 100644 --- a/openpype/hosts/houdini/plugins/create/create_bgeo.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_bgeo.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- """Creator plugin for creating pointcache bgeo files.""" -from openpype.hosts.houdini.api import plugin -from openpype.pipeline import CreatedInstance, CreatorError +from ayon_core.hosts.houdini.api import plugin +from ayon_core.pipeline import CreatedInstance, CreatorError import hou -from openpype.lib import EnumDef, BoolDef +from ayon_core.lib import EnumDef, BoolDef class CreateBGEO(plugin.HoudiniCreator): diff --git a/openpype/hosts/houdini/plugins/create/create_composite.py b/client/ayon_core/hosts/houdini/plugins/create/create_composite.py similarity index 94% rename from openpype/hosts/houdini/plugins/create/create_composite.py rename to client/ayon_core/hosts/houdini/plugins/create/create_composite.py index 52ea6fa054..b87e1fd5b1 100644 --- a/openpype/hosts/houdini/plugins/create/create_composite.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_composite.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Creator plugin for creating composite sequences.""" -from openpype.hosts.houdini.api import plugin -from openpype.pipeline import CreatedInstance, CreatorError +from ayon_core.hosts.houdini.api import plugin +from ayon_core.pipeline import CreatedInstance, CreatorError import hou diff --git a/openpype/hosts/houdini/plugins/create/create_hda.py b/client/ayon_core/hosts/houdini/plugins/create/create_hda.py similarity index 97% rename from openpype/hosts/houdini/plugins/create/create_hda.py rename to client/ayon_core/hosts/houdini/plugins/create/create_hda.py index f670b55eb6..faddc11b0c 100644 --- a/openpype/hosts/houdini/plugins/create/create_hda.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_hda.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- """Creator plugin for creating publishable Houdini Digital Assets.""" -from openpype.client import ( +from ayon_core.client import ( get_asset_by_name, get_subsets, ) -from openpype.hosts.houdini.api import plugin +from ayon_core.hosts.houdini.api import plugin import hou diff --git a/openpype/hosts/houdini/plugins/create/create_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py similarity index 96% rename from openpype/hosts/houdini/plugins/create/create_karma_rop.py rename to client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py index 4e1360ca45..5211044fea 100644 --- a/openpype/hosts/houdini/plugins/create/create_karma_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """Creator plugin to create Karma ROP.""" -from openpype.hosts.houdini.api import plugin -from openpype.pipeline import CreatedInstance -from openpype.lib import BoolDef, EnumDef, NumberDef +from ayon_core.hosts.houdini.api import plugin +from ayon_core.pipeline import CreatedInstance +from ayon_core.lib import BoolDef, EnumDef, NumberDef class CreateKarmaROP(plugin.HoudiniCreator): diff --git a/openpype/hosts/houdini/plugins/create/create_mantra_ifd.py b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_ifd.py similarity index 93% rename from openpype/hosts/houdini/plugins/create/create_mantra_ifd.py rename to client/ayon_core/hosts/houdini/plugins/create/create_mantra_ifd.py index 7ea7d1042f..7f1da13d2e 100644 --- a/openpype/hosts/houdini/plugins/create/create_mantra_ifd.py +++ 
b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_ifd.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating pointcache alembics."""
-from openpype.hosts.houdini.api import plugin
-from openpype.pipeline import CreatedInstance
-from openpype.lib import BoolDef
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.pipeline import CreatedInstance
+from ayon_core.lib import BoolDef
 
 
 class CreateMantraIFD(plugin.HoudiniCreator):
diff --git a/openpype/hosts/houdini/plugins/create/create_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py
similarity index 96%
rename from openpype/hosts/houdini/plugins/create/create_mantra_rop.py
rename to client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py
index 8ecfbea802..02252f35d1 100644
--- a/openpype/hosts/houdini/plugins/create/create_mantra_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 """Creator plugin to create Mantra ROP."""
-from openpype.hosts.houdini.api import plugin
-from openpype.pipeline import CreatedInstance
-from openpype.lib import EnumDef, BoolDef
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.pipeline import CreatedInstance
+from ayon_core.lib import EnumDef, BoolDef
 
 
 class CreateMantraROP(plugin.HoudiniCreator):
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_pointcache.py b/client/ayon_core/hosts/houdini/plugins/create/create_pointcache.py
new file mode 100644
index 0000000000..07dcc17f25
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_pointcache.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating pointcache alembics."""
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.lib import BoolDef
+
+import hou
+
+
+class CreatePointCache(plugin.HoudiniCreator):
+    """Alembic ROP to pointcache"""
+    identifier = "io.openpype.creators.houdini.pointcache"
+    label = "PointCache (Abc)"
+    family = "pointcache"
+    icon = "gears"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        instance_data.pop("active", None)
+        instance_data.update({"node_type": "alembic"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
+
+        instance = super(CreatePointCache, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)
+
+        instance_node = hou.node(instance.get("instance_node"))
+        parms = {
+            "use_sop_path": True,
+            "build_from_path": True,
+            "path_attrib": "path",
+            "prim_to_detail_pattern": "cbId",
+            "format": 2,
+            "facesets": 0,
+            "filename": hou.text.expandString(
+                "$HIP/pyblish/{}.abc".format(subset_name))
+        }
+
+        if self.selected_nodes:
+            selected_node = self.selected_nodes[0]
+
+            # Although Houdini allows an ObjNode path on `sop_path` for
+            # the ROP node, we prefer to set it to the SopNode path explicitly
+
+            # Allow sop level paths (e.g. /obj/geo1/box1)
+            if isinstance(selected_node, hou.SopNode):
+                parms["sop_path"] = selected_node.path()
+                self.log.debug(
+                    "Valid SopNode selection, 'SOP Path' in ROP will be set to '%s'."
+                    % selected_node.path()
+                )
+
+            # Allow object level paths to Geometry nodes (e.g. /obj/geo1)
+            # but do not allow other object level node types like cameras, etc.
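+            # (e.g. a selected camera object falls through to the
+            # "Selection isn't valid" case below)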
+            elif isinstance(selected_node, hou.ObjNode) and \
+                    selected_node.type().name() in ["geo"]:
+
+                # get the output node with the minimum
+                # 'outputidx' or the node with display flag
+                sop_path = self.get_obj_output(selected_node)
+
+                if sop_path:
+                    parms["sop_path"] = sop_path.path()
+                    self.log.debug(
+                        "Valid ObjNode selection, 'SOP Path' in ROP will be set to "
+                        "the child path '%s'."
+                        % sop_path.path()
+                    )
+
+            if not parms.get("sop_path", None):
+                self.log.debug(
+                    "Selection isn't valid. 'SOP Path' in ROP will be empty."
+                )
+        else:
+            self.log.debug(
+                "No Selection. 'SOP Path' in ROP will be empty."
+            )
+
+        instance_node.setParms(parms)
+        instance_node.parm("trange").set(1)
+
+        # Lock any parameters in this list
+        to_lock = ["prim_to_detail_pattern"]
+        self.lock_parameters(instance_node, to_lock)
+
+    def get_network_categories(self):
+        return [
+            hou.ropNodeTypeCategory(),
+            hou.sopNodeTypeCategory()
+        ]
+
+    def get_obj_output(self, obj_node):
+        """Find output node with the smallest 'outputidx'."""
+
+        outputs = obj_node.subnetOutputs()
+
+        # if obj_node is empty
+        if not outputs:
+            return
+
+        # if obj_node has a single output child, whether it's
+        # a SOP output node or a node with the render flag
+        elif len(outputs) == 1:
+            return outputs[0]
+
+        # if there is more than one, the node has multiple output nodes;
+        # return the one with the minimum 'outputidx'
+        else:
+            return min(outputs,
+                       key=lambda node: node.evalParm('outputidx'))
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_redshift_proxy.py b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_proxy.py
new file mode 100644
index 0000000000..fa42411a1c
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_proxy.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating Redshift proxies."""
+from ayon_core.hosts.houdini.api import plugin
+import hou
+from ayon_core.lib import BoolDef
+
+
+class CreateRedshiftProxy(plugin.HoudiniCreator):
+    """Redshift Proxy"""
+    identifier = "io.openpype.creators.houdini.redshiftproxy"
+    label = "Redshift Proxy"
+    family = "redshiftproxy"
+    icon = "magic"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        # Remove "active"; we are checking the bypass flag of the nodes
+        instance_data.pop("active", None)
+
+        # Redshift provides a `Redshift_Proxy_Output` node type which shows
+        # a limited set of parameters by default and is set to extract a
+        # Redshift Proxy. However, when "imprinting" extra parameters needed
+        # for OpenPype, it starts showing all its parameters again. It's
+        # unclear why this happens.
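+        # (Presumably the imprinted spare parms force Houdini to rebuild the
+        # node's parm template group, but that is an unverified assumption.)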
+        # TODO: Somehow enforce that it only shows the original limited
+        # attributes of the Redshift_Proxy_Output node type
+        instance_data.update({"node_type": "Redshift_Proxy_Output"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
+
+        instance = super(CreateRedshiftProxy, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)
+
+        instance_node = hou.node(instance.get("instance_node"))
+
+        parms = {
+            "RS_archive_file": '$HIP/pyblish/{}.$F4.rs'.format(subset_name),
+        }
+
+        if self.selected_nodes:
+            parms["RS_archive_sopPath"] = self.selected_nodes[0].path()
+
+        instance_node.setParms(parms)
+
+        # Lock some Avalon attributes
+        to_lock = ["family", "id", "prim_to_detail_pattern"]
+        self.lock_parameters(instance_node, to_lock)
+
+    def get_network_categories(self):
+        return [
+            hou.ropNodeTypeCategory(),
+            hou.sopNodeTypeCategory()
+        ]
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py
similarity index 97%
rename from openpype/hosts/houdini/plugins/create/create_redshift_rop.py
rename to client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py
index 9d1c7bc90d..8e88c690b9 100644
--- a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py
@@ -2,8 +2,8 @@
 """Creator plugin to create Redshift ROP."""
 import hou  # noqa
 
-from openpype.hosts.houdini.api import plugin
-from openpype.lib import EnumDef, BoolDef
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.lib import EnumDef, BoolDef
 
 
 class CreateRedshiftROP(plugin.HoudiniCreator):
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_review.py b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
new file mode 100644
index 0000000000..c512a61105
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating openGL reviews."""
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.lib import EnumDef, BoolDef, NumberDef
+
+import os
+import hou
+
+
+class CreateReview(plugin.HoudiniCreator):
+    """Review with OpenGL ROP"""
+
+    identifier = "io.openpype.creators.houdini.review"
+    label = "Review"
+    family = "review"
+    icon = "video-camera"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        instance_data.pop("active", None)
+        instance_data.update({"node_type": "opengl"})
+        instance_data["imageFormat"] = pre_create_data.get("imageFormat")
+        instance_data["keepImages"] = pre_create_data.get("keepImages")
+
+        instance = super(CreateReview, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)
+
+        instance_node = hou.node(instance.get("instance_node"))
+
+        frame_range = hou.playbar.frameRange()
+
+        filepath = "{root}/{subset}/{subset}.$F4.{ext}".format(
+            root=hou.text.expandString("$HIP/pyblish"),
+            subset="`chs(\"subset\")`",  # keep dynamic link to subset
+            ext=pre_create_data.get("imageFormat") or "png"
+        )
+
+        parms = {
+            "picture": filepath,
+
+            "trange": 1,
+
+            # Unlike many other ROP nodes, the opengl node does not default
+            # to the $FSTART and $FEND expressions, so we preserve that
+            # behavior but do set the range to the playbar's frame range
+            "f1": frame_range[0],
+            "f2": frame_range[1],
+        }
+
+        override_resolution = pre_create_data.get("override_resolution")
+        if override_resolution:
+            parms.update({
+                "tres": override_resolution,
+                "res1": pre_create_data.get("resx"),
+                "res2": pre_create_data.get("resy"),
+                "aspect": pre_create_data.get("aspect"),
+            })
+
+        if self.selected_nodes:
+            # Use the first camera found in the selection as the camera.
+            # All other node types are set as forced objects.
+            camera = None
+            force_objects = []
+            for node in self.selected_nodes:
+                path = node.path()
+                if node.type().name() == "cam":
+                    if camera:
+                        continue
+                    camera = path
+                else:
+                    force_objects.append(path)
+
+            if not camera:
+                self.log.warning("No camera found in selection.")
+
+            parms.update({
+                "camera": camera or "",
+                "scenepath": "/obj",
+                "forceobjects": " ".join(force_objects),
+                "vobjects": ""  # clear candidate objects from '*' value
+            })
+
+        instance_node.setParms(parms)
+
+        # Set OCIO Colorspace to the default output colorspace
+        # if there's an OCIO config set
+        if os.getenv("OCIO"):
+            self.set_colorcorrect_to_default_view_space(instance_node)
+
+        to_lock = ["id", "family"]
+
+        self.lock_parameters(instance_node, to_lock)
+
+    def get_pre_create_attr_defs(self):
+        attrs = super(CreateReview, self).get_pre_create_attr_defs()
+
+        image_format_enum = [
+            "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+            "rad", "rat", "rta", "sgi", "tga", "tif",
+        ]
+
+        return attrs + [
+            BoolDef("keepImages",
+                    label="Keep Image Sequences",
+                    default=False),
+            EnumDef("imageFormat",
+                    image_format_enum,
+                    default="png",
+                    label="Image Format Options"),
+            BoolDef("override_resolution",
+                    label="Override resolution",
+                    tooltip="When disabled, the resolution set on the camera "
+                            "is used instead.",
+                    default=True),
+            NumberDef("resx",
+                      label="Resolution Width",
+                      default=1280,
+                      minimum=2,
+                      decimals=0),
+            NumberDef("resy",
+                      label="Resolution Height",
+                      default=720,
+                      minimum=2,
+                      decimals=0),
+            NumberDef("aspect",
+                      label="Aspect Ratio",
+                      default=1.0,
+                      minimum=0.0001,
+                      decimals=3)
+        ]
+
+    def set_colorcorrect_to_default_view_space(self,
+                                               instance_node):
+        """Set ociocolorspace to the default output space."""
+        from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
+
+        # set Color Correction parameter to OpenColorIO
+        instance_node.setParms({"colorcorrect": 2})
+
+        # Get default view space for ociocolorspace parm.
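+        # For example, with an ACES config this may resolve to a view
+        # colorspace like "sRGB"; the actual value depends on the OCIO
+        # config in use (illustrative values only).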
+ default_view_space = get_default_display_view_colorspace() + instance_node.setParms( + {"ociocolorspace": default_view_space} + ) + + self.log.debug( + "'OCIO Colorspace' parm on '{}' has been set to " + "the default view color space '{}'" + .format(instance_node, default_view_space) + ) diff --git a/openpype/hosts/houdini/plugins/create/create_staticmesh.py b/client/ayon_core/hosts/houdini/plugins/create/create_staticmesh.py similarity index 98% rename from openpype/hosts/houdini/plugins/create/create_staticmesh.py rename to client/ayon_core/hosts/houdini/plugins/create/create_staticmesh.py index d0985198bd..319be3568d 100644 --- a/openpype/hosts/houdini/plugins/create/create_staticmesh.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_staticmesh.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Creator for Unreal Static Meshes.""" -from openpype.hosts.houdini.api import plugin -from openpype.lib import BoolDef, EnumDef +from ayon_core.hosts.houdini.api import plugin +from ayon_core.lib import BoolDef, EnumDef import hou diff --git a/openpype/hosts/houdini/plugins/create/create_usd.py b/client/ayon_core/hosts/houdini/plugins/create/create_usd.py similarity index 93% rename from openpype/hosts/houdini/plugins/create/create_usd.py rename to client/ayon_core/hosts/houdini/plugins/create/create_usd.py index e05d254863..db9c77fffe 100644 --- a/openpype/hosts/houdini/plugins/create/create_usd.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_usd.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Creator plugin for creating USDs.""" -from openpype.hosts.houdini.api import plugin -from openpype.pipeline import CreatedInstance +from ayon_core.hosts.houdini.api import plugin +from ayon_core.pipeline import CreatedInstance import hou diff --git a/openpype/hosts/houdini/plugins/create/create_usdrender.py b/client/ayon_core/hosts/houdini/plugins/create/create_usdrender.py similarity index 92% rename from openpype/hosts/houdini/plugins/create/create_usdrender.py rename to client/ayon_core/hosts/houdini/plugins/create/create_usdrender.py index f78f0bed50..72a2d2fc7f 100644 --- a/openpype/hosts/houdini/plugins/create/create_usdrender.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_usdrender.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Creator plugin for creating USD renders.""" -from openpype.hosts.houdini.api import plugin -from openpype.pipeline import CreatedInstance +from ayon_core.hosts.houdini.api import plugin +from ayon_core.pipeline import CreatedInstance class CreateUSDRender(plugin.HoudiniCreator): diff --git a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py b/client/ayon_core/hosts/houdini/plugins/create/create_vbd_cache.py similarity index 96% rename from openpype/hosts/houdini/plugins/create/create_vbd_cache.py rename to client/ayon_core/hosts/houdini/plugins/create/create_vbd_cache.py index 53df9dda68..507917b7a5 100644 --- a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_vbd_cache.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """Creator plugin for creating VDB Caches.""" -from openpype.hosts.houdini.api import plugin -from openpype.pipeline import CreatedInstance -from openpype.lib import BoolDef +from ayon_core.hosts.houdini.api import plugin +from ayon_core.pipeline import CreatedInstance +from ayon_core.lib import BoolDef import hou diff --git a/openpype/hosts/houdini/plugins/create/create_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py similarity 
index 97%
rename from openpype/hosts/houdini/plugins/create/create_vray_rop.py
rename to client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py
index 272b57b548..609828e201 100644
--- a/openpype/hosts/houdini/plugins/create/create_vray_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py
@@ -2,9 +2,9 @@
 """Creator plugin to create VRay ROP."""
 import hou
 
-from openpype.hosts.houdini.api import plugin
-from openpype.pipeline import CreatedInstance
-from openpype.lib import EnumDef, BoolDef
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.pipeline import CreatedInstance
+from ayon_core.lib import EnumDef, BoolDef
 
 
 class CreateVrayROP(plugin.HoudiniCreator):
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py
new file mode 100644
index 0000000000..19631566df
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating workfiles."""
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.hosts.houdini.api.lib import read, imprint
+from ayon_core.hosts.houdini.api.pipeline import CONTEXT_CONTAINER
+from ayon_core.pipeline import CreatedInstance, AutoCreator
+from ayon_core.client import get_asset_by_name
+import hou
+
+
+class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
+    """Workfile auto-creator."""
+    identifier = "io.openpype.creators.houdini.workfile"
+    label = "Workfile"
+    family = "workfile"
+    icon = "fa5.file"
+
+    default_variant = "Main"
+
+    def create(self):
+        variant = self.default_variant
+        current_instance = next(
+            (
+                instance for instance in self.create_context.instances
+                if instance.creator_identifier == self.identifier
+            ), None)
+
+        project_name = self.project_name
+        asset_name = self.create_context.get_current_asset_name()
+        task_name = self.create_context.get_current_task_name()
+        host_name = self.host_name
+
+        if current_instance is None:
+            current_instance_asset = None
+        else:
+            current_instance_asset = current_instance["folderPath"]
+
+        if current_instance is None:
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            subset_name = self.get_subset_name(
+                variant, task_name, asset_doc, project_name, host_name
+            )
+            data = {
+                "folderPath": asset_name,
+                "task": task_name,
+                "variant": variant,
+            }
+
+            data.update(
+                self.get_dynamic_data(
+                    variant, task_name, asset_doc,
+                    project_name, host_name, current_instance)
+            )
+            self.log.info("Auto-creating workfile instance...")
+            current_instance = CreatedInstance(
+                self.family, subset_name, data, self
+            )
+            self._add_instance_to_context(current_instance)
+        elif (
+            current_instance_asset != asset_name
+            or current_instance["task"] != task_name
+        ):
+            # Update the instance context if it is not the same
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            subset_name = self.get_subset_name(
+                variant, task_name, asset_doc, project_name, host_name
+            )
+            current_instance["folderPath"] = asset_name
+            current_instance["task"] = task_name
+            current_instance["subset"] = subset_name
+
+        # Write workfile information to the context container.
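+        # The context node acts as persistent storage: collect_instances()
+        # below reads this data back when the scene is reopened.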
+ op_ctx = hou.node(CONTEXT_CONTAINER) + if not op_ctx: + op_ctx = self.create_context_node() + + workfile_data = {"workfile": current_instance.data_to_store()} + imprint(op_ctx, workfile_data) + + def collect_instances(self): + op_ctx = hou.node(CONTEXT_CONTAINER) + instance = read(op_ctx) + if not instance: + return + workfile = instance.get("workfile") + if not workfile: + return + created_instance = CreatedInstance.from_existing( + workfile, self + ) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + op_ctx = hou.node(CONTEXT_CONTAINER) + for created_inst, _changes in update_list: + if created_inst["creator_identifier"] == self.identifier: + workfile_data = {"workfile": created_inst.data_to_store()} + imprint(op_ctx, workfile_data, update=True) diff --git a/openpype/hosts/houdini/plugins/inventory/set_camera_resolution.py b/client/ayon_core/hosts/houdini/plugins/inventory/set_camera_resolution.py similarity index 78% rename from openpype/hosts/houdini/plugins/inventory/set_camera_resolution.py rename to client/ayon_core/hosts/houdini/plugins/inventory/set_camera_resolution.py index 18ececb019..dadb80469a 100644 --- a/openpype/hosts/houdini/plugins/inventory/set_camera_resolution.py +++ b/client/ayon_core/hosts/houdini/plugins/inventory/set_camera_resolution.py @@ -1,9 +1,9 @@ -from openpype.pipeline import InventoryAction -from openpype.hosts.houdini.api.lib import ( +from ayon_core.pipeline import InventoryAction +from ayon_core.hosts.houdini.api.lib import ( get_camera_from_container, set_camera_resolution ) -from openpype.pipeline.context_tools import get_current_project_asset +from ayon_core.pipeline.context_tools import get_current_project_asset class SetCameraResolution(InventoryAction): diff --git a/client/ayon_core/hosts/houdini/plugins/load/actions.py b/client/ayon_core/hosts/houdini/plugins/load/actions.py new file mode 100644 index 0000000000..2cffa565b1 --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/load/actions.py @@ -0,0 +1,85 @@ +"""A module containing generic loader actions that will display in the Loader. + +""" + +from ayon_core.pipeline import load + + +class SetFrameRangeLoader(load.LoaderPlugin): + """Set frame range excluding pre- and post-handles""" + + families = [ + "animation", + "camera", + "pointcache", + "vdbcache", + "usd", + ] + representations = ["abc", "vdb", "usd"] + + label = "Set frame range" + order = 11 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + import hou + + version = context["version"] + version_data = version.get("data", {}) + + start = version_data.get("frameStart", None) + end = version_data.get("frameEnd", None) + + if start is None or end is None: + print( + "Skipping setting frame range because start or " + "end frame data is missing.." 
+            )
+            return
+
+        hou.playbar.setFrameRange(start, end)
+        hou.playbar.setPlaybackRange(start, end)
+
+
+class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
+    """Set frame range including pre- and post-handles"""
+
+    families = [
+        "animation",
+        "camera",
+        "pointcache",
+        "vdbcache",
+        "usd",
+    ]
+    representations = ["abc", "vdb", "usd"]
+
+    label = "Set frame range (with handles)"
+    order = 12
+    icon = "clock-o"
+    color = "white"
+
+    def load(self, context, name, namespace, data):
+
+        import hou
+
+        version = context["version"]
+        version_data = version.get("data", {})
+
+        start = version_data.get("frameStart", None)
+        end = version_data.get("frameEnd", None)
+
+        if start is None or end is None:
+            print(
+                "Skipping setting frame range because start or "
+                "end frame data is missing.."
+            )
+            return
+
+        # Include handles
+        start -= version_data.get("handleStart", 0)
+        end += version_data.get("handleEnd", 0)
+
+        hou.playbar.setFrameRange(start, end)
+        hou.playbar.setPlaybackRange(start, end)
diff --git a/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py b/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py
new file mode 100644
index 0000000000..6996b0d117
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py
@@ -0,0 +1,110 @@
+import os
+from ayon_core.pipeline import (
+    load,
+    get_representation_path,
+)
+from ayon_core.hosts.houdini.api import pipeline
+
+
+class AbcLoader(load.LoaderPlugin):
+    """Load Alembic"""
+
+    families = ["model", "animation", "pointcache", "gpuCache"]
+    label = "Load Alembic"
+    representations = ["abc"]
+    order = -10
+    icon = "code-fork"
+    color = "orange"
+
+    def load(self, context, name=None, namespace=None, data=None):
+        import hou
+
+        # Format file name, Houdini only wants forward slashes
+        file_path = self.filepath_from_context(context)
+        file_path = os.path.normpath(file_path)
+        file_path = file_path.replace("\\", "/")
+
+        # Get the root node
+        obj = hou.node("/obj")
+
+        # Define node name
+        namespace = namespace if namespace else context["asset"]["name"]
+        node_name = "{}_{}".format(namespace, name) if namespace else name
+
+        # Create a new geo node
+        container = obj.createNode("geo", node_name=node_name)
+
+        # Remove the file node, it only loads static meshes
+        # Houdini 17 has removed the file node from the geo node
+        file_node = container.node("file1")
+        if file_node:
+            file_node.destroy()
+
+        # Create an alembic node (supports animation)
+        alembic = container.createNode("alembic", node_name=node_name)
+        alembic.setParms({"fileName": file_path})
+
+        # Add unpack node
+        unpack_name = "unpack_{}".format(name)
+        unpack = container.createNode("unpack", node_name=unpack_name)
+        unpack.setInput(0, alembic)
+        unpack.setParms({"transfer_attributes": "path"})
+
+        # Add normal to points
+        # Order of menu ['point', 'vertex', 'prim', 'detail']
+        normal_name = "normal_{}".format(name)
+        normal_node = container.createNode("normal", node_name=normal_name)
+        normal_node.setParms({"type": 0})
+
+        normal_node.setInput(0, unpack)
+
+        null = container.createNode("null", node_name="OUT")
+        null.setInput(0, normal_node)
+
+        # Ensure display flag is on the Alembic input node and not on the OUT
+        # node to optimize "debug" displaying in the viewport.
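+        # Resulting SOP chain: alembic -> unpack -> normal -> OUT (null)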
+ alembic.setDisplayFlag(True) + + # Set new position for unpack node else it gets cluttered + nodes = [container, alembic, unpack, normal_node, null] + for nr, node in enumerate(nodes): + node.setPosition([0, (0 - nr)]) + + self[:] = nodes + + return pipeline.containerise( + node_name, + namespace, + nodes, + context, + self.__class__.__name__, + suffix="", + ) + + def update(self, container, representation): + + node = container["node"] + try: + alembic_node = next( + n for n in node.children() if n.type().name() == "alembic" + ) + except StopIteration: + self.log.error("Could not find node of type `alembic`") + return + + # Update the file path + file_path = get_representation_path(representation) + file_path = file_path.replace("\\", "/") + + alembic_node.setParms({"fileName": file_path}) + + # Update attribute + node.setParms({"representation": str(representation["_id"])}) + + def remove(self, container): + + node = container["node"] + node.destroy() + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_alembic_archive.py b/client/ayon_core/hosts/houdini/plugins/load/load_alembic_archive.py similarity index 96% rename from openpype/hosts/houdini/plugins/load/load_alembic_archive.py rename to client/ayon_core/hosts/houdini/plugins/load/load_alembic_archive.py index 3a577f72b4..cfe3b16ebb 100644 --- a/openpype/hosts/houdini/plugins/load/load_alembic_archive.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_alembic_archive.py @@ -1,9 +1,9 @@ import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.hosts.houdini.api import pipeline +from ayon_core.hosts.houdini.api import pipeline class AbcArchiveLoader(load.LoaderPlugin): diff --git a/openpype/hosts/houdini/plugins/load/load_ass.py b/client/ayon_core/hosts/houdini/plugins/load/load_ass.py similarity index 96% rename from openpype/hosts/houdini/plugins/load/load_ass.py rename to client/ayon_core/hosts/houdini/plugins/load/load_ass.py index 557d601677..6fbe315adb 100644 --- a/openpype/hosts/houdini/plugins/load/load_ass.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_ass.py @@ -1,11 +1,11 @@ import os import re -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.hosts.houdini.api import pipeline +from ayon_core.hosts.houdini.api import pipeline class AssLoader(load.LoaderPlugin): diff --git a/openpype/hosts/houdini/plugins/load/load_bgeo.py b/client/ayon_core/hosts/houdini/plugins/load/load_bgeo.py similarity index 97% rename from openpype/hosts/houdini/plugins/load/load_bgeo.py rename to client/ayon_core/hosts/houdini/plugins/load/load_bgeo.py index 489bf944ed..afcf82562c 100644 --- a/openpype/hosts/houdini/plugins/load/load_bgeo.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_bgeo.py @@ -2,11 +2,11 @@ import os import re -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.hosts.houdini.api import pipeline +from ayon_core.hosts.houdini.api import pipeline class BgeoLoader(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/houdini/plugins/load/load_camera.py b/client/ayon_core/hosts/houdini/plugins/load/load_camera.py new file mode 100644 index 0000000000..11826fb30d --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/load/load_camera.py @@ -0,0 +1,211 @@ +from ayon_core.pipeline import ( + load, + 
get_representation_path,
+)
+from ayon_core.hosts.houdini.api import pipeline
+
+from ayon_core.hosts.houdini.api.lib import (
+    set_camera_resolution,
+    get_camera_from_container
+)
+
+import hou
+
+
+ARCHIVE_EXPRESSION = ('__import__("_alembic_hom_extensions")'
+                      '.alembicGetCameraDict')
+
+
+def transfer_non_default_values(src, dest, ignore=None):
+    """Copy parm values from src to dest.
+
+    Because the Alembic Archive rebuilds the entire node
+    hierarchy on triggering "Build Hierarchy" we want to
+    preserve any local tweaks made by the user on the camera
+    for ease of use. That could be a background image, a
+    resolution change or even Redshift camera parameters.
+
+    We try to do so by finding all parms that exist on both
+    the source and destination node, skipping those where both
+    are still at their default value. The parms must be visible,
+    must not carry the special "alembic archive" channel
+    expression, and must not be of certain ignored parm types.
+
+    """
+
+    ignore_types = {
+        hou.parmTemplateType.Toggle,
+        hou.parmTemplateType.Menu,
+        hou.parmTemplateType.Button,
+        hou.parmTemplateType.FolderSet,
+        hou.parmTemplateType.Separator,
+        hou.parmTemplateType.Label,
+    }
+
+    src.updateParmStates()
+
+    for parm in src.allParms():
+
+        if ignore and parm.name() in ignore:
+            continue
+
+        # If destination parm does not exist, ignore..
+        dest_parm = dest.parm(parm.name())
+        if not dest_parm:
+            continue
+
+        # Ignore values that are currently at default
+        if parm.isAtDefault() and dest_parm.isAtDefault():
+            continue
+
+        if not parm.isVisible():
+            # Ignore hidden parameters, assume they
+            # are implementation details
+            continue
+
+        expression = None
+        try:
+            expression = parm.expression()
+        except hou.OperationFailed:
+            # No expression present
+            pass
+
+        if expression is not None and ARCHIVE_EXPRESSION in expression:
+            # Assume it's part of the automated connections that the
+            # Alembic Archive makes on loading of the camera and thus we do
+            # not want to transfer the expression
+            continue
+
+        # Ignore folders, separators, etc.
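+        # Note: toggles and menus are in ignore_types as well, so only
+        # free-form value parms (floats, ints, strings) get transferred.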
+        if parm.parmTemplate().type() in ignore_types:
+            continue
+
+        print("Preserving attribute: %s" % parm.name())
+        dest_parm.setFromParm(parm)
+
+
+class CameraLoader(load.LoaderPlugin):
+    """Load camera from an Alembic file"""
+
+    families = ["camera"]
+    label = "Load Camera (abc)"
+    representations = ["abc"]
+    order = -10
+
+    icon = "code-fork"
+    color = "orange"
+
+    def load(self, context, name=None, namespace=None, data=None):
+
+        # Format file name, Houdini only wants forward slashes
+        file_path = self.filepath_from_context(context).replace("\\", "/")
+
+        # Get the root node
+        obj = hou.node("/obj")
+
+        # Define node name
+        namespace = namespace if namespace else context["asset"]["name"]
+        node_name = "{}_{}".format(namespace, name) if namespace else name
+
+        # Create an archive node
+        node = self.create_and_connect(obj, "alembicarchive", node_name)
+
+        # TODO: add FPS of project / asset
+        node.setParms({"fileName": file_path, "channelRef": True})
+
+        # Build the Alembic hierarchy
+        node.parm("buildHierarchy").pressButton()
+        node.moveToGoodPosition()
+
+        # Keep track of the created nodes
+        nodes = [node]
+
+        camera = get_camera_from_container(node)
+        self._match_maya_render_mask(camera)
+        set_camera_resolution(camera, asset_doc=context["asset"])
+        self[:] = nodes
+
+        return pipeline.containerise(node_name,
+                                     namespace,
+                                     nodes,
+                                     context,
+                                     self.__class__.__name__,
+                                     suffix="")
+
+    def update(self, container, representation):
+
+        node = container["node"]
+
+        # Update the file path
+        file_path = get_representation_path(representation)
+        file_path = file_path.replace("\\", "/")
+
+        # Update attributes
+        node.setParms({"fileName": file_path,
+                       "representation": str(representation["_id"])})
+
+        # Store the cam temporarily next to the Alembic Archive
+        # so that we can preserve parm values the user set on it
+        # after build hierarchy was triggered.
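+        # The temporary copy survives the hierarchy rebuild; it is destroyed
+        # below once its values have been transferred to the new camera.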
+        old_camera = get_camera_from_container(node)
+        temp_camera = old_camera.copyTo(node.parent())
+
+        # Rebuild
+        node.parm("buildHierarchy").pressButton()
+
+        # Apply values to the new camera
+        new_camera = get_camera_from_container(node)
+        transfer_non_default_values(temp_camera,
+                                    new_camera,
+                                    # The hidden uniform scale attribute
+                                    # gets a default connection to
+                                    # "icon_scale"; just skip that completely
+                                    ignore={"scale"})
+
+        self._match_maya_render_mask(new_camera)
+        set_camera_resolution(new_camera)
+
+        temp_camera.destroy()
+
+    def remove(self, container):
+
+        node = container["node"]
+        node.destroy()
+
+    def create_and_connect(self, node, node_type, name=None):
+        """Create a node of the given type inside the given parent node.
+
+        Args:
+            node(hou.Node): parent of the new node
+            node_type(str): name of the node type, e.g. 'alembic'
+            name(str, Optional): name of the node
+
+        Returns:
+            hou.Node
+
+        """
+        if name:
+            new_node = node.createNode(node_type, node_name=name)
+        else:
+            new_node = node.createNode(node_type)
+
+        new_node.moveToGoodPosition()
+        return new_node
+
+    def _match_maya_render_mask(self, camera):
+        """Workaround to match the Maya render mask in Houdini"""
+
+        parm = camera.parm("aperture")
+        expression = parm.expression()
+        expression = expression.replace("return ", "aperture = ")
+        expression += """
+# Match maya render mask (logic from Houdini's own FBX importer)
+node = hou.pwd()
+resx = node.evalParm('resx')
+resy = node.evalParm('resy')
+aspect = node.evalParm('aspect')
+aperture *= min(1, (resx / resy * aspect) / 1.5)
+return aperture
+"""
+        parm.setExpression(expression, language=hou.exprLanguage.Python)
diff --git a/client/ayon_core/hosts/houdini/plugins/load/load_fbx.py b/client/ayon_core/hosts/houdini/plugins/load/load_fbx.py
new file mode 100644
index 0000000000..cc1a746d93
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/load/load_fbx.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+"""FBX loader for Houdini."""
+from ayon_core.pipeline import (
+    load,
+    get_representation_path,
+)
+from ayon_core.hosts.houdini.api import pipeline
+
+
+class FbxLoader(load.LoaderPlugin):
+    """Load fbx files."""
""" + + label = "Load FBX" + icon = "code-fork" + color = "orange" + + order = -10 + + families = ["*"] + representations = ["*"] + extensions = {"fbx"} + + def load(self, context, name=None, namespace=None, data=None): + + # get file path from context + file_path = self.filepath_from_context(context) + file_path = file_path.replace("\\", "/") + + # get necessary data + namespace, node_name = self.get_node_name(context, name, namespace) + + # create load tree + nodes = self.create_load_node_tree(file_path, node_name, name) + + self[:] = nodes + + # Call containerise function which does some automations for you + # like moving created nodes to the AVALON_CONTAINERS subnetwork + containerised_nodes = pipeline.containerise( + node_name, + namespace, + nodes, + context, + self.__class__.__name__, + suffix="", + ) + + return containerised_nodes + + def update(self, container, representation): + + node = container["node"] + try: + file_node = next( + n for n in node.children() if n.type().name() == "file" + ) + except StopIteration: + self.log.error("Could not find node of type `file`") + return + + # Update the file path from representation + file_path = get_representation_path(representation) + file_path = file_path.replace("\\", "/") + + file_node.setParms({"file": file_path}) + + # Update attribute + node.setParms({"representation": str(representation["_id"])}) + + def remove(self, container): + + node = container["node"] + node.destroy() + + def switch(self, container, representation): + self.update(container, representation) + + def get_node_name(self, context, name=None, namespace=None): + """Define node name.""" + + if not namespace: + namespace = context["asset"]["name"] + + if namespace: + node_name = "{}_{}".format(namespace, name) + else: + node_name = name + + return namespace, node_name + + def create_load_node_tree(self, file_path, node_name, subset_name): + """Create Load network. + + you can start building your tree at any obj level. + it'll be much easier to build it in the root obj level. + + Afterwards, your tree will be automatically moved to + '/obj/AVALON_CONTAINERS' subnetwork. + """ + import hou + + # Get the root obj level + obj = hou.node("/obj") + + # Create a new obj geo node + parent_node = obj.createNode("geo", node_name=node_name) + + # In older houdini, + # when reating a new obj geo node, a default file node will be + # automatically created. + # so, we will delete it if exists. + file_node = parent_node.node("file1") + if file_node: + file_node.destroy() + + # Create a new file node + file_node = parent_node.createNode("file", node_name=node_name) + file_node.setParms({"file": file_path}) + + # Create attribute delete + attribdelete_name = "attribdelete_{}".format(subset_name) + attribdelete = parent_node.createNode("attribdelete", + node_name=attribdelete_name) + attribdelete.setParms({"ptdel": "fbx_*"}) + attribdelete.setInput(0, file_node) + + # Create a Null node + null_name = "OUT_{}".format(subset_name) + null = parent_node.createNode("null", node_name=null_name) + null.setInput(0, attribdelete) + + # Ensure display flag is on the file_node input node and not on the OUT + # node to optimize "debug" displaying in the viewport. 
+ file_node.setDisplayFlag(True) + + # Set new position for children nodes + parent_node.layoutChildren() + + # Return all the nodes + return [parent_node, file_node, attribdelete, null] diff --git a/openpype/hosts/houdini/plugins/load/load_hda.py b/client/ayon_core/hosts/houdini/plugins/load/load_hda.py similarity index 95% rename from openpype/hosts/houdini/plugins/load/load_hda.py rename to client/ayon_core/hosts/houdini/plugins/load/load_hda.py index 9630716253..288152f2bd 100644 --- a/openpype/hosts/houdini/plugins/load/load_hda.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_hda.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.hosts.houdini.api import pipeline +from ayon_core.hosts.houdini.api import pipeline class HdaLoader(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/houdini/plugins/load/load_image.py b/client/ayon_core/hosts/houdini/plugins/load/load_image.py new file mode 100644 index 0000000000..20fe2f87ca --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/load/load_image.py @@ -0,0 +1,132 @@ +import os + +from ayon_core.pipeline import ( + load, + get_representation_path, + AVALON_CONTAINER_ID, +) +from ayon_core.hosts.houdini.api import lib, pipeline + +import hou + + +def get_image_avalon_container(): + """The COP2 files must be in a COP2 network. + + So we maintain a single entry point within AVALON_CONTAINERS, + just for ease of use. + + """ + + path = pipeline.AVALON_CONTAINERS + avalon_container = hou.node(path) + if not avalon_container: + # Let's create avalon container secretly + # but make sure the pipeline still is built the + # way we anticipate it was built, asserting it. + assert path == "/obj/AVALON_CONTAINERS" + + parent = hou.node("/obj") + avalon_container = parent.createNode( + "subnet", node_name="AVALON_CONTAINERS" + ) + + image_container = hou.node(path + "/IMAGES") + if not image_container: + image_container = avalon_container.createNode( + "cop2net", node_name="IMAGES" + ) + image_container.moveToGoodPosition() + + return image_container + + +class ImageLoader(load.LoaderPlugin): + """Load images into COP2""" + + families = ["imagesequence"] + label = "Load Image (COP2)" + representations = ["*"] + order = -10 + + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + # Format file name, Houdini only wants forward slashes + file_path = self.filepath_from_context(context) + file_path = os.path.normpath(file_path) + file_path = file_path.replace("\\", "/") + file_path = self._get_file_sequence(file_path) + + # Get the root node + parent = get_image_avalon_container() + + # Define node name + namespace = namespace if namespace else context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + node = parent.createNode("file", node_name=node_name) + node.moveToGoodPosition() + + node.setParms({"filename1": file_path}) + + # Imprint it manually + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": node_name, + "namespace": namespace, + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + } + + # todo: add folder="Avalon" + lib.imprint(node, data) + + return node + + def update(self, container, representation): + + node = container["node"] + + # Update the file path + file_path = get_representation_path(representation) + file_path = 
file_path.replace("\\", "/") + file_path = self._get_file_sequence(file_path) + + # Update attributes + node.setParms( + { + "filename1": file_path, + "representation": str(representation["_id"]), + } + ) + + def remove(self, container): + + node = container["node"] + + # Let's clean up the IMAGES COP2 network + # if it ends up being empty and we deleted + # the last file node. Store the parent + # before we delete the node. + parent = node.parent() + + node.destroy() + + if not parent.children(): + parent.destroy() + + def _get_file_sequence(self, file_path): + root = os.path.dirname(file_path) + files = sorted(os.listdir(root)) + + first_fname = files[0] + prefix, padding, suffix = first_fname.rsplit(".", 2) + fname = ".".join([prefix, "$F{}".format(len(padding)), suffix]) + return os.path.join(root, fname).replace("\\", "/") + + def switch(self, container, representation): + self.update(container, representation) diff --git a/client/ayon_core/hosts/houdini/plugins/load/load_redshift_proxy.py b/client/ayon_core/hosts/houdini/plugins/load/load_redshift_proxy.py new file mode 100644 index 0000000000..dd6e78b3bc --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/load/load_redshift_proxy.py @@ -0,0 +1,112 @@ +import os +import re +from ayon_core.pipeline import ( + load, + get_representation_path, +) +from ayon_core.hosts.houdini.api import pipeline +from ayon_core.pipeline.load import LoadError + +import hou + + +class RedshiftProxyLoader(load.LoaderPlugin): + """Load Redshift Proxy""" + + families = ["redshiftproxy"] + label = "Load Redshift Proxy" + representations = ["rs"] + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + + # Get the root node + obj = hou.node("/obj") + + # Define node name + namespace = namespace if namespace else context["asset"]["name"] + node_name = "{}_{}".format(namespace, name) if namespace else name + + # Create a new geo node + container = obj.createNode("geo", node_name=node_name) + + # Check whether the Redshift parameters exist - if not, then likely + # redshift is not set up or initialized correctly + if not container.parm("RS_objprop_proxy_enable"): + container.destroy() + raise LoadError("Unable to initialize geo node with Redshift " + "attributes. 
Make sure you have the Redshift " + "plug-in set up correctly for Houdini.") + + # Enable by default + container.setParms({ + "RS_objprop_proxy_enable": True, + "RS_objprop_proxy_file": self.format_path( + self.filepath_from_context(context), + context["representation"]) + }) + + # Remove the file node, it only loads static meshes + # Houdini 17 has removed the file node from the geo node + file_node = container.node("file1") + if file_node: + file_node.destroy() + + # Add this stub node inside so it previews ok + proxy_sop = container.createNode("redshift_proxySOP", + node_name=node_name) + proxy_sop.setDisplayFlag(True) + + nodes = [container, proxy_sop] + + self[:] = nodes + + return pipeline.containerise( + node_name, + namespace, + nodes, + context, + self.__class__.__name__, + suffix="", + ) + + def update(self, container, representation): + + # Update the file path + file_path = get_representation_path(representation) + + node = container["node"] + node.setParms({ + "RS_objprop_proxy_file": self.format_path( + file_path, representation) + }) + + # Update attribute + node.setParms({"representation": str(representation["_id"])}) + + def remove(self, container): + + node = container["node"] + node.destroy() + + @staticmethod + def format_path(path, representation): + """Format file path correctly for single redshift proxy + or redshift proxy sequence.""" + if not os.path.exists(path): + raise RuntimeError("Path does not exist: %s" % path) + + is_sequence = bool(representation["context"].get("frame")) + # The path is either a single file or sequence in a folder. + if is_sequence: + filename = re.sub(r"(.*)\.(\d+)\.(rs.*)", "\\1.$F4.\\3", path) + filename = os.path.join(path, filename) + else: + filename = path + + filename = os.path.normpath(filename) + filename = filename.replace("\\", "/") + + return filename diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/client/ayon_core/hosts/houdini/plugins/load/load_usd_layer.py similarity index 96% rename from openpype/hosts/houdini/plugins/load/load_usd_layer.py rename to client/ayon_core/hosts/houdini/plugins/load/load_usd_layer.py index 1528cf549f..2c37c24884 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_layer.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_usd_layer.py @@ -1,9 +1,9 @@ -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, AVALON_CONTAINER_ID, ) -from openpype.hosts.houdini.api import lib +from ayon_core.hosts.houdini.api import lib class USDSublayerLoader(load.LoaderPlugin): diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/client/ayon_core/hosts/houdini/plugins/load/load_usd_reference.py similarity index 96% rename from openpype/hosts/houdini/plugins/load/load_usd_reference.py rename to client/ayon_core/hosts/houdini/plugins/load/load_usd_reference.py index 8402ad072c..9396f00cce 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_reference.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_usd_reference.py @@ -1,9 +1,9 @@ -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, AVALON_CONTAINER_ID, ) -from openpype.hosts.houdini.api import lib +from ayon_core.hosts.houdini.api import lib class USDReferenceLoader(load.LoaderPlugin): diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/client/ayon_core/hosts/houdini/plugins/load/load_vdb.py similarity index 97% rename from openpype/hosts/houdini/plugins/load/load_vdb.py rename to 
client/ayon_core/hosts/houdini/plugins/load/load_vdb.py index bcc4f200d3..c3e374ee8d 100644 --- a/openpype/hosts/houdini/plugins/load/load_vdb.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_vdb.py @@ -1,11 +1,11 @@ import os import re -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.hosts.houdini.api import pipeline +from ayon_core.hosts.houdini.api import pipeline class VdbLoader(load.LoaderPlugin): diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/client/ayon_core/hosts/houdini/plugins/load/show_usdview.py similarity index 93% rename from openpype/hosts/houdini/plugins/load/show_usdview.py rename to client/ayon_core/hosts/houdini/plugins/load/show_usdview.py index d56c4acc4f..2f86f23b68 100644 --- a/openpype/hosts/houdini/plugins/load/show_usdview.py +++ b/client/ayon_core/hosts/houdini/plugins/load/show_usdview.py @@ -2,8 +2,8 @@ import platform import subprocess -from openpype.lib.vendor_bin_utils import find_executable -from openpype.pipeline import load +from ayon_core.lib.vendor_bin_utils import find_executable +from ayon_core.pipeline import load class ShowInUsdview(load.LoaderPlugin): diff --git a/openpype/hosts/houdini/plugins/publish/collect_active_state.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_active_state.py similarity index 100% rename from openpype/hosts/houdini/plugins/publish/collect_active_state.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_active_state.py diff --git a/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py similarity index 98% rename from openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py index ffc2a526a3..7fe38555a3 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py @@ -4,8 +4,8 @@ import hou import pyblish.api -from openpype.hosts.houdini.api import colorspace -from openpype.hosts.houdini.api.lib import ( +from ayon_core.hosts.houdini.api import colorspace +from ayon_core.hosts.houdini.api.lib import ( evalParmNoFrame, get_color_management_preferences) diff --git a/openpype/hosts/houdini/plugins/publish/collect_asset_handles.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_asset_handles.py similarity index 96% rename from openpype/hosts/houdini/plugins/publish/collect_asset_handles.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_asset_handles.py index 67a281639d..6b62ea09d4 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_asset_handles.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_asset_handles.py @@ -2,12 +2,12 @@ """Collector plugin for frames data on ROP instances.""" import hou # noqa import pyblish.api -from openpype.lib import BoolDef -from openpype.pipeline import OpenPypePyblishPluginMixin +from ayon_core.lib import BoolDef +from ayon_core.pipeline import AYONPyblishPluginMixin class CollectAssetHandles(pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin): + AYONPyblishPluginMixin): """Apply asset handles. 
If instance does not have:
diff --git a/openpype/hosts/houdini/plugins/publish/collect_cache_farm.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py
similarity index 98%
rename from openpype/hosts/houdini/plugins/publish/collect_cache_farm.py
rename to client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py
index 36ade32a35..f91c253c25 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_cache_farm.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py
@@ -1,7 +1,7 @@
 import os
 import pyblish.api
 import hou
-from openpype.hosts.houdini.api import lib
+from ayon_core.hosts.houdini.api import lib
 
 
 class CollectDataforCache(pyblish.api.InstancePlugin):
diff --git a/openpype/hosts/houdini/plugins/publish/collect_chunk_size.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_chunk_size.py
similarity index 86%
rename from openpype/hosts/houdini/plugins/publish/collect_chunk_size.py
rename to client/ayon_core/hosts/houdini/plugins/publish/collect_chunk_size.py
index f8cd4089e2..3e2561dd6f 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_chunk_size.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_chunk_size.py
@@ -1,10 +1,10 @@
 import pyblish.api
-from openpype.lib import NumberDef
-from openpype.pipeline import OpenPypePyblishPluginMixin
+from ayon_core.lib import NumberDef
+from ayon_core.pipeline import AYONPyblishPluginMixin
 
 
 class CollectChunkSize(pyblish.api.InstancePlugin,
-                       OpenPypePyblishPluginMixin):
+                       AYONPyblishPluginMixin):
     """Collect chunk size for cache submission to Deadline."""
 
     order = pyblish.api.CollectorOrder + 0.05
diff --git a/openpype/hosts/houdini/plugins/publish/collect_current_file.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_current_file.py
similarity index 100%
rename from openpype/hosts/houdini/plugins/publish/collect_current_file.py
rename to client/ayon_core/hosts/houdini/plugins/publish/collect_current_file.py
diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py
similarity index 98%
rename from openpype/hosts/houdini/plugins/publish/collect_frames.py
rename to client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py
index 089fae6b1b..a643ab0d38 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_frames.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py
@@ -5,7 +5,7 @@
 import hou  # noqa
 import pyblish.api
 
-from openpype.hosts.houdini.api import lib
+from ayon_core.hosts.houdini.api import lib
 
 
 class CollectFrames(pyblish.api.InstancePlugin):
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_inputs.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_inputs.py
new file mode 100644
index 0000000000..7d7fabb315
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_inputs.py
@@ -0,0 +1,120 @@
+import pyblish.api
+
+from ayon_core.pipeline import registered_host
+
+
+def collect_input_containers(nodes):
+    """Collect containers that contain any of the nodes in `nodes`.
+
+    This will return any loaded Avalon container that contains at least one
+    of the nodes. As such, the Avalon container is an input for it; in other
+    words, some of the given nodes are members of that container.
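+
+    For example, if one of the given nodes is a child of a loaded
+    container's node, that container is reported as an input.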
+
+    Returns:
+        list: Input avalon containers
+
+    """
+
+    # Build a lookup set of the given nodes
+    lookup = frozenset(nodes)
+
+    containers = []
+    host = registered_host()
+    for container in host.ls():
+
+        node = container["node"]
+
+        # Usually the loaded containers don't have any complex references
+        # and the contained children should be all we need. So we disregard
+        # checking for .references() on the nodes.
+        members = set(node.allSubChildren())
+        members.add(node)  # include the node itself
+
+        # If there's an intersection
+        if not lookup.isdisjoint(members):
+            containers.append(container)
+
+    return containers
+
+
+def iter_upstream(node):
+    """Yields all upstream inputs for the current node.
+
+    This includes all `node.inputAncestors()` but also traverses through all
+    `node.references()` for the node itself and for any of the upstream nodes.
+    This method has no max-depth and will collect all upstream inputs.
+
+    Yields:
+        hou.Node: The upstream nodes, including references.
+
+    """
+
+    upstream = node.inputAncestors(
+        include_ref_inputs=True, follow_subnets=True
+    )
+
+    # Initialize the processing queue with the node's ancestors
+    queue = list(upstream)
+    collected = set(upstream)
+
+    # Traverse upstream references for all nodes and yield them as we
+    # process the queue.
+    while queue:
+        upstream_node = queue.pop()
+        yield upstream_node
+
+        # Find its references that are not collected yet.
+        references = upstream_node.references()
+        references = [n for n in references if n not in collected]
+
+        queue.extend(references)
+        collected.update(references)
+
+        # Include the references' ancestors that have not been collected yet.
+        for reference in references:
+            ancestors = reference.inputAncestors(
+                include_ref_inputs=True, follow_subnets=True
+            )
+            ancestors = [n for n in ancestors if n not in collected]
+
+            queue.extend(ancestors)
+            collected.update(ancestors)
+
+
+class CollectUpstreamInputs(pyblish.api.InstancePlugin):
+    """Collect source input containers used for this publish.
+
+    This will include `inputs` data describing which loaded publishes were
+    used in the generation of this publish. This leaves an upstream trace
+    of what was used as input.
+
+    """
+
+    label = "Collect Inputs"
+    order = pyblish.api.CollectorOrder + 0.4
+    hosts = ["houdini"]
+
+    def process(self, instance):
+        # We can't get the "inputAncestors" directly from the ROP
+        # node, so we find the related output node (set in SOP/COP path)
+        # and include that together with its ancestors
+        output = instance.data.get("output_node")
+
+        if output is None:
+            # If no valid output node is set then ignore it, as validation
+            # will be checking those cases.
+            self.log.debug(
+                "No output node found, skipping collecting of inputs.."
+            )
+            return
+
+        # Collect all upstream parents
+        nodes = list(iter_upstream(output))
+        nodes.append(output)
+
+        # Collect containers for the given set of nodes
+        containers = collect_input_containers(nodes)
+
+        inputs = [c["representation"] for c in containers]
+        instance.data["inputRepresentations"] = inputs
+        self.log.debug("Collected inputs: %s" % inputs)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_instances.py
new file mode 100644
index 0000000000..2780da95d9
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_instances.py
@@ -0,0 +1,93 @@
+import hou
+
+import pyblish.api
+
+from ayon_core.hosts.houdini.api import lib
+
+
+class CollectInstances(pyblish.api.ContextPlugin):
+    """Gather instances from all nodes in the out graph with pre-defined attributes
+
+    This collector takes into account assets that are associated with
+    a specific node and marked with a unique identifier;
+
+    Identifier:
+        id (str): "pyblish.avalon.instance"
+
+    Specific node:
+        The specific node is important because it dictates in which way the
+        subset is being exported.
+
+        alembic: will export an Alembic file which supports cascading
+            attributes like 'cbId' and 'path'
+        geometry: can export a wide range of file types, default out
+
+    """
+
+    order = pyblish.api.CollectorOrder - 0.01
+    label = "Collect Instances"
+    hosts = ["houdini"]
+
+    def process(self, context):
+
+        nodes = hou.node("/out").children()
+        nodes += hou.node("/obj").children()
+
+        # Include instances in the USD stage only when it exists, so it
+        # remains backwards compatible with versions before Houdini 18
+        stage = hou.node("/stage")
+        if stage:
+            nodes += stage.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop)
+
+        for node in nodes:
+
+            if not node.parm("id"):
+                continue
+
+            if node.evalParm("id") != "pyblish.avalon.instance":
+                continue
+
+            # Instance was created by the new creator code, skip it as
+            # it is already collected.
+            if node.parm("creator_identifier"):
+                continue
+
+            has_family = node.evalParm("family")
+            assert has_family, "'%s' is missing 'family'" % node.name()
+
+            self.log.info(
+                "Processing legacy instance node {}".format(node.path())
+            )
+
+            data = lib.read(node)
+            # Check bypass state and reverse
+            if hasattr(node, "isBypassed"):
+                data.update({"active": not node.isBypassed()})
+
+            # Temporary translation of `active` to `publish` till the issue
+            # has been resolved:
+            # https://github.com/pyblish/pyblish-base/issues/307
+            if "active" in data:
+                data["publish"] = data["active"]
+
+            # Create a nice label for the instance
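+            # e.g. a label like "pointcacheMain (hero)" (illustrative values)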
+ label = data.get("name", node.name()) + label += " (%s)" % data["asset"] # include asset in name + + instance = context.create_instance(label) + + # Include `families` using `family` data + instance.data["families"] = [instance.data["family"]] + + instance[:] = [node] + instance.data["instance_node"] = node.path() + instance.data.update(data) + + def sort_by_family(instance): + """Sort by family""" + return instance.data.get("families", instance.data.get("family")) + + # Sort/grouped by family (preserving local index) + context[:] = sorted(context, key=sort_by_family) + + return context diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_instances_usd_layered.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_instances_usd_layered.py index d154cdc7c0..800d6fb883 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_instances_usd_layered.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_instances_usd_layered.py @@ -1,8 +1,8 @@ import hou import pyblish.api -from openpype.hosts.houdini.api import lib -import openpype.hosts.houdini.api.usd as hou_usdlib -import openpype.lib.usdlib as usdlib +from ayon_core.hosts.houdini.api import lib +import ayon_core.hosts.houdini.api.usd as hou_usdlib +import ayon_core.lib.usdlib as usdlib class CollectInstancesUsdLayered(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/collect_karma_rop.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py index dac350a6ef..85100bc2c6 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py @@ -4,11 +4,11 @@ import hou import pyblish.api -from openpype.hosts.houdini.api.lib import ( +from ayon_core.hosts.houdini.api.lib import ( evalParmNoFrame, get_color_management_preferences ) -from openpype.hosts.houdini.api import ( +from ayon_core.hosts.houdini.api import ( colorspace ) diff --git a/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py similarity index 98% rename from openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py index 64ef20f4e7..d46476c2ce 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py @@ -4,11 +4,11 @@ import hou import pyblish.api -from openpype.hosts.houdini.api.lib import ( +from ayon_core.hosts.houdini.api.lib import ( evalParmNoFrame, get_color_management_preferences ) -from openpype.hosts.houdini.api import ( +from ayon_core.hosts.houdini.api import ( colorspace ) diff --git a/openpype/hosts/houdini/plugins/publish/collect_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_output_node.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/collect_output_node.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_output_node.py index bca3d9fdc1..26381e065e 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_output_node.py +++ 
b/client/ayon_core/hosts/houdini/plugins/publish/collect_output_node.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline.publish import KnownPublishError +from ayon_core.pipeline.publish import KnownPublishError class CollectOutputSOPPath(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/collect_pointcache_type.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_pointcache_type.py similarity index 100% rename from openpype/hosts/houdini/plugins/publish/collect_pointcache_type.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_pointcache_type.py diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py similarity index 98% rename from openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py index aec7e07fbc..26dd942559 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py @@ -4,11 +4,11 @@ import hou import pyblish.api -from openpype.hosts.houdini.api.lib import ( +from ayon_core.hosts.houdini.api.lib import ( evalParmNoFrame, get_color_management_preferences ) -from openpype.hosts.houdini.api import ( +from ayon_core.hosts.houdini.api import ( colorspace ) diff --git a/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_remote_publish.py similarity index 88% rename from openpype/hosts/houdini/plugins/publish/collect_remote_publish.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_remote_publish.py index d56d389be0..5d459f525e 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_remote_publish.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_remote_publish.py @@ -1,8 +1,8 @@ import pyblish.api import hou -from openpype.pipeline.publish import RepairAction -from openpype.hosts.houdini.api import lib +from ayon_core.pipeline.publish import RepairAction +from ayon_core.hosts.houdini.api import lib class CollectRemotePublishSettings(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/houdini/plugins/publish/collect_render_products.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_render_products.py similarity index 100% rename from openpype/hosts/houdini/plugins/publish/collect_render_products.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_render_products.py diff --git a/openpype/hosts/houdini/plugins/publish/collect_review_data.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py similarity index 100% rename from openpype/hosts/houdini/plugins/publish/collect_review_data.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py diff --git a/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_rop_frame_range.py similarity index 95% rename from openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_rop_frame_range.py index 1e6bc3b16e..44afaf2466 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_rop_frame_range.py @@ -2,7 +2,7 @@ """Collector plugin for frames data on ROP instances.""" import hou # noqa import pyblish.api -from 
openpype.hosts.houdini.api import lib +from ayon_core.hosts.houdini.api import lib class CollectRopFrameRange(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/collect_staticmesh_type.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_staticmesh_type.py similarity index 100% rename from openpype/hosts/houdini/plugins/publish/collect_staticmesh_type.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_staticmesh_type.py diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_usd_bootstrap.py similarity index 98% rename from openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_usd_bootstrap.py index 462cf99b9c..ed54ad8bc1 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_usd_bootstrap.py @@ -1,11 +1,11 @@ import pyblish.api -from openpype.client import ( +from ayon_core.client import ( get_subset_by_name, get_asset_by_name, get_asset_name_identifier, ) -import openpype.lib.usdlib as usdlib +import ayon_core.lib.usdlib as usdlib class CollectUsdBootstrap(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_usd_layers.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/collect_usd_layers.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_usd_layers.py index 696560a590..e36cd875ba 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_usd_layers.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_usd_layers.py @@ -1,7 +1,7 @@ import os import pyblish.api -import openpype.hosts.houdini.api.usd as usdlib +import ayon_core.hosts.houdini.api.usd as usdlib import hou diff --git a/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py similarity index 98% rename from openpype/hosts/houdini/plugins/publish/collect_vray_rop.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py index ad4fdb0da5..f80ca39f1c 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py @@ -4,11 +4,11 @@ import hou import pyblish.api -from openpype.hosts.houdini.api.lib import ( +from ayon_core.hosts.houdini.api.lib import ( evalParmNoFrame, get_color_management_preferences ) -from openpype.hosts.houdini.api import ( +from ayon_core.hosts.houdini.api import ( colorspace ) diff --git a/openpype/hosts/houdini/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_workfile.py similarity index 100% rename from openpype/hosts/houdini/plugins/publish/collect_workfile.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_workfile.py diff --git a/openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_workscene_fps.py similarity index 100% rename from openpype/hosts/houdini/plugins/publish/collect_workscene_fps.py rename to client/ayon_core/hosts/houdini/plugins/publish/collect_workscene_fps.py diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py new file mode 100644 index 
0000000000..daf30b26ed --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py @@ -0,0 +1,48 @@ +import os + +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop + +import hou + + +class ExtractAlembic(publish.Extractor): + + order = pyblish.api.ExtractorOrder + label = "Extract Alembic" + hosts = ["houdini"] + families = ["abc", "camera"] + targets = ["local", "remote"] + + def process(self, instance): + if instance.data.get("farm"): + self.log.debug("Should be processed on farm, skipping.") + return + + ropnode = hou.node(instance.data["instance_node"]) + + # Get the filename from the filename parameter + output = ropnode.evalParm("filename") + staging_dir = os.path.dirname(output) + instance.data["stagingDir"] = staging_dir + + file_name = os.path.basename(output) + + # We run the render + self.log.info("Writing alembic '%s' to '%s'" % (file_name, + staging_dir)) + + render_rop(ropnode) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'abc', + 'ext': 'abc', + 'files': file_name, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_ass.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_ass.py similarity index 95% rename from openpype/hosts/houdini/plugins/publish/extract_ass.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_ass.py index 28dd08f999..24b956ad81 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_ass.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_ass.py @@ -2,8 +2,8 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop import hou diff --git a/openpype/hosts/houdini/plugins/publish/extract_bgeo.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_bgeo.py similarity index 91% rename from openpype/hosts/houdini/plugins/publish/extract_bgeo.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_bgeo.py index a3840f8f73..448cf97848 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_bgeo.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_bgeo.py @@ -2,9 +2,9 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop -from openpype.hosts.houdini.api import lib +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop +from ayon_core.hosts.houdini.api import lib import hou diff --git a/openpype/hosts/houdini/plugins/publish/extract_composite.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py similarity index 93% rename from openpype/hosts/houdini/plugins/publish/extract_composite.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py index 11cf83a46d..c6dfb4332d 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_composite.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py @@ -1,8 +1,8 @@ import os import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop, splitext +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop, splitext import hou diff --git 
a/client/ayon_core/hosts/houdini/plugins/publish/extract_fbx.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_fbx.py new file mode 100644 index 0000000000..7ef004d7cb --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_fbx.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +"""Fbx Extractor for houdini. """ + +import os +import pyblish.api +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop + +import hou + + +class ExtractFBX(publish.Extractor): + + label = "Extract FBX" + families = ["fbx"] + hosts = ["houdini"] + + order = pyblish.api.ExtractorOrder + 0.1 + + def process(self, instance): + + # get rop node + ropnode = hou.node(instance.data.get("instance_node")) + output_file = ropnode.evalParm("sopoutput") + + # get staging_dir and file_name + staging_dir = os.path.normpath(os.path.dirname(output_file)) + file_name = os.path.basename(output_file) + + # render rop + self.log.debug("Writing FBX '%s' to '%s'", file_name, staging_dir) + render_rop(ropnode) + + # prepare representation + representation = { + "name": "fbx", + "ext": "fbx", + "files": file_name, + "stagingDir": staging_dir + } + + # A single frame may also be rendered without start/end frame. + if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa + representation["frameStart"] = instance.data["frameStartHandle"] + representation["frameEnd"] = instance.data["frameEndHandle"] + + # set value type for 'representations' key to list + if "representations" not in instance.data: + instance.data["representations"] = [] + + # update instance data + instance.data["stagingDir"] = staging_dir + instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_hda.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_hda.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/extract_hda.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_hda.py index 8b97bf364f..5fe83e0dcf 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_hda.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_hda.py @@ -2,7 +2,7 @@ import os from pprint import pformat import pyblish.api -from openpype.pipeline import publish +from ayon_core.pipeline import publish import hou diff --git a/openpype/hosts/houdini/plugins/publish/extract_mantra_ifd.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_mantra_ifd.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/extract_mantra_ifd.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_mantra_ifd.py index 894260d1bf..f0bcf4b371 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_mantra_ifd.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_mantra_ifd.py @@ -2,7 +2,7 @@ import pyblish.api -from openpype.pipeline import publish +from ayon_core.pipeline import publish import hou diff --git a/openpype/hosts/houdini/plugins/publish/extract_opengl.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py similarity index 93% rename from openpype/hosts/houdini/plugins/publish/extract_opengl.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py index 38808089ac..fabdfd9a9d 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_opengl.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py @@ -2,8 +2,8 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib 
import render_rop +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop import hou diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_redshift_proxy.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_redshift_proxy.py new file mode 100644 index 0000000000..e08a73ae8e --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_redshift_proxy.py @@ -0,0 +1,54 @@ +import os + +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop + +import hou + + +class ExtractRedshiftProxy(publish.Extractor): + + order = pyblish.api.ExtractorOrder + 0.1 + label = "Extract Redshift Proxy" + families = ["redshiftproxy"] + hosts = ["houdini"] + targets = ["local", "remote"] + + def process(self, instance): + if instance.data.get("farm"): + self.log.debug("Should be processed on farm, skipping.") + return + ropnode = hou.node(instance.data.get("instance_node")) + + # Get the filename from the filename parameter + # `.evalParm(parameter)` will make sure all tokens are resolved + output = ropnode.evalParm("RS_archive_file") + staging_dir = os.path.normpath(os.path.dirname(output)) + instance.data["stagingDir"] = staging_dir + file_name = os.path.basename(output) + + self.log.info("Writing Redshift Proxy '%s' to '%s'" % (file_name, + staging_dir)) + + render_rop(ropnode) + + output = instance.data["frames"] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "rs", + "ext": "rs", + "files": output, + "stagingDir": staging_dir, + } + + # A single frame may also be rendered without start/end frame. + if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa + representation["frameStart"] = instance.data["frameStartHandle"] + representation["frameEnd"] = instance.data["frameEndHandle"] + + instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_usd.py similarity index 92% rename from openpype/hosts/houdini/plugins/publish/extract_usd.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_usd.py index 61c1b477b2..0aeed06643 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_usd.py @@ -2,8 +2,8 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop import hou diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_usd_layered.py similarity index 98% rename from openpype/hosts/houdini/plugins/publish/extract_usd_layered.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_usd_layered.py index d6193f13c1..7160e3d282 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -6,18 +6,18 @@ import pyblish.api -from openpype.client import ( +from ayon_core.client import ( get_asset_by_name, get_subset_by_name, get_last_version_by_subset_id, get_representation_by_name, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, publish, ) -import openpype.hosts.houdini.api.usd as hou_usdlib -from 
openpype.hosts.houdini.api.lib import render_rop +import ayon_core.hosts.houdini.api.usd as hou_usdlib +from ayon_core.hosts.houdini.api.lib import render_rop class ExitStack(object): diff --git a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_vdb_cache.py similarity index 93% rename from openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py rename to client/ayon_core/hosts/houdini/plugins/publish/extract_vdb_cache.py index 8ac16704f0..4544d33e57 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_vdb_cache.py @@ -2,8 +2,8 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop import hou diff --git a/openpype/hosts/houdini/plugins/publish/help/validate_vdb_output_node.xml b/client/ayon_core/hosts/houdini/plugins/publish/help/validate_vdb_output_node.xml similarity index 100% rename from openpype/hosts/houdini/plugins/publish/help/validate_vdb_output_node.xml rename to client/ayon_core/hosts/houdini/plugins/publish/help/validate_vdb_output_node.xml diff --git a/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py new file mode 100644 index 0000000000..73145b211a --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py @@ -0,0 +1,50 @@ +import pyblish.api + +from ayon_core.lib import version_up +from ayon_core.pipeline import registered_host +from ayon_core.pipeline.publish import get_errored_plugins_from_context +from ayon_core.hosts.houdini.api import HoudiniHost +from ayon_core.pipeline.publish import KnownPublishError + + +class IncrementCurrentFile(pyblish.api.ContextPlugin): + """Increment the current file. + + Saves the current scene with an increased version number. + + """ + + label = "Increment current file" + order = pyblish.api.IntegratorOrder + 9.0 + hosts = ["houdini"] + families = ["workfile", + "redshift_rop", + "arnold_rop", + "mantra_rop", + "karma_rop", + "usdrender", + "publish.hou"] + optional = True + + def process(self, context): + + errored_plugins = get_errored_plugins_from_context(context) + if any( + plugin.__name__ == "HoudiniSubmitPublishDeadline" + for plugin in errored_plugins + ): + raise KnownPublishError( + "Skipping incrementing current file because " + "submission to deadline failed." + ) + + # Filename must not have changed since collecting + host = registered_host() # type: HoudiniHost + current_file = host.current_file() + if context.data["currentFile"] != current_file: + raise KnownPublishError( + "Collected filename mismatches from current scene name." 
+            )
+
+        new_filepath = version_up(current_file)
+        host.save_workfile(new_filepath)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/save_scene.py b/client/ayon_core/hosts/houdini/plugins/publish/save_scene.py
new file mode 100644
index 0000000000..7c453038ea
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/save_scene.py
@@ -0,0 +1,26 @@
+import pyblish.api
+
+from ayon_core.pipeline import registered_host
+
+
+class SaveCurrentScene(pyblish.api.ContextPlugin):
+    """Save current scene"""
+
+    label = "Save current file"
+    order = pyblish.api.ExtractorOrder - 0.49
+    hosts = ["houdini"]
+
+    def process(self, context):
+
+        # Filename must not have changed since collecting
+        host = registered_host()
+        current_file = host.get_current_workfile()
+        assert context.data['currentFile'] == current_file, (
+            "Collected filename mismatches the current scene name."
+        )
+
+        if host.workfile_has_unsaved_changes():
+            self.log.info("Saving current file: {}".format(current_file))
+            host.save_workfile(current_file)
+        else:
+            self.log.debug("No unsaved changes, skipping file save..")
diff --git a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
similarity index 99%
rename from openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
index af9e080466..18a459bf7b 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_abc_primitive_to_detail.py
@@ -2,7 +2,7 @@
 import pyblish.api
 from collections import defaultdict
 
-from openpype.pipeline import PublishValidationError
+from ayon_core.pipeline import PublishValidationError
 
 
 class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_alembic_face_sets.py
similarity index 100%
rename from openpype/hosts/houdini/plugins/publish/validate_alembic_face_sets.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_alembic_face_sets.py
diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_alembic_input_node.py
similarity index 97%
rename from openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_alembic_input_node.py
index 47c47e4ea2..dbc38058e6 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_alembic_input_node.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 import pyblish.api
-from openpype.pipeline import PublishValidationError
+from ayon_core.pipeline import PublishValidationError
 
 import hou
 
diff --git a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_animation_settings.py
similarity index 93%
rename from openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_animation_settings.py
index 79387fbef5..e28c38ece0 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_animation_settings.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_animation_settings.py
@@
-1,7 +1,7 @@ import pyblish.api -from openpype.pipeline.publish import PublishValidationError -from openpype.hosts.houdini.api import lib +from ayon_core.pipeline.publish import PublishValidationError +from ayon_core.hosts.houdini.api import lib import hou diff --git a/openpype/hosts/houdini/plugins/publish/validate_bypass.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_bypass.py similarity index 95% rename from openpype/hosts/houdini/plugins/publish/validate_bypass.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_bypass.py index c10c5a2c05..8a83ff42fb 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_bypass.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_bypass.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError import hou diff --git a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_camera_rop.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/validate_camera_rop.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_camera_rop.py index 41b5273e6a..ad4ace988a 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_camera_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_camera_rop.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Validator plugin for Houdini Camera ROP settings.""" import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateCameraROP(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py index 1fc767b309..95414ae7f1 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_cop_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py @@ -3,7 +3,7 @@ import pyblish.api import six -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateCopOutputNode(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_fbx_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_fbx_output_node.py similarity index 96% rename from openpype/hosts/houdini/plugins/publish/validate_fbx_output_node.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_fbx_output_node.py index 894dad7d72..08eaa182c0 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_fbx_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_fbx_output_node.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError -from openpype.hosts.houdini.api.action import ( +from ayon_core.pipeline import PublishValidationError +from ayon_core.hosts.houdini.api.action import ( SelectInvalidAction, SelectROPAction, ) -from openpype.hosts.houdini.api.lib import get_obj_node_output +from ayon_core.hosts.houdini.api.lib import get_obj_node_output import hou diff --git a/openpype/hosts/houdini/plugins/publish/validate_file_extension.py 
b/client/ayon_core/hosts/houdini/plugins/publish/validate_file_extension.py
similarity index 94%
rename from openpype/hosts/houdini/plugins/publish/validate_file_extension.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_file_extension.py
index 6594d10851..1134f8853a 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_file_extension.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_file_extension.py
@@ -2,8 +2,8 @@
 import os
 import pyblish.api
 
-from openpype.hosts.houdini.api import lib
-from openpype.pipeline import PublishValidationError
+from ayon_core.hosts.houdini.api import lib
+from ayon_core.pipeline import PublishValidationError
 
 import hou
 
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_frame_range.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_frame_range.py
new file mode 100644
index 0000000000..36e1b9b2a5
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_frame_range.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+from ayon_core.pipeline import PublishValidationError
+from ayon_core.pipeline.publish import RepairAction
+from ayon_core.hosts.houdini.api.action import SelectInvalidAction
+
+import hou
+
+
+class DisableUseAssetHandlesAction(RepairAction):
+    label = "Disable use asset handles"
+    icon = "mdi.toggle-switch-off"
+
+
+class ValidateFrameRange(pyblish.api.InstancePlugin):
+    """Validate Frame Range.
+
+    Due to the usage of start and end handles, the frame range must be
+    >= (start handle + end handle); otherwise applying the handles
+    results in frameEnd being smaller than frameStart.
+    """
+
+    order = pyblish.api.ValidatorOrder - 0.1
+    hosts = ["houdini"]
+    label = "Validate Frame Range"
+    actions = [DisableUseAssetHandlesAction, SelectInvalidAction]
+
+    def process(self, instance):
+
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise PublishValidationError(
+                title="Invalid Frame Range",
+                message=(
+                    "Invalid frame range because the instance "
+                    "start frame ({0[frameStart]}) is higher than "
+                    "the end frame ({0[frameEnd]})"
+                    .format(instance.data)
+                ),
+                description=(
+                    "## Invalid Frame Range\n"
+                    "The frame range for the instance is invalid because "
+                    "the start frame is higher than the end frame.\n\nThis "
+                    "is likely due to asset handles being applied to your "
+                    "instance or the ROP node's start frame "
+                    "being set higher than the end frame.\n\nIf your ROP "
+                    "frame range is correct and you do not want to apply "
+                    "asset handles, make sure to disable 'Use asset handles' "
+                    "on the publish instance."
+                )
+            )
+
+    @classmethod
+    def get_invalid(cls, instance):
+
+        if not instance.data.get("instance_node"):
+            return
+
+        rop_node = hou.node(instance.data["instance_node"])
+        frame_start = instance.data.get("frameStart")
+        frame_end = instance.data.get("frameEnd")
+
+        if frame_start is None or frame_end is None:
+            cls.log.debug(
+                "Skipping frame range validation for "
+                "instance without frame data: {}".format(rop_node.path())
+            )
+            return
+
+        if frame_start > frame_end:
+            cls.log.info(
+                "The ROP node render range is set to "
+                "{0[frameStartHandle]} - {0[frameEndHandle]} "
+                "The asset handles applied to the instance are start handle "
+                "{0[handleStart]} and end handle {0[handleEnd]}"
+                .format(instance.data)
+            )
+            return [rop_node]
+
+    @classmethod
+    def repair(cls, instance):
+
+        if not cls.get_invalid(instance):
+            # Already fixed
+            return
+
+        # Disable use asset handles
+        context = instance.context
+        create_context = context.data["create_context"]
+        instance_id = instance.data.get("instance_id")
+        if not instance_id:
+            cls.log.debug("'{}' must have an instance id"
+                          .format(instance))
+            return
+
+        created_instance = create_context.get_instance_by_id(instance_id)
+        if not created_instance:
+            cls.log.debug("Unable to find instance '{}' by id"
+                          .format(instance))
+            return
+
+        created_instance.publish_attributes["CollectAssetHandles"]["use_handles"] = False  # noqa
+
+        create_context.save_changes()
+        cls.log.debug("Use asset handles is turned off for '{}'"
+                      .format(instance))
diff --git a/openpype/hosts/houdini/plugins/publish/validate_frame_token.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_frame_token.py
similarity index 97%
rename from openpype/hosts/houdini/plugins/publish/validate_frame_token.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_frame_token.py
index 06d4003295..20fb859146 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_frame_token.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_frame_token.py
@@ -1,6 +1,6 @@
 import pyblish.api
 
-from openpype.hosts.houdini.api import lib
+from ayon_core.hosts.houdini.api import lib
 
 import hou
 
diff --git a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_houdini_license_category.py
similarity index 96%
rename from openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_houdini_license_category.py
index 108a700bbe..0df858ca4b 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_houdini_license_category.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_houdini_license_category.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 import pyblish.api
-from openpype.pipeline import PublishValidationError
+from ayon_core.pipeline import PublishValidationError
 
 import hou
 
diff --git a/openpype/hosts/houdini/plugins/publish/validate_mesh_is_static.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_mesh_is_static.py
similarity index 86%
rename from openpype/hosts/houdini/plugins/publish/validate_mesh_is_static.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_mesh_is_static.py
index b499682e0b..289e00339b 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_mesh_is_static.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_mesh_is_static.py
@@ -1,14 +1,14 @@
 # -*- coding: utf-8 -*-
 """Validator for correct naming of Static Meshes."""
 import pyblish.api
-from
openpype.pipeline import ( +from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin ) -from openpype.pipeline.publish import ValidateContentsOrder +from ayon_core.pipeline.publish import ValidateContentsOrder -from openpype.hosts.houdini.api.action import SelectInvalidAction -from openpype.hosts.houdini.api.lib import get_output_children +from ayon_core.hosts.houdini.api.action import SelectInvalidAction +from ayon_core.hosts.houdini.api.lib import get_output_children class ValidateMeshIsStatic(pyblish.api.InstancePlugin, diff --git a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py similarity index 94% rename from openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py index 9d1f92a101..38f1c4e176 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_mkpaths_toggled.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_no_errors.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/validate_no_errors.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_no_errors.py index 6c48eae70a..ae1e5cad27 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_no_errors.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_no_errors.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import pyblish.api import hou -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError def cook_in_range(node, start, end): diff --git a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py similarity index 98% rename from openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py index 471fa5b6d1..f63cb23138 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_primitive_hierarchy_paths.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError -from openpype.pipeline.publish import ( +from ayon_core.pipeline import PublishValidationError +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairAction, ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_remote_publish.py similarity index 91% rename from openpype/hosts/houdini/plugins/publish/validate_remote_publish.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_remote_publish.py index 4f71d79382..133b45e8c3 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_remote_publish.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_remote_publish.py @@ -1,9 +1,9 @@ # -*-coding: utf-8 -*- import pyblish.api -from openpype.hosts.houdini.api import lib 
-from openpype.pipeline.publish import RepairContextAction -from openpype.pipeline import PublishValidationError +from ayon_core.hosts.houdini.api import lib +from ayon_core.pipeline.publish import RepairContextAction +from ayon_core.pipeline import PublishValidationError import hou diff --git a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py similarity index 90% rename from openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py index 8ec62f4e85..d4c6e7a45e 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_remote_publish_enabled.py @@ -2,8 +2,8 @@ import pyblish.api import hou -from openpype.pipeline.publish import RepairContextAction -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline.publish import RepairContextAction +from ayon_core.pipeline import PublishValidationError class ValidateRemotePublishEnabled(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_review_colorspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py similarity index 91% rename from openpype/hosts/houdini/plugins/publish/validate_review_colorspace.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py index 03ecd1b052..031138e21d 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_review_colorspace.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin ) -from openpype.pipeline.publish import RepairAction -from openpype.hosts.houdini.api.action import SelectROPAction +from ayon_core.pipeline.publish import RepairAction +from ayon_core.hosts.houdini.api.action import SelectROPAction import os import hou @@ -68,7 +68,7 @@ def repair(cls, instance): It is a helper action more than a repair action, used to set colorspace on opengl node to the default view. 
""" - from openpype.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa + from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa rop_node = hou.node(instance.data["instance_node"]) diff --git a/openpype/hosts/houdini/plugins/publish/validate_scene_review.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/validate_scene_review.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py index a44b7e1597..b6007d3f0f 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_scene_review.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError import hou diff --git a/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_sop_output_node.py similarity index 96% rename from openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_sop_output_node.py index 9590e37d26..61cf7596ac 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_sop_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_sop_output_node.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError -from openpype.hosts.houdini.api.action import ( +from ayon_core.pipeline import PublishValidationError +from ayon_core.hosts.houdini.api.action import ( SelectInvalidAction, SelectROPAction, ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_subset_name.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_subset_name.py similarity index 92% rename from openpype/hosts/houdini/plugins/publish/validate_subset_name.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_subset_name.py index 7bed74ebb1..67807b5366 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_subset_name.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_subset_name.py @@ -1,16 +1,16 @@ # -*- coding: utf-8 -*- """Validator for correct naming of Static Meshes.""" import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin ) -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairAction, ) -from openpype.hosts.houdini.api.action import SelectInvalidAction -from openpype.pipeline.create import get_subset_name +from ayon_core.hosts.houdini.api.action import SelectInvalidAction +from ayon_core.pipeline.create import get_subset_name import hou diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_unreal_staticmesh_naming.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_unreal_staticmesh_naming.py new file mode 100644 index 0000000000..dbee293074 --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_unreal_staticmesh_naming.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +"""Validator for correct naming of Static Meshes.""" +import pyblish.api +from ayon_core.pipeline import ( + PublishValidationError, + OptionalPyblishPluginMixin +) +from ayon_core.pipeline.publish import ValidateContentsOrder + +from ayon_core.hosts.houdini.api.action import 
SelectInvalidAction
+from ayon_core.hosts.houdini.api.lib import get_output_children
+
+import hou
+
+
+class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin,
+                                   OptionalPyblishPluginMixin):
+    """Validate name of Unreal Static Mesh.
+
+    This validator checks if the output node name has a collision prefix:
+        - UBX
+        - UCP
+        - USP
+        - UCX
+
+    This validator also checks if the subset name is correct
+        - {static mesh prefix}_{Asset-Name}{Variant}.
+
+    """
+
+    families = ["staticMesh"]
+    hosts = ["houdini"]
+    label = "Unreal Static Mesh Name (FBX)"
+    order = ValidateContentsOrder + 0.1
+    actions = [SelectInvalidAction]
+
+    optional = True
+    collision_prefixes = []
+    static_mesh_prefix = ""
+
+    @classmethod
+    def apply_settings(cls, project_settings, system_settings):
+
+        settings = (
+            project_settings["houdini"]["create"]["CreateStaticMesh"]
+        )
+        cls.collision_prefixes = settings["collision_prefixes"]
+        cls.static_mesh_prefix = settings["static_mesh_prefix"]
+
+    def process(self, instance):
+
+        if not self.is_active(instance.data):
+            return
+
+        invalid = self.get_invalid(instance)
+        if invalid:
+            nodes = [n.path() for n in invalid]
+            raise PublishValidationError(
+                "See log for details. "
+                "Invalid nodes: {0}".format(nodes)
+            )
+
+    @classmethod
+    def get_invalid(cls, instance):
+
+        invalid = []
+
+        rop_node = hou.node(instance.data["instance_node"])
+        output_node = instance.data.get("output_node")
+        if output_node is None:
+            cls.log.debug(
+                "No Output Node, skipping check.."
+            )
+            return
+
+        if rop_node.evalParm("buildfrompath"):
+            # This validator doesn't support the naming check when
+            # 'Build Hierarchy from Path Attribute' is used
+            cls.log.info(
+                "Using 'Build Hierarchy from Path Attribute', skipping check.."
+            )
+            return
+
+        # Check node names
+        all_outputs = get_output_children(output_node, include_sops=False)
+        for output in all_outputs:
+            for prefix in cls.collision_prefixes:
+                if output.name().startswith(prefix):
+                    invalid.append(output)
+                    cls.log.error(
+                        "Invalid node name: Node '%s' "
+                        "includes a collision prefix '%s'",
+                        output.path(), prefix
+                    )
+                    break
+
+        return invalid
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py
similarity index 94%
rename from openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py
index f2c7878c4e..2b727670ad 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_layer_path_backslashes.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 import pyblish.api
 
-import openpype.hosts.houdini.api.usd as hou_usdlib
-from openpype.pipeline import PublishValidationError
+import ayon_core.hosts.houdini.api.usd as hou_usdlib
+from ayon_core.pipeline import PublishValidationError
 
 import hou
 
diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py
similarity index 94%
rename from openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py
rename to client/ayon_core/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py
index b8faae16d7..dc1a19cae0 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_model_and_shade.py
@@
-1,8 +1,8 @@ # -*- coding: utf-8 -*- import pyblish.api -import openpype.hosts.houdini.api.usd as hou_usdlib -from openpype.pipeline import PublishValidationError +import ayon_core.hosts.houdini.api.usd as hou_usdlib +from ayon_core.pipeline import PublishValidationError from pxr import UsdShade, UsdRender, UsdLux diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_output_node.py similarity index 96% rename from openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_usd_output_node.py index 5cb5bd35fb..968d64e8fc 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_output_node.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateUSDOutputNode(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_render_product_names.py similarity index 94% rename from openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_usd_render_product_names.py index 1daa96f2b9..4825b7cc71 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_render_product_names.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_render_product_names.py @@ -2,7 +2,7 @@ import os import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateUSDRenderProductNames(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_setdress.py similarity index 94% rename from openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_usd_setdress.py index b96d185482..40b67e896a 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_setdress.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_setdress.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- import pyblish.api -import openpype.hosts.houdini.api.usd as hou_usdlib -from openpype.pipeline import PublishValidationError +import ayon_core.hosts.houdini.api.usd as hou_usdlib +from ayon_core.pipeline import PublishValidationError class ValidateUsdSetDress(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py similarity index 87% rename from openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py index 0db782d545..8fa20ace02 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py @@ -3,9 +3,9 @@ import pyblish.api -from openpype.client import get_subset_by_name -from openpype.pipeline.publish import ValidateContentsOrder -from openpype.pipeline import PublishValidationError +from ayon_core.client import get_subset_by_name +from 
ayon_core.pipeline.publish import ValidateContentsOrder +from ayon_core.pipeline import PublishValidationError class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py index cb2099437d..d85f20e3ce 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_usd_shade_workspace.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError import hou diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_vdb_output_node.py similarity index 97% rename from openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_vdb_output_node.py index b51e1007f0..319a9a4b50 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_vdb_output_node.py @@ -4,8 +4,8 @@ import pyblish.api import hou -from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.houdini.api.action import SelectInvalidAction +from ayon_core.pipeline import PublishXmlValidationError +from ayon_core.hosts.houdini.api.action import SelectInvalidAction def group_consecutive_numbers(nums): diff --git a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_workfile_paths.py similarity index 96% rename from openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py rename to client/ayon_core/hosts/houdini/plugins/publish/validate_workfile_paths.py index afe05e3173..7984b7615c 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_workfile_paths.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- import pyblish.api import hou -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin ) -from openpype.pipeline.publish import RepairAction +from ayon_core.pipeline.publish import RepairAction class ValidateWorkfilePaths( diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.xml b/client/ayon_core/hosts/houdini/startup/MainMenuCommon.xml similarity index 80% rename from openpype/hosts/houdini/startup/MainMenuCommon.xml rename to client/ayon_core/hosts/houdini/startup/MainMenuCommon.xml index 0903aef7bc..b2ea142cd5 100644 --- a/openpype/hosts/houdini/startup/MainMenuCommon.xml +++ b/client/ayon_core/hosts/houdini/startup/MainMenuCommon.xml @@ -4,11 +4,11 @@ @@ -20,7 +20,7 @@ return label @@ -30,7 +30,7 @@ host_tools.show_publisher(parent, tab="create") @@ -40,7 +40,7 @@ host_tools.show_loader(parent=parent, use_context=True) @@ -50,7 +50,7 @@ host_tools.show_publisher(parent, tab="publish") @@ -60,7 +60,7 @@ host_tools.show_scene_inventory(parent) @@ -72,7 +72,7 @@ host_tools.show_library_loader(parent=parent) @@ -81,16 +81,16 @@ host_tools.show_workfiles(parent) @@ -99,7 +99,7 @@ 
openpype.hosts.houdini.api.lib.update_houdini_vars_context_dialog() diff --git a/client/ayon_core/hosts/houdini/startup/python2.7libs/pythonrc.py b/client/ayon_core/hosts/houdini/startup/python2.7libs/pythonrc.py new file mode 100644 index 0000000000..6e45eb6a10 --- /dev/null +++ b/client/ayon_core/hosts/houdini/startup/python2.7libs/pythonrc.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +"""AYON startup script.""" +from ayon_core.pipeline import install_host +from ayon_core.hosts.houdini.api import HoudiniHost + + +def main(): + print("Installing AYON ...") + install_host(HoudiniHost()) + + +main() diff --git a/client/ayon_core/hosts/houdini/startup/python3.10libs/pythonrc.py b/client/ayon_core/hosts/houdini/startup/python3.10libs/pythonrc.py new file mode 100644 index 0000000000..6e45eb6a10 --- /dev/null +++ b/client/ayon_core/hosts/houdini/startup/python3.10libs/pythonrc.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +"""AYON startup script.""" +from ayon_core.pipeline import install_host +from ayon_core.hosts.houdini.api import HoudiniHost + + +def main(): + print("Installing AYON ...") + install_host(HoudiniHost()) + + +main() diff --git a/client/ayon_core/hosts/houdini/startup/python3.7libs/pythonrc.py b/client/ayon_core/hosts/houdini/startup/python3.7libs/pythonrc.py new file mode 100644 index 0000000000..6e45eb6a10 --- /dev/null +++ b/client/ayon_core/hosts/houdini/startup/python3.7libs/pythonrc.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +"""AYON startup script.""" +from ayon_core.pipeline import install_host +from ayon_core.hosts.houdini.api import HoudiniHost + + +def main(): + print("Installing AYON ...") + install_host(HoudiniHost()) + + +main() diff --git a/client/ayon_core/hosts/houdini/startup/python3.9libs/pythonrc.py b/client/ayon_core/hosts/houdini/startup/python3.9libs/pythonrc.py new file mode 100644 index 0000000000..6e45eb6a10 --- /dev/null +++ b/client/ayon_core/hosts/houdini/startup/python3.9libs/pythonrc.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +"""AYON startup script.""" +from ayon_core.pipeline import install_host +from ayon_core.hosts.houdini.api import HoudiniHost + + +def main(): + print("Installing AYON ...") + install_host(HoudiniHost()) + + +main() diff --git a/openpype/hosts/max/__init__.py b/client/ayon_core/hosts/max/__init__.py similarity index 100% rename from openpype/hosts/max/__init__.py rename to client/ayon_core/hosts/max/__init__.py diff --git a/client/ayon_core/hosts/max/addon.py b/client/ayon_core/hosts/max/addon.py new file mode 100644 index 0000000000..416014025c --- /dev/null +++ b/client/ayon_core/hosts/max/addon.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +import os +from ayon_core.modules import OpenPypeModule, IHostAddon + +MAX_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class MaxAddon(OpenPypeModule, IHostAddon): + name = "max" + host_name = "max" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Remove auto screen scale factor for Qt + # - let 3dsmax decide its value + env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) + + def get_workfile_extensions(self): + return [".max"] + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(MAX_HOST_DIR, "hooks") + ] diff --git a/openpype/hosts/max/api/__init__.py b/client/ayon_core/hosts/max/api/__init__.py similarity index 100% rename from openpype/hosts/max/api/__init__.py rename to client/ayon_core/hosts/max/api/__init__.py diff --git
a/openpype/hosts/max/api/colorspace.py b/client/ayon_core/hosts/max/api/colorspace.py similarity index 100% rename from openpype/hosts/max/api/colorspace.py rename to client/ayon_core/hosts/max/api/colorspace.py diff --git a/client/ayon_core/hosts/max/api/lib.py b/client/ayon_core/hosts/max/api/lib.py new file mode 100644 index 0000000000..05c3364e4a --- /dev/null +++ b/client/ayon_core/hosts/max/api/lib.py @@ -0,0 +1,562 @@ +# -*- coding: utf-8 -*- +"""Library of functions useful for 3dsmax pipeline.""" +import contextlib +import logging +import json +from typing import Any, Dict, Union + +import six +from ayon_core.pipeline import get_current_project_name, colorspace +from ayon_core.settings import get_project_settings +from ayon_core.pipeline.context_tools import ( + get_current_project, get_current_project_asset) +from ayon_core.style import load_stylesheet +from pymxs import runtime as rt + + +JSON_PREFIX = "JSON::" +log = logging.getLogger("ayon_core.hosts.max") + + +def get_main_window(): + """Acquire Max's main window""" + from qtpy import QtWidgets + top_widgets = QtWidgets.QApplication.topLevelWidgets() + name = "QmaxApplicationWindow" + for widget in top_widgets: + if ( + widget.inherits("QMainWindow") + and widget.metaObject().className() == name + ): + return widget + raise RuntimeError('Could not find 3dsMax main window.') + + +def imprint(node_name: str, data: dict) -> bool: + node = rt.GetNodeByName(node_name) + if not node: + return False + + for k, v in data.items(): + if isinstance(v, (dict, list)): + rt.SetUserProp(node, k, f"{JSON_PREFIX}{json.dumps(v)}") + else: + rt.SetUserProp(node, k, v) + + return True + + +def lsattr( + attr: str, + value: Union[str, None] = None, + root: Union[str, None] = None) -> list: + """List nodes having attribute with specified value. + + Args: + attr (str): Attribute name to match. + value (str, Optional): Value to match; if omitted, all nodes + with the specified attribute are returned regardless of value. + root (str, Optional): Root node name. If omitted, scene root is used. + + Returns: + list of nodes.
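+ + Example (node and attribute values are illustrative only): + >>> lsattr("id", value="pyblish.avalon.instance") + [<Container01>, <Container02>]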
+ """ + root = rt.RootNode if root is None else rt.GetNodeByName(root) + + def output_node(node, nodes): + nodes.append(node) + for child in node.Children: + output_node(child, nodes) + + nodes = [] + output_node(root, nodes) + return [ + n for n in nodes + if rt.GetUserProp(n, attr) == value + ] if value else [ + n for n in nodes + if rt.GetUserProp(n, attr) + ] + + +def read(container) -> dict: + data = {} + props = rt.GetUserPropBuffer(container) + # this shouldn't happen but let's guard against it anyway + if not props: + return data + + for line in props.split("\r\n"): + try: + key, value = line.split("=") + except ValueError: + # if the line cannot be split we can't really parse it + continue + + value = value.strip() + if isinstance(value.strip(), six.string_types) and \ + value.startswith(JSON_PREFIX): + with contextlib.suppress(json.JSONDecodeError): + value = json.loads(value[len(JSON_PREFIX):]) + + # default value behavior + # convert maxscript boolean values + if value == "true": + value = True + elif value == "false": + value = False + + data[key.strip()] = value + + data["instance_node"] = container.Name + + return data + + +@contextlib.contextmanager +def maintained_selection(): + previous_selection = rt.GetCurrentSelection() + try: + yield + finally: + if previous_selection: + rt.Select(previous_selection) + else: + rt.Select() + + +def get_all_children(parent, node_type=None): + """Handy function to get all the children of a given node + + Args: + parent (3dsmax Node1): Node to get all children of. + node_type (None, runtime.class): give class to check for + e.g. rt.FFDBox/rt.GeometryClass etc. + + Returns: + list: list of all children of the parent node + """ + def list_children(node): + children = [] + for c in node.Children: + children.append(c) + children = children + list_children(c) + return children + child_list = list_children(parent) + + return ([x for x in child_list if rt.SuperClassOf(x) == node_type] + if node_type else child_list) + + +def get_current_renderer(): + """ + Notes: + Get current renderer for Max + + Returns: + "{Current Renderer}:{Current Renderer}" + e.g. "Redshift_Renderer:Redshift_Renderer" + """ + return rt.renderers.production + + +def get_default_render_folder(project_setting=None): + return (project_setting["max"] + ["RenderSettings"] + ["default_render_image_folder"]) + + +def set_render_frame_range(start_frame, end_frame): + """ + Note: + Frame range can be specified in different types. Possible values are: + * `1` - Single frame. + * `2` - Active time segment ( animationRange ). + * `3` - User specified Range. + * `4` - User specified Frame pickup string (for example `1,3,5-12`). + + Todo: + Current type is hard-coded, there should be a custom setting for this. + """ + rt.rendTimeType = 3 + if start_frame is not None and end_frame is not None: + rt.rendStart = int(start_frame) + rt.rendEnd = int(end_frame) + + +def get_multipass_setting(project_setting=None): + return (project_setting["max"] + ["RenderSettings"] + ["multipass"]) + + +def set_scene_resolution(width: int, height: int): + """Set the render resolution + + Args: + width(int): value of the width + height(int): value of the height + + Returns: + None + + """ + # make sure the render dialog is closed + # for the update of resolution + # Changing the Render Setup dialog settings should be done + # with the actual Render Setup dialog in a closed state. 
+    if rt.renderSceneDialog.isOpen(): + rt.renderSceneDialog.close() + + rt.renderWidth = width + rt.renderHeight = height + + +def reset_scene_resolution(): + """Apply the scene resolution from the project definition. + + Scene resolution can be overwritten by an asset if the asset.data contains + any information regarding scene resolution. + + Returns: + None + """ + data = ["data.resolutionWidth", "data.resolutionHeight"] + project_resolution = get_current_project(fields=data) + project_resolution_data = project_resolution["data"] + asset_resolution = get_current_project_asset(fields=data) + asset_resolution_data = asset_resolution["data"] + # Set project resolution + project_width = int(project_resolution_data.get("resolutionWidth", 1920)) + project_height = int(project_resolution_data.get("resolutionHeight", 1080)) + width = int(asset_resolution_data.get("resolutionWidth", project_width)) + height = int(asset_resolution_data.get("resolutionHeight", project_height)) + + set_scene_resolution(width, height) + + +def get_frame_range(asset_doc=None) -> Union[Dict[str, Any], None]: + """Get the current asset's frame range and handles. + + Args: + asset_doc (dict): Asset Entity Data + + Returns: + dict: with frame start, frame end, handle start, handle end. + """ + # Set frame start/end + if asset_doc is None: + asset_doc = get_current_project_asset() + + data = asset_doc["data"] + frame_start = data.get("frameStart") + frame_end = data.get("frameEnd") + + if frame_start is None or frame_end is None: + return {} + + frame_start = int(frame_start) + frame_end = int(frame_end) + handle_start = int(data.get("handleStart", 0)) + handle_end = int(data.get("handleEnd", 0)) + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + + return { + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStartHandle": frame_start_handle, + "frameEndHandle": frame_end_handle, + } + + +def reset_frame_range(fps: bool = True): + """Set frame range to current asset. + + From the 3dsmax documentation: + + animationRange: A System Global variable which lets you get and + set an Interval value that defines the start and end frames + of the Active Time Segment. + frameRate: A System Global variable which lets you get + and set an Integer value that defines the current + scene frame rate in frames-per-second.
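+ + Example (uses the current asset context; no arguments needed): + >>> reset_frame_range() # also syncs rt.frameRate when fps=True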
+ """ + if fps: + data_fps = get_current_project(fields=["data.fps"]) + fps_number = float(data_fps["data"]["fps"]) + rt.frameRate = fps_number + frame_range = get_frame_range() + + set_timeline( + frame_range["frameStartHandle"], frame_range["frameEndHandle"]) + set_render_frame_range( + frame_range["frameStartHandle"], frame_range["frameEndHandle"]) + + +def reset_unit_scale(): + """Apply the unit scale setting to 3dsMax + """ + project_name = get_current_project_name() + settings = get_project_settings(project_name).get("max") + scene_scale = settings.get("unit_scale_settings", + {}).get("scene_unit_scale") + if scene_scale: + rt.units.DisplayType = rt.Name("Metric") + rt.units.MetricType = rt.Name(scene_scale) + else: + rt.units.DisplayType = rt.Name("Generic") + + +def convert_unit_scale(): + """Convert system unit scale in 3dsMax + for fbx export + + Returns: + str: unit scale + """ + unit_scale_dict = { + "millimeters": "mm", + "centimeters": "cm", + "meters": "m", + "kilometers": "km" + } + current_unit_scale = rt.Execute("units.MetricType as string") + return unit_scale_dict[current_unit_scale] + + +def set_context_setting(): + """Apply the project settings from the project definition + + Settings can be overwritten by an asset if the asset.data contains + any information regarding those settings. + + Examples of settings: + frame range + resolution + + Returns: + None + """ + reset_scene_resolution() + reset_frame_range() + reset_colorspace() + reset_unit_scale() + + +def get_max_version(): + """ + Args: + get max version date for deadline + + Returns: + #(25000, 62, 0, 25, 0, 0, 997, 2023, "") + max_info[7] = max version date + """ + max_info = rt.MaxVersion() + return max_info[7] + + +def is_headless(): + """Check if 3dsMax runs in batch mode. 
+ If it returns True, it runs in 3dsbatch.exe + If it returns False, it runs in 3dsmax.exe + """ + return rt.maxops.isInNonInteractiveMode() + + +def set_timeline(frameStart, frameEnd): + """Set frame range for timeline editor in Max + """ + rt.animationRange = rt.interval(frameStart, frameEnd) + return rt.animationRange + + +def reset_colorspace(): + """OCIO Configuration + Supports in 3dsMax 2024+ + + """ + if int(get_max_version()) < 2024: + return + project_name = get_current_project_name() + colorspace_mgr = rt.ColorPipelineMgr + project_settings = get_project_settings(project_name) + + max_config_data = colorspace.get_imageio_config( + project_name, "max", project_settings) + if max_config_data: + ocio_config_path = max_config_data["path"] + colorspace_mgr = rt.ColorPipelineMgr + colorspace_mgr.Mode = rt.Name("OCIO_Custom") + colorspace_mgr.OCIOConfigPath = ocio_config_path + + +def check_colorspace(): + parent = get_main_window() + if parent is None: + log.info("Skipping outdated pop-up " + "because Max main window can't be found.") + if int(get_max_version()) >= 2024: + color_mgr = rt.ColorPipelineMgr + project_name = get_current_project_name() + project_settings = get_project_settings(project_name) + max_config_data = colorspace.get_imageio_config( + project_name, "max", project_settings) + if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"): + if not is_headless(): + from ayon_core.tools.utils import SimplePopup + dialog = SimplePopup(parent=parent) + dialog.setWindowTitle("Warning: Wrong OCIO Mode") + dialog.set_message("This scene has wrong OCIO " + "Mode setting.") + dialog.set_button_text("Fix") + dialog.setStyleSheet(load_stylesheet()) + dialog.on_clicked.connect(reset_colorspace) + dialog.show() + +def unique_namespace(namespace, format="%02d", + prefix="", suffix="", con_suffix="CON"): + """Return unique namespace + + Arguments: + namespace (str): Name of namespace to consider + format (str, optional): Formatting of the given iteration number + suffix (str, optional): Only consider namespaces with this suffix. + con_suffix: max only, for finding the name of the master container + + >>> unique_namespace("bar") + # bar01 + >>> unique_namespace(":hello") + # :hello01 + >>> unique_namespace("bar:", suffix="_NS") + # bar01_NS: + + """ + + def current_namespace(): + current = namespace + # When inside a namespace Max adds no trailing : + if not current.endswith(":"): + current += ":" + return current + + # Always check against the absolute namespace root + # There's no clash with :x if we're defining namespace :a:x + ROOT = ":" if namespace.startswith(":") else current_namespace() + + # Strip trailing `:` tokens since we might want to add a suffix + start = ":" if namespace.startswith(":") else "" + end = ":" if namespace.endswith(":") else "" + namespace = namespace.strip(":") + if ":" in namespace: + # Split off any nesting that we don't uniqify anyway. 
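+        # e.g. "char:hero" -> parents "char", namespace "hero"; only the +        # last token receives the unique number suffix below.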
+ parents, namespace = namespace.rsplit(":", 1) + start += parents + ":" + ROOT += start + + iteration = 1 + increment_version = True + while increment_version: + nr_namespace = namespace + format % iteration + unique = prefix + nr_namespace + suffix + container_name = f"{unique}:{namespace}{con_suffix}" + if not rt.getNodeByName(container_name): + name_space = start + unique + end + increment_version = False + return name_space + else: + increment_version = True + iteration += 1 + + +def get_namespace(container_name): + """Get the namespace and name of the sub-container + + Args: + container_name (str): the name of master container + + Raises: + RuntimeError: when there is no master container found + + Returns: + namespace (str): namespace of the sub-container + name (str): name of the sub-container + """ + node = rt.getNodeByName(container_name) + if not node: + raise RuntimeError("Master Container Not Found..") + name = rt.getUserProp(node, "name") + namespace = rt.getUserProp(node, "namespace") + return namespace, name + + +def object_transform_set(container_children): + """A function which allows to store the transform of + previous loaded object(s) + Args: + container_children(list): A list of nodes + + Returns: + transform_set (dict): A dict with all transform data of + the previous loaded object(s) + """ + transform_set = {} + for node in container_children: + name = f"{node.name}.transform" + transform_set[name] = node.pos + name = f"{node.name}.scale" + transform_set[name] = node.scale + return transform_set + + +def get_plugins() -> list: + """Get all loaded plugins in 3dsMax + + Returns: + plugin_info_list: a list of loaded plugins + """ + manager = rt.PluginManager + count = manager.pluginDllCount + plugin_info_list = [] + for p in range(1, count + 1): + plugin_info = manager.pluginDllName(p) + plugin_info_list.append(plugin_info) + + return plugin_info_list + + +@contextlib.contextmanager +def render_resolution(width, height): + """Set render resolution option during context + + Args: + width (int): render width + height (int): render height + """ + current_renderWidth = rt.renderWidth + current_renderHeight = rt.renderHeight + try: + rt.renderWidth = width + rt.renderHeight = height + yield + finally: + rt.renderWidth = current_renderWidth + rt.renderHeight = current_renderHeight + + +@contextlib.contextmanager +def suspended_refresh(): + """Suspended refresh for scene and modify panel redraw. 
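+ + Usage sketch (wrap bulk scene edits; the helper name is hypothetical): + >>> with suspended_refresh(): + ... import_heavy_assets()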
+ """ + if is_headless(): + yield + return + rt.disableSceneRedraw() + rt.suspendEditing() + try: + yield + + finally: + rt.enableSceneRedraw() + rt.resumeEditing() diff --git a/client/ayon_core/hosts/max/api/lib_renderproducts.py b/client/ayon_core/hosts/max/api/lib_renderproducts.py new file mode 100644 index 0000000000..710ed0031a --- /dev/null +++ b/client/ayon_core/hosts/max/api/lib_renderproducts.py @@ -0,0 +1,275 @@ +# Render Element Example : For scanline render, VRay +# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC +# arnold +# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html +import os + +from pymxs import runtime as rt + +from ayon_core.hosts.max.api.lib import get_current_renderer +from ayon_core.pipeline import get_current_project_name +from ayon_core.settings import get_project_settings + + +class RenderProducts(object): + + def __init__(self, project_settings=None): + self._project_settings = project_settings + if not self._project_settings: + self._project_settings = get_project_settings( + get_current_project_name() + ) + + def get_beauty(self, container): + render_dir = os.path.dirname(rt.rendOutputFilename) + + output_file = os.path.join(render_dir, container) + + setting = self._project_settings + img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa + + start_frame = int(rt.rendStart) + end_frame = int(rt.rendEnd) + 1 + + return { + "beauty": self.get_expected_beauty( + output_file, start_frame, end_frame, img_fmt + ) + } + + def get_multiple_beauty(self, outputs, cameras): + beauty_output_frames = dict() + for output, camera in zip(outputs, cameras): + filename, ext = os.path.splitext(output) + filename = filename.replace(".", "") + ext = ext.replace(".", "") + start_frame = int(rt.rendStart) + end_frame = int(rt.rendEnd) + 1 + new_beauty = self.get_expected_beauty( + filename, start_frame, end_frame, ext + ) + beauty_output = ({ + f"{camera}_beauty": new_beauty + }) + beauty_output_frames.update(beauty_output) + return beauty_output_frames + + def get_multiple_aovs(self, outputs, cameras): + renderer_class = get_current_renderer() + renderer = str(renderer_class).split(":")[0] + aovs_frames = {} + for output, camera in zip(outputs, cameras): + filename, ext = os.path.splitext(output) + filename = filename.replace(".", "") + ext = ext.replace(".", "") + start_frame = int(rt.rendStart) + end_frame = int(rt.rendEnd) + 1 + + if renderer in [ + "ART_Renderer", + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3", + "Default_Scanline_Renderer", + "Quicksilver_Hardware_Renderer", + ]: + render_name = self.get_render_elements_name() + if render_name: + for name in render_name: + aovs_frames.update({ + f"{camera}_{name}": self.get_expected_aovs( + filename, name, start_frame, + end_frame, ext) + }) + elif renderer == "Redshift_Renderer": + render_name = self.get_render_elements_name() + if render_name: + rs_aov_files = rt.Execute("renderers.current.separateAovFiles") # noqa + # this doesn't work, always returns False + # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles + if ext == "exr" and not rs_aov_files: + for name in render_name: + if name == "RsCryptomatte": + aovs_frames.update({ + f"{camera}_{name}": self.get_expected_aovs( + filename, name, start_frame, + end_frame, ext) + }) + else: + for name in render_name: + aovs_frames.update({ + f"{camera}_{name}": self.get_expected_aovs( + filename, name, start_frame, + end_frame, ext) + }) + elif 
renderer == "Arnold": + render_name = self.get_arnold_product_name() + if render_name: + for name in render_name: + aovs_frames.update({ + f"{camera}_{name}": self.get_expected_arnold_product( # noqa + filename, name, start_frame, + end_frame, ext) + }) + elif renderer in [ + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3" + ]: + if ext != "exr": + render_name = self.get_render_elements_name() + if render_name: + for name in render_name: + aovs_frames.update({ + f"{camera}_{name}": self.get_expected_aovs( + filename, name, start_frame, + end_frame, ext) + }) + + return aovs_frames + + def get_aovs(self, container): + render_dir = os.path.dirname(rt.rendOutputFilename) + + output_file = os.path.join(render_dir, + container) + + setting = self._project_settings + img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa + + start_frame = int(rt.rendStart) + end_frame = int(rt.rendEnd) + 1 + renderer_class = get_current_renderer() + renderer = str(renderer_class).split(":")[0] + render_dict = {} + + if renderer in [ + "ART_Renderer", + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3", + "Default_Scanline_Renderer", + "Quicksilver_Hardware_Renderer", + ]: + render_name = self.get_render_elements_name() + if render_name: + for name in render_name: + render_dict.update({ + name: self.get_expected_aovs( + output_file, name, start_frame, + end_frame, img_fmt) + }) + elif renderer == "Redshift_Renderer": + render_name = self.get_render_elements_name() + if render_name: + rs_aov_files = rt.Execute("renderers.current.separateAovFiles") + # this doesn't work, always returns False + # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles + if img_fmt == "exr" and not rs_aov_files: + for name in render_name: + if name == "RsCryptomatte": + render_dict.update({ + name: self.get_expected_aovs( + output_file, name, start_frame, + end_frame, img_fmt) + }) + else: + for name in render_name: + render_dict.update({ + name: self.get_expected_aovs( + output_file, name, start_frame, + end_frame, img_fmt) + }) + + elif renderer == "Arnold": + render_name = self.get_arnold_product_name() + if render_name: + for name in render_name: + render_dict.update({ + name: self.get_expected_arnold_product( + output_file, name, start_frame, + end_frame, img_fmt) + }) + elif renderer in [ + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3" + ]: + if img_fmt != "exr": + render_name = self.get_render_elements_name() + if render_name: + for name in render_name: + render_dict.update({ + name: self.get_expected_aovs( + output_file, name, start_frame, + end_frame, img_fmt) # noqa + }) + + return render_dict + + def get_expected_beauty(self, folder, start_frame, end_frame, fmt): + beauty_frame_range = [] + for f in range(start_frame, end_frame): + frame = "%04d" % f + beauty_output = f"{folder}.{frame}.{fmt}" + beauty_output = beauty_output.replace("\\", "/") + beauty_frame_range.append(beauty_output) + + return beauty_frame_range + + def get_arnold_product_name(self): + """Get all the Arnold AOVs name""" + aov_name = [] + + amw = rt.MaxToAOps.AOVsManagerWindow() + aov_mgr = rt.renderers.current.AOVManager + # Check if there is any aov group set in AOV manager + aov_group_num = len(aov_mgr.drivers) + if aov_group_num < 1: + return + for i in range(aov_group_num): + # get the specific AOV group + aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list) + # close the AOVs manager window + amw.close() + + return aov_name + + def get_expected_arnold_product(self, folder, name, + start_frame, end_frame, fmt): + """Get all the expected 
Arnold AOVs""" + aov_list = [] + for f in range(start_frame, end_frame): + frame = "%04d" % f + render_element = f"{folder}_{name}.{frame}.{fmt}" + render_element = render_element.replace("\\", "/") + aov_list.append(render_element) + + return aov_list + + def get_render_elements_name(self): + """Get all the render element names for general """ + render_name = [] + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 1: + return + # get render elements from the renders + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + if renderlayer_name.enabled: + target, renderpass = str(renderlayer_name).split(":") + render_name.append(renderpass) + + return render_name + + def get_expected_aovs(self, folder, name, + start_frame, end_frame, fmt): + """Get all the expected render element output files. """ + render_elements = [] + for f in range(start_frame, end_frame): + frame = "%04d" % f + render_element = f"{folder}_{name}.{frame}.{fmt}" + render_element = render_element.replace("\\", "/") + render_elements.append(render_element) + + return render_elements + + def image_format(self): + return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa diff --git a/client/ayon_core/hosts/max/api/lib_rendersettings.py b/client/ayon_core/hosts/max/api/lib_rendersettings.py new file mode 100644 index 0000000000..7ffc024ba3 --- /dev/null +++ b/client/ayon_core/hosts/max/api/lib_rendersettings.py @@ -0,0 +1,227 @@ +import os +from pymxs import runtime as rt +from ayon_core.lib import Logger +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import get_current_project_name +from ayon_core.pipeline.context_tools import get_current_project_asset + +from ayon_core.hosts.max.api.lib import ( + set_render_frame_range, + get_current_renderer, + get_default_render_folder +) + + +class RenderSettings(object): + + log = Logger.get_logger("RenderSettings") + + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + def __init__(self, project_settings=None): + """ + Set up the naming convention for the render + elements for the deadline submission + """ + + self._project_settings = project_settings + if not self._project_settings: + self._project_settings = get_project_settings( + get_current_project_name() + ) + + def set_render_camera(self, selection): + for sel in selection: + # to avoid Attribute Error from pymxs wrapper + if rt.classOf(sel) in rt.Camera.classes: + rt.viewport.setCamera(sel) + return + raise RuntimeError("Active Camera not found") + + def render_output(self, container): + folder = rt.maxFilePath + # hard-coded, should be customized in the setting + file = rt.maxFileName + folder = folder.replace("\\", "/") + # hard-coded, set the renderoutput path + setting = self._project_settings + render_folder = get_default_render_folder(setting) + filename, ext = os.path.splitext(file) + output_dir = os.path.join(folder, + render_folder, + filename) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + # hard-coded, should be customized in the setting + context = get_current_project_asset() + + # get project resolution + width = context["data"].get("resolutionWidth") + height = context["data"].get("resolutionHeight") + # Set Frame Range + frame_start = context["data"].get("frame_start") + frame_end = context["data"].get("frame_end") + set_render_frame_range(frame_start, frame_end) + # get the production render + renderer_class = 
get_current_renderer() + renderer = str(renderer_class).split(":")[0] + + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + output = os.path.join(output_dir, container) + try: + aov_separator = self._aov_chars[( + self._project_settings["max"] + ["RenderSettings"] + ["aov_separator"] + )] + except KeyError: + aov_separator = "." + output_filename = f"{output}..{img_fmt}" + output_filename = output_filename.replace("{aov_separator}", + aov_separator) + rt.rendOutputFilename = output_filename + if renderer == "VUE_File_Renderer": + return + # TODO: Finish the arnold render setup + if renderer == "Arnold": + self.arnold_setup() + + if renderer in [ + "ART_Renderer", + "Redshift_Renderer", + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3", + "Default_Scanline_Renderer", + "Quicksilver_Hardware_Renderer", + ]: + self.render_element_layer(output, width, height, img_fmt) + + rt.rendSaveFile = True + + if rt.renderSceneDialog.isOpen(): + rt.renderSceneDialog.close() + + def arnold_setup(self): + # get Arnold RenderView run in the background + # for setting up renderable camera + arv = rt.MAXToAOps.ArnoldRenderView() + render_camera = rt.viewport.GetCamera() + if render_camera: + arv.setOption("Camera", str(render_camera)) + + # TODO: add AOVs and extension + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + setup_cmd = ( + f""" + amw = MaxtoAOps.AOVsManagerWindow() + amw.close() + aovmgr = renderers.current.AOVManager + aovmgr.drivers = #() + img_fmt = "{img_fmt}" + if img_fmt == "png" then driver = ArnoldPNGDriver() + if img_fmt == "jpg" then driver = ArnoldJPEGDriver() + if img_fmt == "exr" then driver = ArnoldEXRDriver() + if img_fmt == "tif" then driver = ArnoldTIFFDriver() + if img_fmt == "tiff" then driver = ArnoldTIFFDriver() + append aovmgr.drivers driver + aovmgr.drivers[1].aov_list = #() + """) + + rt.execute(setup_cmd) + arv.close() + + def render_element_layer(self, dir, width, height, ext): + """For Renderers with render elements""" + rt.renderWidth = width + rt.renderHeight = height + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + target, renderpass = str(renderlayer_name).split(":") + aov_name = f"{dir}_{renderpass}..{ext}" + render_elem.SetRenderElementFileName(i, aov_name) + + def get_render_output(self, container, output_dir): + output = os.path.join(output_dir, container) + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + output_filename = f"{output}..{img_fmt}" + return output_filename + + def get_render_element(self): + orig_render_elem = [] + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + + for i in range(render_elem_num): + render_element = render_elem.GetRenderElementFilename(i) + orig_render_elem.append(render_element) + + return orig_render_elem + + def get_batch_render_elements(self, container, + output_dir, camera): + render_element_list = list() + output = os.path.join(output_dir, container) + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + target, 
renderpass = str(renderlayer_name).split(":") + aov_name = f"{output}_{camera}_{renderpass}..{img_fmt}" + render_element_list.append(aov_name) + return render_element_list + + def get_batch_render_output(self, camera): + target_layer_no = rt.batchRenderMgr.FindView(camera) + target_layer = rt.batchRenderMgr.GetView(target_layer_no) + return target_layer.outputFilename + + def batch_render_elements(self, camera): + target_layer_no = rt.batchRenderMgr.FindView(camera) + target_layer = rt.batchRenderMgr.GetView(target_layer_no) + outputfilename = target_layer.outputFilename + directory = os.path.dirname(outputfilename) + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + ext = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + target, renderpass = str(renderlayer_name).split(":") + aov_name = f"{directory}_{camera}_{renderpass}..{ext}" + render_elem.SetRenderElementFileName(i, aov_name) + + def batch_render_layer(self, container, + output_dir, cameras): + outputs = list() + output = os.path.join(output_dir, container) + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + for cam in cameras: + camera = rt.getNodeByName(cam) + layer_no = rt.batchRenderMgr.FindView(cam) + renderlayer = None + if layer_no == 0: + renderlayer = rt.batchRenderMgr.CreateView(camera) + else: + renderlayer = rt.batchRenderMgr.GetView(layer_no) + # use camera name as renderlayer name + renderlayer.name = cam + renderlayer.outputFilename = f"{output}_{cam}..{img_fmt}" + outputs.append(renderlayer.outputFilename) + return outputs diff --git a/client/ayon_core/hosts/max/api/menu.py b/client/ayon_core/hosts/max/api/menu.py new file mode 100644 index 0000000000..d968874a7e --- /dev/null +++ b/client/ayon_core/hosts/max/api/menu.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +"""3dsmax menu definition of AYON.""" +import os +from qtpy import QtWidgets, QtCore +from pymxs import runtime as rt + +from ayon_core.tools.utils import host_tools +from ayon_core.hosts.max.api import lib + + +class OpenPypeMenu(object): + """Object representing OpenPype/AYON menu. + + This uses a "hack" to inject itself before the "Help" menu of 3dsmax. + For some reason the `postLoadingMenus` event doesn't fire, and the main + menu is probably re-initialized by menu templates, so we wait for at + least one Qt event loop cycle before trying to insert. + + """ + + def __init__(self): + super().__init__() + self.main_widget = self.get_main_widget() + self.menu = None + + timer = QtCore.QTimer() + # set number of event loops to wait. + timer.setInterval(1) + timer.timeout.connect(self._on_timer) + timer.start() + + self._timer = timer + self._counter = 0 + + def _on_timer(self): + if self._counter < 1: + self._counter += 1 + return + + self._counter = 0 + self._timer.stop() + self.build_openpype_menu() + + @staticmethod + def get_main_widget(): + """Get 3dsmax main window.""" + return QtWidgets.QWidget.find(rt.windows.getMAXHWND()) + + def get_main_menubar(self) -> QtWidgets.QMenuBar: + """Get main Menubar by 3dsmax main window.""" + return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0] + + def get_or_create_openpype_menu( + self, name: str = "&Openpype", + before: str = "&Help") -> QtWidgets.QAction: + """Create AYON menu. + + Args: + name (str, Optional): AYON menu name.
+ before (str, Optional): Name of the 3dsmax main menu item to + add AYON menu before. + + Returns: + QtWidgets.QAction: AYON menu action. + + """ + if self.menu is not None: + return self.menu + + menu_bar = self.get_main_menubar() + menu_items = menu_bar.findChildren( + QtWidgets.QMenu, options=QtCore.Qt.FindDirectChildrenOnly) + help_action = None + for item in menu_items: + if name in item.title(): + # we already have OpenPype menu + return item + + if before in item.title(): + help_action = item.menuAction() + tab_menu_label = os.environ.get("AYON_MENU_LABEL") or "AYON" + op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label)) + menu_bar.insertMenu(help_action, op_menu) + + self.menu = op_menu + return op_menu + + def build_openpype_menu(self) -> QtWidgets.QAction: + """Build items in AYON menu.""" + openpype_menu = self.get_or_create_openpype_menu() + load_action = QtWidgets.QAction("Load...", openpype_menu) + load_action.triggered.connect(self.load_callback) + openpype_menu.addAction(load_action) + + publish_action = QtWidgets.QAction("Publish...", openpype_menu) + publish_action.triggered.connect(self.publish_callback) + openpype_menu.addAction(publish_action) + + manage_action = QtWidgets.QAction("Manage...", openpype_menu) + manage_action.triggered.connect(self.manage_callback) + openpype_menu.addAction(manage_action) + + library_action = QtWidgets.QAction("Library...", openpype_menu) + library_action.triggered.connect(self.library_callback) + openpype_menu.addAction(library_action) + + openpype_menu.addSeparator() + + workfiles_action = QtWidgets.QAction("Work Files...", openpype_menu) + workfiles_action.triggered.connect(self.workfiles_callback) + openpype_menu.addAction(workfiles_action) + + openpype_menu.addSeparator() + + res_action = QtWidgets.QAction("Set Resolution", openpype_menu) + res_action.triggered.connect(self.resolution_callback) + openpype_menu.addAction(res_action) + + frame_action = QtWidgets.QAction("Set Frame Range", openpype_menu) + frame_action.triggered.connect(self.frame_range_callback) + openpype_menu.addAction(frame_action) + + colorspace_action = QtWidgets.QAction("Set Colorspace", openpype_menu) + colorspace_action.triggered.connect(self.colorspace_callback) + openpype_menu.addAction(colorspace_action) + + unit_scale_action = QtWidgets.QAction("Set Unit Scale", openpype_menu) + unit_scale_action.triggered.connect(self.unit_scale_callback) + openpype_menu.addAction(unit_scale_action) + + return openpype_menu + + def load_callback(self): + """Callback to show Loader tool.""" + host_tools.show_loader(parent=self.main_widget) + + def publish_callback(self): + """Callback to show Publisher tool.""" + host_tools.show_publisher(parent=self.main_widget) + + def manage_callback(self): + """Callback to show Scene Manager/Inventory tool.""" + host_tools.show_scene_inventory(parent=self.main_widget) + + def library_callback(self): + """Callback to show Library Loader tool.""" + host_tools.show_library_loader(parent=self.main_widget) + + def workfiles_callback(self): + """Callback to show Workfiles tool.""" + host_tools.show_workfiles(parent=self.main_widget) + + def resolution_callback(self): + """Callback to reset scene resolution""" + return lib.reset_scene_resolution() + + def frame_range_callback(self): + """Callback to reset frame range""" + return lib.reset_frame_range() + + def colorspace_callback(self): + """Callback to reset colorspace""" + return lib.reset_colorspace() + + def unit_scale_callback(self): + """Callback to reset unit scale""" + return 
lib.reset_unit_scale() diff --git a/client/ayon_core/hosts/max/api/pipeline.py b/client/ayon_core/hosts/max/api/pipeline.py new file mode 100644 index 0000000000..ff5ef0640b --- /dev/null +++ b/client/ayon_core/hosts/max/api/pipeline.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- +"""Pipeline tools for AYON 3dsmax integration.""" +import os +import logging +from operator import attrgetter + +import json + +from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost +import pyblish.api +from ayon_core.pipeline import ( + register_creator_plugin_path, + register_loader_plugin_path, + AVALON_CONTAINER_ID, +) +from ayon_core.hosts.max.api.menu import OpenPypeMenu +from ayon_core.hosts.max.api import lib +from ayon_core.hosts.max.api.plugin import MS_CUSTOM_ATTRIB +from ayon_core.hosts.max import MAX_HOST_DIR + +from pymxs import runtime as rt # noqa + +log = logging.getLogger("ayon_core.hosts.max") + +PLUGINS_DIR = os.path.join(MAX_HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + + +class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + + name = "max" + menu = None + + def __init__(self): + super(MaxHost, self).__init__() + self._op_events = {} + self._has_been_setup = False + + def install(self): + pyblish.api.register_host("max") + + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + + # self._register_callbacks() + self.menu = OpenPypeMenu() + + self._has_been_setup = True + + def context_setting(): + return lib.set_context_setting() + + rt.callbacks.addScript(rt.Name('systemPostNew'), + context_setting) + + rt.callbacks.addScript(rt.Name('filePostOpen'), + lib.check_colorspace) + + def has_unsaved_changes(self): + # TODO: how to get it from 3dsmax?
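+        # Conservative fallback: always report unsaved changes so tools +        # prompt the artist to save. (MAXScript's getSaveRequired() might +        # be queryable through pymxs here - an untested assumption.)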
+ return True + + def get_workfile_extensions(self): + return [".max"] + + def save_workfile(self, dst_path=None): + rt.saveMaxFile(dst_path) + return dst_path + + def open_workfile(self, filepath): + rt.checkForSave() + rt.loadMaxFile(filepath) + return filepath + + def get_current_workfile(self): + return os.path.join(rt.maxFilePath, rt.maxFileName) + + def get_containers(self): + return ls() + + def _register_callbacks(self): + rt.callbacks.removeScripts(id=rt.name("OpenPypeCallbacks")) + + rt.callbacks.addScript( + rt.Name("postLoadingMenus"), + self._deferred_menu_creation, id=rt.Name('OpenPypeCallbacks')) + + def _deferred_menu_creation(self): + self.log.info("Building menu ...") + self.menu = OpenPypeMenu() + + @staticmethod + def create_context_node(): + """Helper for creating context holding node.""" + + root_scene = rt.rootScene + + create_attr_script = (""" +attributes "OpenPypeContext" +( + parameters main rollout:params + ( + context type: #string + ) + + rollout params "OpenPype Parameters" + ( + editText editTextContext "Context" type: #string + ) +) + """) + + attr = rt.execute(create_attr_script) + rt.custAttributes.add(root_scene, attr) + + return root_scene.OpenPypeContext.context + + def update_context_data(self, data, changes): + try: + _ = rt.rootScene.OpenPypeContext.context + except AttributeError: + # context node doesn't exists + self.create_context_node() + + rt.rootScene.OpenPypeContext.context = json.dumps(data) + + def get_context_data(self): + try: + context = rt.rootScene.OpenPypeContext.context + except AttributeError: + # context node doesn't exists + context = self.create_context_node() + if not context: + context = "{}" + return json.loads(context) + + def save_file(self, dst_path=None): + # Force forwards slashes to avoid segfault + dst_path = dst_path.replace("\\", "/") + rt.saveMaxFile(dst_path) + + +def ls() -> list: + """Get all OpenPype instances.""" + objs = rt.objects + containers = [ + obj for obj in objs + if rt.getUserProp(obj, "id") == AVALON_CONTAINER_ID + ] + + for container in sorted(containers, key=attrgetter("name")): + yield lib.read(container) + + +def containerise(name: str, nodes: list, context, + namespace=None, loader=None, suffix="_CON"): + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or "", + "loader": loader, + "representation": context["representation"]["_id"], + } + container_name = f"{namespace}:{name}{suffix}" + container = rt.container(name=container_name) + import_custom_attribute_data(container, nodes) + if not lib.imprint(container_name, data): + print(f"imprinting of {container_name} failed.") + return container + + +def load_custom_attribute_data(): + """Re-loading the AYON custom parameter built by the creator + + Returns: + attribute: re-loading the custom OP attributes set in Maxscript + """ + return rt.Execute(MS_CUSTOM_ATTRIB) + + +def import_custom_attribute_data(container: str, selections: list): + """Importing the Openpype/AYON custom parameter built by the creator + + Args: + container (str): target container which adds custom attributes + selections (list): nodes to be added into + group in custom attributes + """ + attrs = load_custom_attribute_data() + modifier = rt.EmptyModifier() + rt.addModifier(container, modifier) + container.modifiers[0].name = "OP Data" + rt.custAttributes.add(container.modifiers[0], attrs) + node_list = [] + sel_list = [] + for i in selections: + node_ref = rt.NodeTransformMonitor(node=i) + 
node_list.append(node_ref) + sel_list.append(str(i)) + + # Setting the property + rt.setProperty( + container.modifiers[0].openPypeData, + "all_handles", node_list) + rt.setProperty( + container.modifiers[0].openPypeData, + "sel_list", sel_list) + + +def update_custom_attribute_data(container: str, selections: list): + """Updating the AYON custom parameter built by the creator + + Args: + container (str): target container which adds custom attributes + selections (list): nodes to be added into + group in custom attributes + """ + if container.modifiers[0].name == "OP Data": + rt.deleteModifier(container, container.modifiers[0]) + import_custom_attribute_data(container, selections) + + +def get_previous_loaded_object(container: str): + """Get previous loaded_object through the OP data + + Args: + container (str): the container which stores the OP data + + Returns: + node_list(list): list of nodes which are previously loaded + """ + node_list = [] + sel_list = rt.getProperty(container.modifiers[0].openPypeData, "sel_list") + for obj in rt.Objects: + if str(obj) in sel_list: + node_list.append(obj) + return node_list diff --git a/client/ayon_core/hosts/max/api/plugin.py b/client/ayon_core/hosts/max/api/plugin.py new file mode 100644 index 0000000000..3551450c24 --- /dev/null +++ b/client/ayon_core/hosts/max/api/plugin.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +"""3dsmax specific Avalon/Pyblish plugin definitions.""" +from abc import ABCMeta + +import six +from pymxs import runtime as rt + +from ayon_core.lib import BoolDef +from ayon_core.pipeline import CreatedInstance, Creator, CreatorError + +from .lib import imprint, lsattr, read + +MS_CUSTOM_ATTRIB = """attributes "openPypeData" +( + parameters main rollout:OPparams + ( + all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on + sel_list type:#stringTab tabSize:0 tabSizeVariable:on + ) + + rollout OPparams "OP Parameters" + ( + listbox list_node "Node References" items:#() + button button_add "Add to Container" + button button_del "Delete from Container" + + fn node_to_name the_node = + ( + handle = the_node.handle + obj_name = the_node.name + handle_name = obj_name + "<" + handle as string + ">" + return handle_name + ) + fn nodes_to_add node = + ( + sceneObjs = #() + if classOf node == Container do return false + n = node as string + for obj in Objects do + ( + tmp_obj = obj as string + append sceneObjs tmp_obj + ) + if sel_list != undefined do + ( + for obj in sel_list do + ( + idx = findItem sceneObjs obj + if idx do + ( + deleteItem sceneObjs idx + ) + ) + ) + idx = findItem sceneObjs n + if idx then return true else false + ) + + fn nodes_to_rmv node = + ( + n = node as string + idx = findItem sel_list n + if idx then return true else false + ) + + on button_add pressed do + ( + current_sel = selectByName title:"Select Objects to add to + the Container" buttontext:"Add" filter:nodes_to_add + if current_sel == undefined then return False + temp_arr = #() + i_node_arr = #() + for c in current_sel do + ( + handle_name = node_to_name c + node_ref = NodeTransformMonitor node:c + idx = finditem list_node.items handle_name + if idx do ( + continue + ) + name = c as string + append temp_arr handle_name + append i_node_arr node_ref + append sel_list name + ) + all_handles = join i_node_arr all_handles + list_node.items = join temp_arr list_node.items + ) + + on button_del pressed do + ( + current_sel = selectByName title:"Select Objects to remove + from the Container" buttontext:"Remove" filter: nodes_to_rmv + if current_sel == 
undefined or current_sel.count == 0 then + ( + return False + ) + temp_arr = #() + i_node_arr = #() + new_i_node_arr = #() + new_temp_arr = #() + + for c in current_sel do + ( + node_ref = NodeTransformMonitor node:c as string + handle_name = node_to_name c + n = c as string + tmp_all_handles = #() + for i in all_handles do + ( + tmp = i as string + append tmp_all_handles tmp + ) + idx = finditem tmp_all_handles node_ref + if idx do + ( + new_i_node_arr = DeleteItem all_handles idx + + ) + idx = finditem list_node.items handle_name + if idx do + ( + new_temp_arr = DeleteItem list_node.items idx + ) + idx = finditem sel_list n + if idx do + ( + sel_list = DeleteItem sel_list idx + ) + ) + all_handles = join i_node_arr new_i_node_arr + list_node.items = join temp_arr new_temp_arr + ) + + on OPparams open do + ( + if all_handles.count != 0 then + ( + temp_arr = #() + for x in all_handles do + ( + if x.node == undefined do continue + handle_name = node_to_name x.node + append temp_arr handle_name + ) + list_node.items = temp_arr + ) + ) + ) +)""" + + +class OpenPypeCreatorError(CreatorError): + pass + + +class MaxCreatorBase(object): + + @staticmethod + def cache_subsets(shared_data): + if shared_data.get("max_cached_subsets") is not None: + return shared_data + + shared_data["max_cached_subsets"] = {} + cached_instances = lsattr("id", "pyblish.avalon.instance") + for i in cached_instances: + creator_id = rt.GetUserProp(i, "creator_identifier") + if creator_id not in shared_data["max_cached_subsets"]: + shared_data["max_cached_subsets"][creator_id] = [i.name] + else: + shared_data[ + "max_cached_subsets"][creator_id].append(i.name) + return shared_data + + @staticmethod + def create_instance_node(node): + """Create instance node. + + If the supplied node is existing node, it will be used to hold the + instance, otherwise new node of type Dummy will be created. + + Args: + node (rt.MXSWrapperBase, str): Node or node name to use. 
+ + Returns: + instance + """ + if isinstance(node, str): + node = rt.Container(name=node) + + attrs = rt.Execute(MS_CUSTOM_ATTRIB) + modifier = rt.EmptyModifier() + rt.addModifier(node, modifier) + node.modifiers[0].name = "OP Data" + rt.custAttributes.add(node.modifiers[0], attrs) + + return node + + +@six.add_metaclass(ABCMeta) +class MaxCreator(Creator, MaxCreatorBase): + selected_nodes = [] + + def create(self, subset_name, instance_data, pre_create_data): + if pre_create_data.get("use_selection"): + self.selected_nodes = rt.GetCurrentSelection() + if rt.getNodeByName(subset_name): + raise CreatorError(f"'{subset_name}' is already created.") + + instance_node = self.create_instance_node(subset_name) + instance_data["instance_node"] = instance_node.name + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self + ) + if pre_create_data.get("use_selection"): + + node_list = [] + sel_list = [] + for i in self.selected_nodes: + node_ref = rt.NodeTransformMonitor(node=i) + node_list.append(node_ref) + sel_list.append(str(i)) + + # Setting the property + rt.setProperty( + instance_node.modifiers[0].openPypeData, + "all_handles", node_list) + rt.setProperty( + instance_node.modifiers[0].openPypeData, + "sel_list", sel_list) + + self._add_instance_to_context(instance) + imprint(instance_node.name, instance.data_to_store()) + + return instance + + def collect_instances(self): + self.cache_subsets(self.collection_shared_data) + for instance in self.collection_shared_data["max_cached_subsets"].get(self.identifier, []): # noqa + created_instance = CreatedInstance.from_existing( + read(rt.GetNodeByName(instance)), self + ) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + for created_inst, changes in update_list: + instance_node = created_inst.get("instance_node") + new_values = { + key: changes[key].new_value + for key in changes.changed_keys + } + subset = new_values.get("subset", "") + if subset and instance_node != subset: + node = rt.getNodeByName(instance_node) + new_subset_name = new_values["subset"] + if rt.getNodeByName(new_subset_name): + raise CreatorError( + "The subset '{}' already exists.".format( + new_subset_name)) + instance_node = new_subset_name + created_inst["instance_node"] = instance_node + node.name = instance_node + + imprint( + instance_node, + created_inst.data_to_store(), + ) + + def remove_instances(self, instances): + """Remove specified instance from the scene. + + This only removes the `id` parameter so the node is no longer + an instance, because it might contain valuable data for the artist.
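+ + Note: the container node and its custom attributes are deleted + below; the member nodes it referenced remain in the scene.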
+ + """ + for instance in instances: + instance_node = rt.GetNodeByName( + instance.data.get("instance_node")) + if instance_node: + count = rt.custAttributes.count(instance_node.modifiers[0]) + rt.custAttributes.delete(instance_node.modifiers[0], count) + rt.Delete(instance_node) + + self._remove_instance_from_context(instance) + + def get_pre_create_attr_defs(self): + return [ + BoolDef("use_selection", label="Use selection") + ] diff --git a/openpype/hosts/max/api/preview_animation.py b/client/ayon_core/hosts/max/api/preview_animation.py similarity index 99% rename from openpype/hosts/max/api/preview_animation.py rename to client/ayon_core/hosts/max/api/preview_animation.py index 74579b165f..f715efa53d 100644 --- a/openpype/hosts/max/api/preview_animation.py +++ b/client/ayon_core/hosts/max/api/preview_animation.py @@ -3,7 +3,7 @@ from pymxs import runtime as rt from .lib import get_max_version, render_resolution -log = logging.getLogger("openpype.hosts.max") +log = logging.getLogger("ayon_core.hosts.max") @contextlib.contextmanager diff --git a/openpype/hosts/max/hooks/force_startup_script.py b/client/ayon_core/hosts/max/hooks/force_startup_script.py similarity index 87% rename from openpype/hosts/max/hooks/force_startup_script.py rename to client/ayon_core/hosts/max/hooks/force_startup_script.py index 5fb8334d4b..659be7dfc6 100644 --- a/openpype/hosts/max/hooks/force_startup_script.py +++ b/client/ayon_core/hosts/max/hooks/force_startup_script.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """Pre-launch to force 3ds max startup script.""" import os -from openpype.hosts.max import MAX_HOST_DIR -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.hosts.max import MAX_HOST_DIR +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class ForceStartupScript(PreLaunchHook): diff --git a/openpype/hosts/max/hooks/inject_python.py b/client/ayon_core/hosts/max/hooks/inject_python.py similarity index 90% rename from openpype/hosts/max/hooks/inject_python.py rename to client/ayon_core/hosts/max/hooks/inject_python.py index e9dddbf710..36d53551ba 100644 --- a/openpype/hosts/max/hooks/inject_python.py +++ b/client/ayon_core/hosts/max/hooks/inject_python.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Pre-launch hook to inject python environment.""" import os -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class InjectPythonPath(PreLaunchHook): diff --git a/client/ayon_core/hosts/max/hooks/set_paths.py b/client/ayon_core/hosts/max/hooks/set_paths.py new file mode 100644 index 0000000000..c18fd29295 --- /dev/null +++ b/client/ayon_core/hosts/max/hooks/set_paths.py @@ -0,0 +1,18 @@ +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes + + +class SetPath(PreLaunchHook): + """Set current dir to workdir. + + Hook `GlobalHostDataHook` must be executed before this hook. 
+ """ + app_groups = {"max"} + launch_types = {LaunchTypes.local} + + def execute(self): + workdir = self.launch_context.env.get("AVALON_WORKDIR", "") + if not workdir: + self.log.warning("BUG: Workdir is not filled.") + return + + self.launch_context.kwargs["cwd"] = workdir diff --git a/openpype/hosts/hiero/vendor/google/protobuf/util/__init__.py b/client/ayon_core/hosts/max/plugins/__init__.py similarity index 100% rename from openpype/hosts/hiero/vendor/google/protobuf/util/__init__.py rename to client/ayon_core/hosts/max/plugins/__init__.py diff --git a/client/ayon_core/hosts/max/plugins/create/create_camera.py b/client/ayon_core/hosts/max/plugins/create/create_camera.py new file mode 100644 index 0000000000..a35d5fc6b9 --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/create/create_camera.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating camera.""" +from ayon_core.hosts.max.api import plugin + + +class CreateCamera(plugin.MaxCreator): + """Creator plugin for Camera.""" + identifier = "io.openpype.creators.max.camera" + label = "Camera" + family = "camera" + icon = "gear" diff --git a/openpype/hosts/max/plugins/create/create_maxScene.py b/client/ayon_core/hosts/max/plugins/create/create_maxScene.py similarity index 86% rename from openpype/hosts/max/plugins/create/create_maxScene.py rename to client/ayon_core/hosts/max/plugins/create/create_maxScene.py index 851e26dda2..4b8328d38f 100644 --- a/openpype/hosts/max/plugins/create/create_maxScene.py +++ b/client/ayon_core/hosts/max/plugins/create/create_maxScene.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Creator plugin for creating raw max scene.""" -from openpype.hosts.max.api import plugin +from ayon_core.hosts.max.api import plugin class CreateMaxScene(plugin.MaxCreator): diff --git a/client/ayon_core/hosts/max/plugins/create/create_model.py b/client/ayon_core/hosts/max/plugins/create/create_model.py new file mode 100644 index 0000000000..73f0260807 --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/create/create_model.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for model.""" +from ayon_core.hosts.max.api import plugin + + +class CreateModel(plugin.MaxCreator): + """Creator plugin for Model.""" + identifier = "io.openpype.creators.max.model" + label = "Model" + family = "model" + icon = "gear" diff --git a/client/ayon_core/hosts/max/plugins/create/create_pointcache.py b/client/ayon_core/hosts/max/plugins/create/create_pointcache.py new file mode 100644 index 0000000000..d28f5008e5 --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/create/create_pointcache.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating pointcache alembics.""" +from ayon_core.hosts.max.api import plugin + + +class CreatePointCache(plugin.MaxCreator): + """Creator plugin for Point caches.""" + identifier = "io.openpype.creators.max.pointcache" + label = "Point Cache" + family = "pointcache" + icon = "gear" diff --git a/openpype/hosts/max/plugins/create/create_pointcloud.py b/client/ayon_core/hosts/max/plugins/create/create_pointcloud.py similarity index 86% rename from openpype/hosts/max/plugins/create/create_pointcloud.py rename to client/ayon_core/hosts/max/plugins/create/create_pointcloud.py index bc7706069d..aa6be04da4 100644 --- a/openpype/hosts/max/plugins/create/create_pointcloud.py +++ b/client/ayon_core/hosts/max/plugins/create/create_pointcloud.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Creator plugin for creating point cloud.""" -from openpype.hosts.max.api import 
plugin
+from ayon_core.hosts.max.api import plugin
 
 
 class CreatePointCloud(plugin.MaxCreator):
diff --git a/client/ayon_core/hosts/max/plugins/create/create_redshift_proxy.py b/client/ayon_core/hosts/max/plugins/create/create_redshift_proxy.py
new file mode 100644
index 0000000000..e524e85cf6
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/create/create_redshift_proxy.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating Redshift proxies."""
+from ayon_core.hosts.max.api import plugin
+from ayon_core.pipeline import CreatedInstance
+
+
+class CreateRedshiftProxy(plugin.MaxCreator):
+    identifier = "io.openpype.creators.max.redshiftproxy"
+    label = "Redshift Proxy"
+    family = "redshiftproxy"
+    icon = "gear"
diff --git a/client/ayon_core/hosts/max/plugins/create/create_render.py b/client/ayon_core/hosts/max/plugins/create/create_render.py
new file mode 100644
index 0000000000..73c18bfb4b
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/create/create_render.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating renders."""
+import os
+from ayon_core.hosts.max.api import plugin
+from ayon_core.lib import BoolDef
+from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings
+
+
+class CreateRender(plugin.MaxCreator):
+    """Creator plugin for Renders."""
+    identifier = "io.openpype.creators.max.render"
+    label = "Render"
+    family = "maxrender"
+    icon = "gear"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        from pymxs import runtime as rt
+        file = rt.maxFileName
+        filename, _ = os.path.splitext(file)
+        instance_data["AssetName"] = filename
+        instance_data["multiCamera"] = pre_create_data.get("multi_cam")
+        num_of_renderlayer = rt.batchRenderMgr.numViews
+        if num_of_renderlayer > 0:
+            rt.batchRenderMgr.DeleteView(num_of_renderlayer)
+
+        instance = super(CreateRender, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)
+
+        container_name = instance.data.get("instance_node")
+        # Set output paths for rendering (mandatory for Deadline).
+        RenderSettings().render_output(container_name)
+        # TODO: create multiple camera options
+        if self.selected_nodes:
+            selected_nodes_name = []
+            for sel in self.selected_nodes:
+                name = sel.name
+                selected_nodes_name.append(name)
+            RenderSettings().batch_render_layer(
+                container_name, filename,
+                selected_nodes_name)
+
+    def get_pre_create_attr_defs(self):
+        attrs = super(CreateRender, self).get_pre_create_attr_defs()
+        return attrs + [
+            BoolDef("multi_cam",
+                    label="Multiple Cameras Submission",
+                    default=False),
+        ]
diff --git a/client/ayon_core/hosts/max/plugins/create/create_review.py b/client/ayon_core/hosts/max/plugins/create/create_review.py
new file mode 100644
index 0000000000..a757b3b5bd
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/create/create_review.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating review in Max."""
+from ayon_core.hosts.max.api import plugin
+from ayon_core.lib import BoolDef, EnumDef, NumberDef
+
+
+class CreateReview(plugin.MaxCreator):
+    """Review in 3dsMax"""
+
+    identifier = "io.openpype.creators.max.review"
+    label = "Review"
+    family = "review"
+    icon = "video-camera"
+
+    review_width = 1920
+    review_height = 1080
+    percentSize = 100
+    keep_images = False
+    image_format = "png"
+    visual_style = "Realistic"
+    viewport_preset = "Quality"
+    vp_texture = True
+    anti_aliasing = "None"
+
+    def apply_settings(self, project_settings):
+        settings = project_settings["max"]["CreateReview"]  # noqa
+
+        # Take some
defaults from settings + self.review_width = settings.get("review_width", self.review_width) + self.review_height = settings.get("review_height", self.review_height) + self.percentSize = settings.get("percentSize", self.percentSize) + self.keep_images = settings.get("keep_images", self.keep_images) + self.image_format = settings.get("image_format", self.image_format) + self.visual_style = settings.get("visual_style", self.visual_style) + self.viewport_preset = settings.get( + "viewport_preset", self.viewport_preset) + self.anti_aliasing = settings.get( + "anti_aliasing", self.anti_aliasing) + self.vp_texture = settings.get("vp_texture", self.vp_texture) + + def create(self, subset_name, instance_data, pre_create_data): + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["imageFormat", + "keepImages", + "review_width", + "review_height", + "percentSize", + "visualStyleMode", + "viewportPreset", + "antialiasingQuality", + "vpTexture"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] + + super(CreateReview, self).create( + subset_name, + instance_data, + pre_create_data) + + def get_instance_attr_defs(self): + image_format_enum = ["exr", "jpg", "png", "tga"] + + visual_style_preset_enum = [ + "Realistic", "Shaded", "Facets", + "ConsistentColors", "HiddenLine", + "Wireframe", "BoundingBox", "Ink", + "ColorInk", "Acrylic", "Tech", "Graphite", + "ColorPencil", "Pastel", "Clay", "ModelAssist" + ] + preview_preset_enum = [ + "Quality", "Standard", "Performance", + "DXMode", "Customize"] + anti_aliasing_enum = ["None", "2X", "4X", "8X"] + + return [ + NumberDef("review_width", + label="Review width", + decimals=0, + minimum=0, + default=self.review_width), + NumberDef("review_height", + label="Review height", + decimals=0, + minimum=0, + default=self.review_height), + NumberDef("percentSize", + label="Percent of Output", + default=self.percentSize, + minimum=1, + decimals=0), + BoolDef("keepImages", + label="Keep Image Sequences", + default=self.keep_images), + EnumDef("imageFormat", + image_format_enum, + default=self.image_format, + label="Image Format Options"), + EnumDef("visualStyleMode", + visual_style_preset_enum, + default=self.visual_style, + label="Preference"), + EnumDef("viewportPreset", + preview_preset_enum, + default=self.viewport_preset, + label="Preview Preset"), + EnumDef("antialiasingQuality", + anti_aliasing_enum, + default=self.anti_aliasing, + label="Anti-aliasing Quality"), + BoolDef("vpTexture", + label="Viewport Texture", + default=self.vp_texture) + ] + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + attrs = super().get_pre_create_attr_defs() + return attrs + self.get_instance_attr_defs() diff --git a/openpype/hosts/max/plugins/create/create_tycache.py b/client/ayon_core/hosts/max/plugins/create/create_tycache.py similarity index 85% rename from openpype/hosts/max/plugins/create/create_tycache.py rename to client/ayon_core/hosts/max/plugins/create/create_tycache.py index 92d12e012f..81ccd3607c 100644 --- a/openpype/hosts/max/plugins/create/create_tycache.py +++ b/client/ayon_core/hosts/max/plugins/create/create_tycache.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Creator plugin for creating TyCache.""" -from openpype.hosts.max.api import plugin +from ayon_core.hosts.max.api import plugin class CreateTyCache(plugin.MaxCreator): diff --git a/client/ayon_core/hosts/max/plugins/load/load_camera_fbx.py 
b/client/ayon_core/hosts/max/plugins/load/load_camera_fbx.py new file mode 100644 index 0000000000..34b120c179 --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/load/load_camera_fbx.py @@ -0,0 +1,99 @@ +import os + +from ayon_core.hosts.max.api import lib, maintained_selection +from ayon_core.hosts.max.api.lib import ( + unique_namespace, + get_namespace, + object_transform_set +) +from ayon_core.hosts.max.api.pipeline import ( + containerise, + get_previous_loaded_object, + update_custom_attribute_data +) +from ayon_core.pipeline import get_representation_path, load + + +class FbxLoader(load.LoaderPlugin): + """Fbx Loader.""" + + families = ["camera"] + representations = ["fbx"] + order = -9 + icon = "code-fork" + color = "white" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + filepath = self.filepath_from_context(context) + filepath = os.path.normpath(filepath) + rt.FBXImporterSetParam("Animation", True) + rt.FBXImporterSetParam("Camera", True) + rt.FBXImporterSetParam("AxisConversionMethod", True) + rt.FBXImporterSetParam("Mode", rt.Name("create")) + rt.FBXImporterSetParam("Preserveinstances", True) + rt.ImportFile( + filepath, + rt.name("noPrompt"), + using=rt.FBXIMP) + + namespace = unique_namespace( + name + "_", + suffix="_", + ) + selections = rt.GetCurrentSelection() + + for selection in selections: + selection.name = f"{namespace}:{selection.name}" + + return containerise( + name, selections, context, + namespace, loader=self.__class__.__name__) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node_name = container["instance_node"] + node = rt.getNodeByName(node_name) + namespace, _ = get_namespace(node_name) + + node_list = get_previous_loaded_object(node) + rt.Select(node_list) + prev_fbx_objects = rt.GetCurrentSelection() + transform_data = object_transform_set(prev_fbx_objects) + for prev_fbx_obj in prev_fbx_objects: + if rt.isValidNode(prev_fbx_obj): + rt.Delete(prev_fbx_obj) + + rt.FBXImporterSetParam("Animation", True) + rt.FBXImporterSetParam("Camera", True) + rt.FBXImporterSetParam("Mode", rt.Name("merge")) + rt.FBXImporterSetParam("AxisConversionMethod", True) + rt.FBXImporterSetParam("Preserveinstances", True) + rt.ImportFile( + path, rt.name("noPrompt"), using=rt.FBXIMP) + current_fbx_objects = rt.GetCurrentSelection() + fbx_objects = [] + for fbx_object in current_fbx_objects: + fbx_object.name = f"{namespace}:{fbx_object.name}" + fbx_objects.append(fbx_object) + fbx_transform = f"{fbx_object.name}.transform" + if fbx_transform in transform_data.keys(): + fbx_object.pos = transform_data[fbx_transform] or 0 + fbx_object.scale = transform_data[ + f"{fbx_object.name}.scale"] or 0 + + update_custom_attribute_data(node, fbx_objects) + lib.imprint(container["instance_node"], { + "representation": str(representation["_id"]) + }) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) diff --git a/openpype/hosts/max/plugins/load/load_max_scene.py b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py similarity index 94% rename from openpype/hosts/max/plugins/load/load_max_scene.py rename to client/ayon_core/hosts/max/plugins/load/load_max_scene.py index 0b5f0a2858..7267d7a59e 100644 --- a/openpype/hosts/max/plugins/load/load_max_scene.py +++ 
b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py @@ -1,16 +1,16 @@ import os -from openpype.hosts.max.api import lib -from openpype.hosts.max.api.lib import ( +from ayon_core.hosts.max.api import lib +from ayon_core.hosts.max.api.lib import ( unique_namespace, get_namespace, object_transform_set ) -from openpype.hosts.max.api.pipeline import ( +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object, update_custom_attribute_data ) -from openpype.pipeline import get_representation_path, load +from ayon_core.pipeline import get_representation_path, load class MaxSceneLoader(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/max/plugins/load/load_model.py b/client/ayon_core/hosts/max/plugins/load/load_model.py new file mode 100644 index 0000000000..796e1b80ad --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/load/load_model.py @@ -0,0 +1,121 @@ +import os +from ayon_core.pipeline import load, get_representation_path +from ayon_core.hosts.max.api.pipeline import ( + containerise, + get_previous_loaded_object +) +from ayon_core.hosts.max.api import lib +from ayon_core.hosts.max.api.lib import ( + maintained_selection, unique_namespace +) + + +class ModelAbcLoader(load.LoaderPlugin): + """Loading model with the Alembic loader.""" + + families = ["model"] + label = "Load Model with Alembic" + representations = ["abc"] + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + + file_path = os.path.normpath(self.filepath_from_context(context)) + + abc_before = { + c + for c in rt.rootNode.Children + if rt.classOf(c) == rt.AlembicContainer + } + + rt.AlembicImport.ImportToRoot = False + rt.AlembicImport.CustomAttributes = True + rt.AlembicImport.UVs = True + rt.AlembicImport.VertexColors = True + rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport) + + abc_after = { + c + for c in rt.rootNode.Children + if rt.classOf(c) == rt.AlembicContainer + } + + # This should yield new AlembicContainer node + abc_containers = abc_after.difference(abc_before) + + if len(abc_containers) != 1: + self.log.error("Something failed when loading.") + + abc_container = abc_containers.pop() + + namespace = unique_namespace( + name + "_", + suffix="_", + ) + abc_objects = [] + for abc_object in abc_container.Children: + abc_object.name = f"{namespace}:{abc_object.name}" + abc_objects.append(abc_object) + # rename the abc container with namespace + abc_container_name = f"{namespace}:{name}" + abc_container.name = abc_container_name + abc_objects.append(abc_container) + + return containerise( + name, abc_objects, context, + namespace, loader=self.__class__.__name__ + ) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.GetNodeByName(container["instance_node"]) + node_list = [n for n in get_previous_loaded_object(node) + if rt.ClassOf(n) == rt.AlembicContainer] + with maintained_selection(): + rt.Select(node_list) + + for alembic in rt.Selection: + abc = rt.GetNodeByName(alembic.name) + rt.Select(abc.Children) + for abc_con in abc.Children: + abc_con.source = path + rt.Select(abc_con.Children) + for abc_obj in abc_con.Children: + abc_obj.source = path + lib.imprint( + container["instance_node"], + {"representation": str(representation["_id"])}, + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, 
container): + from pymxs import runtime as rt + + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) + + @staticmethod + def get_container_children(parent, type_name): + from pymxs import runtime as rt + + def list_children(node): + children = [] + for c in node.Children: + children.append(c) + children += list_children(c) + return children + + filtered = [] + for child in list_children(parent): + class_type = str(rt.ClassOf(child.baseObject)) + if class_type == type_name: + filtered.append(child) + + return filtered diff --git a/openpype/hosts/max/plugins/load/load_model_fbx.py b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py similarity index 92% rename from openpype/hosts/max/plugins/load/load_model_fbx.py rename to client/ayon_core/hosts/max/plugins/load/load_model_fbx.py index 71fc382eed..827cf63b39 100644 --- a/openpype/hosts/max/plugins/load/load_model_fbx.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py @@ -1,16 +1,16 @@ import os -from openpype.pipeline import load, get_representation_path -from openpype.hosts.max.api.pipeline import ( +from ayon_core.pipeline import load, get_representation_path +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object, update_custom_attribute_data ) -from openpype.hosts.max.api import lib -from openpype.hosts.max.api.lib import ( +from ayon_core.hosts.max.api import lib +from ayon_core.hosts.max.api.lib import ( unique_namespace, get_namespace, object_transform_set ) -from openpype.hosts.max.api.lib import maintained_selection +from ayon_core.hosts.max.api.lib import maintained_selection class FbxModelLoader(load.LoaderPlugin): diff --git a/openpype/hosts/max/plugins/load/load_model_obj.py b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py similarity index 91% rename from openpype/hosts/max/plugins/load/load_model_obj.py rename to client/ayon_core/hosts/max/plugins/load/load_model_obj.py index aedb288a2d..22d3d4b58a 100644 --- a/openpype/hosts/max/plugins/load/load_model_obj.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py @@ -1,19 +1,19 @@ import os -from openpype.hosts.max.api import lib -from openpype.hosts.max.api.lib import ( +from ayon_core.hosts.max.api import lib +from ayon_core.hosts.max.api.lib import ( unique_namespace, get_namespace, maintained_selection, object_transform_set ) -from openpype.hosts.max.api.lib import maintained_selection -from openpype.hosts.max.api.pipeline import ( +from ayon_core.hosts.max.api.lib import maintained_selection +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object, update_custom_attribute_data ) -from openpype.pipeline import get_representation_path, load +from ayon_core.pipeline import get_representation_path, load class ObjLoader(load.LoaderPlugin): diff --git a/openpype/hosts/max/plugins/load/load_model_usd.py b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py similarity index 92% rename from openpype/hosts/max/plugins/load/load_model_usd.py rename to client/ayon_core/hosts/max/plugins/load/load_model_usd.py index bce4bd4a9a..8d42219217 100644 --- a/openpype/hosts/max/plugins/load/load_model_usd.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py @@ -1,21 +1,21 @@ import os from pymxs import runtime as rt -from openpype.pipeline.load import LoadError -from openpype.hosts.max.api import lib -from openpype.hosts.max.api.lib import ( +from ayon_core.pipeline.load import LoadError +from ayon_core.hosts.max.api import lib +from 
ayon_core.hosts.max.api.lib import ( unique_namespace, get_namespace, object_transform_set, get_plugins ) -from openpype.hosts.max.api.lib import maintained_selection -from openpype.hosts.max.api.pipeline import ( +from ayon_core.hosts.max.api.lib import maintained_selection +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object, update_custom_attribute_data ) -from openpype.pipeline import get_representation_path, load +from ayon_core.pipeline import get_representation_path, load class ModelUSDLoader(load.LoaderPlugin): diff --git a/openpype/hosts/max/plugins/load/load_pointcache.py b/client/ayon_core/hosts/max/plugins/load/load_pointcache.py similarity index 94% rename from openpype/hosts/max/plugins/load/load_pointcache.py rename to client/ayon_core/hosts/max/plugins/load/load_pointcache.py index 3c2dfe8c25..a92fa66757 100644 --- a/openpype/hosts/max/plugins/load/load_pointcache.py +++ b/client/ayon_core/hosts/max/plugins/load/load_pointcache.py @@ -5,10 +5,10 @@ """ import os -from openpype.pipeline import load, get_representation_path -from openpype.hosts.max.api import lib, maintained_selection -from openpype.hosts.max.api.lib import unique_namespace -from openpype.hosts.max.api.pipeline import ( +from ayon_core.pipeline import load, get_representation_path +from ayon_core.hosts.max.api import lib, maintained_selection +from ayon_core.hosts.max.api.lib import unique_namespace +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object ) diff --git a/openpype/hosts/max/plugins/load/load_pointcache_ornatrix.py b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py similarity index 93% rename from openpype/hosts/max/plugins/load/load_pointcache_ornatrix.py rename to client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py index 96060a6a6f..27b2e271d2 100644 --- a/openpype/hosts/max/plugins/load/load_pointcache_ornatrix.py +++ b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py @@ -1,19 +1,19 @@ import os -from openpype.pipeline import load, get_representation_path -from openpype.pipeline.load import LoadError -from openpype.hosts.max.api.pipeline import ( +from ayon_core.pipeline import load, get_representation_path +from ayon_core.pipeline.load import LoadError +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object, update_custom_attribute_data ) -from openpype.hosts.max.api.lib import ( +from ayon_core.hosts.max.api.lib import ( unique_namespace, get_namespace, object_transform_set, get_plugins ) -from openpype.hosts.max.api import lib +from ayon_core.hosts.max.api import lib from pymxs import runtime as rt diff --git a/openpype/hosts/max/plugins/load/load_pointcloud.py b/client/ayon_core/hosts/max/plugins/load/load_pointcloud.py similarity index 89% rename from openpype/hosts/max/plugins/load/load_pointcloud.py rename to client/ayon_core/hosts/max/plugins/load/load_pointcloud.py index e0317a2e22..45e3da5621 100644 --- a/openpype/hosts/max/plugins/load/load_pointcloud.py +++ b/client/ayon_core/hosts/max/plugins/load/load_pointcloud.py @@ -1,16 +1,16 @@ import os -from openpype.hosts.max.api import lib, maintained_selection -from openpype.hosts.max.api.lib import ( +from ayon_core.hosts.max.api import lib, maintained_selection +from ayon_core.hosts.max.api.lib import ( unique_namespace, ) -from openpype.hosts.max.api.pipeline import ( +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object, 
update_custom_attribute_data ) -from openpype.pipeline import get_representation_path, load +from ayon_core.pipeline import get_representation_path, load class PointCloudLoader(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/max/plugins/load/load_redshift_proxy.py b/client/ayon_core/hosts/max/plugins/load/load_redshift_proxy.py new file mode 100644 index 0000000000..3f73210c24 --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/load/load_redshift_proxy.py @@ -0,0 +1,77 @@ +import os +import clique + +from ayon_core.pipeline import ( + load, + get_representation_path +) +from ayon_core.pipeline.load import LoadError +from ayon_core.hosts.max.api.pipeline import ( + containerise, + update_custom_attribute_data, + get_previous_loaded_object +) +from ayon_core.hosts.max.api import lib +from ayon_core.hosts.max.api.lib import ( + unique_namespace, + get_plugins +) + + +class RedshiftProxyLoader(load.LoaderPlugin): + """Load rs files with Redshift Proxy""" + + label = "Load Redshift Proxy" + families = ["redshiftproxy"] + representations = ["rs"] + order = -9 + icon = "code-fork" + color = "white" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + plugin_info = get_plugins() + if "redshift4max.dlr" not in plugin_info: + raise LoadError("Redshift not loaded/installed in Max..") + filepath = self.filepath_from_context(context) + rs_proxy = rt.RedshiftProxy() + rs_proxy.file = filepath + files_in_folder = os.listdir(os.path.dirname(filepath)) + collections, remainder = clique.assemble(files_in_folder) + if collections: + rs_proxy.is_sequence = True + + namespace = unique_namespace( + name + "_", + suffix="_", + ) + rs_proxy.name = f"{namespace}:{rs_proxy.name}" + + return containerise( + name, [rs_proxy], context, + namespace, loader=self.__class__.__name__) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + node_list = get_previous_loaded_object(node) + rt.Select(node_list) + update_custom_attribute_data( + node, rt.Selection) + for proxy in rt.Selection: + proxy.file = path + + lib.imprint(container["instance_node"], { + "representation": str(representation["_id"]) + }) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/load/load_tycache.py b/client/ayon_core/hosts/max/plugins/load/load_tycache.py similarity index 88% rename from openpype/hosts/max/plugins/load/load_tycache.py rename to client/ayon_core/hosts/max/plugins/load/load_tycache.py index 41ea267c3d..48fb5c447a 100644 --- a/openpype/hosts/max/plugins/load/load_tycache.py +++ b/client/ayon_core/hosts/max/plugins/load/load_tycache.py @@ -1,15 +1,15 @@ import os -from openpype.hosts.max.api import lib, maintained_selection -from openpype.hosts.max.api.lib import ( +from ayon_core.hosts.max.api import lib, maintained_selection +from ayon_core.hosts.max.api.lib import ( unique_namespace, ) -from openpype.hosts.max.api.pipeline import ( +from ayon_core.hosts.max.api.pipeline import ( containerise, get_previous_loaded_object, update_custom_attribute_data ) -from openpype.pipeline import get_representation_path, load +from ayon_core.pipeline import get_representation_path, load class TyCacheLoader(load.LoaderPlugin): diff --git 
a/openpype/hosts/max/plugins/publish/collect_frame_range.py b/client/ayon_core/hosts/max/plugins/publish/collect_frame_range.py
similarity index 100%
rename from openpype/hosts/max/plugins/publish/collect_frame_range.py
rename to client/ayon_core/hosts/max/plugins/publish/collect_frame_range.py
diff --git a/openpype/hosts/max/plugins/publish/collect_members.py b/client/ayon_core/hosts/max/plugins/publish/collect_members.py
similarity index 100%
rename from openpype/hosts/max/plugins/publish/collect_members.py
rename to client/ayon_core/hosts/max/plugins/publish/collect_members.py
diff --git a/client/ayon_core/hosts/max/plugins/publish/collect_render.py b/client/ayon_core/hosts/max/plugins/publish/collect_render.py
new file mode 100644
index 0000000000..a97e8a154e
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/collect_render.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+"""Collect Render"""
+import os
+import pyblish.api
+
+from pymxs import runtime as rt
+from ayon_core.pipeline.publish import KnownPublishError
+from ayon_core.hosts.max.api import colorspace
+from ayon_core.hosts.max.api.lib import get_max_version, get_current_renderer
+from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings
+from ayon_core.hosts.max.api.lib_renderproducts import RenderProducts
+
+
+class CollectRender(pyblish.api.InstancePlugin):
+    """Collect Render for Deadline"""
+
+    order = pyblish.api.CollectorOrder + 0.02
+    label = "Collect 3dsmax Render Layers"
+    hosts = ['max']
+    families = ["maxrender"]
+
+    def process(self, instance):
+        context = instance.context
+        folder = rt.maxFilePath
+        file = rt.maxFileName
+        current_file = os.path.join(folder, file)
+        filepath = current_file.replace("\\", "/")
+        context.data['currentFile'] = current_file
+
+        files_by_aov = RenderProducts().get_beauty(instance.name)
+        aovs = RenderProducts().get_aovs(instance.name)
+        files_by_aov.update(aovs)
+
+        camera = rt.viewport.GetCamera()
+        if instance.data.get("members"):
+            camera_list = [member for member in instance.data["members"]
+                           if rt.ClassOf(member) in rt.Camera.classes]
+            if camera_list:
+                camera = camera_list[-1]
+
+        instance.data["cameras"] = [camera.name] if camera else None  # noqa
+
+        if instance.data.get("multiCamera"):
+            cameras = instance.data.get("members")
+            if not cameras:
+                raise KnownPublishError("There should be at least"
+                                        " one renderable camera in the container")
+            sel_cam = [
+                c.name for c in cameras
+                if rt.classOf(c) in rt.Camera.classes]
+            container_name = instance.data.get("instance_node")
+            render_dir = os.path.dirname(rt.rendOutputFilename)
+            outputs = RenderSettings().batch_render_layer(
+                container_name, render_dir, sel_cam
+            )
+
+            instance.data["cameras"] = sel_cam
+
+            files_by_aov = RenderProducts().get_multiple_beauty(
+                outputs, sel_cam)
+            aovs = RenderProducts().get_multiple_aovs(
+                outputs, sel_cam)
+            files_by_aov.update(aovs)
+
+        if "expectedFiles" not in instance.data:
+            instance.data["expectedFiles"] = list()
+            instance.data["files"] = list()
+        instance.data["expectedFiles"].append(files_by_aov)
+        instance.data["files"].append(files_by_aov)
+
+        img_format = RenderProducts().image_format()
+        # OCIO config is not supported in
+        # most of the 3ds Max renderers,
+        # so this is currently hard-coded.
+        # TODO: add options for redshift/vray ocio config
+        instance.data["colorspaceConfig"] = ""
+        instance.data["colorspaceDisplay"] = "sRGB"
+        instance.data["colorspaceView"] = "ACES 1.0 SDR-video"
+
+        if int(get_max_version()) >= 2024:
+            colorspace_mgr = rt.ColorPipelineMgr  # noqa
+            display = next(
+                (display for display in colorspace_mgr.GetDisplayList()))
+            view_transform = next(
+                (view for view in colorspace_mgr.GetViewList(display)))
+            instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath
+            instance.data["colorspaceDisplay"] = display
+            instance.data["colorspaceView"] = view_transform
+
+        instance.data["renderProducts"] = colorspace.ARenderProduct()
+        instance.data["publishJobState"] = "Suspended"
+        instance.data["attachTo"] = []
+        renderer_class = get_current_renderer()
+        renderer = str(renderer_class).split(":")[0]
+        # also need to get the render dir for conversion
+        data = {
+            "asset": instance.data["asset"],
+            "subset": str(instance.name),
+            "publish": True,
+            "maxversion": str(get_max_version()),
+            "imageFormat": img_format,
+            "family": 'maxrender',
+            "families": ['maxrender'],
+            "renderer": renderer,
+            "source": filepath,
+            "plugin": "3dsmax",
+            "frameStart": instance.data["frameStartHandle"],
+            "frameEnd": instance.data["frameEndHandle"],
+            "farm": True
+        }
+        instance.data.update(data)
+
+        # TODO: this should be unified with maya and its "multipart" flag
+        # on instance.
+        if renderer == "Redshift_Renderer":
+            instance.data.update(
+                {"separateAovFiles": rt.Execute(
+                    "renderers.current.separateAovFiles")})
+
+        self.log.info("data: {0}".format(data))
diff --git a/client/ayon_core/hosts/max/plugins/publish/collect_review.py b/client/ayon_core/hosts/max/plugins/publish/collect_review.py
new file mode 100644
index 0000000000..d746e2b2db
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/collect_review.py
@@ -0,0 +1,153 @@
+# Note: the camera focal length is collected below for the burnin data.
+"""Collect Review"""
+import pyblish.api
+
+from pymxs import runtime as rt
+from ayon_core.lib import BoolDef
+from ayon_core.hosts.max.api.lib import get_max_version
+from ayon_core.pipeline.publish import (
+    AYONPyblishPluginMixin,
+    KnownPublishError
+)
+
+
+class CollectReview(pyblish.api.InstancePlugin,
+                    AYONPyblishPluginMixin):
+    """Collect Review Data for Preview Animation"""
+
+    order = pyblish.api.CollectorOrder + 0.02
+    label = "Collect Review Data"
+    hosts = ['max']
+    families = ["review"]
+
+    def process(self, instance):
+        nodes = instance.data["members"]
+
+        def is_camera(node):
+            is_camera_class = rt.classOf(node) in rt.Camera.classes
+            return is_camera_class and rt.isProperty(node, "fov")
+
+        # Use first camera in instance
+        cameras = [node for node in nodes if is_camera(node)]
+        if cameras:
+            if len(cameras) > 1:
+                self.log.warning(
+                    "Found more than one camera in instance, using first "
+                    f"one found: {cameras[0]}"
+                )
+            camera = cameras[0]
+            camera_name = camera.name
+            focal_length = camera.fov
+        else:
+            raise KnownPublishError(
+                "Unable to find a valid camera in 'Review' container."
+                " Only native 3ds Max cameras are supported. 
" + f"Found objects: {nodes}" + ) + creator_attrs = instance.data["creator_attributes"] + attr_values = self.get_attr_values_from_data(instance.data) + + general_preview_data = { + "review_camera": camera_name, + "frameStart": instance.data["frameStartHandle"], + "frameEnd": instance.data["frameEndHandle"], + "percentSize": creator_attrs["percentSize"], + "imageFormat": creator_attrs["imageFormat"], + "keepImages": creator_attrs["keepImages"], + "fps": instance.context.data["fps"], + "review_width": creator_attrs["review_width"], + "review_height": creator_attrs["review_height"], + } + + if int(get_max_version()) >= 2024: + colorspace_mgr = rt.ColorPipelineMgr # noqa + display = next( + (display for display in colorspace_mgr.GetDisplayList())) + view_transform = next( + (view for view in colorspace_mgr.GetViewList(display))) + instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath + instance.data["colorspaceDisplay"] = display + instance.data["colorspaceView"] = view_transform + + preview_data = { + "vpStyle": creator_attrs["visualStyleMode"], + "vpPreset": creator_attrs["viewportPreset"], + "vpTextures": creator_attrs["vpTexture"], + "dspGeometry": attr_values.get("dspGeometry"), + "dspShapes": attr_values.get("dspShapes"), + "dspLights": attr_values.get("dspLights"), + "dspCameras": attr_values.get("dspCameras"), + "dspHelpers": attr_values.get("dspHelpers"), + "dspParticles": attr_values.get("dspParticles"), + "dspBones": attr_values.get("dspBones"), + "dspBkg": attr_values.get("dspBkg"), + "dspGrid": attr_values.get("dspGrid"), + "dspSafeFrame": attr_values.get("dspSafeFrame"), + "dspFrameNums": attr_values.get("dspFrameNums") + } + else: + general_viewport = { + "dspBkg": attr_values.get("dspBkg"), + "dspGrid": attr_values.get("dspGrid") + } + nitrous_manager = { + "AntialiasingQuality": creator_attrs["antialiasingQuality"], + } + nitrous_viewport = { + "VisualStyleMode": creator_attrs["visualStyleMode"], + "ViewportPreset": creator_attrs["viewportPreset"], + "UseTextureEnabled": creator_attrs["vpTexture"] + } + preview_data = { + "general_viewport": general_viewport, + "nitrous_manager": nitrous_manager, + "nitrous_viewport": nitrous_viewport, + "vp_btn_mgr": {"EnableButtons": False} + } + + # Enable ftrack functionality + instance.data.setdefault("families", []).append('ftrack') + + burnin_members = instance.data.setdefault("burninDataMembers", {}) + burnin_members["focalLength"] = focal_length + + instance.data.update(general_preview_data) + instance.data["viewport_options"] = preview_data + + @classmethod + def get_attribute_defs(cls): + return [ + BoolDef("dspGeometry", + label="Geometry", + default=True), + BoolDef("dspShapes", + label="Shapes", + default=False), + BoolDef("dspLights", + label="Lights", + default=False), + BoolDef("dspCameras", + label="Cameras", + default=False), + BoolDef("dspHelpers", + label="Helpers", + default=False), + BoolDef("dspParticles", + label="Particle Systems", + default=True), + BoolDef("dspBones", + label="Bone Objects", + default=False), + BoolDef("dspBkg", + label="Background", + default=True), + BoolDef("dspGrid", + label="Active Grid", + default=False), + BoolDef("dspSafeFrame", + label="Safe Frames", + default=False), + BoolDef("dspFrameNums", + label="Frame Numbers", + default=False) + ] diff --git a/openpype/hosts/max/plugins/publish/collect_tycache_attributes.py b/client/ayon_core/hosts/max/plugins/publish/collect_tycache_attributes.py similarity index 95% rename from 
openpype/hosts/max/plugins/publish/collect_tycache_attributes.py
rename to client/ayon_core/hosts/max/plugins/publish/collect_tycache_attributes.py
index 0351ca45c5..4855e952d8 100644
--- a/openpype/hosts/max/plugins/publish/collect_tycache_attributes.py
+++ b/client/ayon_core/hosts/max/plugins/publish/collect_tycache_attributes.py
@@ -1,11 +1,11 @@
 import pyblish.api
 
-from openpype.lib import EnumDef, TextDef
-from openpype.pipeline.publish import OpenPypePyblishPluginMixin
+from ayon_core.lib import EnumDef, TextDef
+from ayon_core.pipeline.publish import AYONPyblishPluginMixin
 
 
 class CollectTyCacheData(pyblish.api.InstancePlugin,
-                         OpenPypePyblishPluginMixin):
+                         AYONPyblishPluginMixin):
     """Collect Channel Attributes for TyCache Export"""
 
     order = pyblish.api.CollectorOrder + 0.02
diff --git a/openpype/hosts/max/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/max/plugins/publish/collect_workfile.py
similarity index 100%
rename from openpype/hosts/max/plugins/publish/collect_workfile.py
rename to client/ayon_core/hosts/max/plugins/publish/collect_workfile.py
diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
new file mode 100644
index 0000000000..67b5174200
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+"""
+Export alembic file.
+
+Note:
+    Parameters on AlembicExport (AlembicExport.Parameter):
+
+    ParticleAsMesh (bool): Sets whether particle shapes are exported
+        as meshes.
+    AnimTimeRange (enum): How animation is saved:
+        #CurrentFrame: saves current frame
+        #TimeSlider: saves the active time segments on time slider (default)
+        #StartEnd: saves the range specified by StartFrame and EndFrame
+    StartFrame (int)
+    EndFrame (int)
+    ShapeSuffix (bool): When set to true, appends the string "Shape" to the
+        name of each exported mesh. This property is set to false by default.
+    SamplesPerFrame (int): Sets the number of animation samples per frame.
+    Hidden (bool): When true, export hidden geometry.
+    UVs (bool): When true, export the mesh UV map channel.
+    Normals (bool): When true, export the mesh normals.
+    VertexColors (bool): When true, export the mesh vertex color map 0 and the
+        current vertex color display data when it differs
+    ExtraChannels (bool): When true, export the mesh extra map channels
+        (map channels greater than channel 1)
+    Velocity (bool): When true, export the mesh vertex and particle velocity
+        data.
+    MaterialIDs (bool): When true, export the mesh material ID as
+        Alembic face sets.
+    Visibility (bool): When true, export the node visibility data.
+    LayerName (bool): When true, export the node layer name as an Alembic
+        object property.
+    MaterialName (bool): When true, export the geometry node material name as
+        an Alembic object property.
+    ObjectID (bool): When true, export the geometry node g-buffer object ID as
+        an Alembic object property.
+    CustomAttributes (bool): When true, export the node and its modifiers
+        custom attributes into an Alembic object compound property.
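The parameters documented in this note correspond to properties on `rt.AlembicExport`, the same object `_set_abc_attributes` configures below before calling `rt.exportFile`. A minimal sketch of that usage; it only runs inside 3ds Max (where `pymxs` is available), and the frame range and output path are hypothetical:

```python
from pymxs import runtime as rt

# Save an explicit range instead of the time slider segment.
rt.AlembicExport.AnimTimeRange = rt.Name("StartEnd")
rt.AlembicExport.StartFrame = 1001
rt.AlembicExport.EndFrame = 1100

# Channels to include in the archive.
rt.AlembicExport.UVs = True
rt.AlembicExport.Normals = True
rt.AlembicExport.CustomAttributes = True

# Export only the current selection to a hypothetical path.
rt.exportFile(
    r"C:\temp\selection.abc",
    rt.Name("noPrompt"),
    selectedOnly=True,
    using=rt.AlembicExport,
)
```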
+""" +import os +import pyblish.api +from ayon_core.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from ayon_core.hosts.max.api import maintained_selection +from ayon_core.hosts.max.api.lib import suspended_refresh +from ayon_core.lib import BoolDef + + +class ExtractAlembic(publish.Extractor, + OptionalPyblishPluginMixin): + order = pyblish.api.ExtractorOrder + label = "Extract Pointcache" + hosts = ["max"] + families = ["pointcache"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + parent_dir = self.staging_dir(instance) + file_name = "{name}.abc".format(**instance.data) + path = os.path.join(parent_dir, file_name) + + with suspended_refresh(): + self._set_abc_attributes(instance) + with maintained_selection(): + # select and export + node_list = instance.data["members"] + rt.Select(node_list) + rt.exportFile( + path, + rt.name("noPrompt"), + selectedOnly=True, + using=rt.AlembicExport, + ) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "abc", + "ext": "abc", + "files": file_name, + "stagingDir": parent_dir, + } + instance.data["representations"].append(representation) + + def _set_abc_attributes(self, instance): + start = instance.data["frameStartHandle"] + end = instance.data["frameEndHandle"] + attr_values = self.get_attr_values_from_data(instance.data) + custom_attrs = attr_values.get("custom_attrs", False) + if not custom_attrs: + self.log.debug( + "No Custom Attributes included in this abc export...") + rt.AlembicExport.ArchiveType = rt.Name("ogawa") + rt.AlembicExport.CoordinateSystem = rt.Name("maya") + rt.AlembicExport.StartFrame = start + rt.AlembicExport.EndFrame = end + rt.AlembicExport.CustomAttributes = custom_attrs + + @classmethod + def get_attribute_defs(cls): + return [ + BoolDef("custom_attrs", + label="Custom Attributes", + default=False), + ] + + +class ExtractCameraAlembic(ExtractAlembic): + """Extract Camera with AlembicExport.""" + + label = "Extract Alembic Camera" + families = ["camera"] + + +class ExtractModel(ExtractAlembic): + """Extract Geometry in Alembic Format""" + label = "Extract Geometry (Alembic)" + families = ["model"] + + def _set_abc_attributes(self, instance): + attr_values = self.get_attr_values_from_data(instance.data) + custom_attrs = attr_values.get("custom_attrs", False) + if not custom_attrs: + self.log.debug( + "No Custom Attributes included in this abc export...") + rt.AlembicExport.ArchiveType = rt.name("ogawa") + rt.AlembicExport.CoordinateSystem = rt.name("maya") + rt.AlembicExport.CustomAttributes = custom_attrs + rt.AlembicExport.UVs = True + rt.AlembicExport.VertexColors = True + rt.AlembicExport.PreserveInstances = True diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_fbx.py b/client/ayon_core/hosts/max/plugins/publish/extract_fbx.py new file mode 100644 index 0000000000..3d80588c47 --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/publish/extract_fbx.py @@ -0,0 +1,83 @@ +import os +import pyblish.api +from ayon_core.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from ayon_core.hosts.max.api import maintained_selection +from ayon_core.hosts.max.api.lib import convert_unit_scale + + +class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin): + """ + Extract Geometry in FBX Format + """ + + order = pyblish.api.ExtractorOrder - 0.05 + label = "Extract FBX" + hosts = ["max"] + families = ["model"] + 
optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + stagingdir = self.staging_dir(instance) + filename = "{name}.fbx".format(**instance.data) + filepath = os.path.join(stagingdir, filename) + self._set_fbx_attributes() + + with maintained_selection(): + # select and export + node_list = instance.data["members"] + rt.Select(node_list) + rt.exportFile( + filepath, + rt.name("noPrompt"), + selectedOnly=True, + using=rt.FBXEXP, + ) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "fbx", + "ext": "fbx", + "files": filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info( + "Extracted instance '%s' to: %s" % (instance.name, filepath) + ) + + def _set_fbx_attributes(self): + unit_scale = convert_unit_scale() + rt.FBXExporterSetParam("Animation", False) + rt.FBXExporterSetParam("Cameras", False) + rt.FBXExporterSetParam("Lights", False) + rt.FBXExporterSetParam("PointCache", False) + rt.FBXExporterSetParam("AxisConversionMethod", "Animation") + rt.FBXExporterSetParam("UpAxis", "Y") + rt.FBXExporterSetParam("Preserveinstances", True) + if unit_scale: + rt.FBXExporterSetParam("ConvertUnit", unit_scale) + + +class ExtractCameraFbx(ExtractModelFbx): + """Extract Camera with FbxExporter.""" + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Extract Fbx Camera" + families = ["camera"] + optional = True + + def _set_fbx_attributes(self): + unit_scale = convert_unit_scale() + rt.FBXExporterSetParam("Animation", True) + rt.FBXExporterSetParam("Cameras", True) + rt.FBXExporterSetParam("AxisConversionMethod", "Animation") + rt.FBXExporterSetParam("UpAxis", "Y") + rt.FBXExporterSetParam("Preserveinstances", True) + if unit_scale: + rt.FBXExporterSetParam("ConvertUnit", unit_scale) diff --git a/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py b/client/ayon_core/hosts/max/plugins/publish/extract_max_scene_raw.py similarity index 95% rename from openpype/hosts/max/plugins/publish/extract_max_scene_raw.py rename to client/ayon_core/hosts/max/plugins/publish/extract_max_scene_raw.py index 791cc65fcd..f5c703564c 100644 --- a/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_max_scene_raw.py @@ -1,6 +1,6 @@ import os import pyblish.api -from openpype.pipeline import publish, OptionalPyblishPluginMixin +from ayon_core.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt diff --git a/openpype/hosts/max/plugins/publish/extract_model_obj.py b/client/ayon_core/hosts/max/plugins/publish/extract_model_obj.py similarity index 86% rename from openpype/hosts/max/plugins/publish/extract_model_obj.py rename to client/ayon_core/hosts/max/plugins/publish/extract_model_obj.py index 8464353164..03bdde7d5d 100644 --- a/openpype/hosts/max/plugins/publish/extract_model_obj.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_model_obj.py @@ -1,10 +1,10 @@ import os import pyblish.api -from openpype.pipeline import publish, OptionalPyblishPluginMixin +from ayon_core.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection -from openpype.hosts.max.api.lib import suspended_refresh -from openpype.pipeline.publish import KnownPublishError +from ayon_core.hosts.max.api import maintained_selection +from ayon_core.hosts.max.api.lib import suspended_refresh +from 
ayon_core.pipeline.publish import KnownPublishError class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin): diff --git a/openpype/hosts/max/plugins/publish/extract_model_usd.py b/client/ayon_core/hosts/max/plugins/publish/extract_model_usd.py similarity index 96% rename from openpype/hosts/max/plugins/publish/extract_model_usd.py rename to client/ayon_core/hosts/max/plugins/publish/extract_model_usd.py index da37c77bf7..64791e4c7d 100644 --- a/openpype/hosts/max/plugins/publish/extract_model_usd.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_model_usd.py @@ -3,8 +3,8 @@ import pyblish.api from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection -from openpype.pipeline import OptionalPyblishPluginMixin, publish +from ayon_core.hosts.max.api import maintained_selection +from ayon_core.pipeline import OptionalPyblishPluginMixin, publish class ExtractModelUSD(publish.Extractor, diff --git a/openpype/hosts/max/plugins/publish/extract_pointcloud.py b/client/ayon_core/hosts/max/plugins/publish/extract_pointcloud.py similarity index 98% rename from openpype/hosts/max/plugins/publish/extract_pointcloud.py rename to client/ayon_core/hosts/max/plugins/publish/extract_pointcloud.py index d9fbe5e9dd..294d63794e 100644 --- a/openpype/hosts/max/plugins/publish/extract_pointcloud.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_pointcloud.py @@ -3,8 +3,8 @@ import pyblish.api from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection -from openpype.pipeline import publish +from ayon_core.hosts.max.api import maintained_selection +from ayon_core.pipeline import publish class ExtractPointCloud(publish.Extractor): diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_redshift_proxy.py b/client/ayon_core/hosts/max/plugins/publish/extract_redshift_proxy.py new file mode 100644 index 0000000000..6a647670bc --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/publish/extract_redshift_proxy.py @@ -0,0 +1,61 @@ +import os +import pyblish.api +from ayon_core.pipeline import publish +from pymxs import runtime as rt +from ayon_core.hosts.max.api import maintained_selection + + +class ExtractRedshiftProxy(publish.Extractor): + """ + Extract Redshift Proxy with rsProxy + """ + + order = pyblish.api.ExtractorOrder - 0.1 + label = "Extract RedShift Proxy" + hosts = ["max"] + families = ["redshiftproxy"] + + def process(self, instance): + start = instance.data["frameStartHandle"] + end = instance.data["frameEndHandle"] + + self.log.debug("Extracting Redshift Proxy...") + stagingdir = self.staging_dir(instance) + rs_filename = "{name}.rs".format(**instance.data) + rs_filepath = os.path.join(stagingdir, rs_filename) + rs_filepath = rs_filepath.replace("\\", "/") + + rs_filenames = self.get_rsfiles(instance, start, end) + + with maintained_selection(): + # select and export + node_list = instance.data["members"] + rt.Select(node_list) + # Redshift rsProxy command + # rsProxy fp selected compress connectivity startFrame endFrame + # camera warnExisting transformPivotToOrigin + rt.rsProxy(rs_filepath, 1, 0, 0, start, end, 0, 1, 1) + + self.log.info("Performing Extraction ...") + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'rs', + 'ext': 'rs', + 'files': rs_filenames if len(rs_filenames) > 1 else rs_filenames[0], # noqa + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info("Extracted instance 
'%s' to: %s" % (instance.name, + stagingdir)) + + def get_rsfiles(self, instance, startFrame, endFrame): + rs_filenames = [] + rs_name = instance.data["name"] + for frame in range(startFrame, endFrame + 1): + rs_filename = "%s.%04d.rs" % (rs_name, frame) + rs_filenames.append(rs_filename) + + return rs_filenames diff --git a/openpype/hosts/max/plugins/publish/extract_review_animation.py b/client/ayon_core/hosts/max/plugins/publish/extract_review_animation.py similarity index 95% rename from openpype/hosts/max/plugins/publish/extract_review_animation.py rename to client/ayon_core/hosts/max/plugins/publish/extract_review_animation.py index 99dc5c5cdc..12f1fbb63b 100644 --- a/openpype/hosts/max/plugins/publish/extract_review_animation.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_review_animation.py @@ -1,7 +1,7 @@ import os import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.max.api.preview_animation import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.max.api.preview_animation import ( render_preview_animation ) diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_thumbnail.py b/client/ayon_core/hosts/max/plugins/publish/extract_thumbnail.py new file mode 100644 index 0000000000..5764ce98c4 --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/publish/extract_thumbnail.py @@ -0,0 +1,51 @@ +import os +import pyblish.api +from ayon_core.pipeline import publish +from ayon_core.hosts.max.api.preview_animation import render_preview_animation + + +class ExtractThumbnail(publish.Extractor): + """Extract Thumbnail for Review + """ + + order = pyblish.api.ExtractorOrder + label = "Extract Thumbnail" + hosts = ["max"] + families = ["review"] + + def process(self, instance): + ext = instance.data.get("imageFormat") + frame = int(instance.data["frameStart"]) + staging_dir = self.staging_dir(instance) + filepath = os.path.join( + staging_dir, f"{instance.name}_thumbnail") + self.log.debug("Writing Thumbnail to '{}'".format(filepath)) + + review_camera = instance.data["review_camera"] + viewport_options = instance.data.get("viewport_options", {}) + files = render_preview_animation( + filepath, + ext, + review_camera, + start_frame=frame, + end_frame=frame, + percentSize=instance.data["percentSize"], + width=instance.data["review_width"], + height=instance.data["review_height"], + viewport_options=viewport_options) + + thumbnail = next(os.path.basename(path) for path in files) + + representation = { + "name": "thumbnail", + "ext": ext, + "files": thumbnail, + "stagingDir": staging_dir, + "thumbnail": True + } + + self.log.debug(f"{representation}") + + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(representation) diff --git a/openpype/hosts/max/plugins/publish/extract_tycache.py b/client/ayon_core/hosts/max/plugins/publish/extract_tycache.py similarity index 98% rename from openpype/hosts/max/plugins/publish/extract_tycache.py rename to client/ayon_core/hosts/max/plugins/publish/extract_tycache.py index 9bfe74f679..50bb06a765 100644 --- a/openpype/hosts/max/plugins/publish/extract_tycache.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_tycache.py @@ -3,8 +3,8 @@ import pyblish.api from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection -from openpype.pipeline import publish +from ayon_core.hosts.max.api import maintained_selection +from ayon_core.pipeline import publish class ExtractTyCache(publish.Extractor): 
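The extractors above share one convention for representation dicts: `files` is a list of file names for a frame sequence but a bare string for a single file (as in `ExtractRedshiftProxy`). A small self-contained sketch of that convention:

```python
def make_representation(name, ext, filenames, staging_dir):
    # Integrators expect "files" to be a plain string for a single file
    # and a list only for multi-file sequences.
    return {
        "name": name,
        "ext": ext,
        "files": filenames if len(filenames) > 1 else filenames[0],
        "stagingDir": staging_dir,
    }


# A three-frame Redshift proxy sequence (hypothetical names):
print(make_representation(
    "rs", "rs",
    ["proxy.1001.rs", "proxy.1002.rs", "proxy.1003.rs"],
    "/tmp/staging",
))
```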
diff --git a/client/ayon_core/hosts/max/plugins/publish/increment_workfile_version.py b/client/ayon_core/hosts/max/plugins/publish/increment_workfile_version.py
new file mode 100644
index 0000000000..5f319966fe
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/increment_workfile_version.py
@@ -0,0 +1,19 @@
+import pyblish.api
+from ayon_core.lib import version_up
+from pymxs import runtime as rt
+
+
+class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
+    """Increment current workfile version."""
+
+    order = pyblish.api.IntegratorOrder + 0.9
+    label = "Increment Workfile Version"
+    hosts = ["max"]
+    families = ["workfile"]
+
+    def process(self, context):
+        path = context.data["currentFile"]
+        filepath = version_up(path)
+
+        rt.saveMaxFile(filepath)
+        self.log.info("Incrementing file version")
diff --git a/openpype/hosts/max/plugins/publish/save_scene.py b/client/ayon_core/hosts/max/plugins/publish/save_scene.py
similarity index 100%
rename from openpype/hosts/max/plugins/publish/save_scene.py
rename to client/ayon_core/hosts/max/plugins/publish/save_scene.py
diff --git a/openpype/hosts/max/plugins/publish/save_scenes_for_cameras.py b/client/ayon_core/hosts/max/plugins/publish/save_scenes_for_cameras.py
similarity index 95%
rename from openpype/hosts/max/plugins/publish/save_scenes_for_cameras.py
rename to client/ayon_core/hosts/max/plugins/publish/save_scenes_for_cameras.py
index f089bf663c..817db1b28f 100644
--- a/openpype/hosts/max/plugins/publish/save_scenes_for_cameras.py
+++ b/client/ayon_core/hosts/max/plugins/publish/save_scenes_for_cameras.py
@@ -4,9 +4,9 @@
 import tempfile
 
 from pymxs import runtime as rt
-from openpype.lib import run_subprocess
-from openpype.hosts.max.api.lib_rendersettings import RenderSettings
-from openpype.hosts.max.api.lib_renderproducts import RenderProducts
+from ayon_core.lib import run_subprocess
+from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings
+from ayon_core.hosts.max.api.lib_renderproducts import RenderProducts
 
 
 class SaveScenesForCamera(pyblish.api.InstancePlugin):
diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_attributes.py b/client/ayon_core/hosts/max/plugins/publish/validate_attributes.py
new file mode 100644
index 0000000000..444a8f0829
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/validate_attributes.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+"""Validator for Attributes."""
+from pyblish.api import ContextPlugin, ValidatorOrder
+from pymxs import runtime as rt
+
+from ayon_core.pipeline.publish import (
+    OptionalPyblishPluginMixin,
+    PublishValidationError,
+    RepairContextAction
+)
+
+
+def has_property(object_name, property_name):
+    """Return whether an object has a property with given name"""
+    return rt.Execute(f'isProperty {object_name} "{property_name}"')
+
+
+def is_matching_value(object_name, property_name, value):
+    """Return whether an existing property matches the given value."""
+    property_value = rt.Execute(f"{object_name}.{property_name}")
+
+    # Wrap the property value if the expected value is a string
+    # starting with a `#`
+    if (
+        isinstance(value, str) and
+        value.startswith("#") and
+        not value.endswith(")")
+    ):
+        # Prefix the value with `#`; not applicable to #() array values,
+        # only to enum names, i.e. #bob, #sally
+        property_value = f"#{property_value}"
+
+    return property_value == value
+
+
+class ValidateAttributes(OptionalPyblishPluginMixin,
+                         ContextPlugin):
+    """Validates that attributes from the project settings are consistent
+    with the nodes of the MaxWrapper class in 3ds Max,
+    e.g. "renderers.current.separateAovFiles",
+    "renderers.production.PrimaryGIEngine".
+    Admins need to fill in a dict like the one below and enable this
+    validator for the check:
+    {
+        "renderers.current":{
+            "separateAovFiles" : True
+        },
+        "renderers.production":{
+            "PrimaryGIEngine": "#RS_GIENGINE_BRUTE_FORCE"
+        }
+        ....
+    }
+
+    """
+
+    order = ValidatorOrder
+    hosts = ["max"]
+    label = "Attributes"
+    actions = [RepairContextAction]
+    optional = True
+
+    @classmethod
+    def get_invalid(cls, context):
+        attributes = (
+            context.data["project_settings"]["max"]["publish"]
+            ["ValidateAttributes"]["attributes"]
+        )
+        if not attributes:
+            return
+        invalid = []
+        for object_name, required_properties in attributes.items():
+            if not rt.Execute(f"isValidValue {object_name}"):
+                # Skip checking if the node does not
+                # exist in MaxWrapper Class
+                cls.log.debug(f"Unable to find '{object_name}'."
+                              " Skipping validation of attributes.")
+                continue
+
+            for property_name, value in required_properties.items():
+                if not has_property(object_name, property_name):
+                    cls.log.error(
+                        "Non-existing property: "
+                        f"{object_name}.{property_name}")
+                    invalid.append((object_name, property_name))
+
+                elif not is_matching_value(object_name, property_name, value):
+                    cls.log.error(
+                        f"Invalid value for: {object_name}.{property_name}"
+                        f" should be: {value}")
+                    invalid.append((object_name, property_name))
+
+        return invalid
+
+    def process(self, context):
+        if not self.is_active(context.data):
+            self.log.debug("Skipping Validate Attributes...")
+            return
+        invalid_attributes = self.get_invalid(context)
+        if invalid_attributes:
+            bullet_point_invalid_statement = "\n".join(
+                "- {}".format(invalid) for invalid
+                in invalid_attributes
+            )
+            report = (
+                "Required Attribute(s) have invalid value(s).\n\n"
+                f"{bullet_point_invalid_statement}\n\n"
+                "You can use the repair action to fix them, unless the\n"
+                "property itself does not exist."
+            )
+            raise PublishValidationError(
+                report, title="Invalid Value(s) for Required Attribute(s)")
+
+    @classmethod
+    def repair(cls, context):
+        attributes = (
+            context.data["project_settings"]["max"]["publish"]
+            ["ValidateAttributes"]["attributes"]
+        )
+        invalid_attributes = cls.get_invalid(context)
+        for attrs in invalid_attributes:
+            prop, attr = attrs
+            value = attributes[prop][attr]
+            if isinstance(value, str) and not value.startswith("#"):
+                attribute_fix = '{}.{}="{}"'.format(
                    prop, attr, value
+                )
+            else:
+                attribute_fix = "{}.{}={}".format(
+                    prop, attr, value
+                )
+            rt.Execute(attribute_fix)
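To make the expected `ValidateAttributes` settings shape concrete, here is a payload following the layout described in the docstring above, together with the MAXScript assignments the repair action derives from it (the values are examples only):

```python
# Example ValidateAttributes payload following the documented layout;
# "#..."-prefixed strings stand for MAXScript name literals (enums).
attributes = {
    "renderers.current": {
        "separateAovFiles": True,
    },
    "renderers.production": {
        "PrimaryGIEngine": "#RS_GIENGINE_BRUTE_FORCE",
    },
}

# The repair action turns each entry back into a MAXScript assignment,
# e.g. renderers.current.separateAovFiles=True, executed via rt.Execute.
for object_name, properties in attributes.items():
    for property_name, value in properties.items():
        print(f"{object_name}.{property_name}={value}")
```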
+
+    A Camera instance may only hold a SINGLE camera's transform
+    (and optionally its camera target).
+    """
+
+    order = pyblish.api.ValidatorOrder
+    families = ["camera", "review"]
+    hosts = ["max"]
+    label = "Camera Contents"
+    camera_type = ["$Free_Camera", "$Target_Camera",
+                   "$Physical_Camera", "$Target"]
+
+    def process(self, instance):
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise PublishValidationError(
+                "Camera instance must only include a camera "
+                "(and the camera target). "
+                f"Invalid content: {invalid}")
+
+    def get_invalid(self, instance):
+        """Get members of the instance which are not cameras."""
+        invalid = []
+        container = instance.data["instance_node"]
+        self.log.info(f"Validating camera content for {container}")
+
+        selection_list = instance.data["members"]
+        for sel in selection_list:
+            # convert to string to avoid AttributeError from pymxs wrapper
+            sel_tmp = str(sel)
+            found = any(sel_tmp.startswith(cam) for cam in self.camera_type)
+            if not found:
+                self.log.error(f"Found a non-camera node: {sel_tmp}")
+                invalid.append(sel)
+        return invalid
diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_deadline_publish.py b/client/ayon_core/hosts/max/plugins/publish/validate_deadline_publish.py
new file mode 100644
index 0000000000..2c9ca4ae64
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/validate_deadline_publish.py
@@ -0,0 +1,43 @@
+import os
+import pyblish.api
+from pymxs import runtime as rt
+from ayon_core.pipeline.publish import (
+    RepairAction,
+    ValidateContentsOrder,
+    PublishValidationError,
+    OptionalPyblishPluginMixin
+)
+from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings
+
+
+class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
+                              OptionalPyblishPluginMixin):
+    """Validate that the render output folder contains the scene name,
+    so the same folder is not reused across different submissions.
+    """
+
+    order = ValidateContentsOrder
+    families = ["maxrender"]
+    hosts = ["max"]
+    label = "Render Output for Deadline"
+    optional = True
+    actions = [RepairAction]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+        file = rt.maxFileName
+        filename, ext = os.path.splitext(file)
+        if filename not in rt.rendOutputFilename:
+            raise PublishValidationError(
+                "Render output folder "
+                "doesn't match the max scene name! "
+                "Use the Repair action to "
+                "fix the folder file path."
+            )
+
+    @classmethod
+    def repair(cls, instance):
+        container = instance.data.get("instance_node")
+        RenderSettings().render_output(container)
+        cls.log.debug("Reset the render output folder...")
diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py b/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py
new file mode 100644
index 0000000000..75a83c2b05
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py
@@ -0,0 +1,88 @@
+import pyblish.api
+
+from pymxs import runtime as rt
+from ayon_core.pipeline import (
+    OptionalPyblishPluginMixin
+)
+from ayon_core.pipeline.publish import (
+    RepairAction,
+    ValidateContentsOrder,
+    PublishValidationError,
+    KnownPublishError
+)
+from ayon_core.hosts.max.api.lib import get_frame_range, set_timeline
+
+
+class ValidateFrameRange(pyblish.api.InstancePlugin,
+                         OptionalPyblishPluginMixin):
+    """Validates the frame ranges.
+
+    This is an optional validator checking if the frame range on instance
+    matches the frame range specified for the asset.
+
+    It also validates render frame ranges of render layers.
+
+    Repair action will change everything to match the asset frame range.
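+
+    For example, with illustrative asset data of frameStart=1001,
+    frameEnd=1100 and handles of 10, instances are expected to report
+    frameStartHandle=991 and frameEndHandle=1110.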
+
+    This can be turned off by the artist to allow custom ranges.
+    """
+
+    label = "Validate Frame Range"
+    order = ValidateContentsOrder
+    families = ["camera", "maxrender",
+                "pointcache", "pointcloud",
+                "review", "redshiftproxy"]
+    hosts = ["max"]
+    optional = True
+    actions = [RepairAction]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            self.log.debug("Skipping Validate Frame Range...")
+            return
+
+        frame_range = get_frame_range(
+            asset_doc=instance.data["assetEntity"])
+
+        inst_frame_start = instance.data.get("frameStartHandle")
+        inst_frame_end = instance.data.get("frameEndHandle")
+        if inst_frame_start is None or inst_frame_end is None:
+            raise KnownPublishError(
+                "Missing frame start and frame end on "
+                "instance to validate."
+            )
+        frame_start_handle = frame_range["frameStartHandle"]
+        frame_end_handle = frame_range["frameEndHandle"]
+        errors = []
+        if frame_start_handle != inst_frame_start:
+            errors.append(
+                f"Start frame ({inst_frame_start}) on instance does not match "  # noqa
+                f"the start frame ({frame_start_handle}) set on the asset data. ")  # noqa
+        if frame_end_handle != inst_frame_end:
+            errors.append(
+                f"End frame ({inst_frame_end}) on instance does not match "
+                f"the end frame ({frame_end_handle}) "
+                "from the asset data. ")
+
+        if errors:
+            bullet_point_errors = "\n".join(
+                "- {}".format(error) for error in errors
+            )
+            report = (
+                "Frame range settings are incorrect.\n\n"
+                f"{bullet_point_errors}\n\n"
+                "You can use the repair action to fix it."
+            )
+            raise PublishValidationError(report, title="Frame Range incorrect")
+
+    @classmethod
+    def repair(cls, instance):
+        frame_range = get_frame_range()
+        frame_start_handle = frame_range["frameStartHandle"]
+        frame_end_handle = frame_range["frameEndHandle"]
+
+        if instance.data["family"] == "maxrender":
+            rt.rendStart = frame_start_handle
+            rt.rendEnd = frame_end_handle
+        else:
+            set_timeline(frame_start_handle, frame_end_handle)
diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_instance_has_members.py b/client/ayon_core/hosts/max/plugins/publish/validate_instance_has_members.py
new file mode 100644
index 0000000000..552e9ea0e2
--- /dev/null
+++ b/client/ayon_core/hosts/max/plugins/publish/validate_instance_has_members.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+from ayon_core.pipeline import PublishValidationError
+
+
+class ValidateInstanceHasMembers(pyblish.api.InstancePlugin):
+    """Validates that the instance has members.
+
+    Checks whether the MaxScene container includes any content underneath.
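+
+    The members are the contents collected on the instance node as
+    `instance.data["members"]`; an empty container would otherwise
+    publish nothing.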
+ """ + + order = pyblish.api.ValidatorOrder + families = ["camera", + "model", + "maxScene", + "review", + "pointcache", + "pointcloud", + "redshiftproxy"] + hosts = ["max"] + label = "Container Contents" + + def process(self, instance): + if not instance.data["members"]: + raise PublishValidationError("No content found in the container") diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_loaded_plugin.py b/client/ayon_core/hosts/max/plugins/publish/validate_loaded_plugin.py new file mode 100644 index 0000000000..fe6e32b27b --- /dev/null +++ b/client/ayon_core/hosts/max/plugins/publish/validate_loaded_plugin.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +"""Validator for Loaded Plugin.""" +import os +import pyblish.api +from pymxs import runtime as rt + +from ayon_core.pipeline.publish import ( + RepairAction, + OptionalPyblishPluginMixin, + PublishValidationError +) +from ayon_core.hosts.max.api.lib import get_plugins + + +class ValidateLoadedPlugin(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validates if the specific plugin is loaded in 3ds max. + Studio Admin(s) can add the plugins they want to check in validation + via studio defined project settings + """ + + order = pyblish.api.ValidatorOrder + hosts = ["max"] + label = "Validate Loaded Plugins" + optional = True + actions = [RepairAction] + + family_plugins_mapping = {} + + @classmethod + def get_invalid(cls, instance): + """Plugin entry point.""" + family_plugins_mapping = cls.family_plugins_mapping + if not family_plugins_mapping: + return + + invalid = [] + # Find all plug-in requirements for current instance + instance_families = {instance.data["family"]} + instance_families.update(instance.data.get("families", [])) + cls.log.debug("Checking plug-in validation " + f"for instance families: {instance_families}") + all_required_plugins = set() + + for mapping in family_plugins_mapping: + # Check for matching families + if not mapping: + return + + match_families = {fam.strip() for fam in mapping["families"]} + has_match = "*" in match_families or match_families.intersection( + instance_families) + + if not has_match: + continue + + cls.log.debug( + f"Found plug-in family requirements: {match_families}") + required_plugins = [ + # match lowercase and format with os.environ to allow + # plugin names defined by max version, e.g. {3DSMAX_VERSION} + plugin.format(**os.environ).lower() + for plugin in mapping["plugins"] + # ignore empty fields in settings + if plugin.strip() + ] + + all_required_plugins.update(required_plugins) + + if not all_required_plugins: + # Instance has no plug-in requirements + return + + # get all DLL loaded plugins in Max and their plugin index + available_plugins = { + plugin_name.lower(): index for index, plugin_name in enumerate( + get_plugins()) + } + # validate the required plug-ins + for plugin in sorted(all_required_plugins): + plugin_index = available_plugins.get(plugin) + if plugin_index is None: + debug_msg = ( + f"Plugin {plugin} does not exist" + " in 3dsMax Plugin List." + ) + invalid.append((plugin, debug_msg)) + continue + if not rt.pluginManager.isPluginDllLoaded(plugin_index): + debug_msg = f"Plugin {plugin} not loaded." 
+ invalid.append((plugin, debug_msg)) + return invalid + + def process(self, instance): + if not self.is_active(instance.data): + self.log.debug("Skipping Validate Loaded Plugin...") + return + invalid = self.get_invalid(instance) + if invalid: + bullet_point_invalid_statement = "\n".join( + "- {}".format(message) for _, message in invalid + ) + report = ( + "Required plugins are not loaded.\n\n" + f"{bullet_point_invalid_statement}\n\n" + "You can use repair action to load the plugin." + ) + raise PublishValidationError( + report, title="Missing Required Plugins") + + @classmethod + def repair(cls, instance): + # get all DLL loaded plugins in Max and their plugin index + invalid = cls.get_invalid(instance) + if not invalid: + return + + # get all DLL loaded plugins in Max and their plugin index + available_plugins = { + plugin_name.lower(): index for index, plugin_name in enumerate( + get_plugins()) + } + + for invalid_plugin, _ in invalid: + plugin_index = available_plugins.get(invalid_plugin) + + if plugin_index is None: + cls.log.warning( + f"Can't enable missing plugin: {invalid_plugin}") + continue + + if not rt.pluginManager.isPluginDllLoaded(plugin_index): + rt.pluginManager.loadPluginDll(plugin_index) diff --git a/openpype/hosts/max/plugins/publish/validate_model_contents.py b/client/ayon_core/hosts/max/plugins/publish/validate_model_contents.py similarity index 96% rename from openpype/hosts/max/plugins/publish/validate_model_contents.py rename to client/ayon_core/hosts/max/plugins/publish/validate_model_contents.py index cfe016f03f..9a4d988aa4 100644 --- a/openpype/hosts/max/plugins/publish/validate_model_contents.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_model_contents.py @@ -2,7 +2,7 @@ import pyblish.api from pymxs import runtime as rt -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateModelContent(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/max/plugins/publish/validate_pointcloud.py b/client/ayon_core/hosts/max/plugins/publish/validate_pointcloud.py similarity index 98% rename from openpype/hosts/max/plugins/publish/validate_pointcloud.py rename to client/ayon_core/hosts/max/plugins/publish/validate_pointcloud.py index 54d6d0f11a..a025ed3992 100644 --- a/openpype/hosts/max/plugins/publish/validate_pointcloud.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_pointcloud.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError from pymxs import runtime as rt diff --git a/openpype/hosts/max/plugins/publish/validate_renderable_camera.py b/client/ayon_core/hosts/max/plugins/publish/validate_renderable_camera.py similarity index 89% rename from openpype/hosts/max/plugins/publish/validate_renderable_camera.py rename to client/ayon_core/hosts/max/plugins/publish/validate_renderable_camera.py index 61321661b5..ffd6b183fe 100644 --- a/openpype/hosts/max/plugins/publish/validate_renderable_camera.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_renderable_camera.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin) -from openpype.pipeline.publish import RepairAction -from openpype.hosts.max.api.lib import get_current_renderer +from ayon_core.pipeline.publish import RepairAction +from ayon_core.hosts.max.api.lib import get_current_renderer from 
pymxs import runtime as rt diff --git a/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py b/client/ayon_core/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py similarity index 91% rename from openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py rename to client/ayon_core/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py index bc82f82f3b..de3a806c85 100644 --- a/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError from pymxs import runtime as rt -from openpype.pipeline.publish import RepairAction -from openpype.hosts.max.api.lib import get_current_renderer +from ayon_core.pipeline.publish import RepairAction +from ayon_core.hosts.max.api.lib import get_current_renderer class ValidateRendererRedshiftProxy(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/max/plugins/publish/validate_resolution_setting.py b/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py similarity index 93% rename from openpype/hosts/max/plugins/publish/validate_resolution_setting.py rename to client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py index 1c4b05c556..0058d3b262 100644 --- a/openpype/hosts/max/plugins/publish/validate_resolution_setting.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py @@ -1,13 +1,13 @@ import pyblish.api from pymxs import runtime as rt -from openpype.pipeline import ( +from ayon_core.pipeline import ( OptionalPyblishPluginMixin ) -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairAction, PublishValidationError ) -from openpype.hosts.max.api.lib import reset_scene_resolution +from ayon_core.hosts.max.api.lib import reset_scene_resolution class ValidateResolutionSetting(pyblish.api.InstancePlugin, diff --git a/openpype/hosts/max/plugins/publish/validate_scene_saved.py b/client/ayon_core/hosts/max/plugins/publish/validate_scene_saved.py similarity index 90% rename from openpype/hosts/max/plugins/publish/validate_scene_saved.py rename to client/ayon_core/hosts/max/plugins/publish/validate_scene_saved.py index 8506b17315..3028a55337 100644 --- a/openpype/hosts/max/plugins/publish/validate_scene_saved.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_scene_saved.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError from pymxs import runtime as rt diff --git a/openpype/hosts/max/plugins/publish/validate_tyflow_data.py b/client/ayon_core/hosts/max/plugins/publish/validate_tyflow_data.py similarity index 98% rename from openpype/hosts/max/plugins/publish/validate_tyflow_data.py rename to client/ayon_core/hosts/max/plugins/publish/validate_tyflow_data.py index c0f29422ec..8dd8a1bb68 100644 --- a/openpype/hosts/max/plugins/publish/validate_tyflow_data.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_tyflow_data.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError from pymxs import runtime as rt diff --git a/openpype/hosts/max/startup/startup.ms b/client/ayon_core/hosts/max/startup/startup.ms similarity index 100% rename from 
openpype/hosts/max/startup/startup.ms rename to client/ayon_core/hosts/max/startup/startup.ms diff --git a/client/ayon_core/hosts/max/startup/startup.py b/client/ayon_core/hosts/max/startup/startup.py new file mode 100644 index 0000000000..49a861bad0 --- /dev/null +++ b/client/ayon_core/hosts/max/startup/startup.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +import os +import sys + +# this might happen in some 3dsmax version where PYTHONPATH isn't added +# to sys.path automatically +for path in os.environ["PYTHONPATH"].split(os.pathsep): + if path and path not in sys.path: + sys.path.append(path) + +from ayon_core.hosts.max.api import MaxHost +from ayon_core.pipeline import install_host + +host = MaxHost() +install_host(host) diff --git a/openpype/hosts/maya/__init__.py b/client/ayon_core/hosts/maya/__init__.py similarity index 100% rename from openpype/hosts/maya/__init__.py rename to client/ayon_core/hosts/maya/__init__.py diff --git a/client/ayon_core/hosts/maya/addon.py b/client/ayon_core/hosts/maya/addon.py new file mode 100644 index 0000000000..745850f6a8 --- /dev/null +++ b/client/ayon_core/hosts/maya/addon.py @@ -0,0 +1,49 @@ +import os +from ayon_core.modules import OpenPypeModule, IHostAddon + +MAYA_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class MayaAddon(OpenPypeModule, IHostAddon): + name = "maya" + host_name = "maya" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to PYTHONPATH + new_python_paths = [ + os.path.join(MAYA_ROOT_DIR, "startup") + ] + old_python_path = env.get("PYTHONPATH") or "" + for path in old_python_path.split(os.pathsep): + if not path: + continue + + norm_path = os.path.normpath(path) + if norm_path not in new_python_paths: + new_python_paths.append(norm_path) + + env["PYTHONPATH"] = os.pathsep.join(new_python_paths) + + # Set default environments + envs = { + "AYON_LOG_NO_COLORS": "1", + # For python module 'qtpy' + "QT_API": "PySide2", + # For python module 'Qt' + "QT_PREFERRED_BINDING": "PySide2" + } + for key, value in envs.items(): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(MAYA_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".ma", ".mb"] diff --git a/openpype/hosts/maya/api/__init__.py b/client/ayon_core/hosts/maya/api/__init__.py similarity index 100% rename from openpype/hosts/maya/api/__init__.py rename to client/ayon_core/hosts/maya/api/__init__.py diff --git a/client/ayon_core/hosts/maya/api/action.py b/client/ayon_core/hosts/maya/api/action.py new file mode 100644 index 0000000000..1edca82ee4 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/action.py @@ -0,0 +1,136 @@ +# absolute_import is needed to counter the `module has no cmds error` in Maya +from __future__ import absolute_import + +import pyblish.api + +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline.publish import get_errored_instances_from_context + + +class GenerateUUIDsOnInvalidAction(pyblish.api.Action): + """Generate UUIDs on the invalid nodes in the instance. + + Invalid nodes are those returned by the plugin's `get_invalid` method. + As such it is the plug-in's responsibility to ensure the nodes that + receive new UUIDs are actually invalid. 
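+
+    Referenced nodes are skipped on purpose: regenerating ids on loaded
+    content would end up as reference edits on the loaded files.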
+ + Requires: + - instance.data["asset"] + + """ + + label = "Regenerate UUIDs" + on = "failed" # This action is only available on a failed plug-in + icon = "wrench" # Icon from Awesome Icon + + def process(self, context, plugin): + + from maya import cmds + + self.log.info("Finding bad nodes..") + + errored_instances = get_errored_instances_from_context(context) + + # Apply pyblish logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(errored_instances, plugin) + + # Get the nodes from the all instances that ran through this plug-in + all_invalid = [] + for instance in instances: + invalid = plugin.get_invalid(instance) + + # Don't allow referenced nodes to get their ids regenerated to + # avoid loaded content getting messed up with reference edits + if invalid: + referenced = {node for node in invalid if + cmds.referenceQuery(node, isNodeReferenced=True)} + if referenced: + self.log.warning("Skipping UUID generation on referenced " + "nodes: {}".format(list(referenced))) + invalid = [node for node in invalid + if node not in referenced] + + if invalid: + + self.log.info("Fixing instance {}".format(instance.name)) + self._update_id_attribute(instance, invalid) + + all_invalid.extend(invalid) + + if not all_invalid: + self.log.info("No invalid nodes found.") + return + + all_invalid = list(set(all_invalid)) + self.log.info("Generated ids on nodes: {0}".format(all_invalid)) + + def _update_id_attribute(self, instance, nodes): + """Delete the id attribute + + Args: + instance: The instance we're fixing for + nodes (list): all nodes to regenerate ids on + """ + + from . import lib + + # Expecting this is called on validators in which case 'assetEntity' + # should be always available, but kept a way to query it by name. + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + asset_name = instance.data["asset"] + project_name = instance.context.data["projectName"] + self.log.info(( + "Asset is not stored on instance." + " Querying by name \"{}\" from project \"{}\"" + ).format(asset_name, project_name)) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["_id"] + ) + + for node, _id in lib.generate_ids(nodes, asset_id=asset_doc["_id"]): + lib.set_id(node, _id, overwrite=True) + + +class SelectInvalidAction(pyblish.api.Action): + """Select invalid nodes in Maya when plug-in failed. + + To retrieve the invalid nodes this assumes a static `get_invalid()` + method is available on the plugin. 
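+
+    The return value of `get_invalid()` is expected to be a list or
+    tuple of node names; any other truthy value only logs a warning.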
+ + """ + label = "Select invalid" + on = "failed" # This action is only available on a failed plug-in + icon = "search" # Icon from Awesome Icon + + def process(self, context, plugin): + + try: + from maya import cmds + except ImportError: + raise ImportError("Current host is not Maya") + + errored_instances = get_errored_instances_from_context(context, + plugin=plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid nodes..") + invalid = list() + for instance in errored_instances: + invalid_nodes = plugin.get_invalid(instance) + if invalid_nodes: + if isinstance(invalid_nodes, (list, tuple)): + invalid.extend(invalid_nodes) + else: + self.log.warning("Plug-in returned to be invalid, " + "but has no selectable nodes.") + + # Ensure unique (process each node only once) + invalid = list(set(invalid)) + + if invalid: + self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid)) + cmds.select(invalid, replace=True, noExpand=True) + else: + self.log.info("No invalid nodes found.") + cmds.select(deselect=True) diff --git a/client/ayon_core/hosts/maya/api/commands.py b/client/ayon_core/hosts/maya/api/commands.py new file mode 100644 index 0000000000..b52d5e6c2d --- /dev/null +++ b/client/ayon_core/hosts/maya/api/commands.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +"""OpenPype script commands to be used directly in Maya.""" +from maya import cmds + +from ayon_core.client import get_asset_by_name, get_project +from ayon_core.pipeline import get_current_project_name, get_current_asset_name + + +class ToolWindows: + + _windows = {} + + @classmethod + def get_window(cls, tool): + """Get widget for specific tool. + + Args: + tool (str): Name of the tool. + + Returns: + Stored widget. + + """ + try: + return cls._windows[tool] + except KeyError: + return None + + @classmethod + def set_window(cls, tool, window): + """Set widget for the tool. + + Args: + tool (str): Name of the tool. + window (QtWidgets.QWidget): Widget + + """ + cls._windows[tool] = window + + +def edit_shader_definitions(): + from qtpy import QtWidgets + from ayon_core.hosts.maya.api.shader_definition_editor import ( + ShaderDefinitionsEditor + ) + from ayon_core.tools.utils import qt_app_context + + top_level_widgets = QtWidgets.QApplication.topLevelWidgets() + main_window = next(widget for widget in top_level_widgets + if widget.objectName() == "MayaWindow") + + with qt_app_context(): + window = ToolWindows.get_window("shader_definition_editor") + if not window: + window = ShaderDefinitionsEditor(parent=main_window) + ToolWindows.set_window("shader_definition_editor", window) + window.show() + + +def _resolution_from_document(doc): + if not doc or "data" not in doc: + print("Entered document is not valid. 
\"{}\"".format(str(doc))) + return None + + resolution_width = doc["data"].get("resolutionWidth") + resolution_height = doc["data"].get("resolutionHeight") + # Backwards compatibility + if resolution_width is None or resolution_height is None: + resolution_width = doc["data"].get("resolution_width") + resolution_height = doc["data"].get("resolution_height") + + # Make sure both width and height are set + if resolution_width is None or resolution_height is None: + cmds.warning( + "No resolution information found for \"{}\"".format(doc["name"]) + ) + return None + + return int(resolution_width), int(resolution_height) + + +def reset_resolution(): + # Default values + resolution_width = 1920 + resolution_height = 1080 + + # Get resolution from asset + project_name = get_current_project_name() + asset_name = get_current_asset_name() + asset_doc = get_asset_by_name(project_name, asset_name) + resolution = _resolution_from_document(asset_doc) + # Try get resolution from project + if resolution is None: + # TODO go through visualParents + print(( + "Asset \"{}\" does not have set resolution." + " Trying to get resolution from project" + ).format(asset_name)) + project_doc = get_project(project_name) + resolution = _resolution_from_document(project_doc) + + if resolution is None: + msg = "Using default resolution {}x{}" + else: + resolution_width, resolution_height = resolution + msg = "Setting resolution to {}x{}" + + print(msg.format(resolution_width, resolution_height)) + + # set for different renderers + # arnold, vray, redshift, renderman + + renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer").lower() + # handle various renderman names + if renderer.startswith("renderman"): + renderer = "renderman" + + # default attributes are usable for Arnold, Renderman and Redshift + width_attr_name = "defaultResolution.width" + height_attr_name = "defaultResolution.height" + + # Vray has its own way + if renderer == "vray": + width_attr_name = "vraySettings.width" + height_attr_name = "vraySettings.height" + + cmds.setAttr(width_attr_name, resolution_width) + cmds.setAttr(height_attr_name, resolution_height) diff --git a/openpype/hosts/maya/api/customize.py b/client/ayon_core/hosts/maya/api/customize.py similarity index 98% rename from openpype/hosts/maya/api/customize.py rename to client/ayon_core/hosts/maya/api/customize.py index f4c4d6ed88..da046b538d 100644 --- a/openpype/hosts/maya/api/customize.py +++ b/client/ayon_core/hosts/maya/api/customize.py @@ -8,8 +8,8 @@ import maya.cmds as cmds import maya.mel as mel -from openpype import resources -from openpype.tools.utils import host_tools +from ayon_core import resources +from ayon_core.tools.utils import host_tools from .lib import get_main_window from ..tools import show_look_assigner diff --git a/openpype/hosts/maya/api/exitstack.py b/client/ayon_core/hosts/maya/api/exitstack.py similarity index 98% rename from openpype/hosts/maya/api/exitstack.py rename to client/ayon_core/hosts/maya/api/exitstack.py index d151ee16d7..5eb7e15784 100644 --- a/openpype/hosts/maya/api/exitstack.py +++ b/client/ayon_core/hosts/maya/api/exitstack.py @@ -6,7 +6,7 @@ Instead of using ExitStack from contextlib, use it from this module: ->>> from openpype.hosts.maya.api.exitstack import ExitStack +>>> from ayon_core.hosts.maya.api.exitstack import ExitStack It will provide the appropriate ExitStack implementation for the current running Python version. 
diff --git a/openpype/hosts/maya/api/fbx.py b/client/ayon_core/hosts/maya/api/fbx.py similarity index 99% rename from openpype/hosts/maya/api/fbx.py rename to client/ayon_core/hosts/maya/api/fbx.py index c8f4050bc1..97e95d2ec4 100644 --- a/openpype/hosts/maya/api/fbx.py +++ b/client/ayon_core/hosts/maya/api/fbx.py @@ -6,7 +6,7 @@ from maya import cmds # noqa import maya.mel as mel # noqa -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.hosts.maya.api.lib import maintained_selection class FBXExtractor: diff --git a/openpype/hosts/maya/api/gltf.py b/client/ayon_core/hosts/maya/api/gltf.py similarity index 100% rename from openpype/hosts/maya/api/gltf.py rename to client/ayon_core/hosts/maya/api/gltf.py diff --git a/client/ayon_core/hosts/maya/api/lib.py b/client/ayon_core/hosts/maya/api/lib.py new file mode 100644 index 0000000000..7b791c3d51 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/lib.py @@ -0,0 +1,4240 @@ +"""Standalone helper functions""" + +import os +import copy +from pprint import pformat +import sys +import uuid +import re + +import json +import logging +import contextlib +import capture +from .exitstack import ExitStack +from collections import OrderedDict, defaultdict +from math import ceil +from six import string_types + +from maya import cmds, mel +from maya.api import OpenMaya + +from ayon_core.client import ( + get_project, + get_asset_by_name, + get_subsets, + get_last_versions, + get_representation_by_name +) +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( + get_current_project_name, + get_current_asset_name, + get_current_task_name, + discover_loader_plugins, + loaders_from_representation, + get_representation_path, + load_container, + registered_host +) +from ayon_core.lib import NumberDef +from ayon_core.pipeline.context_tools import get_current_project_asset +from ayon_core.pipeline.create import CreateContext +from ayon_core.lib.profiles_filtering import filter_profiles + + +self = sys.modules[__name__] +self._parent = None + +log = logging.getLogger(__name__) + +IS_HEADLESS = not hasattr(cmds, "about") or cmds.about(batch=True) +ATTRIBUTE_DICT = {"int": {"attributeType": "long"}, + "str": {"dataType": "string"}, + "unicode": {"dataType": "string"}, + "float": {"attributeType": "double"}, + "bool": {"attributeType": "bool"}} + +SHAPE_ATTRS = {"castsShadows", + "receiveShadows", + "motionBlur", + "primaryVisibility", + "smoothShading", + "visibleInReflections", + "visibleInRefractions", + "doubleSided", + "opposite"} + + +DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0] + +# The maya alembic export types +_alembic_options = { + "startFrame": float, + "endFrame": float, + "frameRange": str, # "start end"; overrides startFrame & endFrame + "eulerFilter": bool, + "frameRelativeSample": float, + "noNormals": bool, + "renderableOnly": bool, + "step": float, + "stripNamespaces": bool, + "uvWrite": bool, + "wholeFrameGeo": bool, + "worldSpace": bool, + "writeVisibility": bool, + "writeColorSets": bool, + "writeFaceSets": bool, + "writeCreases": bool, # Maya 2015 Ext1+ + "writeUVSets": bool, # Maya 2017+ + "dataFormat": str, + "root": (list, tuple), + "attr": (list, tuple), + "attrPrefix": (list, tuple), + "userAttr": (list, tuple), + "melPerFrameCallback": str, + "melPostJobCallback": str, + "pythonPerFrameCallback": str, + "pythonPostJobCallback": str, + "selection": bool +} + +INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000} +FLOAT_FPS = 
{23.98, 23.976, 29.97, 47.952, 59.94} + + +DISPLAY_LIGHTS_ENUM = [ + {"label": "Use Project Settings", "value": "project_settings"}, + {"label": "Default Lighting", "value": "default"}, + {"label": "All Lights", "value": "all"}, + {"label": "Selected Lights", "value": "selected"}, + {"label": "Flat Lighting", "value": "flat"}, + {"label": "No Lights", "value": "none"} +] + + +def get_main_window(): + """Acquire Maya's main window""" + from qtpy import QtWidgets + + if self._parent is None: + self._parent = { + widget.objectName(): widget + for widget in QtWidgets.QApplication.topLevelWidgets() + }["MayaWindow"] + return self._parent + + +@contextlib.contextmanager +def suspended_refresh(suspend=True): + """Suspend viewport refreshes + + cmds.ogs(pause=True) is a toggle so we cant pass False. + """ + if IS_HEADLESS: + yield + return + + original_state = cmds.ogs(query=True, pause=True) + try: + if suspend and not original_state: + cmds.ogs(pause=True) + yield + finally: + if suspend and not original_state: + cmds.ogs(pause=True) + + +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context + + Example: + >>> scene = cmds.file(new=True, force=True) + >>> node = cmds.createNode("transform", name="Test") + >>> cmds.select("persp") + >>> with maintained_selection(): + ... cmds.select("Test", replace=True) + >>> "Test" in cmds.ls(selection=True) + False + + """ + + previous_selection = cmds.ls(selection=True) + try: + yield + finally: + if previous_selection: + cmds.select(previous_selection, + replace=True, + noExpand=True) + else: + cmds.select(clear=True) + + +def reload_all_udim_tile_previews(): + """Regenerate all UDIM tile preview in texture file""" + for texture_file in cmds.ls(type="file"): + if cmds.getAttr("{}.uvTilingMode".format(texture_file)) > 0: + cmds.ogs(regenerateUVTilePreview=texture_file) + + +@contextlib.contextmanager +def panel_camera(panel, camera): + """Set modelPanel's camera during the context. + + Arguments: + panel (str): modelPanel name. + camera (str): camera name. + + """ + original_camera = cmds.modelPanel(panel, query=True, camera=True) + try: + cmds.modelPanel(panel, edit=True, camera=camera) + yield + finally: + cmds.modelPanel(panel, edit=True, camera=original_camera) + + +def render_capture_preset(preset): + """Capture playblast with a preset. + + To generate the preset use `generate_capture_preset`. + + Args: + preset (dict): preset options + + Returns: + str: Output path of `capture.capture` + """ + + # Force a refresh at the start of the timeline + # TODO (Question): Why do we need to do this? What bug does it solve? + # Is this for simulations? 
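+    # The jog below (forced refresh, one frame back, one frame forward)
+    # triggers a full scene re-evaluation, presumably so the first
+    # playblast frame is not captured from a stale viewport.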
+ cmds.refresh(force=True) + refresh_frame_int = int(cmds.playbackOptions(query=True, minTime=True)) + cmds.currentTime(refresh_frame_int - 1, edit=True) + cmds.currentTime(refresh_frame_int, edit=True) + log.debug( + "Using preset: {}".format( + json.dumps(preset, indent=4, sort_keys=True) + ) + ) + preset = copy.deepcopy(preset) + # not supported by `capture` so we pop it off of the preset + reload_textures = preset["viewport_options"].pop("loadTextures", False) + panel = preset.pop("panel") + with ExitStack() as stack: + stack.enter_context(maintained_time()) + stack.enter_context(panel_camera(panel, preset["camera"])) + stack.enter_context(viewport_default_options(panel, preset)) + if reload_textures: + # Force immediate texture loading when to ensure + # all textures have loaded before the playblast starts + stack.enter_context(material_loading_mode(mode="immediate")) + # Regenerate all UDIM tiles previews + reload_all_udim_tile_previews() + path = capture.capture(log=self.log, **preset) + + return path + + +def generate_capture_preset(instance, camera, path, + start=None, end=None, capture_preset=None): + """Function for getting all the data of preset options for + playblast capturing + + Args: + instance (pyblish.api.Instance): instance + camera (str): review camera + path (str): filepath + start (int): frameStart + end (int): frameEnd + capture_preset (dict): capture preset + + Returns: + dict: Resulting preset + """ + preset = load_capture_preset(data=capture_preset) + + preset["camera"] = camera + preset["start_frame"] = start + preset["end_frame"] = end + preset["filename"] = path + preset["overwrite"] = True + preset["panel"] = instance.data["panel"] + + # Disable viewer since we use the rendering logic for publishing + # We don't want to open the generated playblast in a viewer directly. + preset["viewer"] = False + + # "isolate_view" will already have been applied at creation, so we'll + # ignore it here. + preset.pop("isolate_view") + + # Set resolution variables from capture presets + width_preset = capture_preset["Resolution"]["width"] + height_preset = capture_preset["Resolution"]["height"] + + # Set resolution variables from asset values + asset_data = instance.data["assetEntity"]["data"] + asset_width = asset_data.get("resolutionWidth") + asset_height = asset_data.get("resolutionHeight") + review_instance_width = instance.data.get("review_width") + review_instance_height = instance.data.get("review_height") + + # Use resolution from instance if review width/height is set + # Otherwise use the resolution from preset if it has non-zero values + # Otherwise fall back to asset width x height + # Else define no width, then `capture.capture` will use render resolution + if review_instance_width and review_instance_height: + preset["width"] = review_instance_width + preset["height"] = review_instance_height + elif width_preset and height_preset: + preset["width"] = width_preset + preset["height"] = height_preset + elif asset_width and asset_height: + preset["width"] = asset_width + preset["height"] = asset_height + + # Isolate view is requested by having objects in the set besides a + # camera. If there is only 1 member it'll be the camera because we + # validate to have 1 camera only. 
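+    # e.g. setMembers == [camera] -> nothing to isolate, whereas
+    # setMembers == [camera, mesh1, mesh2] -> isolate those members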
+ if instance.data["isolate"] and len(instance.data["setMembers"]) > 1: + preset["isolate"] = instance.data["setMembers"] + + # Override camera options + # Enforce persisting camera depth of field + camera_options = preset.setdefault("camera_options", {}) + camera_options["depthOfField"] = cmds.getAttr( + "{0}.depthOfField".format(camera) + ) + + # Use Pan/Zoom from instance data instead of from preset + preset.pop("pan_zoom", None) + camera_options["panZoomEnabled"] = instance.data["panZoom"] + + # Override viewport options by instance data + viewport_options = preset.setdefault("viewport_options", {}) + viewport_options["displayLights"] = instance.data["displayLights"] + viewport_options["imagePlane"] = instance.data.get("imagePlane", True) + + # Override transparency if requested. + transparency = instance.data.get("transparency", 0) + if transparency != 0: + preset["viewport2_options"]["transparencyAlgorithm"] = transparency + + # Update preset with current panel setting + # if override_viewport_options is turned off + if not capture_preset["Viewport Options"]["override_viewport_options"]: + panel_preset = capture.parse_view(preset["panel"]) + panel_preset.pop("camera") + preset.update(panel_preset) + + return preset + + +@contextlib.contextmanager +def viewport_default_options(panel, preset): + """Context manager used by `render_capture_preset`. + + We need to explicitly enable some viewport changes so the viewport is + refreshed ahead of playblasting. + + """ + # TODO: Clarify in the docstring WHY we need to set it ahead of + # playblasting. What issues does it solve? + viewport_defaults = {} + try: + keys = [ + "useDefaultMaterial", + "wireframeOnShaded", + "xray", + "jointXray", + "backfaceCulling", + "textures" + ] + for key in keys: + viewport_defaults[key] = cmds.modelEditor( + panel, query=True, **{key: True} + ) + if preset["viewport_options"].get(key): + cmds.modelEditor( + panel, edit=True, **{key: True} + ) + yield + finally: + # Restoring viewport options. + if viewport_defaults: + cmds.modelEditor( + panel, edit=True, **viewport_defaults + ) + + +@contextlib.contextmanager +def material_loading_mode(mode="immediate"): + """Set material loading mode during context""" + original = cmds.displayPref(query=True, materialLoadingMode=True) + cmds.displayPref(materialLoadingMode=mode) + try: + yield + finally: + cmds.displayPref(materialLoadingMode=original) + + +def get_namespace(node): + """Return namespace of given node""" + node_name = node.rsplit("|", 1)[-1] + if ":" in node_name: + return node_name.rsplit(":", 1)[0] + else: + return "" + + +def strip_namespace(node, namespace): + """Strip given namespace from node path. + + The namespace will only be stripped from names + if it starts with that namespace. If the namespace + occurs within another namespace it's not removed. + + Examples: + >>> strip_namespace("namespace:node", namespace="namespace:") + "node" + >>> strip_namespace("hello:world:node", namespace="hello:world") + "node" + >>> strip_namespace("hello:world:node", namespace="hello") + "world:node" + >>> strip_namespace("hello:world:node", namespace="world") + "hello:world:node" + >>> strip_namespace("ns:group|ns:node", namespace="ns") + "group|node" + + Returns: + str: Node name without given starting namespace. 
+ + """ + + # Ensure namespace ends with `:` + if not namespace.endswith(":"): + namespace = "{}:".format(namespace) + + # The long path for a node can also have the namespace + # in its parents so we need to remove it from each + return "|".join( + name[len(namespace):] if name.startswith(namespace) else name + for name in node.split("|") + ) + + +def get_custom_namespace(custom_namespace): + """Return unique namespace. + + The input namespace can contain a single group + of '#' number tokens to indicate where the namespace's + unique index should go. The amount of tokens defines + the zero padding of the number, e.g ### turns into 001. + + Warning: Note that a namespace will always be + prefixed with a _ if it starts with a digit + + Example: + >>> get_custom_namespace("myspace_##_") + # myspace_01_ + >>> get_custom_namespace("##_myspace") + # _01_myspace + >>> get_custom_namespace("myspace##") + # myspace01 + + """ + split = re.split("([#]+)", custom_namespace, 1) + + if len(split) == 3: + base, padding, suffix = split + padding = "%0{}d".format(len(padding)) + else: + base = split[0] + padding = "%02d" # default padding + suffix = "" + + return unique_namespace( + base, + format=padding, + prefix="_" if not base or base[0].isdigit() else "", + suffix=suffix + ) + + +def unique_namespace(namespace, format="%02d", prefix="", suffix=""): + """Return unique namespace + + Arguments: + namespace (str): Name of namespace to consider + format (str, optional): Formatting of the given iteration number + suffix (str, optional): Only consider namespaces with this suffix. + + >>> unique_namespace("bar") + # bar01 + >>> unique_namespace(":hello") + # :hello01 + >>> unique_namespace("bar:", suffix="_NS") + # bar01_NS: + + """ + + def current_namespace(): + current = cmds.namespaceInfo(currentNamespace=True, + absoluteName=True) + # When inside a namespace Maya adds no trailing : + if not current.endswith(":"): + current += ":" + return current + + # Always check against the absolute namespace root + # There's no clash with :x if we're defining namespace :a:x + ROOT = ":" if namespace.startswith(":") else current_namespace() + + # Strip trailing `:` tokens since we might want to add a suffix + start = ":" if namespace.startswith(":") else "" + end = ":" if namespace.endswith(":") else "" + namespace = namespace.strip(":") + if ":" in namespace: + # Split off any nesting that we don't uniqify anyway. + parents, namespace = namespace.rsplit(":", 1) + start += parents + ":" + ROOT += start + + def exists(n): + # Check for clash with nodes and namespaces + fullpath = ROOT + n + return cmds.objExists(fullpath) or cmds.namespace(exists=fullpath) + + iteration = 1 + while True: + nr_namespace = namespace + format % iteration + unique = prefix + nr_namespace + suffix + + if not exists(unique): + return start + unique + end + + iteration += 1 + + +def read(node): + """Return user-defined attributes from `node`""" + + data = dict() + + for attr in cmds.listAttr(node, userDefined=True) or list(): + try: + value = cmds.getAttr(node + "." + attr, asString=True) + + except RuntimeError: + # For Message type attribute or others that have connections, + # take source node name as value. + source = cmds.listConnections(node + "." + attr, + source=True, + destination=False) + source = cmds.ls(source, long=True) or [None] + value = source[0] + + except ValueError: + # Some attributes cannot be read directly, + # such as mesh and color attributes. 
These
+            # are considered non-essential to this
+            # particular publishing pipeline.
+            value = None
+
+        data[attr] = value
+
+    return data
+
+
+def matrix_equals(a, b, tolerance=1e-10):
+    """
+    Compares two matrices with an imperfection tolerance
+
+    Args:
+        a (list, tuple): the matrix to check
+        b (list, tuple): the matrix to check against
+        tolerance (float): the precision of the differences
+
+    Returns:
+        bool : True or False
+
+    """
+    if not all(abs(x - y) < tolerance for x, y in zip(a, b)):
+        return False
+    return True
+
+
+def float_round(num, places=0, direction=ceil):
+    return direction(num * (10**places)) / float(10**places)
+
+
+def pairwise(iterable):
+    """s -> (s0,s1), (s2,s3), (s4, s5), ..."""
+    from six.moves import zip
+
+    a = iter(iterable)
+    return zip(a, a)
+
+
+def collect_animation_defs(fps=False):
+    """Get the basic animation attribute definitions for the publisher.
+
+    Returns:
+        list[NumberDef]: List of number attribute definitions.
+
+    """
+
+    # get scene values as defaults
+    frame_start = cmds.playbackOptions(query=True, minTime=True)
+    frame_end = cmds.playbackOptions(query=True, maxTime=True)
+    frame_start_handle = cmds.playbackOptions(
+        query=True, animationStartTime=True
+    )
+    frame_end_handle = cmds.playbackOptions(query=True, animationEndTime=True)
+
+    handle_start = frame_start - frame_start_handle
+    handle_end = frame_end_handle - frame_end
+
+    # build attributes
+    defs = [
+        NumberDef("frameStart",
+                  label="Frame Start",
+                  default=frame_start,
+                  decimals=0),
+        NumberDef("frameEnd",
+                  label="Frame End",
+                  default=frame_end,
+                  decimals=0),
+        NumberDef("handleStart",
+                  label="Handle Start",
+                  default=handle_start,
+                  decimals=0),
+        NumberDef("handleEnd",
+                  label="Handle End",
+                  default=handle_end,
+                  decimals=0),
+        NumberDef("step",
+                  label="Step size",
+                  tooltip="A smaller step size means more samples and larger "
+                          "output files.\n"
+                          "A 1.0 step size is a single sample every frame.\n"
+                          "A 0.5 step size is two samples per frame.\n"
+                          "A 0.2 step size is five samples per frame.",
+                  default=1.0,
+                  decimals=3),
+    ]
+
+    if fps:
+        current_fps = mel.eval('currentTimeUnitToFPS()')
+        fps_def = NumberDef(
+            "fps", label="FPS", default=current_fps, decimals=5
+        )
+        defs.append(fps_def)
+
+    return defs
+
+
+def imprint(node, data):
+    """Write `data` to `node` as userDefined attributes
+
+    Arguments:
+        node (str): Long name of node
+        data (dict): Dictionary of key/value pairs
+
+    Example:
+        >>> from maya import cmds
+        >>> def compute():
+        ...     return 6
+        ...
+        >>> cube, generator = cmds.polyCube()
+        >>> imprint(cube, {
+        ...     "regularString": "myFamily",
+        ...     "computedValue": lambda: compute()
+        ... })
+        ...
+ >>> cmds.getAttr(cube + ".computedValue") + 6 + + """ + + for key, value in data.items(): + + if callable(value): + # Support values evaluated at imprint + value = value() + + if isinstance(value, bool): + add_type = {"attributeType": "bool"} + set_type = {"keyable": False, "channelBox": True} + elif isinstance(value, string_types): + add_type = {"dataType": "string"} + set_type = {"type": "string"} + elif isinstance(value, int): + add_type = {"attributeType": "long"} + set_type = {"keyable": False, "channelBox": True} + elif isinstance(value, float): + add_type = {"attributeType": "double"} + set_type = {"keyable": False, "channelBox": True} + elif isinstance(value, (list, tuple)): + add_type = {"attributeType": "enum", "enumName": ":".join(value)} + set_type = {"keyable": False, "channelBox": True} + value = 0 # enum default + else: + raise TypeError("Unsupported type: %r" % type(value)) + + cmds.addAttr(node, longName=key, **add_type) + cmds.setAttr(node + "." + key, value, **set_type) + + +def lsattr(attr, value=None): + """Return nodes matching `key` and `value` + + Arguments: + attr (str): Name of Maya attribute + value (object, optional): Value of attribute. If none + is provided, return all nodes with this attribute. + + Example: + >> lsattr("id", "myId") + ["myNode"] + >> lsattr("id") + ["myNode", "myOtherNode"] + + """ + + if value is None: + return cmds.ls("*.%s" % attr, + recursive=True, + objectsOnly=True, + long=True) + return lsattrs({attr: value}) + + +def lsattrs(attrs): + """Return nodes with the given attribute(s). + + Arguments: + attrs (dict): Name and value pairs of expected matches + + Example: + >>> # Return nodes with an `age` of five. + >>> lsattrs({"age": "five"}) + >>> # Return nodes with both `age` and `color` of five and blue. + >>> lsattrs({"age": "five", "color": "blue"}) + + Return: + list: matching nodes. + + """ + + dep_fn = OpenMaya.MFnDependencyNode() + dag_fn = OpenMaya.MFnDagNode() + selection_list = OpenMaya.MSelectionList() + + first_attr = next(iter(attrs)) + + try: + selection_list.add("*.{0}".format(first_attr), + searchChildNamespaces=True) + except RuntimeError as exc: + if str(exc).endswith("Object does not exist"): + return [] + + matches = set() + for i in range(selection_list.length()): + node = selection_list.getDependNode(i) + if node.hasFn(OpenMaya.MFn.kDagNode): + fn_node = dag_fn.setObject(node) + full_path_names = [path.fullPathName() + for path in fn_node.getAllPaths()] + else: + fn_node = dep_fn.setObject(node) + full_path_names = [fn_node.name()] + + for attr in attrs: + try: + plug = fn_node.findPlug(attr, True) + if plug.asString() != attrs[attr]: + break + except RuntimeError: + break + else: + matches.update(full_path_names) + + return list(matches) + + +@contextlib.contextmanager +def attribute_values(attr_values): + """Remaps node attributes to values during context. + + Arguments: + attr_values (dict): Dictionary with (attr, value) + + """ + + original = [(attr, cmds.getAttr(attr)) for attr in attr_values] + try: + for attr, value in attr_values.items(): + if isinstance(value, string_types): + cmds.setAttr(attr, value, type="string") + else: + cmds.setAttr(attr, value) + yield + finally: + for attr, value in original: + if isinstance(value, string_types): + cmds.setAttr(attr, value, type="string") + elif value is None and cmds.getAttr(attr, type=True) == "string": + # In some cases the maya.cmds.getAttr command returns None + # for string attributes but this value cannot assigned. 
+ # Note: After setting it once to "" it will then return "" + # instead of None. So this would only happen once. + cmds.setAttr(attr, "", type="string") + else: + cmds.setAttr(attr, value) + + +@contextlib.contextmanager +def keytangent_default(in_tangent_type='auto', + out_tangent_type='auto'): + """Set the default keyTangent for new keys during this context""" + + original_itt = cmds.keyTangent(query=True, g=True, itt=True)[0] + original_ott = cmds.keyTangent(query=True, g=True, ott=True)[0] + cmds.keyTangent(g=True, itt=in_tangent_type) + cmds.keyTangent(g=True, ott=out_tangent_type) + try: + yield + finally: + cmds.keyTangent(g=True, itt=original_itt) + cmds.keyTangent(g=True, ott=original_ott) + + +@contextlib.contextmanager +def undo_chunk(): + """Open a undo chunk during context.""" + + try: + cmds.undoInfo(openChunk=True) + yield + finally: + cmds.undoInfo(closeChunk=True) + + +@contextlib.contextmanager +def evaluation(mode="off"): + """Set the evaluation manager during context. + + Arguments: + mode (str): The mode to apply during context. + "off": The standard DG evaluation (stable) + "serial": A serial DG evaluation + "parallel": The Maya 2016+ parallel evaluation + + """ + + original = cmds.evaluationManager(query=True, mode=1)[0] + try: + cmds.evaluationManager(mode=mode) + yield + finally: + cmds.evaluationManager(mode=original) + + +@contextlib.contextmanager +def empty_sets(sets, force=False): + """Remove all members of the sets during the context""" + + assert isinstance(sets, (list, tuple)) + + original = dict() + original_connections = [] + + # Store original state + for obj_set in sets: + members = cmds.sets(obj_set, query=True) + original[obj_set] = members + + try: + for obj_set in sets: + cmds.sets(clear=obj_set) + if force: + # Break all connections if force is enabled, this way we + # prevent Maya from exporting any reference nodes which are + # connected with placeHolder[x] attributes + plug = "%s.dagSetMembers" % obj_set + connections = cmds.listConnections(plug, + source=True, + destination=False, + plugs=True, + connections=True) or [] + original_connections.extend(connections) + for dest, src in pairwise(connections): + cmds.disconnectAttr(src, dest) + yield + finally: + + for dest, src in pairwise(original_connections): + cmds.connectAttr(src, dest) + + # Restore original members + _iteritems = getattr(original, "iteritems", original.items) + for origin_set, members in _iteritems(): + cmds.sets(members, forceElement=origin_set) + + +@contextlib.contextmanager +def renderlayer(layer): + """Set the renderlayer during the context + + Arguments: + layer (str): Name of layer to switch to. + + """ + + original = cmds.editRenderLayerGlobals(query=True, + currentRenderLayer=True) + + try: + cmds.editRenderLayerGlobals(currentRenderLayer=layer) + yield + finally: + cmds.editRenderLayerGlobals(currentRenderLayer=original) + + +class delete_after(object): + """Context Manager that will delete collected nodes after exit. + + This allows to ensure the nodes added to the context are deleted + afterwards. This is useful if you want to ensure nodes are deleted + even if an error is raised. 
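+
+    Nodes are only deleted when some were collected; with an empty
+    bin the context exits without calling `cmds.delete`.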
+ + Examples: + with delete_after() as delete_bin: + cube = maya.cmds.polyCube() + delete_bin.extend(cube) + # cube exists + # cube deleted + + """ + + def __init__(self, nodes=None): + + self._nodes = list() + + if nodes: + self.extend(nodes) + + def append(self, node): + self._nodes.append(node) + + def extend(self, nodes): + self._nodes.extend(nodes) + + def __iter__(self): + return iter(self._nodes) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + if self._nodes: + cmds.delete(self._nodes) + + +def get_current_renderlayer(): + return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) + + +def get_renderer(layer): + with renderlayer(layer): + return cmds.getAttr("defaultRenderGlobals.currentRenderer") + + +@contextlib.contextmanager +def no_undo(flush=False): + """Disable the undo queue during the context + + Arguments: + flush (bool): When True the undo queue will be emptied when returning + from the context losing all undo history. Defaults to False. + + """ + original = cmds.undoInfo(query=True, state=True) + keyword = 'state' if flush else 'stateWithoutFlush' + + try: + cmds.undoInfo(**{keyword: False}) + yield + finally: + cmds.undoInfo(**{keyword: original}) + + +def get_shader_assignments_from_shapes(shapes, components=True): + """Return the shape assignment per related shading engines. + + Returns a dictionary where the keys are shadingGroups and the values are + lists of assigned shapes or shape-components. + + Since `maya.cmds.sets` returns shader members on the shapes as components + on the transform we correct that in this method too. + + For the 'shapes' this will return a dictionary like: + { + "shadingEngineX": ["nodeX", "nodeY"], + "shadingEngineY": ["nodeA", "nodeB"] + } + + Args: + shapes (list): The shapes to collect the assignments for. + components (bool): Whether to include the component assignments. + + Returns: + dict: The {shadingEngine: shapes} relationships + + """ + + shapes = cmds.ls(shapes, + long=True, + shapes=True, + objectsOnly=True) + if not shapes: + return {} + + # Collect shading engines and their shapes + assignments = defaultdict(list) + for shape in shapes: + + # Get unique shading groups for the shape + shading_groups = cmds.listConnections(shape, + source=False, + destination=True, + plugs=False, + connections=False, + type="shadingEngine") or [] + shading_groups = list(set(shading_groups)) + for shading_group in shading_groups: + assignments[shading_group].append(shape) + + if components: + # Note: Components returned from maya.cmds.sets are "listed" as if + # being assigned to the transform like: pCube1.f[0] as opposed + # to pCubeShape1.f[0] so we correct that here too. + + # Build a mapping from parent to shapes to include in lookup. + transforms = {shape.rsplit("|", 1)[0]: shape for shape in shapes} + lookup = set(shapes) | set(transforms.keys()) + + component_assignments = defaultdict(list) + for shading_group in assignments.keys(): + members = cmds.ls(cmds.sets(shading_group, query=True), long=True) + for member in members: + + node = member.split(".", 1)[0] + if node not in lookup: + continue + + # Component + if "." 
in member: + + # Fix transform to shape as shaders are assigned to shapes + if node in transforms: + shape = transforms[node] + component = member.split(".", 1)[1] + member = "{0}.{1}".format(shape, component) + + component_assignments[shading_group].append(member) + assignments = component_assignments + + return dict(assignments) + + +@contextlib.contextmanager +def shader(nodes, shadingEngine="initialShadingGroup"): + """Assign a shader to nodes during the context""" + + shapes = cmds.ls(nodes, dag=1, objectsOnly=1, shapes=1, long=1) + original = get_shader_assignments_from_shapes(shapes) + + try: + # Assign override shader + if shapes: + cmds.sets(shapes, edit=True, forceElement=shadingEngine) + yield + finally: + + # Assign original shaders + for sg, members in original.items(): + if members: + cmds.sets(members, edit=True, forceElement=sg) + + +@contextlib.contextmanager +def displaySmoothness(nodes, + divisionsU=0, + divisionsV=0, + pointsWire=4, + pointsShaded=1, + polygonObject=1): + """Set the displaySmoothness during the context""" + + # Ensure only non-intermediate shapes + nodes = cmds.ls(nodes, + dag=1, + shapes=1, + long=1, + noIntermediate=True) + + def parse(node): + """Parse the current state of a node""" + state = {} + for key in ["divisionsU", + "divisionsV", + "pointsWire", + "pointsShaded", + "polygonObject"]: + value = cmds.displaySmoothness(node, query=1, **{key: True}) + if value is not None: + state[key] = value[0] + return state + + originals = dict((node, parse(node)) for node in nodes) + + try: + # Apply current state + cmds.displaySmoothness(nodes, + divisionsU=divisionsU, + divisionsV=divisionsV, + pointsWire=pointsWire, + pointsShaded=pointsShaded, + polygonObject=polygonObject) + yield + finally: + # Revert state + _iteritems = getattr(originals, "iteritems", originals.items) + for node, state in _iteritems(): + if state: + cmds.displaySmoothness(node, **state) + + +@contextlib.contextmanager +def no_display_layers(nodes): + """Ensure nodes are not in a displayLayer during context. + + Arguments: + nodes (list): The nodes to remove from any display layer. + + """ + + # Ensure long names + nodes = cmds.ls(nodes, long=True) + + # Get the original state + lookup = set(nodes) + original = {} + for layer in cmds.ls(type='displayLayer'): + + # Skip default layer + if layer == "defaultLayer": + continue + + members = cmds.editDisplayLayerMembers(layer, + query=True, + fullNames=True) + if not members: + continue + members = set(members) + + included = lookup.intersection(members) + if included: + original[layer] = list(included) + + try: + # Add all nodes to default layer + cmds.editDisplayLayerMembers("defaultLayer", nodes, noRecurse=True) + yield + finally: + # Restore original members + _iteritems = getattr(original, "iteritems", original.items) + for layer, members in _iteritems(): + cmds.editDisplayLayerMembers(layer, members, noRecurse=True) + + +@contextlib.contextmanager +def namespaced(namespace, new=True, relative_names=None): + """Work inside namespace during context + + Args: + new (bool): When enabled this will rename the namespace to a unique + namespace if the input namespace already exists. 
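+        relative_names (bool): When not None, set Maya's relative namespace
+            lookup to this state during the context and restore the
+            original state afterwards.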
+ + Yields: + str: The namespace that is used during the context + + """ + original = cmds.namespaceInfo(cur=True, absoluteName=True) + original_relative_names = cmds.namespace(query=True, relativeNames=True) + if new: + namespace = unique_namespace(namespace) + cmds.namespace(add=namespace) + if relative_names is not None: + cmds.namespace(relativeNames=relative_names) + try: + cmds.namespace(set=namespace) + yield namespace + finally: + cmds.namespace(set=original) + if relative_names is not None: + cmds.namespace(relativeNames=original_relative_names) + + +@contextlib.contextmanager +def maintained_selection_api(): + """Maintain selection using the Maya Python API. + + Warning: This is *not* added to the undo stack. + + """ + original = OpenMaya.MGlobal.getActiveSelectionList() + try: + yield + finally: + OpenMaya.MGlobal.setActiveSelectionList(original) + + +@contextlib.contextmanager +def tool(context): + """Set a tool context during the context manager. + + """ + original = cmds.currentCtx() + try: + cmds.setToolTo(context) + yield + finally: + cmds.setToolTo(original) + + +def polyConstraint(components, *args, **kwargs): + """Return the list of *components* with the constraints applied. + + A wrapper around Maya's `polySelectConstraint` to retrieve its results as + a list without altering selections. For a list of possible constraints + see `maya.cmds.polySelectConstraint` documentation. + + Arguments: + components (list): List of components of polygon meshes + + Returns: + list: The list of components filtered by the given constraints. + + """ + + kwargs.pop('mode', None) + + with no_undo(flush=False): + # Reverting selection to the original selection using + # `maya.cmds.select` can be slow in rare cases where previously + # `maya.cmds.polySelectConstraint` had set constrain to "All and Next" + # and the "Random" setting was activated. To work around this we + # revert to the original selection using the Maya API. This is safe + # since we're not generating any undo change anyway. + with tool("selectSuperContext"): + # Selection can be very slow when in a manipulator mode. + # So we force the selection context which is fast. + with maintained_selection_api(): + # Apply constraint using mode=2 (current and next) so + # it applies to the selection made before it; because just + # a `maya.cmds.select()` call will not trigger the constraint. + with reset_polySelectConstraint(): + cmds.select(components, r=1, noExpand=True) + cmds.polySelectConstraint(*args, mode=2, **kwargs) + result = cmds.ls(selection=True) + cmds.select(clear=True) + return result + + +@contextlib.contextmanager +def reset_polySelectConstraint(reset=True): + """Context during which the given polyConstraint settings are disabled. + + The original settings are restored after the context. + + """ + + original = cmds.polySelectConstraint(query=True, stateString=True) + + try: + if reset: + # Ensure command is available in mel + # This can happen when running standalone + if not mel.eval("exists resetPolySelectConstraint"): + mel.eval("source polygonConstraint") + + # Reset all parameters + mel.eval("resetPolySelectConstraint;") + cmds.polySelectConstraint(disable=True) + yield + finally: + mel.eval(original) + + +def is_visible(node, + displayLayer=True, + intermediateObject=True, + parentHidden=True, + visibility=True): + """Is `node` visible? 
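+
+    Each keyword argument toggles whether that particular check is
+    performed, e.g. `visibility=False` skips checking the visibility
+    attribute itself.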
+
+    A node is considered invisible when any of the following applies:
+    - The node does not exist (always checked)
+    - The node is not a dagNode (always checked)
+    - The node's visibility attribute is off.
+    - The node is set as an intermediate object.
+    - The node is in a disabled displayLayer.
+    - Any of its parent nodes is hidden.
+
+    Roughly based on: http://ewertb.soundlinker.com/mel/mel.098.php
+
+    Returns:
+        bool: Whether the node is visible in the scene
+
+    """
+
+    # Only existing objects can be visible
+    if not cmds.objExists(node):
+        return False
+
+    # Only dagNodes can be visible
+    if not cmds.objectType(node, isAType='dagNode'):
+        return False
+
+    if visibility:
+        if not cmds.getAttr('{0}.visibility'.format(node)):
+            return False
+
+    if intermediateObject and cmds.objectType(node, isAType='shape'):
+        if cmds.getAttr('{0}.intermediateObject'.format(node)):
+            return False
+
+    if displayLayer:
+        # Display layers set overrideEnabled and overrideVisibility on
+        # members; the node is hidden when the layer visibility is off
+        if cmds.attributeQuery('overrideEnabled', node=node, exists=True):
+            override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node))
+            override_visibility = cmds.getAttr('{}.overrideVisibility'.format(
+                node))
+            if override_enabled and not override_visibility:
+                return False
+
+    if parentHidden:
+        parents = cmds.listRelatives(node, parent=True, fullPath=True)
+        if parents:
+            parent = parents[0]
+            if not is_visible(parent,
+                              displayLayer=displayLayer,
+                              intermediateObject=False,
+                              parentHidden=parentHidden,
+                              visibility=visibility):
+                return False
+
+    return True
+
+
+def extract_alembic(file,
+                    startFrame=None,
+                    endFrame=None,
+                    selection=True,
+                    uvWrite=True,
+                    eulerFilter=True,
+                    dataFormat="ogawa",
+                    verbose=False,
+                    **kwargs):
+    """Extract a single Alembic Cache.
+
+    This extracts an Alembic cache using the `-selection` flag to minimize
+    the extracted content to solely what was Collected into the instance.
+
+    Arguments:
+
+        startFrame (float): Start frame of output. Ignored if `frameRange`
+            provided.
+
+        endFrame (float): End frame of output. Ignored if `frameRange`
+            provided.
+
+        frameRange (tuple or str): Two-tuple with start and end frame or a
+            string formatted as: "startFrame endFrame". This argument
+            overrides `startFrame` and `endFrame` arguments.
+
+        dataFormat (str): The data format to use for the cache,
+            defaults to "ogawa"
+
+        verbose (bool): When on, outputs frame number information to the
+            Script Editor or output window during extraction.
+
+        noNormals (bool): When on, normal data from the original polygon
+            objects is not included in the exported Alembic cache file.
+
+        renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
+            such as hidden objects, are not included in the Alembic file.
+            Defaults to False.
+
+        stripNamespaces (bool): When on, any namespaces associated with the
+            exported objects are removed from the Alembic file. For example,
+            an object with the namespace taco:foo:bar appears as bar in the
+            Alembic file.
+
+        uvWrite (bool): When on, UV data from polygon meshes and subdivision
+            objects are written to the Alembic file. Only the current UV map
+            is included.
+
+        worldSpace (bool): When on, the top node in the node hierarchy is
+            stored as world space. By default, these nodes are stored as
+            local space. Defaults to False.
+
+        eulerFilter (bool): When on, X, Y, and Z rotation data is filtered
+            with an Euler filter. Euler filtering helps resolve
+            irregularities in rotations especially if X, Y, and Z rotations
+            exceed 360 degrees. Defaults to True.
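+
+        writeCreases (bool): When on, crease data is written to the Alembic
+            file. On Maya 2018 and newer this flag is passed to the exporter
+            as `autoSubd`. Defaults to False.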
+ + """ + + # Ensure alembic exporter is loaded + cmds.loadPlugin('AbcExport', quiet=True) + + # Alembic Exporter requires forward slashes + file = file.replace('\\', '/') + + # Pass the start and end frame on as `frameRange` so that it + # never conflicts with that argument + if "frameRange" not in kwargs: + # Fallback to maya timeline if no start or end frame provided. + if startFrame is None: + startFrame = cmds.playbackOptions(query=True, + animationStartTime=True) + if endFrame is None: + endFrame = cmds.playbackOptions(query=True, + animationEndTime=True) + + # Ensure valid types are converted to frame range + assert isinstance(startFrame, _alembic_options["startFrame"]) + assert isinstance(endFrame, _alembic_options["endFrame"]) + kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame) + else: + # Allow conversion from tuple for `frameRange` + frame_range = kwargs["frameRange"] + if isinstance(frame_range, (list, tuple)): + assert len(frame_range) == 2 + kwargs["frameRange"] = "{0} {1}".format(frame_range[0], + frame_range[1]) + + # Assemble options + options = { + "selection": selection, + "uvWrite": uvWrite, + "eulerFilter": eulerFilter, + "dataFormat": dataFormat + } + options.update(kwargs) + + # Validate options + for key, value in options.copy().items(): + + # Discard unknown options + if key not in _alembic_options: + log.warning("extract_alembic() does not support option '%s'. " + "Flag will be ignored..", key) + options.pop(key) + continue + + # Validate value type + valid_types = _alembic_options[key] + if not isinstance(value, valid_types): + raise TypeError("Alembic option unsupported type: " + "{0} (expected {1})".format(value, valid_types)) + + # Ignore empty values, like an empty string, since they mess up how + # job arguments are built + if isinstance(value, (list, tuple)): + value = [x for x in value if x.strip()] + + # Ignore option completely if no values remaining + if not value: + options.pop(key) + continue + + options[key] = value + + # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ + maya_version = int(cmds.about(version=True)) + if maya_version >= 2018: + options['autoSubd'] = options.pop('writeCreases', False) + + # Format the job string from options + job_args = list() + for key, value in options.items(): + if isinstance(value, (list, tuple)): + for entry in value: + job_args.append("-{} {}".format(key, entry)) + elif isinstance(value, bool): + # Add only when state is set to True + if value: + job_args.append("-{0}".format(key)) + else: + job_args.append("-{0} {1}".format(key, value)) + + job_str = " ".join(job_args) + job_str += ' -file "%s"' % file + + # Ensure output directory exists + parent_dir = os.path.dirname(file) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + + if verbose: + log.debug("Preparing Alembic export with options: %s", + json.dumps(options, indent=4)) + log.debug("Extracting Alembic with job arguments: %s", job_str) + + # Perform extraction + print("Alembic Job Arguments : {}".format(job_str)) + + # Disable the parallel evaluation temporarily to ensure no buggy + # exports are made. 
(PLN-31)
+    # TODO: Make sure this actually fixes the issues
+    with evaluation("off"):
+        cmds.AbcExport(j=job_str, verbose=verbose)
+
+    if verbose:
+        log.debug("Extracted Alembic to: %s", file)
+
+    return file
+
+
+# region ID
+def get_id_required_nodes(referenced_nodes=False, nodes=None):
+    """Return the nodes that require an id, filtering out any nodes
+    that are locked, referenced or read-only.
+
+    Args:
+        referenced_nodes (bool): set True to also include referenced nodes,
+            otherwise they are filtered out
+        nodes (list, Optional): nodes to consider
+    Returns:
+        set: The filtered nodes
+    """
+
+    lookup = None
+    if nodes is None:
+        # Consider all nodes
+        nodes = cmds.ls()
+    else:
+        # Build a lookup for the only allowed nodes in output based
+        # on `nodes` input of the function (+ ensure long names)
+        lookup = set(cmds.ls(nodes, long=True))
+
+    def _node_type_exists(node_type):
+        try:
+            cmds.nodeType(node_type, isTypeName=True)
+            return True
+        except RuntimeError:
+            return False
+
+    # `readOnly` flag is obsolete as of Maya 2016 therefore we explicitly
+    # remove default nodes and reference nodes
+    camera_shapes = ["frontShape", "sideShape", "topShape", "perspShape"]
+
+    ignore = set()
+    if not referenced_nodes:
+        ignore |= set(cmds.ls(long=True, referencedNodes=True))
+
+    # list all defaultNodes to filter out from the rest
+    ignore |= set(cmds.ls(long=True, defaultNodes=True))
+    ignore |= set(cmds.ls(camera_shapes, long=True))
+
+    # Remove Turtle from the result of `cmds.ls` if Turtle is loaded
+    # TODO: This should be a less specific check for a single plug-in.
+    if _node_type_exists("ilrBakeLayer"):
+        ignore |= set(cmds.ls(type="ilrBakeLayer", long=True))
+
+    # Establish the set of node types to include
+    types = ["objectSet", "file", "mesh", "nurbsCurve", "nurbsSurface"]
+
+    # Check if plugin nodes are available for Maya by checking if the
+    # plugin is loaded
+    if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
+        types.append("pgYetiMaya")
+
+    # We *always* ignore intermediate shapes, so we filter them out directly
+    nodes = cmds.ls(nodes, type=types, long=True, noIntermediate=True)
+
+    # The items which need to pass the id to their parent
+    # Add the collected transform to the nodes
+    dag = cmds.ls(nodes, type="dagNode", long=True)  # query only dag nodes
+    transforms = cmds.listRelatives(dag,
+                                    parent=True,
+                                    fullPath=True) or []
+
+    nodes = set(nodes)
+    nodes |= set(transforms)
+
+    nodes -= ignore  # Remove the ignored nodes
+    if not nodes:
+        return nodes
+
+    # Ensure only nodes from the input `nodes` are returned when a
+    # filter was applied on function call because we also iterated
+    # to parents and alike
+    if lookup is not None:
+        nodes &= lookup
+
+    # Avoid locked nodes
+    nodes_list = list(nodes)
+    locked = cmds.lockNode(nodes_list, query=True, lock=True)
+    for node, lock in zip(nodes_list, locked):
+        if lock:
+            log.warning("Skipping locked node: %s" % node)
+            nodes.remove(node)
+
+    return nodes
+
+
+def get_id(node):
+    """Get the `cbId` attribute of the given node.
+
+    Args:
+        node (str): the name of the node to retrieve the attribute from
+    Returns:
+        str or None: The `cbId` value or None when the node has no id.
+
+    """
+    if node is None:
+        return
+
+    sel = OpenMaya.MSelectionList()
+    sel.add(node)
+
+    api_node = sel.getDependNode(0)
+    fn = OpenMaya.MFnDependencyNode(api_node)
+
+    if not fn.hasAttribute("cbId"):
+        return
+
+    try:
+        return fn.findPlug("cbId", False).asString()
+    except RuntimeError:
+        log.warning("Failed to retrieve cbId on %s", node)
+        return
+
+
+def generate_ids(nodes, asset_id=None):
+    """Returns new unique ids for the given nodes.
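+
+    Each id is formatted as "{asset_id}:{uuid}", where the uuid part is
+    the last group of a random uuid4.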
+ + Note: This does not assign the new ids, it only generates the values. + + To assign new ids using this method: + >>> nodes = ["a", "b", "c"] + >>> for node, id in generate_ids(nodes): + >>> set_id(node, id) + + To also override any existing values (and assign regenerated ids): + >>> nodes = ["a", "b", "c"] + >>> for node, id in generate_ids(nodes): + >>> set_id(node, id, overwrite=True) + + Args: + nodes (list): List of nodes. + asset_id (str or bson.ObjectId): The database id for the *asset* to + generate for. When None provided the current asset in the + active session is used. + + Returns: + list: A list of (node, id) tuples. + + """ + + if asset_id is None: + # Get the asset ID from the database for the asset of current context + project_name = get_current_project_name() + asset_name = get_current_asset_name() + asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) + assert asset_doc, "No current asset found in Session" + asset_id = asset_doc['_id'] + + node_ids = [] + for node in nodes: + _, uid = str(uuid.uuid4()).rsplit("-", 1) + unique_id = "{}:{}".format(asset_id, uid) + node_ids.append((node, unique_id)) + + return node_ids + + +def set_id(node, unique_id, overwrite=False): + """Add cbId to `node` unless one already exists. + + Args: + node (str): the node to add the "cbId" on + unique_id (str): The unique node id to assign. + This should be generated by `generate_ids`. + overwrite (bool, optional): When True overrides the current value even + if `node` already has an id. Defaults to False. + + Returns: + None + + """ + + exists = cmds.attributeQuery("cbId", node=node, exists=True) + + # Add the attribute if it does not exist yet + if not exists: + cmds.addAttr(node, longName="cbId", dataType="string") + + # Set the value + if not exists or overwrite: + attr = "{0}.cbId".format(node) + cmds.setAttr(attr, unique_id, type="string") + + +def get_attribute(plug, + asString=False, + expandEnvironmentVariables=False, + **kwargs): + """Maya getAttr with some fixes based on `pymel.core.general.getAttr()`. + + Like Pymel getAttr this applies some changes to `maya.cmds.getAttr` + - maya pointlessly returned vector results as a tuple wrapped in a list + (ex. '[(1,2,3)]'). This command unpacks the vector for you. + - when getting a multi-attr, maya would raise an error, but this will + return a list of values for the multi-attr + - added support for getting message attributes by returning the + connections instead + + Note that the asString + expandEnvironmentVariables argument naming + convention matches the `maya.cmds.getAttr` arguments so that it can + act as a direct replacement for it. + + Args: + plug (str): Node's attribute plug as `node.attribute` + asString (bool): Return string value for enum attributes instead + of the index. Note that the return value can be dependent on the + UI language Maya is running in. + expandEnvironmentVariables (bool): Expand any environment variable and + (tilde characters on UNIX) found in string attributes which are + returned. + + Kwargs: + Supports the keyword arguments of `maya.cmds.getAttr` + + Returns: + object: The value of the maya attribute. 
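+
+    Example:
+        >>> # A vector plug is unpacked instead of Maya's [(x, y, z)]
+        >>> get_attribute("pSphere1.translate")
+        (0.0, 0.0, 0.0)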
+
+    """
+    attr_type = cmds.getAttr(plug, type=True)
+    if asString:
+        kwargs["asString"] = True
+    if expandEnvironmentVariables:
+        kwargs["expandEnvironmentVariables"] = True
+    try:
+        res = cmds.getAttr(plug, **kwargs)
+    except RuntimeError:
+        if attr_type == "message":
+            return cmds.listConnections(plug)
+
+        node, attr = plug.split(".", 1)
+        children = cmds.attributeQuery(attr, node=node, listChildren=True)
+        if children:
+            return [
+                get_attribute("{}.{}".format(node, child))
+                for child in children
+            ]
+
+        raise
+
+    # Convert a vector result wrapped in a list, e.g. [(x, y, z)]
+    if isinstance(res, list) and len(res):
+        if isinstance(res[0], tuple):
+            if attr_type in {'pointArray', 'vectorArray'}:
+                return res
+            return res[0]
+
+    return res
+
+
+def set_attribute(attribute, value, node):
+    """Adjust attributes based on the value from the attribute data
+
+    If an attribute does not exist on the target it will be added with
+    the dataType being controlled by the value type.
+
+    Args:
+        attribute (str): name of the attribute to change
+        value: the value to set the attribute to
+        node (str): name of the node
+
+    Returns:
+        None
+    """
+
+    value_type = type(value).__name__
+    kwargs = ATTRIBUTE_DICT[value_type]
+    if not cmds.attributeQuery(attribute, node=node, exists=True):
+        log.debug("Creating attribute '{}' on "
+                  "'{}'".format(attribute, node))
+        cmds.addAttr(node, longName=attribute, **kwargs)
+
+    node_attr = "{}.{}".format(node, attribute)
+    enum_type = cmds.attributeQuery(attribute, node=node, enum=True)
+    if enum_type and value_type == "str":
+        enum_string_values = cmds.attributeQuery(
+            attribute, node=node, listEnum=True
+        )[0].split(":")
+        cmds.setAttr(node_attr, enum_string_values.index(value))
+    elif "dataType" in kwargs:
+        attr_type = kwargs["dataType"]
+        cmds.setAttr(node_attr, value, type=attr_type)
+    else:
+        cmds.setAttr(node_attr, value)
+
+
+def apply_attributes(attributes, nodes_by_id):
+    """Alter the attributes to match the state when publishing
+
+    Apply attribute settings from the publish to the node in the scene based
+    on the UUID which is stored in the cbId attribute.
+
+    Args:
+        attributes (list): list of dictionaries
+        nodes_by_id (dict): collection of nodes based on UUID
+            {uuid: [node, node]}
+
+    """
+
+    for attr_data in attributes:
+        nodes = nodes_by_id[attr_data["uuid"]]
+        attr_value = attr_data["attributes"]
+        for node in nodes:
+            for attr, value in attr_value.items():
+                set_attribute(attr, value, node)
+
+
+def get_container_members(container):
+    """Returns the members of a container.
+
+    This includes the nodes from any loaded references in the container.
+    """
+    if isinstance(container, dict):
+        # Assume it's a container dictionary
+        container = container["objectName"]
+
+    members = cmds.sets(container, query=True) or []
+    members = cmds.ls(members, long=True, objectsOnly=True) or []
+    all_members = set(members)
+
+    # Include any referenced nodes from any reference in the container
+    # This is required since we've removed adding ALL nodes of a reference
+    # into the container set and only add the reference node now.
+ for ref in cmds.ls(members, exactType="reference", objectsOnly=True): + + # Ignore any `:sharedReferenceNode` + if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"): + continue + + # Ignore _UNKNOWN_REF_NODE_ (PLN-160) + if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"): + continue + + reference_members = cmds.referenceQuery(ref, nodes=True, dagPath=True) + reference_members = cmds.ls(reference_members, + long=True, + objectsOnly=True) + all_members.update(reference_members) + + return list(all_members) + + +# region LOOKDEV +def list_looks(project_name, asset_id): + """Return all look subsets for the given asset + + This assumes all look subsets start with "look*" in their names. + """ + # # get all subsets with look leading in + # the name associated with the asset + # TODO this should probably look for family 'look' instead of checking + # subset name that can not start with family + subset_docs = get_subsets(project_name, asset_ids=[asset_id]) + return [ + subset_doc + for subset_doc in subset_docs + if subset_doc["name"].startswith("look") + ] + + +def assign_look_by_version(nodes, version_id): + """Assign nodes a specific published look version by id. + + This assumes the nodes correspond with the asset. + + Args: + nodes(list): nodes to assign look to + version_id (bson.ObjectId): database id of the version + + Returns: + None + """ + + project_name = get_current_project_name() + + # Get representations of shader file and relationships + look_representation = get_representation_by_name( + project_name, "ma", version_id + ) + json_representation = get_representation_by_name( + project_name, "json", version_id + ) + + # See if representation is already loaded, if so reuse it. + host = registered_host() + representation_id = str(look_representation['_id']) + for container in host.ls(): + if (container['loader'] == "LookLoader" and + container['representation'] == representation_id): + log.info("Reusing loaded look ..") + container_node = container['objectName'] + break + else: + log.info("Using look for the first time ..") + + # Load file + _loaders = discover_loader_plugins() + loaders = loaders_from_representation(_loaders, representation_id) + Loader = next((i for i in loaders if i.__name__ == "LookLoader"), None) + if Loader is None: + raise RuntimeError("Could not find LookLoader, this is a bug") + + # Reference the look file + with maintained_selection(): + container_node = load_container(Loader, look_representation) + + # Get container members + shader_nodes = get_container_members(container_node) + + # Load relationships + shader_relation = get_representation_path(json_representation) + with open(shader_relation, "r") as f: + relationships = json.load(f) + + # Assign relationships + apply_shaders(relationships, shader_nodes, nodes) + + +def assign_look(nodes, subset="lookDefault"): + """Assigns a look to a node. + + Optimizes the nodes by grouping by asset id and finding + related subset by name. 
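+
+    The asset id is parsed from each node's `cbId` attribute so that the
+    database only needs to be queried once per asset.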
+
+    Args:
+        nodes (list): all nodes to assign the look to
+        subset (str): name of the subset to find
+    """
+
+    # Group all nodes per asset id
+    grouped = defaultdict(list)
+    for node in nodes:
+        pype_id = get_id(node)
+        if not pype_id:
+            continue
+
+        parts = pype_id.split(":", 1)
+        grouped[parts[0]].append(node)
+
+    project_name = get_current_project_name()
+    subset_docs = get_subsets(
+        project_name, subset_names=[subset], asset_ids=grouped.keys()
+    )
+    subset_docs_by_asset_id = {
+        str(subset_doc["parent"]): subset_doc
+        for subset_doc in subset_docs
+    }
+    subset_ids = {
+        subset_doc["_id"]
+        for subset_doc in subset_docs_by_asset_id.values()
+    }
+    last_version_docs = get_last_versions(
+        project_name,
+        subset_ids=subset_ids,
+        fields=["_id", "name", "data.families"]
+    )
+    last_version_docs_by_subset_id = {
+        last_version_doc["parent"]: last_version_doc
+        for last_version_doc in last_version_docs
+    }
+
+    for asset_id, asset_nodes in grouped.items():
+        # Find the matching subset document for this asset
+        subset_doc = subset_docs_by_asset_id.get(asset_id)
+        if not subset_doc:
+            log.warning("No subset '{}' found for {}".format(subset, asset_id))
+            continue
+
+        last_version = last_version_docs_by_subset_id.get(subset_doc["_id"])
+        if not last_version:
+            log.warning((
+                "No last version found for subset '{}' on asset with id {}"
+            ).format(subset, asset_id))
+            continue
+
+        families = last_version.get("data", {}).get("families") or []
+        if "look" not in families:
+            log.warning((
+                "Last version for subset '{}' on asset with id {}"
+                " does not have look family"
+            ).format(subset, asset_id))
+            continue
+
+        log.debug("Assigning look '{}' version '{}'".format(
+            subset, last_version["name"]))
+
+        assign_look_by_version(asset_nodes, last_version["_id"])
+
+
+def apply_shaders(relationships, shadernodes, nodes):
+    """Link shadingEngine to the right nodes based on relationship data
+
+    Relationship data consists of a collection of `sets` and `attributes`.
+    The `sets` correspond with the shadingEngines found in the lookdev.
+    Each set has the keys `name`, `members` and `uuid`; the `members`
+    hold a collection of node information (`name` and `uuid`).
+
+    Args:
+        relationships (dict): relationship data
+        shadernodes (list): list of nodes of the shading objectSets (includes
+            VRayObjectProperties and shadingEngines)
+        nodes (list): list of nodes to apply shader to
+
+    Returns:
+        None
+    """
+
+    attributes = relationships.get("attributes", [])
+    shader_data = relationships.get("relationships", {})
+
+    shading_engines = cmds.ls(shadernodes, type="objectSet", long=True)
+    assert shading_engines, "Error in retrieving objectSets from reference"
+
+    # region compute lookup
+    nodes_by_id = defaultdict(list)
+    for node in nodes:
+        nodes_by_id[get_id(node)].append(node)
+
+    shading_engines_by_id = defaultdict(list)
+    for shad in shading_engines:
+        shading_engines_by_id[get_id(shad)].append(shad)
+    # endregion
+
+    # region assign shading engines and other sets
+    for data in shader_data.values():
+        # collect all unique IDs of the set members
+        shader_uuid = data["uuid"]
+        member_uuids = [member["uuid"] for member in data["members"]]
+
+        filtered_nodes = list()
+        for m_uuid in member_uuids:
+            filtered_nodes.extend(nodes_by_id[m_uuid])
+
+        id_shading_engines = shading_engines_by_id[shader_uuid]
+        if not id_shading_engines:
+            log.error("No shader found with cbId "
+                      "'{}'".format(shader_uuid))
+            continue
+        elif len(id_shading_engines) > 1:
+            log.error("Skipping shader assignment. "
+                      "More than one shader found with cbId "
+                      "'{}'. (found: {})".format(shader_uuid,
+                                                 id_shading_engines))
+            continue
+
+        if not filtered_nodes:
+            log.warning("No nodes found for shading engine "
+                        "'{0}'".format(id_shading_engines[0]))
+            continue
+        try:
+            cmds.sets(filtered_nodes, forceElement=id_shading_engines[0])
+        except RuntimeError as rte:
+            log.error("Error during shader assignment: {}".format(rte))
+
+    # endregion
+
+    apply_attributes(attributes, nodes_by_id)
+
+
+# endregion LOOKDEV
+def get_isolate_view_sets():
+    """Return isolate view sets of all modelPanels.
+
+    Returns:
+        list: all sets related to isolate view
+
+    """
+
+    view_sets = set()
+    for panel in cmds.getPanel(type="modelPanel") or []:
+        view_set = cmds.modelEditor(panel, query=True, viewObjects=True)
+        if view_set:
+            view_sets.add(view_set)
+
+    return view_sets
+
+
+def get_related_sets(node):
+    """Return objectSets that are relationships for a look for `node`.
+
+    Sets are excluded when:
+    - the id attribute is `pyblish.avalon.instance` or
+      `pyblish.avalon.container`
+    - they are deformer sets (alembic creates meshShapeDeformed)
+    - the set name ends with any of a predefined list of suffices
+    - the set is a viewport set (isolate selected for example)
+
+    Args:
+        node (str): name of the current node to check
+
+    Returns:
+        list: The related sets
+
+    """
+
+    # Ignore specific suffices
+    ignore_suffices = ["out_SET", "controls_SET", "_INST", "_CON"]
+
+    # Default nodes to ignore
+    defaults = {"defaultLightSet", "defaultObjectSet"}
+
+    # Ids to ignore
+    ignored = {"pyblish.avalon.instance", "pyblish.avalon.container"}
+
+    view_sets = get_isolate_view_sets()
+
+    sets = cmds.listSets(object=node, extendToShape=False)
+    if not sets:
+        return []
+
+    # Fix 'no object matches name' errors on nodes returned by listSets.
+    # In rare cases it can happen that a node is added to an internal maya
+    # set inaccessible by maya commands, for example check some nodes
+    # returned by `cmds.listSets(allSets=True)`
+    sets = cmds.ls(sets)
+
+    # Ignore `avalon.instance` and `avalon.container` sets
+    sets = [s for s in sets if
+            not cmds.attributeQuery("id", node=s, exists=True) or
+            not cmds.getAttr("%s.id" % s) in ignored]
+
+    # Exclude deformer sets (`type=2` for `maya.cmds.listSets`)
+    deformer_sets = cmds.listSets(object=node,
+                                  extendToShape=False,
+                                  type=2) or []
+    deformer_sets = set(deformer_sets)  # optimize lookup
+    sets = [s for s in sets if s not in deformer_sets]
+
+    # Ignore when the set has a specific suffix
+    sets = [s for s in sets if not any(s.endswith(x) for x in ignore_suffices)]
+
+    # Ignore viewport filter view sets (from isolate select and
+    # viewports)
+    sets = [s for s in sets if s not in view_sets]
+    sets = [s for s in sets if s not in defaults]
+
+    return sets
+
+
+def get_container_transforms(container, members=None, root=False):
+    """Retrieve the root node of the container content
+
+    When a container is created through a Loader the content
+    of the file will be grouped under a transform. The name of the root
+    transform is stored in the container information.
+
+    Args:
+        container (dict): the container
+        members (list): optional and convenience argument
+        root (bool): return highest node in hierarchy if True
+
+    Returns:
+        list or str: The container transforms, or only the first highest
+            transform when `root` is True.
+    """
+
+    if not members:
+        members = get_container_members(container)
+
+    results = cmds.ls(members, type="transform", long=True)
+    if root:
+        root = get_highest_in_hierarchy(results)
+        if root:
+            results = root[0]
+
+    return results
+
+
+def get_highest_in_hierarchy(nodes):
+    """Return highest nodes in the hierarchy that are in the `nodes` list.
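+
+    For example, for `["|grp", "|grp|child"]` only `["|grp"]` is returned
+    because `|grp|child` has a parent within the input list.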
+
+    The "highest in hierarchy" are the nodes closest to world: top-most level.
+
+    Args:
+        nodes (list): The nodes in which to find the highest in hierarchy.
+
+    Returns:
+        list: The highest nodes from the input nodes.
+
+    """
+
+    # Ensure we use long names
+    nodes = cmds.ls(nodes, long=True)
+    lookup = set(nodes)
+
+    highest = []
+    for node in nodes:
+        # If no parents are within the nodes input list
+        # then this is a highest node
+        if not any(n in lookup for n in iter_parents(node)):
+            highest.append(node)
+
+    return highest
+
+
+def iter_parents(node):
+    """Iterate over the parents of a node from its long name.
+
+    Note: The `node` *must* be the long node name.
+
+    Args:
+        node (str): Node long name.
+
+    Yields:
+        str: All parent node names (long names)
+
+    """
+    while True:
+        split = node.rsplit("|", 1)
+        if len(split) == 1 or not split[0]:
+            return
+
+        node = split[0]
+        yield node
+
+
+def remove_other_uv_sets(mesh):
+    """Remove all other UV sets than the current UV set.
+
+    Keep only the current UV set and ensure it's renamed to the default
+    'map1'.
+
+    """
+
+    uvSets = cmds.polyUVSet(mesh, query=True, allUVSets=True)
+    current = cmds.polyUVSet(mesh, query=True, currentUVSet=True)[0]
+
+    # Copy over to map1
+    if current != 'map1':
+        cmds.polyUVSet(mesh, uvSet=current, newUVSet='map1', copy=True)
+        cmds.polyUVSet(mesh, currentUVSet=True, uvSet='map1')
+        current = 'map1'
+
+    # Delete all non-current UV sets
+    deleteUVSets = [uvSet for uvSet in uvSets if uvSet != current]
+    uvSet = None
+
+    # Maya Bug (tested in 2015/2016):
+    # In some cases the API's MFnMesh will report less UV sets than
+    # maya.cmds.polyUVSet. This seems to happen when the deletion of UV sets
+    # has not triggered a cleanup of the UVSet array attribute on the mesh
+    # node. It will still have extra entries in the attribute, though it will
+    # not show up in API or UI. Nevertheless it does show up in
+    # maya.cmds.polyUVSet. To ensure we clean up the array we'll force delete
+    # the extra remaining 'indices' that we don't want.
+
+    # TODO: Implement a better fix
+    # The best way to fix would be to get the UVSet indices from api with
+    # MFnMesh (to ensure we keep correct ones) and then only force delete the
+    # other entries in the array attribute on the node. But for now we're
+    # deleting all entries except the first one. Note that the first entry
+    # could never be removed (the default 'map1' always exists and is
+    # supposed to be undeletable.)
+    try:
+        for uvSet in deleteUVSets:
+            cmds.polyUVSet(mesh, delete=True, uvSet=uvSet)
+    except RuntimeError as exc:
+        log.warning('Error uvSet: %s - %s', uvSet, exc)
+        indices = cmds.getAttr('{0}.uvSet'.format(mesh),
+                               multiIndices=True)
+        if not indices:
+            log.warning("No uv set indices found for: %s", mesh)
+            return
+
+        # Delete from the end to avoid shifting indices
+        # and remove the indices in the attribute
+        indices = reversed(indices[1:])
+        for i in indices:
+            attr = '{0}.uvSet[{1}]'.format(mesh, i)
+            cmds.removeMultiInstance(attr, b=True)
+
+
+def get_node_parent(node):
+    """Return full path name for parent of node"""
+    parents = cmds.listRelatives(node, parent=True, fullPath=True)
+    return parents[0] if parents else None
+
+
+def get_id_from_sibling(node, history_only=True):
+    """Return first node id in the history chain that matches this node.
+
+    The nodes in history must be of the exact same node type and must be
+    parented under the same parent.
+
+    Optionally, if no matching node is found from the history, all the
+    siblings of the node that are of the same type are checked.
+ Additionally to having the same parent, the sibling must be marked as + 'intermediate object'. + + Args: + node (str): node to retrieve the history from + history_only (bool): if True and if nothing found in history, + look for an 'intermediate object' in all the node's siblings + of same type + + Returns: + str or None: The id from the sibling node or None when no id found + on any valid nodes in the history or siblings. + + """ + + node = cmds.ls(node, long=True)[0] + + # Find all similar nodes in history + history = cmds.listHistory(node) + node_type = cmds.nodeType(node) + similar_nodes = cmds.ls(history, exactType=node_type, long=True) + + # Exclude itself + similar_nodes = [x for x in similar_nodes if x != node] + + # The node *must be* under the same parent + parent = get_node_parent(node) + similar_nodes = [i for i in similar_nodes if get_node_parent(i) == parent] + + # Check all of the remaining similar nodes and take the first one + # with an id and assume it's the original. + for similar_node in similar_nodes: + _id = get_id(similar_node) + if _id: + return _id + + if not history_only: + # Get siblings of same type + similar_nodes = cmds.listRelatives(parent, + type=node_type, + fullPath=True) + similar_nodes = cmds.ls(similar_nodes, exactType=node_type, long=True) + + # Exclude itself + similar_nodes = [x for x in similar_nodes if x != node] + + # Get all unique ids from siblings in order since + # we consistently take the first one found + sibling_ids = OrderedDict() + for similar_node in similar_nodes: + # Check if "intermediate object" + if not cmds.getAttr(similar_node + ".intermediateObject"): + continue + + _id = get_id(similar_node) + if not _id: + continue + + if _id in sibling_ids: + sibling_ids[_id].append(similar_node) + else: + sibling_ids[_id] = [similar_node] + + if sibling_ids: + first_id, found_nodes = next(iter(sibling_ids.items())) + + # Log a warning if we've found multiple unique ids + if len(sibling_ids) > 1: + log.warning(("Found more than 1 intermediate shape with" + " unique id for '{}'. 
Using id of first"
+                         " found: '{}'".format(node, found_nodes[0])))
+
+        return first_id
+
+
+def set_scene_fps(fps, update=True):
+    """Set FPS from project configuration
+
+    Args:
+        fps (int, float): desired FPS
+        update (bool): toggle update animation, default is True
+
+    Returns:
+        None
+
+    """
+
+    fps_mapping = {
+        '15': 'game',
+        '24': 'film',
+        '25': 'pal',
+        '30': 'ntsc',
+        '48': 'show',
+        '50': 'palf',
+        '60': 'ntscf',
+        '23.976023976023978': '23.976fps',
+        '29.97002997002997': '29.97fps',
+        '47.952047952047955': '47.952fps',
+        '59.94005994005994': '59.94fps',
+        '44100': '44100fps',
+        '48000': '48000fps'
+    }
+
+    unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None)
+    if unit is None:
+        raise ValueError("Unsupported FPS value: `%s`" % fps)
+
+    # Get time slider current state
+    start_frame = cmds.playbackOptions(query=True, minTime=True)
+    end_frame = cmds.playbackOptions(query=True, maxTime=True)
+
+    # Get animation data
+    animation_start = cmds.playbackOptions(query=True,
+                                           animationStartTime=True)
+    animation_end = cmds.playbackOptions(query=True, animationEndTime=True)
+
+    current_frame = cmds.currentTime(query=True)
+
+    log.info("Setting scene FPS to: '{}'".format(unit))
+    cmds.currentUnit(time=unit, updateAnimation=update)
+
+    # Set time slider data back to previous state
+    cmds.playbackOptions(edit=True, minTime=start_frame)
+    cmds.playbackOptions(edit=True, maxTime=end_frame)
+
+    # Set animation data
+    cmds.playbackOptions(edit=True, animationStartTime=animation_start)
+    cmds.playbackOptions(edit=True, animationEndTime=animation_end)
+
+    cmds.currentTime(current_frame, edit=True, update=True)
+
+    # Force file state to 'modified'
+    cmds.file(modified=True)
+
+
+def set_scene_resolution(width, height, pixelAspect):
+    """Set the render resolution
+
+    Args:
+        width (int): value of the width
+        height (int): value of the height
+        pixelAspect (int, float): value of the pixel aspect ratio
+
+    Returns:
+        None
+
+    """
+
+    control_node = "defaultResolution"
+    current_renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer")
+    aspect_ratio_attr = "deviceAspectRatio"
+
+    # Give VRay a helping hand as it is slightly different from the rest
+    if current_renderer == "vray":
+        aspect_ratio_attr = "aspectRatio"
+        vray_node = "vraySettings"
+        if cmds.objExists(vray_node):
+            control_node = vray_node
+        else:
+            log.error("Can't set VRay resolution because there is no node "
+                      "named: `%s`" % vray_node)
+
+    log.info("Setting scene resolution to: %s x %s" % (width, height))
+    cmds.setAttr("%s.width" % control_node, width)
+    cmds.setAttr("%s.height" % control_node, height)
+
+    deviceAspectRatio = ((float(width) / float(height)) * float(pixelAspect))
+    cmds.setAttr(
+        "{}.{}".format(control_node, aspect_ratio_attr), deviceAspectRatio)
+    cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect)
+
+
+def get_fps_for_current_context():
+    """Get fps that should be set for current context.
+
+    Todos:
+        - Skip project value.
+        - Merge logic with 'get_frame_range' and 'reset_scene_resolution' ->
+            all the values in the functions can be collected at one place as
+            they have same requirements.
+
+    Returns:
+        Union[int, float]: FPS value.
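+            Prefers the asset fps, falls back to the project fps and
+            lastly defaults to 25.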
+ """ + + project_name = get_current_project_name() + asset_name = get_current_asset_name() + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.fps"] + ) or {} + fps = asset_doc.get("data", {}).get("fps") + if not fps: + project_doc = get_project(project_name, fields=["data.fps"]) or {} + fps = project_doc.get("data", {}).get("fps") + + if not fps: + fps = 25 + + return convert_to_maya_fps(fps) + + +def get_frame_range(include_animation_range=False): + """Get the current assets frame range and handles. + + Args: + include_animation_range (bool, optional): Whether to include + `animationStart` and `animationEnd` keys to define the outer + range of the timeline. It is excluded by default. + + Returns: + dict: Asset's expected frame range values. + + """ + + # Set frame start/end + project_name = get_current_project_name() + asset_name = get_current_asset_name() + asset = get_asset_by_name(project_name, asset_name) + + frame_start = asset["data"].get("frameStart") + frame_end = asset["data"].get("frameEnd") + + if frame_start is None or frame_end is None: + cmds.warning("No edit information found for %s" % asset_name) + return + + handle_start = asset["data"].get("handleStart") or 0 + handle_end = asset["data"].get("handleEnd") or 0 + + frame_range = { + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end + } + if include_animation_range: + # The animation range values are only included to define whether + # the Maya time slider should include the handles or not. + # Some usages of this function use the full dictionary to define + # instance attributes for which we want to exclude the animation + # keys. That is why these are excluded by default. + task_name = get_current_task_name() + settings = get_project_settings(project_name) + include_handles_settings = settings["maya"]["include_handles"] + current_task = asset.get("data").get("tasks").get(task_name) + + animation_start = frame_start + animation_end = frame_end + + include_handles = include_handles_settings["include_handles_default"] + for item in include_handles_settings["per_task_type"]: + if current_task["type"] in item["task_type"]: + include_handles = item["include_handles"] + break + if include_handles: + animation_start -= int(handle_start) + animation_end += int(handle_end) + + frame_range["animationStart"] = animation_start + frame_range["animationEnd"] = animation_end + + return frame_range + + +def reset_frame_range(playback=True, render=True, fps=True): + """Set frame range to current asset + + Args: + playback (bool, Optional): Whether to set the maya timeline playback + frame range. Defaults to True. + render (bool, Optional): Whether to set the maya render frame range. + Defaults to True. + fps (bool, Optional): Whether to set scene FPS. Defaults to True. 
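+
+    Note:
+        The outer (animation) range of the time slider and the render
+        frame range are set to the range including handles when the
+        project settings enable handles for the current task type.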
+ """ + if fps: + set_scene_fps(get_fps_for_current_context()) + + frame_range = get_frame_range(include_animation_range=True) + if not frame_range: + # No frame range data found for asset + return + + frame_start = frame_range["frameStart"] + frame_end = frame_range["frameEnd"] + animation_start = frame_range["animationStart"] + animation_end = frame_range["animationEnd"] + + if playback: + cmds.playbackOptions( + minTime=frame_start, + maxTime=frame_end, + animationStartTime=animation_start, + animationEndTime=animation_end + ) + cmds.currentTime(frame_start) + + if render: + cmds.setAttr("defaultRenderGlobals.startFrame", animation_start) + cmds.setAttr("defaultRenderGlobals.endFrame", animation_end) + + +def reset_scene_resolution(): + """Apply the scene resolution from the project definition + + scene resolution can be overwritten by an asset if the asset.data contains + any information regarding scene resolution . + + Returns: + None + """ + + project_name = get_current_project_name() + project_doc = get_project(project_name) + project_data = project_doc["data"] + asset_data = get_current_project_asset()["data"] + + # Set project resolution + width_key = "resolutionWidth" + height_key = "resolutionHeight" + pixelAspect_key = "pixelAspect" + + width = asset_data.get(width_key, project_data.get(width_key, 1920)) + height = asset_data.get(height_key, project_data.get(height_key, 1080)) + pixelAspect = asset_data.get(pixelAspect_key, + project_data.get(pixelAspect_key, 1)) + + set_scene_resolution(width, height, pixelAspect) + + +def set_context_settings(): + """Apply the project settings from the project definition + + Settings can be overwritten by an asset if the asset.data contains + any information regarding those settings. + + Examples of settings: + fps + resolution + renderer + + Returns: + None + """ + + + # Set project fps + set_scene_fps(get_fps_for_current_context()) + + reset_scene_resolution() + + # Set frame range. + reset_frame_range() + + # Set colorspace + set_colorspace() + + +# Valid FPS +def validate_fps(): + """Validate current scene FPS and show pop-up when it is incorrect + + Returns: + bool + + """ + + expected_fps = get_fps_for_current_context() + current_fps = mel.eval("currentTimeUnitToFPS()") + + fps_match = current_fps == expected_fps + if not fps_match and not IS_HEADLESS: + from ayon_core.tools.utils import PopupUpdateKeys + + parent = get_main_window() + + dialog = PopupUpdateKeys(parent=parent) + dialog.setModal(True) + dialog.setWindowTitle("Maya scene does not match project FPS") + dialog.set_message( + "Scene {} FPS does not match project {} FPS".format( + current_fps, expected_fps + ) + ) + dialog.set_button_text("Fix") + + # Set new text for button (add optional argument for the popup?) + def on_click(update): + set_scene_fps(expected_fps, update) + + dialog.on_clicked_state.connect(on_click) + dialog.show() + + return False + + return fps_match + + +def bake(nodes, + frame_range=None, + step=1.0, + simulation=True, + preserve_outside_keys=False, + disable_implicit_control=True, + shape=True): + """Bake the given nodes over the time range. + + This will bake all attributes of the node, including custom attributes. + + Args: + nodes (list): Names of transform nodes, eg. camera, light. + frame_range (list): frame range with start and end frame. + or if None then takes timeSliderRange + simulation (bool): Whether to perform a full simulation of the + attributes over time. + preserve_outside_keys (bool): Keep keys that are outside of the baked + range. 
+ disable_implicit_control (bool): When True will disable any + constraints to the object. + shape (bool): When True also bake attributes on the children shapes. + step (float): The step size to sample by. + + Returns: + None + + """ + + # Parse inputs + if not nodes: + return + + assert isinstance(nodes, (list, tuple)), "Nodes must be a list or tuple" + + # If frame range is None fall back to time slider playback time range + if frame_range is None: + frame_range = [cmds.playbackOptions(query=True, minTime=True), + cmds.playbackOptions(query=True, maxTime=True)] + + # If frame range is single frame bake one frame more, + # otherwise maya.cmds.bakeResults gets confused + if frame_range[1] == frame_range[0]: + frame_range[1] += 1 + + # Bake it + with keytangent_default(in_tangent_type='auto', + out_tangent_type='auto'): + cmds.bakeResults(nodes, + simulation=simulation, + preserveOutsideKeys=preserve_outside_keys, + disableImplicitControl=disable_implicit_control, + shape=shape, + sampleBy=step, + time=(frame_range[0], frame_range[1])) + + +def bake_to_world_space(nodes, + frame_range=None, + simulation=True, + preserve_outside_keys=False, + disable_implicit_control=True, + shape=True, + step=1.0): + """Bake the nodes to world space transformation (incl. other attributes) + + Bakes the transforms to world space (while maintaining all its animated + attributes and settings) by duplicating the node. Then parents it to world + and constrains to the original. + + Other attributes are also baked by connecting all attributes directly. + Baking is then done using Maya's bakeResults command. + + See `bake` for the argument documentation. + + Returns: + list: The newly created and baked node names. + + """ + @contextlib.contextmanager + def _unlock_attr(attr): + """Unlock attribute during context if it is locked""" + if not cmds.getAttr(attr, lock=True): + # If not locked, do nothing + yield + return + try: + cmds.setAttr(attr, lock=False) + yield + finally: + cmds.setAttr(attr, lock=True) + + def _get_attrs(node): + """Workaround for buggy shape attribute listing with listAttr + + This will only return keyable settable attributes that have an + incoming connections (those that have a reason to be baked). + + Technically this *may* fail to return attributes driven by complex + expressions for which maya makes no connections, e.g. doing actual + `setAttr` calls in expressions. + + Arguments: + node (str): The node to list attributes for. + + Returns: + list: Keyable attributes with incoming connections. + The attribute may be locked. + + """ + attrs = cmds.listAttr(node, + write=True, + scalar=True, + settable=True, + connectable=True, + keyable=True, + shortNames=True) or [] + valid_attrs = [] + for attr in attrs: + node_attr = '{0}.{1}'.format(node, attr) + + # Sometimes Maya returns 'non-existent' attributes for shapes + # so we filter those out + if not cmds.attributeQuery(attr, node=node, exists=True): + continue + + # We only need those that have a connection, just to be safe + # that it's actually keyable/connectable anyway. 
+ if cmds.connectionInfo(node_attr, + isDestination=True): + valid_attrs.append(attr) + + return valid_attrs + + transform_attrs = {"t", "r", "s", + "tx", "ty", "tz", + "rx", "ry", "rz", + "sx", "sy", "sz"} + + world_space_nodes = [] + with ExitStack() as stack: + delete_bin = stack.enter_context(delete_after()) + # Create the duplicate nodes that are in world-space connected to + # the originals + for node in nodes: + + # Duplicate the node + short_name = node.rsplit("|", 1)[-1] + new_name = "{0}_baked".format(short_name) + new_node = cmds.duplicate(node, + name=new_name, + renameChildren=True)[0] # noqa + + # Parent new node to world + if cmds.listRelatives(new_node, parent=True): + new_node = cmds.parent(new_node, world=True)[0] + + # Temporarily unlock and passthrough connect all attributes + # so we can bake them over time + # Skip transform attributes because we will constrain them later + attrs = set(_get_attrs(node)) - transform_attrs + for attr in attrs: + orig_node_attr = "{}.{}".format(node, attr) + new_node_attr = "{}.{}".format(new_node, attr) + + # unlock during context to avoid connection errors + stack.enter_context(_unlock_attr(new_node_attr)) + cmds.connectAttr(orig_node_attr, + new_node_attr, + force=True) + + # If shapes are also baked then also temporarily unlock and + # passthrough connect all shape attributes for baking + if shape: + children_shapes = cmds.listRelatives(new_node, + children=True, + fullPath=True, + shapes=True) + if children_shapes: + orig_children_shapes = cmds.listRelatives(node, + children=True, + fullPath=True, + shapes=True) + for orig_shape, new_shape in zip(orig_children_shapes, + children_shapes): + attrs = _get_attrs(orig_shape) + for attr in attrs: + orig_node_attr = "{}.{}".format(orig_shape, attr) + new_node_attr = "{}.{}".format(new_shape, attr) + + # unlock during context to avoid connection errors + stack.enter_context(_unlock_attr(new_node_attr)) + cmds.connectAttr(orig_node_attr, + new_node_attr, + force=True) + + # Constraint transforms + for attr in transform_attrs: + transform_attr = "{}.{}".format(new_node, attr) + stack.enter_context(_unlock_attr(transform_attr)) + delete_bin.extend(cmds.parentConstraint(node, new_node, mo=False)) + delete_bin.extend(cmds.scaleConstraint(node, new_node, mo=False)) + + world_space_nodes.append(new_node) + + bake(world_space_nodes, + frame_range=frame_range, + step=step, + simulation=simulation, + preserve_outside_keys=preserve_outside_keys, + disable_implicit_control=disable_implicit_control, + shape=shape) + + return world_space_nodes + + +def load_capture_preset(data): + """Convert OpenPype Extract Playblast settings to `capture` arguments + + Input data is the settings from: + `project_settings/maya/publish/ExtractPlayblast/capture_preset` + + Args: + data (dict): Capture preset settings from OpenPype settings + + Returns: + dict: `capture.capture` compatible keyword arguments + + """ + + options = dict() + viewport_options = dict() + viewport2_options = dict() + camera_options = dict() + + # Straight key-value match from settings to capture arguments + options.update(data["Codec"]) + options.update(data["Generic"]) + options.update(data["Resolution"]) + + camera_options.update(data['Camera Options']) + viewport_options.update(data["Renderer"]) + + # DISPLAY OPTIONS + disp_options = {} + for key, value in data['Display Options'].items(): + if key.startswith('background'): + # Convert background, backgroundTop, backgroundBottom colors + if len(value) == 4: + # Ignore alpha + convert RGB to 
float + value = [ + float(value[0]) / 255, + float(value[1]) / 255, + float(value[2]) / 255 + ] + disp_options[key] = value + elif key == "displayGradient": + disp_options[key] = value + + options['display_options'] = disp_options + + # Viewport Options has a mixture of Viewport2 Options and Viewport Options + # to pass along to capture. So we'll need to differentiate between the two + VIEWPORT2_OPTIONS = { + "textureMaxResolution", + "renderDepthOfField", + "ssaoEnable", + "ssaoSamples", + "ssaoAmount", + "ssaoRadius", + "ssaoFilterRadius", + "hwFogStart", + "hwFogEnd", + "hwFogAlpha", + "hwFogFalloff", + "hwFogColorR", + "hwFogColorG", + "hwFogColorB", + "hwFogDensity", + "motionBlurEnable", + "motionBlurSampleCount", + "motionBlurShutterOpenFraction", + "lineAAEnable" + } + for key, value in data['Viewport Options'].items(): + + # There are some keys we want to ignore + if key in {"override_viewport_options", "high_quality"}: + continue + + # First handle special cases where we do value conversion to + # separate option values + if key == 'textureMaxResolution': + viewport2_options['textureMaxResolution'] = value + if value > 0: + viewport2_options['enableTextureMaxRes'] = True + viewport2_options['textureMaxResMode'] = 1 + else: + viewport2_options['enableTextureMaxRes'] = False + viewport2_options['textureMaxResMode'] = 0 + + elif key == 'multiSample': + viewport2_options['multiSampleEnable'] = value > 0 + viewport2_options['multiSampleCount'] = value + + elif key == 'alphaCut': + viewport2_options['transparencyAlgorithm'] = 5 + viewport2_options['transparencyQuality'] = 1 + + elif key == 'hwFogFalloff': + # Settings enum value string to integer + viewport2_options['hwFogFalloff'] = int(value) + + # Then handle Viewport 2.0 Options + elif key in VIEWPORT2_OPTIONS: + viewport2_options[key] = value + + # Then assume remainder is Viewport Options + else: + viewport_options[key] = value + + options['viewport_options'] = viewport_options + options['viewport2_options'] = viewport2_options + options['camera_options'] = camera_options + + # use active sound track + scene = capture.parse_active_scene() + options['sound'] = scene['sound'] + + return options + + +def get_attr_in_layer(attr, layer): + """Return attribute value in specified renderlayer. + + Same as cmds.getAttr but this gets the attribute's value in a + given render layer without having to switch to it. + + Warning for parent attribute overrides: + Attributes that have render layer overrides to their parent attribute + are not captured correctly since they do not have a direct connection. + For example, an override to sphere.rotate when querying sphere.rotateX + will not return correctly! + + Note: This is much faster for Maya's renderLayer system, yet the code + does no optimized query for render setup. + + Args: + attr (str): attribute name, ex. "node.attribute" + layer (str): layer name + + Returns: + The return value from `maya.cmds.getAttr` + + """ + + try: + if cmds.mayaHasRenderSetup(): + from . 
import lib_rendersetup
+            return lib_rendersetup.get_attr_in_layer(attr, layer)
+    except AttributeError:
+        pass
+
+    # Ignore complex query if we're in the layer anyway
+    current_layer = cmds.editRenderLayerGlobals(query=True,
+                                                currentRenderLayer=True)
+    if layer == current_layer:
+        return cmds.getAttr(attr)
+
+    connections = cmds.listConnections(attr,
+                                       plugs=True,
+                                       source=False,
+                                       destination=True,
+                                       type="renderLayer") or []
+    # Use a list (not `filter`) so the emptiness check below also works on
+    # Python 3 where `filter` returns a lazy, always-truthy iterator.
+    connections = [conn for conn in connections if conn.endswith(".plug")]
+    if not connections:
+        return cmds.getAttr(attr)
+
+    # Some value types perform a conversion when assigning
+    # TODO: See if there's a maya method to allow this conversion
+    # instead of computing it ourselves.
+    attr_type = cmds.getAttr(attr, type=True)
+    conversion = None
+    if attr_type == "time":
+        conversion = mel.eval('currentTimeUnitToFPS()')  # returns float
+    elif attr_type == "doubleAngle":
+        # Radians to Degrees: 180 / pi
+        # TODO: This will likely only be correct when Maya units are set
+        # to degrees
+        conversion = 57.2957795131
+    elif attr_type == "doubleLinear":
+        raise NotImplementedError("doubleLinear conversion not implemented.")
+
+    for connection in connections:
+        if connection.startswith(layer + "."):
+            attr_split = connection.split(".")
+            if attr_split[0] == layer:
+                attr = ".".join(attr_split[0:-1])
+                value = cmds.getAttr("%s.value" % attr)
+                if conversion:
+                    value *= conversion
+                return value
+
+    else:
+        # When connections are present, but none
+        # to the specific renderlayer, then the layer
+        # should have the "defaultRenderLayer"'s value
+        layer = "defaultRenderLayer"
+        for connection in connections:
+            if connection.startswith(layer):
+                attr_split = connection.split(".")
+                if attr_split[0] == "defaultRenderLayer":
+                    attr = ".".join(attr_split[0:-1])
+                    value = cmds.getAttr("%s.value" % attr)
+                    if conversion:
+                        value *= conversion
+                    return value
+
+    return cmds.getAttr(attr)
+
+
+def fix_incompatible_containers():
+    """Backwards compatibility: old containers to use new ReferenceLoader"""
+    old_loaders = {
+        "MayaAsciiLoader",
+        "AbcLoader",
+        "ModelLoader",
+        "CameraLoader",
+        "RigLoader",
+        "FBXLoader"
+    }
+    host = registered_host()
+    for container in host.ls():
+        loader = container['loader']
+        if loader in old_loaders:
+            log.info(
+                "Converting legacy container loader {} to "
+                "ReferenceLoader: {}".format(loader, container["objectName"])
+            )
+            cmds.setAttr(container["objectName"] + ".loader",
+                         "ReferenceLoader", type="string")
+
+
+def _null(*args):
+    pass
+
+
+class shelf():
+    '''A simple class to build shelves in maya. Since the base build method
+    only processes the optional preset, it should be extended by the derived
+    class to build custom shelf elements. 
By default it creates an empty shelf called "customShelf".'''

+    ###########################################################################
+    '''This is an example shelf.'''
+    # class customShelf(_shelf):
+    #     def build(self):
+    #         self.addButon(label="button1")
+    #         self.addButon("button2")
+    #         self.addButon("popup")
+    #         p = cmds.popupMenu(b=1)
+    #         self.addMenuItem(p, "popupMenuItem1")
+    #         self.addMenuItem(p, "popupMenuItem2")
+    #         sub = self.addSubMenu(p, "subMenuLevel1")
+    #         self.addMenuItem(sub, "subMenuLevel1Item1")
+    #         sub2 = self.addSubMenu(sub, "subMenuLevel2")
+    #         self.addMenuItem(sub2, "subMenuLevel2Item1")
+    #         self.addMenuItem(sub2, "subMenuLevel2Item2")
+    #         self.addMenuItem(sub, "subMenuLevel1Item2")
+    #         self.addMenuItem(p, "popupMenuItem3")
+    #         self.addButon("button3")
+    # customShelf()
+    ###########################################################################
+
+    def __init__(self, name="customShelf", iconPath="", preset=None):
+        self.name = name
+
+        self.iconPath = iconPath
+
+        self.labelBackground = (0, 0, 0, 0)
+        self.labelColour = (.9, .9, .9)
+
+        # Avoid a mutable default argument; fall back to an empty preset.
+        self.preset = preset or {}
+
+        self._cleanOldShelf()
+        cmds.setParent(self.name)
+        self.build()
+
+    def build(self):
+        '''Build the shelf from the provided preset. Derived classes may
+        override this method to build custom shelf elements.'''
+        for item in self.preset.get('items', []):
+            if not item.get('command'):
+                item['command'] = _null
+            if item['type'] == 'button':
+                self.addButon(item['name'],
+                              command=item['command'],
+                              icon=item['icon'])
+            elif item['type'] == 'menuItem':
+                self.addMenuItem(item['parent'],
+                                 item['name'],
+                                 command=item['command'],
+                                 icon=item['icon'])
+            elif item['type'] == 'subMenu':
+                self.addSubMenu(item['parent'],
+                                item['name'],
+                                icon=item['icon'])
+
+    def addButon(self, label, icon="commandButton.png",
+                 command=_null, doubleCommand=_null):
+        '''
+        Adds a shelf button with the specified label, command,
+        double click command and image.
+        '''
+        cmds.setParent(self.name)
+        if icon:
+            icon = os.path.join(self.iconPath, icon)
+            print(icon)
+        cmds.shelfButton(width=37, height=37, image=icon, label=label,
+                         command=command, dcc=doubleCommand,
+                         imageOverlayLabel=label, olb=self.labelBackground,
+                         olc=self.labelColour)
+
+    def addMenuItem(self, parent, label, command=_null, icon=""):
+        '''
+        Adds a menu item to the given parent popup menu with the
+        specified label, command and image.
+        '''
+        if icon:
+            icon = os.path.join(self.iconPath, icon)
+            print(icon)
+        return cmds.menuItem(p=parent, label=label, c=command, i=icon)
+
+    def addSubMenu(self, parent, label, icon=""):
+        '''
+        Adds a sub menu item with the specified label and icon to
+        the specified parent popup menu.
+        '''
+        if icon:
+            icon = os.path.join(self.iconPath, icon)
+            print(icon)
+        return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1)
+
+    def _cleanOldShelf(self):
+        '''
+        Checks if the shelf exists and empties it if it does
+        or creates it if it does not.
+ ''' + if cmds.shelfLayout(self.name, ex=1): + if cmds.shelfLayout(self.name, q=1, ca=1): + for each in cmds.shelfLayout(self.name, q=1, ca=1): + cmds.deleteUI(each) + else: + cmds.shelfLayout(self.name, p="ShelfLayout") + + +def update_content_on_context_change(): + """ + This will update scene content to match new asset on context change + """ + scene_sets = cmds.listSets(allSets=True) + asset_doc = get_current_project_asset() + new_asset = asset_doc["name"] + new_data = asset_doc["data"] + for s in scene_sets: + try: + if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance": + attr = cmds.listAttr(s) + print(s) + if "asset" in attr: + print(" - setting asset to: [ {} ]".format(new_asset)) + cmds.setAttr("{}.asset".format(s), + new_asset, type="string") + if "frameStart" in attr: + cmds.setAttr("{}.frameStart".format(s), + new_data["frameStart"]) + if "frameEnd" in attr: + cmds.setAttr("{}.frameEnd".format(s), + new_data["frameEnd"],) + except ValueError: + pass + + +def show_message(title, msg): + from qtpy import QtWidgets + from ayon_core.tools.utils import show_message_dialog + + # Find maya main window + top_level_widgets = {w.objectName(): w for w in + QtWidgets.QApplication.topLevelWidgets()} + + parent = top_level_widgets.get("MayaWindow", None) + if parent is not None: + show_message_dialog(title=title, message=msg, parent=parent) + + +def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None): + """Yield edits as a set of actions.""" + + attributes = relationships.get("attributes", []) + shader_data = relationships.get("relationships", {}) + + shading_engines = cmds.ls(shader_nodes, type="objectSet", long=True) + assert shading_engines, "Error in retrieving objectSets from reference" + + # region compute lookup + shading_engines_by_id = defaultdict(list) + for shad in shading_engines: + shading_engines_by_id[get_id(shad)].append(shad) + # endregion + + # region assign shading engines and other sets + for data in shader_data.values(): + # collect all unique IDs of the set members + shader_uuid = data["uuid"] + member_uuids = [ + (member["uuid"], member.get("components")) + for member in data["members"]] + + filtered_nodes = list() + for _uuid, components in member_uuids: + nodes = nodes_by_id.get(_uuid, None) + if nodes is None: + continue + + if components: + # Assign to the components + nodes = [".".join([node, components]) for node in nodes] + + filtered_nodes.extend(nodes) + + id_shading_engines = shading_engines_by_id[shader_uuid] + if not id_shading_engines: + log.error("{} - No shader found with cbId " + "'{}'".format(label, shader_uuid)) + continue + elif len(id_shading_engines) > 1: + log.error("{} - Skipping shader assignment. " + "More than one shader found with cbId " + "'{}'. 
(found: {})".format(label, shader_uuid, + id_shading_engines)) + continue + + if not filtered_nodes: + log.warning("{} - No nodes found for shading engine " + "'{}'".format(label, id_shading_engines[0])) + continue + + yield {"action": "assign", + "uuid": data["uuid"], + "nodes": filtered_nodes, + "shader": id_shading_engines[0]} + + for data in attributes: + nodes = nodes_by_id.get(data["uuid"], []) + attr_value = data["attributes"] + yield {"action": "setattr", + "uuid": data["uuid"], + "nodes": nodes, + "attributes": attr_value} + + +def set_colorspace(): + """Set Colorspace from project configuration""" + + project_name = get_current_project_name() + imageio = get_project_settings(project_name)["maya"]["imageio"] + + # ocio compatibility variables + ocio_v2_maya_version = 2022 + maya_version = int(cmds.about(version=True)) + ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version + is_ocio_set = bool(os.environ.get("OCIO")) + + use_workfile_settings = imageio.get("workfile", {}).get("enabled") + if use_workfile_settings: + root_dict = imageio["workfile"] + else: + # TODO: deprecated code from 3.15.5 - remove + # Maya 2022+ introduces new OCIO v2 color management settings that + # can override the old color management preferences. OpenPype has + # separate settings for both so we fall back when necessary. + use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"] + if use_ocio_v2 and not ocio_v2_support: + # Fallback to legacy behavior with a warning + log.warning( + "Color Management Preference v2 is enabled but not " + "supported by current Maya version: {} (< {}). Falling " + "back to legacy settings.".format( + maya_version, ocio_v2_maya_version) + ) + + if use_ocio_v2: + root_dict = imageio["colorManagementPreference_v2"] + else: + root_dict = imageio["colorManagementPreference"] + + if not isinstance(root_dict, dict): + msg = "set_colorspace(): argument should be dictionary" + log.error(msg) + return + + # backward compatibility + # TODO: deprecated code from 3.15.5 - remove with deprecated code above + view_name = root_dict.get("viewTransform") + if view_name is None: + view_name = root_dict.get("viewName") + + log.debug(">> root_dict: {}".format(pformat(root_dict))) + if not root_dict: + return + + # set color spaces for rendering space and view transforms + def _colormanage(**kwargs): + """Wrapper around `cmds.colorManagementPrefs`. + + This logs errors instead of raising an error so color management + settings get applied as much as possible. + + """ + assert len(kwargs) == 1, "Must receive one keyword argument" + try: + cmds.colorManagementPrefs(edit=True, **kwargs) + log.debug("Setting Color Management Preference: {}".format(kwargs)) + except RuntimeError as exc: + log.error(exc) + + # enable color management + cmds.colorManagementPrefs(edit=True, cmEnabled=True) + cmds.colorManagementPrefs(edit=True, ocioRulesEnabled=True) + + if use_ocio_v2: + log.info("Using Maya OCIO v2") + if not is_ocio_set: + # Set the Maya 2022+ default OCIO v2 config file path + log.info("Setting default Maya OCIO v2 config") + # Note: Setting "" as value also sets this default however + # introduces a bug where launching a file on startup will prompt + # to save the empty scene before it, so we set using the path. 
+            # This value has been the same for 2022, 2023 and 2024
+            path = "<MAYA_RESOURCES>/OCIO-configs/Maya2022-default/config.ocio"
+            cmds.colorManagementPrefs(edit=True, configFilePath=path)
+
+        # set rendering space and view transform
+        _colormanage(renderingSpaceName=root_dict["renderSpace"])
+        _colormanage(viewName=view_name)
+        _colormanage(displayName=root_dict["displayName"])
+    else:
+        log.info("Using Maya OCIO v1 (legacy)")
+        if not is_ocio_set:
+            # Set the Maya default config file path
+            log.info("Setting default Maya OCIO v1 legacy config")
+            cmds.colorManagementPrefs(edit=True, configFilePath="legacy")
+
+        # set rendering space and view transform
+        _colormanage(renderingSpaceName=root_dict["renderSpace"])
+        _colormanage(viewTransformName=view_name)
+
+
+@contextlib.contextmanager
+def parent_nodes(nodes, parent=None):
+    # type: (list, str) -> list
+    """Context manager to un-parent provided nodes and return them back."""
+
+    def _as_mdagpath(node):
+        """Return MDagPath for node path."""
+        if not node:
+            return
+        sel = OpenMaya.MSelectionList()
+        sel.add(node)
+        return sel.getDagPath(0)
+
+    # We can only parent dag nodes so we ensure input contains only dag nodes
+    nodes = cmds.ls(nodes, type="dagNode", long=True)
+    if not nodes:
+        # opt-out early
+        yield
+        return
+
+    parent_node_path = None
+    delete_parent = False
+    if parent:
+        if not cmds.objExists(parent):
+            parent_node = cmds.createNode("transform",
+                                          name=parent,
+                                          skipSelect=False)
+            delete_parent = True
+        else:
+            parent_node = parent
+        parent_node_path = cmds.ls(parent_node, long=True)[0]
+
+    # Store original parents
+    node_parents = []
+    for node in nodes:
+        node_parent = get_node_parent(node)
+        node_parents.append((_as_mdagpath(node), _as_mdagpath(node_parent)))
+
+    try:
+        for node, node_parent in node_parents:
+            node_parent_path = node_parent.fullPathName() if node_parent else None  # noqa
+            if node_parent_path == parent_node_path:
+                # Already a child
+                continue
+
+            if parent_node_path:
+                cmds.parent(node.fullPathName(), parent_node_path)
+            else:
+                cmds.parent(node.fullPathName(), world=True)
+
+        yield
+    finally:
+        # Reparent to original parents
+        for node, original_parent in node_parents:
+            node_path = node.fullPathName()
+            if not node_path:
+                # Node must have been deleted
+                continue
+
+            node_parent_path = get_node_parent(node_path)
+
+            original_parent_path = None
+            if original_parent:
+                original_parent_path = original_parent.fullPathName()
+                if not original_parent_path:
+                    # Original parent node must have been deleted
+                    continue
+
+            if node_parent_path != original_parent_path:
+                if not original_parent_path:
+                    cmds.parent(node_path, world=True)
+                else:
+                    cmds.parent(node_path, original_parent_path)
+
+        if delete_parent:
+            cmds.delete(parent_node_path)
+
+
+@contextlib.contextmanager
+def maintained_time():
+    ct = cmds.currentTime(query=True)
+    try:
+        yield
+    finally:
+        cmds.currentTime(ct, edit=True)
+
+
+def iter_visible_nodes_in_range(nodes, start, end):
+    """Yield nodes that are visible in start-end frame range.
+
+    - Ignores intermediateObjects completely.
+    - Considers animated visibility attributes + upstream visibilities.
+
+    This is optimized for large scenes where some nodes in the parent
+    hierarchy might have some input connections to the visibilities,
+    e.g. key, driven keys, connections to other attributes, etc.
+
+    This only does a single time step to `start` if current frame is
+    not inside frame range since the assumption is made that changing
+    a frame isn't so slow that it beats querying all visibility
+    plugs through MDGContext on another frame.
+
+    Args:
+        nodes (list): List of node names to consider.
+        start (int, float): Start frame.
+        end (int, float): End frame.
+
+    Returns:
+        list: List of node names. These will be long full path names so
+            might have a longer name than the input nodes.
+
+    """
+    # States we consider per node
+    VISIBLE = 1  # always visible
+    INVISIBLE = 0  # always invisible
+    ANIMATED = -1  # animated visibility
+
+    # Ensure integers
+    start = int(start)
+    end = int(end)
+
+    # Consider only non-intermediate dag nodes and use the "long" names.
+    nodes = cmds.ls(nodes, long=True, noIntermediate=True, type="dagNode")
+    if not nodes:
+        return
+
+    with maintained_time():
+        # Go to first frame of the range if the current time is outside
+        # the queried range so can directly query all visible nodes on
+        # that frame.
+        current_time = cmds.currentTime(query=True)
+        if not (start <= current_time <= end):
+            cmds.currentTime(start)
+
+        visible = cmds.ls(nodes, long=True, visible=True)
+        for node in visible:
+            yield node
+        if len(visible) == len(nodes) or start == end:
+            # All are visible on the queried frame, so every node is
+            # visible at least once inside the frame range.
+            return
+
+    # For the invisible ones check whether their visibility and/or
+    # any of their parents' visibility attributes are animated. If so,
+    # they might become visible on other frames in the range.
+    def memodict(f):
+        """Memoization decorator for a function taking a single argument.
+
+        See: http://code.activestate.com/recipes/
+             578231-probably-the-fastest-memoization-decorator-in-the-/
+        """
+
+        class memodict(dict):
+            def __missing__(self, key):
+                ret = self[key] = f(key)
+                return ret
+
+        return memodict().__getitem__
+
+    @memodict
+    def get_state(node):
+        plug = node + ".visibility"
+        connections = cmds.listConnections(plug,
+                                           source=True,
+                                           destination=False)
+        if connections:
+            return ANIMATED
+        else:
+            return VISIBLE if cmds.getAttr(plug) else INVISIBLE
+
+    visible = set(visible)
+    invisible = [node for node in nodes if node not in visible]
+    always_invisible = set()
+    # Iterate over the nodes by short to long names to iterate the highest
+    # in hierarchy nodes first. So the collected data can be used from the
+    # cache for parent queries in next iterations.
+    node_dependencies = dict()
+    for node in sorted(invisible, key=len):
+
+        state = get_state(node)
+        if state == INVISIBLE:
+            always_invisible.add(node)
+            continue
+
+        # If not always invisible by itself we should go through and check
+        # the parents to see if any of them are always invisible. For those
+        # that are "ANIMATED" we consider that this node is dependent on
+        # that attribute, we store them as dependency.
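+        # E.g. a node with keyed visibility under a parent that also has
+        # keyed visibility depends on both plugs; each dependency is
+        # checked per frame further below.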
+        dependencies = set()
+        if state == ANIMATED:
+            dependencies.add(node)
+
+        traversed_parents = list()
+        for parent in iter_parents(node):
+
+            if parent in always_invisible or get_state(parent) == INVISIBLE:
+                # When parent is always invisible then consider this parent,
+                # this node we started from and any of the parents we
+                # have traversed in-between to be *always invisible*
+                always_invisible.add(parent)
+                always_invisible.add(node)
+                always_invisible.update(traversed_parents)
+                break
+
+            # If we have traversed the parent before and its visibility
+            # was dependent on animated visibilities then we can just extend
+            # this node's dependencies with those of the parent and break
+            # further iteration upwards.
+            parent_dependencies = node_dependencies.get(parent, None)
+            if parent_dependencies is not None:
+                dependencies.update(parent_dependencies)
+                break
+
+            state = get_state(parent)
+            if state == ANIMATED:
+                dependencies.add(parent)
+
+            traversed_parents.append(parent)
+
+        if node not in always_invisible and dependencies:
+            node_dependencies[node] = dependencies
+
+    if not node_dependencies:
+        return
+
+    # Now we only have to check the visibilities for nodes that have animated
+    # visibility dependencies upstream. The fastest way to check these
+    # visibility attributes across different frames is with Python api 2.0
+    # so we do that.
+    @memodict
+    def get_visibility_mplug(node):
+        """Return api 2.0 MPlug with cached memoize decorator"""
+        sel = OpenMaya.MSelectionList()
+        sel.add(node)
+        dag = sel.getDagPath(0)
+        return OpenMaya.MFnDagNode(dag).findPlug("visibility", True)
+
+    @contextlib.contextmanager
+    def dgcontext(mtime):
+        """MDGContext context manager"""
+        context = OpenMaya.MDGContext(mtime)
+        try:
+            previous = context.makeCurrent()
+            yield context
+        finally:
+            previous.makeCurrent()
+
+    # We skip the first frame as we already used that frame to check for
+    # overall visibilities. And end+1 to include the end frame.
+    scene_units = OpenMaya.MTime.uiUnit()
+    for frame in range(start + 1, end + 1):
+        mtime = OpenMaya.MTime(frame, unit=scene_units)
+
+        # Build little cache so we don't query the same MPlug's value
+        # again if it was checked on this frame and also is a dependency
+        # for another node
+        frame_visibilities = {}
+        with dgcontext(mtime) as context:
+            for node, dependencies in list(node_dependencies.items()):
+                for dependency in dependencies:
+                    dependency_visible = frame_visibilities.get(dependency,
+                                                                None)
+                    if dependency_visible is None:
+                        mplug = get_visibility_mplug(dependency)
+                        dependency_visible = mplug.asBool(context)
+                        frame_visibilities[dependency] = dependency_visible
+
+                    if not dependency_visible:
+                        # One dependency is not visible, thus the
+                        # node is not visible.
+                        break
+
+                else:
+                    # All dependencies are visible.
+                    yield node
+                    # Remove node with dependencies for next frame iterations
+                    # because it was visible at least once.
+                    node_dependencies.pop(node)
+
+        # If no more nodes to process break the frame iterations.
+        if not node_dependencies:
+            break
+
+
+def get_attribute_input(attr):
+    connections = cmds.listConnections(attr, plugs=True, destination=False)
+    return connections[0] if connections else None
+
+
+def convert_to_maya_fps(fps):
+    """Convert any fps to supported Maya framerates."""
+    float_framerates = [
+        23.976023976023978,
+        # Note: unclear whether 29.97 should be treated as drop-frame here
+        29.97002997002997,
+        47.952047952047955,
+        59.94005994005994
+    ]
+    # 44100 fps evaluates as 41000.0. Why? Omitting for now.
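+    # Whole-number framerates that Maya accepts for its time unit.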
+    int_framerates = [
+        2,
+        3,
+        4,
+        5,
+        6,
+        8,
+        10,
+        12,
+        15,
+        16,
+        20,
+        24,
+        25,
+        30,
+        40,
+        48,
+        50,
+        60,
+        75,
+        80,
+        90,
+        100,
+        120,
+        125,
+        150,
+        200,
+        240,
+        250,
+        300,
+        375,
+        400,
+        500,
+        600,
+        750,
+        1200,
+        1500,
+        2000,
+        3000,
+        6000,
+        48000
+    ]
+
+    # If input fps is a whole number we'll return it as int.
+    if float(fps).is_integer():
+        # Validate fps is part of Maya's fps selection.
+        if int(fps) not in int_framerates:
+            raise ValueError(
+                "Framerate \"{}\" is not supported in Maya".format(fps)
+            )
+        return int(fps)
+    else:
+        # Differences to supported float frame rates.
+        differences = []
+        for i in float_framerates:
+            differences.append(abs(i - fps))
+
+        # Validate difference does not stray too far from supported framerates.
+        min_difference = min(differences)
+        min_index = differences.index(min_difference)
+        supported_framerate = float_framerates[min_index]
+        if min_difference > 0.1:
+            raise ValueError(
+                "Framerate \"{}\" strays too far from any supported framerate"
+                " in Maya. Closest supported framerate is \"{}\"".format(
+                    fps, supported_framerate
+                )
+            )
+
+        return supported_framerate
+
+
+def write_xgen_file(data, filepath):
+    """Overwrites data in .xgen files.
+
+    Quite naive approach to mainly overwrite "xgDataPath" and "xgProjectPath".
+
+    Args:
+        data (dict): Dictionary of key, value. Key matches with xgen file.
+            For example:
+                {"xgDataPath": "some/path"}
+        filepath (string): Absolute path of .xgen file.
+    """
+    # Generate a regex to look up a line by key, i.e. match any of the
+    # keys in `\t{key}\t\t`
+    keys = "|".join(re.escape(key) for key in data.keys())
+    re_keys = re.compile("^\t({})\t\t".format(keys))
+
+    lines = []
+    with open(filepath, "r") as f:
+        for line in f:
+            match = re_keys.match(line)
+            if match:
+                key = match.group(1)
+                value = data[key]
+                line = "\t{}\t\t{}\n".format(key, value)
+
+            lines.append(line)
+
+    with open(filepath, "w") as f:
+        f.writelines(lines)
+
+
+def get_color_management_preferences():
+    """Get and resolve OCIO preferences."""
+    data = {
+        # Is color management enabled.
+        "enabled": cmds.colorManagementPrefs(
+            query=True, cmEnabled=True
+        ),
+        "rendering_space": cmds.colorManagementPrefs(
+            query=True, renderingSpaceName=True
+        ),
+        "output_transform": cmds.colorManagementPrefs(
+            query=True, outputTransformName=True
+        ),
+        "output_transform_enabled": cmds.colorManagementPrefs(
+            query=True, outputTransformEnabled=True
+        ),
+        "view_transform": cmds.colorManagementPrefs(
+            query=True, viewTransformName=True
+        )
+    }
+
+    # Split view and display from view_transform. view_transform comes in
+    # format of "{view} ({display})".
+    regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
+    if int(cmds.about(version=True)) <= 2020:
+        # view_transform comes in format of "{view} {display}" in 2020.
+        regex = re.compile(r"^(?P<view>.+) (?P<display>.+)$")
+
+    match = regex.match(data["view_transform"])
+    if not match:
+        raise ValueError(
+            "Unable to parse view and display from Maya view transform: '{}' "
+            "using regex '{}'".format(data["view_transform"], regex.pattern)
+        )
+
+    data.update({
+        "display": match.group("display"),
+        "view": match.group("view")
+    })
+
+    # Get config absolute path.
+    path = cmds.colorManagementPrefs(
+        query=True, configFilePath=True
+    )
+
+    # The OCIO config supports a custom <MAYA_RESOURCES> token.
+    maya_resources_token = "<MAYA_RESOURCES>"
+    maya_resources_path = OpenMaya.MGlobal.getAbsolutePathToResources()
+    path = path.replace(maya_resources_token, maya_resources_path)
+
+    data["config"] = path
+
+    return data
+
+
+def get_color_management_output_transform():
+    preferences = get_color_management_preferences()
+    colorspace = preferences["rendering_space"]
+    if preferences["output_transform_enabled"]:
+        colorspace = preferences["output_transform"]
+    return colorspace
+
+
+def image_info(file_path):
+    # type: (str) -> dict
+    """Based on the texture path, get its bit depth and format information.
+    Take reference from makeTx.py in Arnold:
+        ImageInfo(filename): Get Image Information for colorspace
+        AiTextureGetFormat(filename): Get Texture Format
+        AiTextureGetBitDepth(filename): Get Texture bit depth
+    Args:
+        file_path (str): Path to the texture file.
+    Returns:
+        dict: Dictionary with the information about the texture file.
+    """
+    from arnold import (
+        AiTextureGetBitDepth,
+        AiTextureGetFormat
+    )
+    # Get Texture Information
+    img_info = {'filename': file_path}
+    if os.path.isfile(file_path):
+        img_info['bit_depth'] = AiTextureGetBitDepth(file_path)  # noqa
+        img_info['format'] = AiTextureGetFormat(file_path)  # noqa
+    else:
+        img_info['bit_depth'] = 8
+        img_info['format'] = "unknown"
+    return img_info
+
+
+def guess_colorspace(img_info):
+    # type: (dict) -> str
+    """Guess the colorspace of the input image filename.
+    Note:
+        Reference from makeTx.py
+    Args:
+        img_info (dict): Image info generated by :func:`image_info`
+    Returns:
+        str: color space name used in the `--colorconvert`
+             option of maketx.
+    """
+    from arnold import (
+        AiTextureInvalidate,
+        # types
+        AI_TYPE_BYTE,
+        AI_TYPE_INT,
+        AI_TYPE_UINT
+    )
+    try:
+        if img_info['bit_depth'] <= 16:
+            if img_info['format'] in (AI_TYPE_BYTE, AI_TYPE_INT, AI_TYPE_UINT):  # noqa
+                return 'sRGB'
+            else:
+                return 'linear'
+        # now discard the image file as AiTextureGetFormat has loaded it
+        AiTextureInvalidate(img_info['filename'])  # noqa
+        # Higher bit depths (float formats) are assumed to be linear
+        return 'linear'
+    except ValueError:
+        print(("[maketx] Error: Could not guess "
+               "colorspace for {}").format(img_info["filename"]))
+        return "linear"
+
+
+def len_flattened(components):
+    """Return the length of the list as if it was flattened.
+
+    Maya will return consecutive components as a single entry
+    when requesting with `maya.cmds.ls` without the `flatten`
+    flag. Though enabling `flatten` on a large list (e.g. millions)
+    will be very slow. This command will return the amount
+    of entries in a non-flattened list by parsing the result with
+    regex.
+
+    Args:
+        components (list): The non-flattened components.
+
+    Returns:
+        int: The amount of entries.
+
+    """
+    assert isinstance(components, (list, tuple))
+    n = 0
+
+    pattern = re.compile(r"\[(\d+):(\d+)\]")
+    for c in components:
+        match = pattern.search(c)
+        if match:
+            start, end = match.groups()
+            n += int(end) - int(start) + 1
+        else:
+            n += 1
+    return n
+
+
+def get_all_children(nodes):
+    """Return all children of `nodes` including each instanced child.
+    Using maya.cmds.listRelatives(allDescendents=True) includes only the first
+    instance. As such, this function acts as an optimal replacement with a
+    focus on a fast query.
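+
+    Example:
+        >>> # Illustrative only: node names below are hypothetical.
+        >>> get_all_children(["|group1"])  # doctest: +SKIP
+        ['|group1|child', '|group1|child|grandchild']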
+
+    """
+
+    sel = OpenMaya.MSelectionList()
+    traversed = set()
+    iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst)
+    for node in nodes:
+
+        if node in traversed:
+            # Ignore if already processed as a child
+            # before
+            continue
+
+        sel.clear()
+        sel.add(node)
+        dag = sel.getDagPath(0)
+
+        iterator.reset(dag)
+        # ignore self
+        iterator.next()  # noqa: B305
+        while not iterator.isDone():
+
+            path = iterator.fullPathName()
+
+            if path in traversed:
+                iterator.prune()
+                iterator.next()  # noqa: B305
+                continue
+
+            traversed.add(path)
+            iterator.next()  # noqa: B305
+
+    return list(traversed)
+
+
+def get_capture_preset(task_name, task_type, subset, project_settings, log):
+    """Get capture preset for playblasting.
+
+    Logic for transitioning from old style capture preset to new capture
+    preset profiles.
+
+    Args:
+        task_name (str): Task name.
+        task_type (str): Task type.
+        subset (str): Subset name.
+        project_settings (dict): Project settings.
+        log (object): Logging object.
+
+    Returns:
+        dict: Capture preset settings.
+    """
+    capture_preset = None
+    filtering_criteria = {
+        "hosts": "maya",
+        "families": "review",
+        "task_names": task_name,
+        "task_types": task_type,
+        "subset": subset
+    }
+
+    plugin_settings = project_settings["maya"]["publish"]["ExtractPlayblast"]
+    if plugin_settings["profiles"]:
+        profile = filter_profiles(
+            plugin_settings["profiles"],
+            filtering_criteria,
+            logger=log
+        )
+        # Guard against no matching profile being found
+        if profile:
+            capture_preset = profile.get("capture_preset")
+    else:
+        log.warning("No profiles present for Extract Playblast")
+
+    # Backward compatibility for deprecated Extract Playblast settings
+    # without profiles.
+    if capture_preset is None:
+        log.debug(
+            "Falling back to deprecated Extract Playblast capture preset "
+            "because no new style playblast profiles are defined."
+        )
+        capture_preset = plugin_settings["capture_preset"]
+
+    return capture_preset or {}
+
+
+def get_reference_node(members, log=None):
+    """Get the reference node from the container members
+
+    Args:
+        members: list of node names
+
+    Returns:
+        str: Reference node name.
+
+    """
+
+    # Collect the references without .placeHolderList[] attributes as
+    # unique entries (objects only) and skipping the sharedReferenceNode.
+    references = set()
+    for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
+
+        # Ignore any `:sharedReferenceNode`
+        if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
+            continue
+
+        # Ignore _UNKNOWN_REF_NODE_ (PLN-160)
+        if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
+            continue
+
+        references.add(ref)
+
+    assert references, "No reference node found in container"
+
+    # Get highest reference node (least parents)
+    highest = min(references,
+                  key=lambda x: len(get_reference_node_parents(x)))
+
+    # Warn the user when we're taking the highest reference node
+    if len(references) > 1:
+        if not log:
+            log = logging.getLogger(__name__)
+
+        log.warning("More than one reference node found in "
+                    "container, using highest reference node: "
+                    "%s (in: %s)", highest, list(references))
+
+    return highest
+
+
+def get_reference_node_parents(ref):
+    """Return all parent reference nodes of reference node
+
+    Args:
+        ref (str): reference node.
+
+    Returns:
+        list: The upstream parent reference nodes.
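+
+    Example:
+        >>> # Illustrative only: assumes a reference "assetRN" nested
+        >>> # inside "shotRN"; both names are hypothetical.
+        >>> get_reference_node_parents("assetRN")  # doctest: +SKIP
+        ['shotRN']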
+ + """ + parent = cmds.referenceQuery(ref, + referenceNode=True, + parent=True) + parents = [] + while parent: + parents.append(parent) + parent = cmds.referenceQuery(parent, + referenceNode=True, + parent=True) + return parents + + +def create_rig_animation_instance( + nodes, context, namespace, options=None, log=None +): + """Create an animation publish instance for loaded rigs. + + See the RecreateRigAnimationInstance inventory action on how to use this + for loaded rig containers. + + Arguments: + nodes (list): Member nodes of the rig instance. + context (dict): Representation context of the rig container + namespace (str): Namespace of the rig container + options (dict, optional): Additional loader data + log (logging.Logger, optional): Logger to log to if provided + + Returns: + None + + """ + if options is None: + options = {} + name = context["representation"]["name"] + output = next((node for node in nodes if + node.endswith("out_SET")), None) + controls = next((node for node in nodes if + node.endswith("controls_SET")), None) + if name != "fbx": + assert output, "No out_SET in rig, this is a bug." + assert controls, "No controls_SET in rig, this is a bug." + + anim_skeleton = next((node for node in nodes if + node.endswith("skeletonAnim_SET")), None) + skeleton_mesh = next((node for node in nodes if + node.endswith("skeletonMesh_SET")), None) + + # Find the roots amongst the loaded nodes + roots = ( + cmds.ls(nodes, assemblies=True, long=True) or + get_highest_in_hierarchy(nodes) + ) + assert roots, "No root nodes in rig, this is a bug." + + custom_subset = options.get("animationSubsetName") + if custom_subset: + formatting_data = { + "asset": context["asset"], + "subset": context['subset']['name'], + "family": ( + context['subset']['data'].get('family') or + context['subset']['data']['families'][0] + ) + } + namespace = get_custom_namespace( + custom_subset.format( + **formatting_data + ) + ) + + if log: + log.info("Creating subset: {}".format(namespace)) + + # Fill creator identifier + creator_identifier = "io.openpype.creators.maya.animation" + + host = registered_host() + create_context = CreateContext(host) + # Create the animation instance + rig_sets = [output, controls, anim_skeleton, skeleton_mesh] + # Remove sets that this particular rig does not have + rig_sets = [s for s in rig_sets if s is not None] + with maintained_selection(): + cmds.select(rig_sets + roots, noExpand=True) + create_context.create( + creator_identifier=creator_identifier, + variant=namespace, + pre_create_data={"use_selection": True} + ) diff --git a/client/ayon_core/hosts/maya/api/lib_renderproducts.py b/client/ayon_core/hosts/maya/api/lib_renderproducts.py new file mode 100644 index 0000000000..7f26145e1d --- /dev/null +++ b/client/ayon_core/hosts/maya/api/lib_renderproducts.py @@ -0,0 +1,1453 @@ +# -*- coding: utf-8 -*- +"""Module handling expected render output from Maya. + +This module is used in :mod:`collect_render` and :mod:`collect_vray_scene`. + +Note: + To implement new renderer, just create new class inheriting from + :class:`ARenderProducts` and add it to :func:`RenderProducts.get()`. + +Attributes: + R_SINGLE_FRAME (:class:`re.Pattern`): Find single frame number. + R_FRAME_RANGE (:class:`re.Pattern`): Find frame range. + R_FRAME_NUMBER (:class:`re.Pattern`): Find frame number in string. + R_LAYER_TOKEN (:class:`re.Pattern`): Find layer token in image prefixes. + R_AOV_TOKEN (:class:`re.Pattern`): Find AOV token in image prefixes. 
+    R_SUBSTITUTE_AOV_TOKEN (:class:`re.Pattern`): Find and substitute AOV token
+        in image prefixes.
+    R_REMOVE_AOV_TOKEN (:class:`re.Pattern`): Find and remove AOV token in
+        image prefixes.
+    R_CLEAN_FRAME_TOKEN (:class:`re.Pattern`): Find and remove unfilled
+        Renderman frame token in image prefix.
+    R_CLEAN_EXT_TOKEN (:class:`re.Pattern`): Find and remove unfilled Renderman
+        extension token in image prefix.
+    R_SUBSTITUTE_LAYER_TOKEN (:class:`re.Pattern`): Find and substitute render
+        layer token in image prefixes.
+    R_SUBSTITUTE_SCENE_TOKEN (:class:`re.Pattern`): Find and substitute scene
+        token in image prefixes.
+    R_SUBSTITUTE_CAMERA_TOKEN (:class:`re.Pattern`): Find and substitute camera
+        token in image prefixes.
+    IMAGE_PREFIXES (dict): Mapping between renderers and their respective
+        image prefix attribute names.
+
+Thanks:
+    Roy Nieterau (BigRoy) / Colorbleed for overhaul of original
+    *expected_files*.
+
+"""
+
+import logging
+import re
+import os
+from abc import ABCMeta, abstractmethod
+
+import six
+import attr
+
+from . import lib
+from . import lib_rendersetup
+from ayon_core.pipeline.colorspace import get_ocio_config_views
+
+from maya import cmds, mel
+
+log = logging.getLogger(__name__)
+
+R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
+R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
+R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
+R_LAYER_TOKEN = re.compile(
+    r".*((?:%l)|(?:<layer>)|(?:<renderlayer>)).*", re.IGNORECASE
+)
+R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
+R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
+R_REMOVE_AOV_TOKEN = re.compile(
+    r"_%a|\.%a|_<aov>|\.<aov>|_<renderpass>|\.<renderpass>", re.IGNORECASE)
+# to remove unused renderman tokens
+R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d+>\.?", re.IGNORECASE)
+R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
+
+R_SUBSTITUTE_LAYER_TOKEN = re.compile(
+    r"%l|<layer>|<renderlayer>", re.IGNORECASE
+)
+R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
+R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
+
+# not sure about the renderman image prefix
+IMAGE_PREFIXES = {
+    "vray": "vraySettings.fileNamePrefix",
+    "arnold": "defaultRenderGlobals.imageFilePrefix",
+    "renderman": "rmanGlobals.imageFileFormat",
+    "redshift": "defaultRenderGlobals.imageFilePrefix",
+    "mayahardware2": "defaultRenderGlobals.imageFilePrefix"
+}
+
+RENDERMAN_IMAGE_DIR = "<scene>/<layer>"
+
+
+def has_tokens(string, tokens):
+    """Return whether any of tokens is in input string (case-insensitive)"""
+    pattern = "({})".format("|".join(re.escape(token) for token in tokens))
+    match = re.search(pattern, string, re.IGNORECASE)
+    return bool(match)
+
+
+@attr.s
+class LayerMetadata(object):
+    """Data class for Render Layer metadata."""
+    frameStart = attr.ib()
+    frameEnd = attr.ib()
+    cameras = attr.ib()
+    sceneName = attr.ib()
+    layerName = attr.ib()
+    renderer = attr.ib()
+    defaultExt = attr.ib()
+    filePrefix = attr.ib()
+    frameStep = attr.ib(default=1)
+    padding = attr.ib(default=4)
+
+    # Render Products
+    products = attr.ib(init=False, default=attr.Factory(list))
+
+    # The AOV separator token. Note that not all renderers define an explicit
+    # render separator but allow to put the AOV/RenderPass token anywhere in
+    # the file path prefix. For those renderers we'll fall back to whatever
+    # is between the last occurrences of <layer> and <aov> tokens.
+    aov_separator = attr.ib(default="_")
+
+
+@attr.s
+class RenderProduct(object):
+    """Describes an image or other file-like artifact produced by a render.
+
+    Warning:
+        This currently does NOT return as a product PER render camera.
+        A single Render Product will generate files per camera. E.g. with two
+        cameras each render product generates two sequences on disk assuming
+        the file path prefix correctly uses the tokens.
+
+    """
+    productName = attr.ib()
+    ext = attr.ib()  # extension
+    colorspace = attr.ib()  # colorspace
+    aov = attr.ib(default=None)  # source aov
+    driver = attr.ib(default=None)  # source driver
+    multipart = attr.ib(default=False)  # multichannel file
+    camera = attr.ib(default=None)  # used only when rendering
+    #                                 from multiple cameras
+
+
+def get(layer, render_instance=None):
+    # type: (str, object) -> ARenderProducts
+    """Get render details and products for given renderer and render layer.
+
+    Args:
+        layer (str): Name of render layer
+        render_instance (pyblish.api.Instance): Publish instance.
+            If not provided an empty mock instance is used.
+
+    Returns:
+        ARenderProducts: The correct RenderProducts instance for that
+            renderlayer.
+
+    Raises:
+        :exc:`UnsupportedRendererException`: If requested renderer
+            is not supported. It needs to be implemented by extending
+            :class:`ARenderProducts` and added to this method's ``if``
+            statement.
+
+    """
+
+    if render_instance is None:
+        # For now produce a mock instance
+        class Instance(object):
+            data = {}
+        render_instance = Instance()
+
+    renderer_name = lib.get_attr_in_layer(
+        "defaultRenderGlobals.currentRenderer",
+        layer=layer
+    )
+
+    renderer = {
+        "arnold": RenderProductsArnold,
+        "vray": RenderProductsVray,
+        "redshift": RenderProductsRedshift,
+        "renderman": RenderProductsRenderman,
+        "mayahardware2": RenderProductsMayaHardware
+    }.get(renderer_name.lower(), None)
+    if renderer is None:
+        raise UnsupportedRendererException(
+            "Unsupported renderer: {}".format(renderer_name)
+        )
+
+    return renderer(layer, render_instance)
+
+
+@six.add_metaclass(ABCMeta)
+class ARenderProducts:
+    """Abstract class with common code for all renderers.
+
+    Attributes:
+        renderer (str): name of renderer.
+
+    """
+
+    renderer = None
+
+    def __init__(self, layer, render_instance):
+        """Constructor."""
+        self.layer = layer
+        self.render_instance = render_instance
+        self.multipart = self.get_multipart()
+
+        # Initialize
+        self.layer_data = self._get_layer_data()
+        self.layer_data.products = self.get_render_products()
+
+    def get_multipart(self):
+        raise NotImplementedError(
+            "The render product implementation does not have a "
+            "\"get_multipart\" method."
+        )
+
+    def has_camera_token(self):
+        # type: () -> bool
+        """Check if camera token is in image prefix.
+
+        Returns:
+            bool: True/False if camera token is present.
+
+        """
+        return "<camera>" in self.layer_data.filePrefix.lower()
+
+    @abstractmethod
+    def get_render_products(self):
+        """To be implemented by renderer class.
+
+        This should return a list of RenderProducts.
+
+        Returns:
+            list: List of RenderProduct
+
+        """
+
+    @staticmethod
+    def sanitize_camera_name(camera):
+        # type: (str) -> str
+        """Sanitize camera name.
+
+        Remove Maya illegal characters from camera name.
+
+        Args:
+            camera (str): Maya camera name.
+
+        Returns:
+            (str): sanitized camera name
+
+        Example:
+            >>> ARenderProducts.sanitize_camera_name('test:camera_01')
+            test_camera_01
+
+        """
+        return re.sub('[^0-9a-zA-Z_]+', '_', camera)
+
+    def get_renderer_prefix(self):
+        # type: () -> str
+        """Return prefix for specific renderer.
+
+        This is for most renderers the same and can be overridden if needed.
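+
+        For example, a prefix with tokens might look like
+        ``maya/<Scene>/<RenderLayer>/<RenderLayer>`` (illustrative only,
+        not a value taken from the renderer settings).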
+
+        Returns:
+            str: String with image prefix containing tokens
+
+        Raises:
+            :exc:`UnsupportedRendererException`: If we requested image
+                prefix for renderer we know nothing about.
+                See :data:`IMAGE_PREFIXES` for mapping of renderers and
+                image prefixes.
+
+        """
+        try:
+            prefix_attr = IMAGE_PREFIXES[self.renderer]
+        except KeyError:
+            raise UnsupportedRendererException(
+                "Unsupported renderer {}".format(self.renderer)
+            )
+
+        # Note: When this attribute is never set (e.g. on maya launch) then
+        # this can return None even though it is a string attribute
+        prefix = self._get_attr(prefix_attr)
+
+        if not prefix:
+            # Fall back to scene name by default
+            log.warning("Image prefix not set, using <Scene>")
+            prefix = "<Scene>"
+
+        return prefix
+
+    def get_render_attribute(self, attribute):
+        """Get attribute from render options.
+
+        Args:
+            attribute (str): name of attribute to be looked up.
+
+        Returns:
+            Attribute value
+
+        """
+        return self._get_attr("defaultRenderGlobals", attribute)
+
+    def _get_attr(self, node_attr, attribute=None):
+        """Return the value of the attribute in the renderlayer
+
+        For readability this allows passing in the attribute in two ways.
+
+        As a single argument:
+            _get_attr("node.attr")
+        Or as two arguments:
+            _get_attr("node", "attr")
+
+        Returns:
+            Value of the attribute inside the layer this instance is set to.
+
+        """
+
+        if attribute is None:
+            plug = node_attr
+        else:
+            plug = "{}.{}".format(node_attr, attribute)
+
+        return lib.get_attr_in_layer(plug, layer=self.layer)
+
+    @staticmethod
+    def extract_separator(file_prefix):
+        """Extract AOV separator character from the prefix.
+
+        Default behavior extracts the part between
+        last occurrences of <layer> and <aov>
+
+        Todo:
+            This code also triggers for V-Ray which overrides it explicitly
+            so this code will invalidly debug log it couldn't extract the
+            AOV separator even though it does set it in RenderProductsVray.
+
+        Args:
+            file_prefix (str): File prefix with tokens.
+
+        Returns:
+            str or None: prefix character if it can be extracted.
+        """
+        layer_tokens = ["<renderlayer>", "<layer>"]
+        aov_tokens = ["<aov>", "<renderpass>"]
+
+        def match_last(tokens, text):
+            """regex match the last occurrence from a list of tokens"""
+            pattern = "(?:.*)({})".format("|".join(tokens))
+            return re.search(pattern, text, re.IGNORECASE)
+
+        layer_match = match_last(layer_tokens, file_prefix)
+        aov_match = match_last(aov_tokens, file_prefix)
+        separator = None
+        if layer_match and aov_match:
+            matches = sorted((layer_match, aov_match),
+                             key=lambda match: match.end(1))
+            separator = file_prefix[matches[0].end(1):matches[1].start(1)]
+        return separator
+
+    def _get_layer_data(self):
+        # type: () -> LayerMetadata
+        #                      ______________________________________________
+        # ____________________/ ____________________________________________/
+        # 1 -  get scene name  /__________________/
+        # ____________________/
+        _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
+        scene_name, _ = os.path.splitext(scene_basename)
+        kwargs = {}
+        file_prefix = self.get_renderer_prefix()
+
+        # If the Render Layer belongs to a Render Setup layer then the
+        # output name is based on the Render Setup Layer name without
+        # the `rs_` prefix.
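+        # E.g. a legacy layer "rs_beauty" maps back to Render Setup
+        # layer "beauty".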
+        layer_name = self.layer
+        rs_layer = lib_rendersetup.get_rendersetup_layer(layer_name)
+        if rs_layer:
+            layer_name = rs_layer
+
+        if self.layer == "defaultRenderLayer":
+            # defaultRenderLayer renders as masterLayer
+            layer_name = "masterLayer"
+
+        separator = self.extract_separator(file_prefix)
+        if separator:
+            kwargs["aov_separator"] = separator
+        else:
+            log.debug("Couldn't extract aov separator from "
+                      "file prefix: {}".format(file_prefix))
+
+        # todo: Support Custom Frames sequences 0,5-10,100-120
+        #       Deadline allows submitting renders with a custom frame list
+        #       to support those cases we might want to allow 'custom frames'
+        #       to be overridden to `ExpectFiles` class?
+        return LayerMetadata(
+            frameStart=int(self.get_render_attribute("startFrame")),
+            frameEnd=int(self.get_render_attribute("endFrame")),
+            frameStep=int(self.get_render_attribute("byFrameStep")),
+            padding=int(self.get_render_attribute("extensionPadding")),
+            # if we have <camera> token in prefix path we'll expect output for
+            # every renderable camera in layer.
+            cameras=self.get_renderable_cameras(),
+            sceneName=scene_name,
+            layerName=layer_name,
+            renderer=self.renderer,
+            defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"),
+            filePrefix=file_prefix,
+            **kwargs
+        )
+
+    def _generate_file_sequence(
+            self, layer_data,
+            force_aov_name=None,
+            force_ext=None,
+            force_cameras=None):
+        # type: (LayerMetadata, str, str, list) -> list
+        expected_files = []
+        cameras = force_cameras or layer_data.cameras
+        ext = force_ext or layer_data.defaultExt
+        for cam in cameras:
+            file_prefix = layer_data.filePrefix
+            mappings = (
+                (R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
+                (R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
+                (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)),
+                # this is required to remove unfilled aov token, for example
+                # in Redshift
+                (R_REMOVE_AOV_TOKEN, "") if not force_aov_name \
+                    else (R_SUBSTITUTE_AOV_TOKEN, force_aov_name),
+
+                (R_CLEAN_FRAME_TOKEN, ""),
+                (R_CLEAN_EXT_TOKEN, ""),
+            )
+
+            for regex, value in mappings:
+                file_prefix = re.sub(regex, value, file_prefix)
+
+            for frame in range(
+                    int(layer_data.frameStart),
+                    int(layer_data.frameEnd) + 1,
+                    int(layer_data.frameStep),
+            ):
+                frame_str = str(frame).rjust(layer_data.padding, "0")
+                expected_files.append(
+                    "{}.{}.{}".format(file_prefix, frame_str, ext)
+                )
+        return expected_files
+
+    def get_files(self, product):
+        # type: (RenderProduct) -> list
+        """Return list of expected files.
+
+        It will translate render token strings ('<RenderLayer>', etc.) to
+        their values. This task is tricky as every renderer deals with this
+        differently. That's why we expose `get_files` as a method on the
+        Renderer class so it can be overridden for complex cases.
+
+        Args:
+            product (RenderProduct): Render product to be used for file
+                generation.
+
+        Returns:
+            List of files
+
+        """
+        return self._generate_file_sequence(
+            self.layer_data,
+            force_aov_name=product.productName,
+            force_ext=product.ext,
+            force_cameras=[product.camera]
+        )
+
+    def get_renderable_cameras(self):
+        # type: () -> list
+        """Get all renderable camera transforms.
+
+        Returns:
+            list: list of renderable cameras.
+
+        """
+
+        renderable_cameras = [
+            cam for cam in cmds.ls(cameras=True)
+            if self._get_attr(cam, "renderable")
+        ]
+
+        # The output produces a sanitized name for <Camera> using its
+        # shortest unique path of the transform so we'll return
+        # at least that unique path. 
This could include a parent
+        # name too when two cameras have the same name but are
+        # in a different hierarchy, e.g. "group1|cam" and "group2|cam"
+        def get_name(camera):
+            return cmds.ls(cmds.listRelatives(camera,
+                                              parent=True,
+                                              fullPath=True))[0]
+
+        return [get_name(cam) for cam in renderable_cameras]
+
+
+class RenderProductsArnold(ARenderProducts):
+    """Render products for Arnold renderer.
+
+    References:
+        mtoa.utils.getFileName()
+        mtoa.utils.ui.common.updateArnoldTargetFilePreview()
+
+    Notes:
+        - Output Denoising AOVs are not currently included.
+        - Only Frame/Animation ext: name.#.ext is supported.
+        - Use Custom extension is not supported.
+        - <RenderPassType> and <RenderPassFileGroup> tokens not tested
+        - With Merge AOVs but <RenderPass> in File Name Prefix Arnold
+          will still NOT merge the aovs. This class correctly resolves
+          it - but user should be aware.
+        - File Path Prefix overrides per AOV driver are not implemented
+
+    Attributes:
+        aiDriverExtension (dict): Arnold AOV driver extension mapping.
+            Is there a better way?
+        renderer (str): name of renderer.
+
+    """
+    renderer = "arnold"
+    aiDriverExtension = {
+        "jpeg": "jpg",
+        "exr": "exr",
+        "deepexr": "exr",
+        "png": "png",
+        "tiff": "tif",
+        "mtoa_shaders": "ass",  # TODO: research what those last two should be
+        "maya": "",
+    }
+
+    def get_renderer_prefix(self):
+
+        prefix = super(RenderProductsArnold, self).get_renderer_prefix()
+        merge_aovs = self._get_attr("defaultArnoldDriver.mergeAOVs")
+        if not merge_aovs and "<renderpass>" not in prefix.lower():
+            # When Merge AOVs is disabled and <renderpass> token not present
+            # then Arnold prepends <RenderPass>/ to the output path.
+            # todo: It's untested what happens if AOV driver has an
+            #       an explicit override path prefix.
+            prefix = "<RenderPass>/" + prefix
+
+        return prefix
+
+    def get_multipart(self):
+        multipart = False
+        multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
+        merge_aovs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
+        if multilayer or merge_aovs:
+            multipart = True
+
+        return multipart
+
+    def _get_aov_render_products(self, aov, cameras=None):
+        """Return all render products for the AOV"""
+
+        products = []
+        aov_name = self._get_attr(aov, "name")
+        ai_drivers = cmds.listConnections("{}.outputs".format(aov),
+                                          source=True,
+                                          destination=False,
+                                          type="aiAOVDriver") or []
+        if not cameras:
+            cameras = [
+                self.sanitize_camera_name(
+                    self.get_renderable_cameras()[0]
+                )
+            ]
+
+        for ai_driver in ai_drivers:
+            colorspace = self._get_colorspace(
+                ai_driver + ".colorManagement"
+            )
+            # todo: check aiAOVDriver.prefix as it could have
+            #       a custom path prefix set for this driver
+
+            # Skip Drivers set only for GUI
+            # 0: GUI, 1: Batch, 2: GUI and Batch
+            output_mode = self._get_attr(ai_driver, "outputMode")
+            if output_mode == 0:  # GUI only
+                log.warning("%s has Output Mode set to GUI, "
+                            "skipping...", ai_driver)
+                continue
+
+            ai_translator = self._get_attr(ai_driver, "aiTranslator")
+            try:
+                ext = self.aiDriverExtension[ai_translator]
+            except KeyError:
+                raise AOVError(
+                    "Unrecognized arnold driver format "
+                    "for AOV - {}".format(aov_name)
+                )
+
+            # If aov RGBA is selected, arnold will translate it to `beauty`
+            name = aov_name
+            if name == "RGBA":
+                name = "beauty"
+
+            # Support Arnold light groups for AOVs
+            # Global AOV: When disabled the main layer is
+            #             not written: `{pass}`
+            # All Light Groups: When enabled, a `{pass}_lgroups` file is
+            #                   written and is always merged into a
+            #                   single file
+            # Light Groups List: When set, a product per light
+            #                    group is written
+            #                    e.g. 
{pass}_front, {pass}_rim
+            global_aov = self._get_attr(aov, "globalAov")
+            if global_aov:
+                for camera in cameras:
+                    product = RenderProduct(
+                        productName=name,
+                        ext=ext,
+                        aov=aov_name,
+                        driver=ai_driver,
+                        multipart=self.multipart,
+                        camera=camera,
+                        colorspace=colorspace
+                    )
+                    products.append(product)
+
+            all_light_groups = self._get_attr(aov, "lightGroups")
+            if all_light_groups:
+                # All light groups is enabled. A single multipart
+                # Render Product
+                for camera in cameras:
+                    product = RenderProduct(
+                        productName=name + "_lgroups",
+                        ext=ext,
+                        aov=aov_name,
+                        driver=ai_driver,
+                        # Always multichannel output
+                        multipart=True,
+                        camera=camera,
+                        colorspace=colorspace
+                    )
+                    products.append(product)
+            else:
+                value = self._get_attr(aov, "lightGroupsList")
+                if not value:
+                    continue
+                selected_light_groups = value.strip().split()
+                for light_group in selected_light_groups:
+                    # Render Product per selected light group
+                    aov_light_group_name = "{}_{}".format(name, light_group)
+                    for camera in cameras:
+                        product = RenderProduct(
+                            productName=aov_light_group_name,
+                            aov=aov_name,
+                            driver=ai_driver,
+                            ext=ext,
+                            camera=camera,
+                            colorspace=colorspace
+                        )
+                        products.append(product)
+
+        return products
+
+    def _get_colorspace(self, attribute):
+        """Resolve colorspace from Arnold settings."""
+
+        def _view_transform():
+            preferences = lib.get_color_management_preferences()
+            views_data = get_ocio_config_views(preferences["config"])
+            view_data = views_data[
+                "{}/{}".format(preferences["display"], preferences["view"])
+            ]
+            return view_data["colorspace"]
+
+        def _raw():
+            preferences = lib.get_color_management_preferences()
+            return preferences["rendering_space"]
+
+        resolved_values = {
+            "Raw": _raw,
+            "Use View Transform": _view_transform,
+            # Default. Same as Maya Preferences.
+            "Use Output Transform": lib.get_color_management_output_transform
+        }
+        return resolved_values[self._get_attr(attribute)]()
+
+    def get_render_products(self):
+        """Get all AOVs.
+
+        See Also:
+            :func:`ARenderProducts.get_render_products()`
+
+        Raises:
+            :class:`AOVError`: If AOV cannot be determined.
+
+        """
+
+        if not cmds.ls("defaultArnoldRenderOptions", type="aiOptions"):
+            # This occurs when the Render Settings window has not been opened
+            # yet. In that case there are no Arnold options created, so
+            # querying for AOVs would fail. We terminate here as there are no
+            # AOVs specified then. This state will most probably fail later
+            # on some validator anyway.
+            return []
+
+        # Check if a camera token is in the prefix. If so, and we have a
+        # list of renderable cameras, generate a render product for each
+        # and every one of them.
+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
+        default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
+        colorspace = self._get_colorspace(
+            "defaultArnoldDriver.colorManagement"
+        )
+        beauty_products = [
+            RenderProduct(
+                productName="beauty",
+                ext=default_ext,
+                driver="defaultArnoldDriver",
+                camera=camera,
+                colorspace=colorspace
+            ) for camera in cameras
+        ]
+
+        # AOVs > Legacy > Maya Render View > Mode
+        aovs_enabled = bool(
+            self._get_attr("defaultArnoldRenderOptions.aovMode")
+        )
+        if not aovs_enabled:
+            return beauty_products
+
+        # Common > File Output > Merge AOVs or <RenderPass>
+        # We don't need to check for Merge AOVs due to overridden
+        # `get_renderer_prefix()` behavior which forces <renderpass>
+        has_renderpass_token = (
+            "<renderpass>" in self.layer_data.filePrefix.lower()
+        )
+        if not has_renderpass_token:
+            for product in beauty_products:
+                product.multipart = True
+            return beauty_products
+
+        # AOVs are set to be rendered separately. We should expect
+        # <renderpass> token in path.
+        # handle aovs from references
+        use_ref_aovs = self.render_instance.data.get(
+            "useReferencedAovs", False) or False
+
+        aovs = cmds.ls(type="aiAOV")
+        if not use_ref_aovs:
+            ref_aovs = cmds.ls(type="aiAOV", referencedNodes=True)
+            aovs = list(set(aovs) - set(ref_aovs))
+
+        products = []
+
+        # Append the AOV products
+        for aov in aovs:
+            enabled = self._get_attr(aov, "enabled")
+            if not enabled:
+                continue
+
+            # For now stick to the legacy output format.
+            aov_products = self._get_aov_render_products(aov, cameras)
+            products.extend(aov_products)
+
+        if all(product.aov != "RGBA" for product in products):
+            # Append default 'beauty' as this is arnolds default.
+            # However, it is excluded whenever a RGBA pass is enabled.
+            # For legibility add the beauty layer as first entry
+            products += beauty_products
+
+        # TODO: Output Denoising AOVs?
+
+        return products
+
+
+class RenderProductsVray(ARenderProducts):
+    """Expected files for V-Ray renderer.
+
+    Notes:
+        - "Disabled" animation incorrectly returns frames in filename
+        - "Renumber Frames" is not supported
+
+    Reference:
+        vrayAddRenderElementImpl() in vrayCreateRenderElementsTab.mel
+
+    """
+    # todo: detect whether rendering with V-Ray GPU + whether AOV is supported
+
+    renderer = "vray"
+
+    def get_multipart(self):
+        multipart = False
+        image_format = self._get_attr("vraySettings.imageFormatStr")
+        if image_format == "exr (multichannel)":
+            multipart = True
+
+        return multipart
+
+    def get_renderer_prefix(self):
+        # type: () -> str
+        """Get image prefix for V-Ray.
+
+        This overrides :func:`ARenderProducts.get_renderer_prefix()` as
+        we must add `<aov>` token manually. This is done only for
+        non-multipart outputs, where `<aov>` token doesn't make sense.
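+
+        For example (illustrative only): with separator "." the prefix
+        ``<Scene>/<Layer>`` becomes ``<Scene>/<Layer>.<aov>``.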
+
+        See also:
+            :func:`ARenderProducts.get_renderer_prefix()`
+
+        """
+        prefix = super(RenderProductsVray, self).get_renderer_prefix()
+        if self.multipart:
+            return prefix
+        aov_separator = self._get_aov_separator()
+        prefix = "{}{}<aov>".format(prefix, aov_separator)
+        return prefix
+
+    def _get_aov_separator(self):
+        # type: () -> str
+        """Return the V-Ray AOV/Render Elements separator"""
+        return self._get_attr(
+            "vraySettings.fileNameRenderElementSeparator"
+        )
+
+    def _get_layer_data(self):
+        # type: () -> LayerMetadata
+        """Override to get the V-Ray specific extension."""
+        layer_data = super(RenderProductsVray, self)._get_layer_data()
+
+        default_ext = self._get_attr("vraySettings.imageFormatStr")
+        if default_ext in ["exr (multichannel)", "exr (deep)"]:
+            default_ext = "exr"
+        layer_data.defaultExt = default_ext
+        layer_data.padding = self._get_attr("vraySettings.fileNamePadding")
+
+        layer_data.aov_separator = self._get_aov_separator()
+
+        return layer_data
+
+    def get_render_products(self):
+        """Get all AOVs.
+
+        See Also:
+            :func:`ARenderProducts.get_render_products()`
+
+        """
+        if not cmds.ls("vraySettings", type="VRaySettingsNode"):
+            # This occurs when the Render Settings window has not been
+            # opened yet. In that case there are no V-Ray options created,
+            # so querying AOVs would fail. We terminate here as there are
+            # no AOVs specified then. This state will most probably fail
+            # later on some validator anyway.
+            return []
+
+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
+        image_format_str = self._get_attr("vraySettings.imageFormatStr")
+        default_ext = image_format_str
+        if default_ext in {"exr (multichannel)", "exr (deep)"}:
+            default_ext = "exr"
+
+        colorspace = lib.get_color_management_output_transform()
+        products = []
+
+        # add beauty as default when not disabled
+        dont_save_rgb = self._get_attr("vraySettings.dontSaveRgbChannel")
+        if not dont_save_rgb:
+            for camera in cameras:
+                products.append(
+                    RenderProduct(
+                        productName="",
+                        ext=default_ext,
+                        camera=camera,
+                        colorspace=colorspace,
+                        multipart=self.multipart
+                    )
+                )
+
+        # separate alpha file
+        separate_alpha = self._get_attr("vraySettings.separateAlpha")
+        if separate_alpha:
+            for camera in cameras:
+                products.append(
+                    RenderProduct(
+                        productName="Alpha",
+                        ext=default_ext,
+                        camera=camera,
+                        colorspace=colorspace,
+                        multipart=self.multipart
+                    )
+                )
+        if self.multipart:
+            # AOVs are merged into a single multi-channel file; only the
+            # main layer is rendered
+            return products
+
+        # handle aovs from references
+        use_ref_aovs = self.render_instance.data.get(
+            "useReferencedAovs", False) or False
+
+        # This will list all AOVs, whether they come from a reference
+        # or not.
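+        # e.g. (illustrative, hypothetical node names): ["vrayRE_Diffuse",
+        # "char01:vrayRE_Specular"], where the referenced element is
+        # filtered out below when `use_ref_aovs` is disabled.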
+        aov_types = ["VRayRenderElement", "VRayRenderElementSet"]
+        aovs = cmds.ls(type=aov_types)
+        if not use_ref_aovs:
+            ref_aovs = cmds.ls(type=aov_types, referencedNodes=True) or []
+            aovs = list(set(aovs) - set(ref_aovs))
+
+        for aov in aovs:
+            enabled = self._get_attr(aov, "enabled")
+            if not enabled:
+                continue
+
+            class_type = self._get_attr(aov + ".vrayClassType")
+            if class_type == "LightMixElement":
+                # Special case which doesn't define a name by itself but
+                # instead seems to output multiple Render Products,
+                # specifically "Self_Illumination" and "Environment"
+                product_names = ["Self_Illumination", "Environment"]
+                for camera in cameras:
+                    for name in product_names:
+                        product = RenderProduct(productName=name,
+                                                ext=default_ext,
+                                                aov=aov,
+                                                camera=camera,
+                                                colorspace=colorspace)
+                        products.append(product)
+                # Continue as we've processed this special case AOV
+                continue
+
+            aov_name = self._get_vray_aov_name(aov)
+            for camera in cameras:
+                product = RenderProduct(
+                    productName=aov_name,
+                    ext=default_ext,
+                    aov=aov,
+                    camera=camera,
+                    colorspace=colorspace
+                )
+                products.append(product)
+
+        return products
+
+    def _get_vray_aov_attr(self, node, prefix):
+        """Get the value of the attribute whose name starts with the prefix.
+
+        V-Ray AOVs have attribute names that include the type
+        of AOV in the attribute name, for example:
+            - vray_filename_rawdiffuse
+            - vray_filename_velocity
+            - vray_name_gi
+            - vray_explicit_name_extratex
+
+        To simplify querying the "vray_filename" or "vray_name"
+        attributes we just find the first attribute that has
+        that particular "{prefix}_" in the attribute name.
+
+        Args:
+            node (str): AOV node name.
+            prefix (str): Prefix of the attribute name.
+
+        Returns:
+            Value of the attribute if it exists, else None.
+
+        """
+        attrs = cmds.listAttr(node, string="{}_*".format(prefix))
+        if not attrs:
+            return None
+
+        assert len(attrs) == 1, "Found more than one attribute: %s" % attrs
+        attr = attrs[0]
+
+        return self._get_attr(node, attr)
+
+    def _get_vray_aov_name(self, node):
+        """Get the AOV name from a V-Ray render element node.
+
+        Args:
+            node (str): AOV node name.
+
+        Returns:
+            str: AOV name.
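+
+        Example (illustrative): a node with `vray_explicit_name_extratex`
+        set to "myPass" resolves to "myPass", regardless of any
+        `vray_filename_*` or `vray_name_*` values.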
+
+        """
+
+        vray_explicit_name = self._get_vray_aov_attr(node,
+                                                     "vray_explicit_name")
+        vray_filename = self._get_vray_aov_attr(node, "vray_filename")
+        vray_name = self._get_vray_aov_attr(node, "vray_name")
+        final_name = vray_explicit_name or vray_filename or vray_name or None
+
+        class_type = self._get_attr(node, "vrayClassType")
+        if not vray_explicit_name:
+            # An explicit name takes precedence and overrides completely;
+            # otherwise add the connected node names to the special cases.
+            # Any namespace colon ':' gets replaced by an underscore '_',
+            # so we sanitize using `sanitize_camera_name`.
+            def _get_source_name(node, attr):
+                """Return sanitized name of input connection to attribute"""
+                plug = "{}.{}".format(node, attr)
+                connections = cmds.listConnections(plug,
+                                                   source=True,
+                                                   destination=False)
+                if connections:
+                    return self.sanitize_camera_name(connections[0])
+
+            if class_type == "MaterialSelectElement":
+                # Name suffix is based on the connected material or set
+                attrs = [
+                    "vray_mtllist_mtlselect",
+                    "vray_mtl_mtlselect"
+                ]
+                for attribute in attrs:
+                    name = _get_source_name(node, attribute)
+                    if name:
+                        final_name += '_{}'.format(name)
+                        break
+                else:
+                    log.warning("Material Select Element has no "
+                                "selected materials: %s", node)
+
+            elif class_type == "ExtraTexElement":
+                # Name suffix is based on the connected textures
+                extratex_type = self._get_attr(node, "vray_type_extratex")
+                attr = {
+                    0: "vray_texture_extratex",
+                    1: "vray_float_texture_extratex",
+                    2: "vray_int_texture_extratex",
+                }.get(extratex_type)
+                name = _get_source_name(node, attr)
+                if name:
+                    final_name += '_{}'.format(name)
+                else:
+                    log.warning("Extratex Element has no incoming texture")
+
+        assert final_name, "Output filename not defined for AOV: %s" % node
+
+        return final_name
+
+
+class RenderProductsRedshift(ARenderProducts):
+    """Expected files for Redshift renderer.
+
+    Notes:
+        - `get_files()` only supports rendering with frames, like "animation"
+
+    Attributes:
+        unmerged_aovs (list): Names of AOVs that are not merged into the
+            resulting exr and need to be specified in the Render Products
+            output.
+
+    """
+
+    renderer = "redshift"
+    unmerged_aovs = {"Cryptomatte"}
+
+    def get_files(self, product):
+        # When outputting AOVs we need to replace Redshift specific AOV
+        # tokens with Maya render tokens for generating file sequences. We
+        # validate against a specific AOV fileprefix so we only need to
+        # account for one replacement.
+        if not product.multipart and product.driver:
+            file_prefix = self._get_attr(product.driver + ".filePrefix")
+            self.layer_data.filePrefix = file_prefix.replace(
+                "<BeautyPath>/<BeautyFile>",
+                "<Scene>/<RenderLayer>/<RenderLayer>"
+            )
+
+        return super(RenderProductsRedshift, self).get_files(product)
+
+    def get_multipart(self):
+        # For Redshift we don't directly return upon forcing multilayer
+        # due to some AOVs still being written into separate files,
+        # like Cryptomatte.
+        # AOVs are merged in a multi-channel file.
+        multipart = False
+        force_layer = bool(
+            self._get_attr("redshiftOptions.exrForceMultilayer")
+        )
+        if force_layer:
+            multipart = True
+
+        return multipart
+
+    def get_renderer_prefix(self):
+        """Get image prefix for Redshift.
+
+        This overrides :func:`ARenderProducts.get_renderer_prefix()` as
+        we must add the `<aov>` token manually. This is done only for
+        non-multipart outputs, where the `<aov>` token doesn't make sense.
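+
+        Example (illustrative; the actual prefix comes from render settings):
+            a prefix "maya/<Scene>/<RenderLayer>" with an underscore
+            separator resolves to "maya/<Scene>/<RenderLayer>_<aov>"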
+
+        See also:
+            :func:`ARenderProducts.get_renderer_prefix()`
+
+        """
+        prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
+        if self.multipart:
+            return prefix
+        separator = self.extract_separator(prefix)
+        prefix = "{}{}<aov>".format(prefix, separator or "_")
+        return prefix
+
+    def get_render_products(self):
+        """Get all AOVs.
+
+        See Also:
+            :func:`ARenderProducts.get_render_products()`
+
+        """
+
+        if not cmds.ls("redshiftOptions", type="RedshiftOptions"):
+            # This occurs when the Render Settings window has not been
+            # opened yet. In that case there are no Redshift options
+            # created, so querying AOVs would fail. We terminate here as
+            # there are no AOVs specified then. This state will most
+            # probably fail later on some validator anyway.
+            return []
+
+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
+        # Get Redshift extension from image format
+        image_format = self._get_attr("redshiftOptions.imageFormat")  # integer
+        ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
+
+        use_ref_aovs = self.render_instance.data.get(
+            "useReferencedAovs", False) or False
+
+        aovs = cmds.ls(type="RedshiftAOV")
+        if not use_ref_aovs:
+            ref_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=True)
+            aovs = list(set(aovs) - set(ref_aovs))
+
+        products = []
+        light_groups_enabled = False
+        has_beauty_aov = False
+        colorspace = lib.get_color_management_output_transform()
+        for aov in aovs:
+            enabled = self._get_attr(aov, "enabled")
+            if not enabled:
+                continue
+
+            aov_type = self._get_attr(aov, "aovType")
+            if self.multipart and aov_type not in self.unmerged_aovs:
+                continue
+
+            # Any AOVs that still get processed, like Cryptomatte,
+            # are by themselves not multipart files.
+
+            # Redshift skips rendering of masterlayer without AOV suffix
+            # when a Beauty AOV is rendered. It overrides the main layer.
+            if aov_type == "Beauty":
+                has_beauty_aov = True
+
+            aov_name = self._get_attr(aov, "name")
+
+            # Support light groups
+            light_groups = []
+            if self._get_attr(aov, "supportsLightGroups"):
+                all_light_groups = self._get_attr(aov, "allLightGroups")
+                if all_light_groups:
+                    # All light groups is enabled
+                    light_groups = self._get_redshift_light_groups()
+                else:
+                    value = self._get_attr(aov, "lightGroupList")
+                    # note: string value can return None when never set
+                    if value:
+                        selected_light_groups = value.strip().split()
+                        light_groups = selected_light_groups
+
+                for light_group in light_groups:
+                    aov_light_group_name = "{}_{}".format(aov_name,
+                                                          light_group)
+                    for camera in cameras:
+                        product = RenderProduct(
+                            productName=aov_light_group_name,
+                            aov=aov_name,
+                            ext=ext,
+                            multipart=False,
+                            camera=camera,
+                            driver=aov,
+                            colorspace=colorspace)
+                        products.append(product)
+
+            if light_groups:
+                light_groups_enabled = True
+
+            # Redshift AOV Light Select always renders the global AOV
+            # even when light groups are present, so we don't need to
+            # exclude it when light groups are active.
+            for camera in cameras:
+                product = RenderProduct(productName=aov_name,
+                                        aov=aov_name,
+                                        ext=ext,
+                                        multipart=False,
+                                        camera=camera,
+                                        driver=aov,
+                                        colorspace=colorspace)
+                products.append(product)
+
+        # When a Beauty AOV is added manually, it will be rendered as
+        # 'Beauty_other' in the file name and the "standard" beauty will
+        # have 'Beauty' in its name. When disabled, the standard output
+        # will be without `Beauty`. Except when using light groups.
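+        # e.g. (illustrative): with a manually added Beauty AOV the base
+        # product is named "BeautyAux" while the AOV itself renders as
+        # "Beauty"; otherwise the base beauty product has an empty name.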
+        if light_groups_enabled:
+            return products
+
+        beauty_name = "BeautyAux" if has_beauty_aov else ""
+        for camera in cameras:
+            products.insert(0,
+                            RenderProduct(productName=beauty_name,
+                                          ext=ext,
+                                          multipart=self.multipart,
+                                          camera=camera,
+                                          colorspace=colorspace))
+
+        return products
+
+    @staticmethod
+    def _get_redshift_light_groups():
+        return sorted(mel.eval("redshiftAllAovLightGroups"))
+
+
+class RenderProductsRenderman(ARenderProducts):
+    """Expected files for Renderman renderer.
+
+    Warning:
+        This is very rudimentary and needs more love and testing.
+    """
+
+    renderer = "renderman"
+    unmerged_aovs = {"PxrCryptomatte"}
+
+    def get_multipart(self):
+        # Implemented as display specific in "get_render_products".
+        return False
+
+    def get_render_products(self):
+        """Get all AOVs.
+
+        See Also:
+            :func:`ARenderProducts.get_render_products()`
+
+        """
+        from rfm2.api.displays import get_displays  # noqa
+
+        colorspace = lib.get_color_management_output_transform()
+
+        cameras = [
+            self.sanitize_camera_name(c)
+            for c in self.get_renderable_cameras()
+        ]
+
+        if not cameras:
+            cameras = [
+                self.sanitize_camera_name(
+                    self.get_renderable_cameras()[0])
+            ]
+        products = []
+
+        # NOTE: This is guessing extensions from renderman display types.
+        #       Some of them are just framebuffers, d_texture format can be
+        #       set in display setting. We set those now to None, but it
+        #       should be handled more gracefully.
+        display_types = {
+            "d_deepexr": "exr",
+            "d_it": None,
+            "d_null": None,
+            "d_openexr": "exr",
+            "d_png": "png",
+            "d_pointcloud": "ptc",
+            "d_targa": "tga",
+            "d_texture": None,
+            "d_tiff": "tif"
+        }
+
+        displays = get_displays(override_dst="render")["displays"]
+        for name, display in displays.items():
+            enabled = display["params"]["enable"]["value"]
+            if not enabled:
+                continue
+
+            # Skip display types not producing any file output.
+            # Is there a better way to do it?
+            if not display_types.get(display["driverNode"]["type"]):
+                continue
+
+            has_cryptomatte = cmds.ls(type=self.unmerged_aovs)
+            matte_enabled = False
+            if has_cryptomatte:
+                for cryptomatte in has_cryptomatte:
+                    cryptomatte_aov = cryptomatte
+                    matte_name = "cryptomatte"
+                    rman_globals = cmds.listConnections(cryptomatte +
+                                                        ".message")
+                    if rman_globals:
+                        matte_enabled = True
+
+            aov_name = name
+            if aov_name == "rmanDefaultDisplay":
+                aov_name = "beauty"
+
+            extensions = display_types.get(
+                display["driverNode"]["type"], "exr")
+
+            for camera in cameras:
+                # Create render product and set it as multipart only on
+                # display types supporting it. In all other cases, Renderman
+                # will create separate output per channel.
+                if display["driverNode"]["type"] in ["d_openexr", "d_deepexr", "d_tiff"]:  # noqa
+                    product = RenderProduct(
+                        productName=aov_name,
+                        ext=extensions,
+                        camera=camera,
+                        multipart=True,
+                        colorspace=colorspace
+                    )
+
+                    if has_cryptomatte and matte_enabled:
+                        cryptomatte = RenderProduct(
+                            productName=matte_name,
+                            aov=cryptomatte_aov,
+                            ext=extensions,
+                            camera=camera,
+                            multipart=True,
+                            colorspace=colorspace
+                        )
+                else:
+                    # This code should handle the case where no multipart
+                    # capable format is selected. But since it involves
+                    # shady logic to determine which channel becomes what,
+                    # let's not do that, as all productions will use exr
+                    # anyway.
+ """ + for channel in display['params']['displayChannels']['value']: # noqa + product = RenderProduct( + productName="{}_{}".format(aov_name, channel), + ext=extensions, + camera=camera, + multipart=False + ) + """ + raise UnsupportedImageFormatException( + "Only exr, deep exr and tiff formats are supported.") + + products.append(product) + + if has_cryptomatte and matte_enabled: + products.append(cryptomatte) + + return products + + def get_files(self, product): + """Get expected files. + + """ + files = super(RenderProductsRenderman, self).get_files(product) + + layer_data = self.layer_data + new_files = [] + + resolved_image_dir = re.sub("", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE) # noqa: E501 + resolved_image_dir = re.sub("", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE) # noqa: E501 + for file in files: + new_file = "{}/{}".format(resolved_image_dir, file) + new_files.append(new_file) + + return new_files + + +class RenderProductsMayaHardware(ARenderProducts): + """Expected files for MayaHardware renderer.""" + + renderer = "mayahardware2" + + extensions = [ + {"label": "JPEG", "index": 8, "extension": "jpg"}, + {"label": "PNG", "index": 32, "extension": "png"}, + {"label": "EXR(exr)", "index": 40, "extension": "exr"} + ] + + def get_multipart(self): + # MayaHardware does not support multipart EXRs. + return False + + def _get_extension(self, value): + result = None + if isinstance(value, int): + extensions = { + extension["index"]: extension["extension"] + for extension in self.extensions + } + try: + result = extensions[value] + except KeyError: + raise NotImplementedError( + "Could not find extension for {}".format(value) + ) + + if isinstance(value, six.string_types): + extensions = { + extension["label"]: extension["extension"] + for extension in self.extensions + } + try: + result = extensions[value] + except KeyError: + raise NotImplementedError( + "Could not find extension for {}".format(value) + ) + + if not result: + raise NotImplementedError( + "Could not find extension for {}".format(value) + ) + + return result + + def get_render_products(self): + """Get all AOVs. + See Also: + :func:`ARenderProducts.get_render_products()` + """ + ext = self._get_extension( + self._get_attr("defaultRenderGlobals.imageFormat") + ) + + products = [] + for cam in self.get_renderable_cameras(): + product = RenderProduct( + productName="beauty", + ext=ext, + camera=cam, + colorspace=lib.get_color_management_output_transform() + ) + products.append(product) + + return products + + +class AOVError(Exception): + """Custom exception for determining AOVs.""" + + +class UnsupportedRendererException(Exception): + """Custom exception. + + Raised when requesting data from unsupported renderer. 
+ """ + + +class UnsupportedImageFormatException(Exception): + """Custom exception to report unsupported output image format.""" diff --git a/client/ayon_core/hosts/maya/api/lib_rendersettings.py b/client/ayon_core/hosts/maya/api/lib_rendersettings.py new file mode 100644 index 0000000000..54ee7888b3 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/lib_rendersettings.py @@ -0,0 +1,408 @@ +# -*- coding: utf-8 -*- +"""Class for handling Render Settings.""" +import six +import sys + +from ayon_core.lib import Logger +from ayon_core.settings import get_project_settings + +from ayon_core.pipeline import CreatorError, get_current_project_name +from ayon_core.pipeline.context_tools import get_current_project_asset +from ayon_core.hosts.maya.api.lib import reset_frame_range + + +class RenderSettings(object): + + _image_prefix_nodes = { + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'rmanGlobals.imageFileFormat', + 'redshift': 'defaultRenderGlobals.imageFilePrefix', + 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix' + } + + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + log = Logger.get_logger("RenderSettings") + + @classmethod + def get_image_prefix_attr(cls, renderer): + return cls._image_prefix_nodes[renderer] + + @staticmethod + def get_padding_attr(renderer): + """Return attribute for renderer that defines frame padding amount""" + if renderer == "vray": + return "vraySettings.fileNamePadding" + else: + return "defaultRenderGlobals.extensionPadding" + + def __init__(self, project_settings=None): + if not project_settings: + project_settings = get_project_settings( + get_current_project_name() + ) + render_settings = project_settings["maya"]["RenderSettings"] + image_prefixes = { + "vray": render_settings["vray_renderer"]["image_prefix"], + "arnold": render_settings["arnold_renderer"]["image_prefix"], + "renderman": render_settings["renderman_renderer"]["image_prefix"], + "redshift": render_settings["redshift_renderer"]["image_prefix"] + } + + # TODO probably should be stored to more explicit attribute + # Renderman only + renderman_settings = render_settings["renderman_renderer"] + _image_dir = { + "renderman": renderman_settings["image_dir"], + "cryptomatte": renderman_settings["cryptomatte_dir"], + "imageDisplay": renderman_settings["imageDisplay_dir"], + "watermark": renderman_settings["watermark_dir"] + } + self._image_prefixes = image_prefixes + self._image_dir = _image_dir + self._project_settings = project_settings + + def set_default_renderer_settings(self, renderer=None): + """Set basic settings based on renderer.""" + # Not all hosts can import this module. 
+ from maya import cmds # noqa: F401 + import maya.mel as mel # noqa: F401 + + if not renderer: + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + + asset_doc = get_current_project_asset() + # project_settings/maya/create/CreateRender/aov_separator + try: + aov_separator = self._aov_chars[( + self._project_settings["maya"] + ["RenderSettings"] + ["aov_separator"] + )] + except KeyError: + aov_separator = "_" + reset_frame = self._project_settings["maya"]["RenderSettings"]["reset_current_frame"] # noqa + + if reset_frame: + start_frame = cmds.getAttr("defaultRenderGlobals.startFrame") + cmds.currentTime(start_frame, edit=True) + + if renderer in self._image_prefix_nodes: + prefix = self._image_prefixes[renderer] + prefix = prefix.replace("{aov_separator}", aov_separator) + cmds.setAttr(self._image_prefix_nodes[renderer], + prefix, type="string") # noqa + else: + print("{0} isn't a supported renderer to autoset settings.".format(renderer)) # noqa + # TODO: handle not having res values in the doc + width = asset_doc["data"].get("resolutionWidth") + height = asset_doc["data"].get("resolutionHeight") + + if renderer == "arnold": + # set renderer settings for Arnold from project settings + self._set_arnold_settings(width, height) + + if renderer == "vray": + self._set_vray_settings(aov_separator, width, height) + + if renderer == "redshift": + self._set_redshift_settings(width, height) + mel.eval("redshiftUpdateActiveAovList") + + if renderer == "renderman": + image_dir = self._image_dir["renderman"] + cmds.setAttr("rmanGlobals.imageOutputDir", + image_dir, type="string") + self._set_renderman_settings(width, height, + aov_separator) + + def _set_arnold_settings(self, width, height): + """Sets settings for Arnold.""" + from mtoa.core import createOptions # noqa + from mtoa.aovs import AOVInterface # noqa + # Not all hosts can import this module. + from maya import cmds # noqa: F401 + import maya.mel as mel # noqa: F401 + + createOptions() + render_settings = self._project_settings["maya"]["RenderSettings"] + arnold_render_presets = render_settings["arnold_renderer"] # noqa + # Force resetting settings and AOV list to avoid having to deal with + # AOV checking logic, for now. + # This is a work around because the standard + # function to revert render settings does not reset AOVs list in MtoA + # Fetch current aovs in case there's any. 
+        current_aovs = AOVInterface().getAOVs()
+        remove_aovs = render_settings["remove_aovs"]
+        if remove_aovs:
+            # Remove fetched AOVs
+            AOVInterface().removeAOVs(current_aovs)
+        mel.eval("unifiedRenderGlobalsRevertToDefault")
+        img_ext = arnold_render_presets["image_format"]
+        img_prefix = arnold_render_presets["image_prefix"]
+        aovs = arnold_render_presets["aov_list"]
+        img_tiled = arnold_render_presets["tiled"]
+        multi_exr = arnold_render_presets["multilayer_exr"]
+        additional_options = arnold_render_presets["additional_options"]
+        for aov in aovs:
+            if aov in current_aovs and not remove_aovs:
+                continue
+            AOVInterface('defaultArnoldRenderOptions').addAOV(aov)
+
+        cmds.setAttr("defaultResolution.width", width)
+        cmds.setAttr("defaultResolution.height", height)
+
+        self._set_global_output_settings()
+
+        cmds.setAttr(
+            "defaultRenderGlobals.imageFilePrefix", img_prefix, type="string")
+
+        cmds.setAttr(
+            "defaultArnoldDriver.ai_translator", img_ext, type="string")
+
+        cmds.setAttr(
+            "defaultArnoldDriver.exrTiled", img_tiled)
+
+        cmds.setAttr(
+            "defaultArnoldDriver.mergeAOVs", multi_exr)
+        self._additional_attribs_setter(additional_options)
+        reset_frame_range(playback=False, fps=False, render=True)
+
+    def _set_redshift_settings(self, width, height):
+        """Sets settings for Redshift."""
+        # Not all hosts can import this module.
+        from maya import cmds  # noqa: F401
+        import maya.mel as mel  # noqa: F401
+
+        render_settings = self._project_settings["maya"]["RenderSettings"]
+        redshift_render_presets = render_settings["redshift_renderer"]
+
+        remove_aovs = render_settings["remove_aovs"]
+        all_rs_aovs = cmds.ls(type='RedshiftAOV')
+        if remove_aovs:
+            for aov in all_rs_aovs:
+                enabled = cmds.getAttr("{}.enabled".format(aov))
+                if enabled:
+                    cmds.delete(aov)
+
+        redshift_aovs = redshift_render_presets["aov_list"]
+        # list all the aovs
+        all_rs_aovs = cmds.ls(type='RedshiftAOV')
+        for rs_aov in redshift_aovs:
+            rs_layername = "rsAov_{}".format(rs_aov.replace(" ", ""))
+            if rs_layername in all_rs_aovs:
+                continue
+            cmds.rsCreateAov(type=rs_aov)
+        # update the AOV list
+        mel.eval("redshiftUpdateActiveAovList")
+
+        rs_p_engine = redshift_render_presets["primary_gi_engine"]
+        rs_s_engine = redshift_render_presets["secondary_gi_engine"]
+
+        if int(rs_p_engine) != 0 or int(rs_s_engine) != 0:
+            cmds.setAttr("redshiftOptions.GIEnabled", 1)
+            if int(rs_p_engine) == 0:
+                # reset the primary GI Engine to its default
+                cmds.setAttr("redshiftOptions.primaryGIEngine", 4)
+            if int(rs_s_engine) == 0:
+                # reset the secondary GI Engine to its default
+                cmds.setAttr("redshiftOptions.secondaryGIEngine", 2)
+        else:
+            cmds.setAttr("redshiftOptions.GIEnabled", 0)
+
+        # Apply only explicit engine choices so the defaults set above are
+        # not immediately overwritten with 0 again.
+        if int(rs_p_engine) != 0:
+            cmds.setAttr("redshiftOptions.primaryGIEngine", int(rs_p_engine))
+        if int(rs_s_engine) != 0:
+            cmds.setAttr(
+                "redshiftOptions.secondaryGIEngine", int(rs_s_engine))
+
+        additional_options = redshift_render_presets["additional_options"]
+        ext = redshift_render_presets["image_format"]
+        img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"]
+        img_ext = img_exts.index(ext)
+
+        self._set_global_output_settings()
+        cmds.setAttr("redshiftOptions.imageFormat", img_ext)
+        cmds.setAttr("defaultResolution.width", width)
+        cmds.setAttr("defaultResolution.height", height)
+        self._additional_attribs_setter(additional_options)
+
+    def _set_renderman_settings(self, width, height, aov_separator):
+        """Sets settings for Renderman."""
+        # Not all hosts can import this module.
+ from maya import cmds # noqa: F401 + import maya.mel as mel # noqa: F401 + + rman_render_presets = ( + self._project_settings + ["maya"] + ["RenderSettings"] + ["renderman_renderer"] + ) + display_filters = rman_render_presets["display_filters"] + d_filters_number = len(display_filters) + for i in range(d_filters_number): + d_node = cmds.ls(typ=display_filters[i]) + if len(d_node) > 0: + filter_nodes = d_node[0] + else: + filter_nodes = cmds.createNode(display_filters[i]) + + cmds.connectAttr(filter_nodes + ".message", + "rmanGlobals.displayFilters[%i]" % i, + force=True) + if filter_nodes.startswith("PxrImageDisplayFilter"): + imageDisplay_dir = self._image_dir["imageDisplay"] + imageDisplay_dir = imageDisplay_dir.replace("{aov_separator}", + aov_separator) + cmds.setAttr(filter_nodes + ".filename", + imageDisplay_dir, type="string") + + sample_filters = rman_render_presets["sample_filters"] + s_filters_number = len(sample_filters) + for n in range(s_filters_number): + s_node = cmds.ls(typ=sample_filters[n]) + if len(s_node) > 0: + filter_nodes = s_node[0] + else: + filter_nodes = cmds.createNode(sample_filters[n]) + + cmds.connectAttr(filter_nodes + ".message", + "rmanGlobals.sampleFilters[%i]" % n, + force=True) + + if filter_nodes.startswith("PxrCryptomatte"): + matte_dir = self._image_dir["cryptomatte"] + matte_dir = matte_dir.replace("{aov_separator}", + aov_separator) + cmds.setAttr(filter_nodes + ".filename", + matte_dir, type="string") + elif filter_nodes.startswith("PxrWatermarkFilter"): + watermark_dir = self._image_dir["watermark"] + watermark_dir = watermark_dir.replace("{aov_separator}", + aov_separator) + cmds.setAttr(filter_nodes + ".filename", + watermark_dir, type="string") + + additional_options = rman_render_presets["additional_options"] + + self._set_global_output_settings() + cmds.setAttr("defaultResolution.width", width) + cmds.setAttr("defaultResolution.height", height) + self._additional_attribs_setter(additional_options) + + def _set_vray_settings(self, aov_separator, width, height): + # type: (str, int, int) -> None + """Sets important settings for Vray.""" + # Not all hosts can import this module. + from maya import cmds # noqa: F401 + import maya.mel as mel # noqa: F401 + + + settings = cmds.ls(type="VRaySettingsNode") + node = settings[0] if settings else cmds.createNode("VRaySettingsNode") + render_settings = self._project_settings["maya"]["RenderSettings"] + vray_render_presets = render_settings["vray_renderer"] + # vrayRenderElement + remove_aovs = render_settings["remove_aovs"] + all_vray_aovs = cmds.ls(type='VRayRenderElement') + lightSelect_aovs = cmds.ls(type='VRayRenderElementSet') + if remove_aovs: + for aov in all_vray_aovs: + # remove all aovs except LightSelect + enabled = cmds.getAttr("{}.enabled".format(aov)) + if enabled: + cmds.delete(aov) + # remove LightSelect + for light_aovs in lightSelect_aovs: + light_enabled = cmds.getAttr("{}.enabled".format(light_aovs)) + if light_enabled: + cmds.delete(lightSelect_aovs) + + vray_aovs = vray_render_presets["aov_list"] + for renderlayer in vray_aovs: + renderElement = "vrayAddRenderElement {}".format(renderlayer) + RE_name = mel.eval(renderElement) + # if there is more than one same render element + if RE_name.endswith("1"): + cmds.delete(RE_name) + # Set aov separator + # First we need to explicitly set the UI items in Render Settings + # because that is also what V-Ray updates to when that Render Settings + # UI did initialize before and refreshes again. 
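+        # e.g. (illustrative): with aov_separator "." the menu item labeled
+        # "." gets selected below, so the UI stays in sync with the
+        # fileNameRenderElementSeparator attribute that is set afterwards.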
+ MENU = "vrayRenderElementSeparator" + if cmds.optionMenuGrp(MENU, query=True, exists=True): + items = cmds.optionMenuGrp(MENU, query=True, ill=True) + separators = [cmds.menuItem(i, query=True, label=True) for i in items] # noqa: E501 + try: + sep_idx = separators.index(aov_separator) + except ValueError: + six.reraise( + CreatorError, + CreatorError( + "AOV character {} not in {}".format( + aov_separator, separators)), + sys.exc_info()[2]) + + cmds.optionMenuGrp(MENU, edit=True, select=sep_idx + 1) + + # Set the render element attribute as string. This is also what V-Ray + # sets whenever the `vrayRenderElementSeparator` menu items switch + cmds.setAttr( + "{}.fileNameRenderElementSeparator".format(node), + aov_separator, + type="string" + ) + + # Set render file format to exr + ext = vray_render_presets["image_format"] + cmds.setAttr("{}.imageFormatStr".format(node), ext, type="string") + + # animType + cmds.setAttr("{}.animType".format(node), 1) + + # resolution + cmds.setAttr("{}.width".format(node), width) + cmds.setAttr("{}.height".format(node), height) + + additional_options = vray_render_presets["additional_options"] + + self._additional_attribs_setter(additional_options) + + @staticmethod + def _set_global_output_settings(): + # Not all hosts can import this module. + from maya import cmds # noqa: F401 + import maya.mel as mel # noqa: F401 + + # enable animation + cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) + cmds.setAttr("defaultRenderGlobals.animation", 1) + cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) + cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + + def _additional_attribs_setter(self, additional_attribs): + # Not all hosts can import this module. + from maya import cmds # noqa: F401 + import maya.mel as mel # noqa: F401 + + for item in additional_attribs: + attribute, value = item + attribute = str(attribute) # ensure str conversion from settings + attribute_type = cmds.getAttr(attribute, type=True) + if attribute_type in {"long", "bool"}: + cmds.setAttr(attribute, int(value)) + elif attribute_type == "string": + cmds.setAttr(attribute, str(value), type="string") + elif attribute_type in {"double", "doubleAngle", "doubleLinear"}: + cmds.setAttr(attribute, float(value)) + else: + self.log.error( + "Attribute {attribute} can not be set due to unsupported " + "type: {attribute_type}".format( + attribute=attribute, + attribute_type=attribute_type) + ) diff --git a/openpype/hosts/maya/api/lib_rendersetup.py b/client/ayon_core/hosts/maya/api/lib_rendersetup.py similarity index 99% rename from openpype/hosts/maya/api/lib_rendersetup.py rename to client/ayon_core/hosts/maya/api/lib_rendersetup.py index 440ee21a52..fb6dd13ce0 100644 --- a/openpype/hosts/maya/api/lib_rendersetup.py +++ b/client/ayon_core/hosts/maya/api/lib_rendersetup.py @@ -19,7 +19,7 @@ UniqueOverride ) -from openpype.hosts.maya.api.lib import get_attribute +from ayon_core.hosts.maya.api.lib import get_attribute EXACT_MATCH = 0 PARENT_MATCH = 1 diff --git a/client/ayon_core/hosts/maya/api/menu.py b/client/ayon_core/hosts/maya/api/menu.py new file mode 100644 index 0000000000..7478739496 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/menu.py @@ -0,0 +1,261 @@ +import os +import logging +from functools import partial + +from qtpy import QtWidgets, QtGui + +import maya.utils +import maya.cmds as cmds + +from ayon_core.pipeline import ( + get_current_asset_name, + get_current_task_name +) +from ayon_core.pipeline.workfile import BuildWorkfile +from ayon_core.tools.utils 
import host_tools +from ayon_core.hosts.maya.api import lib, lib_rendersettings +from .lib import get_main_window, IS_HEADLESS +from ..tools import show_look_assigner + +from .workfile_template_builder import ( + create_placeholder, + update_placeholder, + build_workfile_template, + update_workfile_template, +) + +log = logging.getLogger(__name__) + +MENU_NAME = "op_maya_menu" + + +def _get_menu(menu_name=None): + """Return the menu instance if it currently exists in Maya""" + if menu_name is None: + menu_name = MENU_NAME + + widgets = {w.objectName(): w for w in QtWidgets.QApplication.allWidgets()} + return widgets.get(menu_name) + + +def get_context_label(): + return "{}, {}".format( + get_current_asset_name(), + get_current_task_name() + ) + + +def install(project_settings): + if cmds.about(batch=True): + log.info("Skipping openpype.menu initialization in batch mode..") + return + + def add_menu(): + pyblish_icon = host_tools.get_pyblish_icon() + parent_widget = get_main_window() + cmds.menu( + MENU_NAME, + label=os.environ.get("AYON_MENU_LABEL") or "AYON", + tearOff=True, + parent="MayaWindow" + ) + + # Create context menu + cmds.menuItem( + "currentContext", + label=get_context_label(), + parent=MENU_NAME, + enable=False + ) + + cmds.setParent("..", menu=True) + + cmds.menuItem(divider=True) + + cmds.menuItem( + "Create...", + command=lambda *args: host_tools.show_publisher( + parent=parent_widget, + tab="create" + ) + ) + + cmds.menuItem( + "Load...", + command=lambda *args: host_tools.show_loader( + parent=parent_widget, + use_context=True + ) + ) + + cmds.menuItem( + "Publish...", + command=lambda *args: host_tools.show_publisher( + parent=parent_widget, + tab="publish" + ), + image=pyblish_icon + ) + + cmds.menuItem( + "Manage...", + command=lambda *args: host_tools.show_scene_inventory( + parent=parent_widget + ) + ) + + cmds.menuItem( + "Library...", + command=lambda *args: host_tools.show_library_loader( + parent=parent_widget + ) + ) + + cmds.menuItem(divider=True) + + cmds.menuItem( + "Work Files...", + command=lambda *args: host_tools.show_workfiles( + parent=parent_widget + ), + ) + + cmds.menuItem( + "Set Frame Range", + command=lambda *args: lib.reset_frame_range() + ) + + cmds.menuItem( + "Set Resolution", + command=lambda *args: lib.reset_scene_resolution() + ) + + cmds.menuItem( + "Set Colorspace", + command=lambda *args: lib.set_colorspace(), + ) + + cmds.menuItem( + "Set Render Settings", + command=lambda *args: lib_rendersettings.RenderSettings().set_default_renderer_settings() # noqa + ) + + cmds.menuItem(divider=True, parent=MENU_NAME) + cmds.menuItem( + "Build First Workfile", + parent=MENU_NAME, + command=lambda *args: BuildWorkfile().process() + ) + + cmds.menuItem( + "Look assigner...", + command=lambda *args: show_look_assigner( + parent_widget + ) + ) + + cmds.menuItem( + "Experimental tools...", + command=lambda *args: host_tools.show_experimental_tools_dialog( + parent_widget + ) + ) + + builder_menu = cmds.menuItem( + "Template Builder", + subMenu=True, + tearOff=True, + parent=MENU_NAME + ) + cmds.menuItem( + "Create Placeholder", + parent=builder_menu, + command=create_placeholder + ) + cmds.menuItem( + "Update Placeholder", + parent=builder_menu, + command=update_placeholder + ) + cmds.menuItem( + "Build Workfile from template", + parent=builder_menu, + command=build_workfile_template + ) + cmds.menuItem( + "Update Workfile from template", + parent=builder_menu, + command=update_workfile_template + ) + + cmds.setParent(MENU_NAME, menu=True) + + def 
add_scripts_menu(project_settings): + try: + import scriptsmenu.launchformaya as launchformaya + except ImportError: + log.warning( + "Skipping studio.menu install, because " + "'scriptsmenu' module seems unavailable." + ) + return + + config = project_settings["maya"]["scriptsmenu"]["definition"] + _menu = project_settings["maya"]["scriptsmenu"]["name"] + + if not config: + log.warning("Skipping studio menu, no definition found.") + return + + # run the launcher for Maya menu + studio_menu = launchformaya.main( + title=_menu.title(), + objectName=_menu.title().lower().replace(" ", "_") + ) + + # apply configuration + studio_menu.build_from_configuration(studio_menu, config) + + # Allow time for uninstallation to finish. + # We use Maya's executeDeferred instead of QTimer.singleShot + # so that it only gets called after Maya UI has initialized too. + # This is crucial with Maya 2020+ which initializes without UI + # first as a QCoreApplication + maya.utils.executeDeferred(add_menu) + cmds.evalDeferred(partial(add_scripts_menu, project_settings), + lowestPriority=True) + + +def uninstall(): + menu = _get_menu() + if menu: + log.info("Attempting to uninstall ...") + + try: + menu.deleteLater() + del menu + except Exception as e: + log.error(e) + + +def popup(): + """Pop-up the existing menu near the mouse cursor.""" + menu = _get_menu() + cursor = QtGui.QCursor() + point = cursor.pos() + menu.exec_(point) + + +def update_menu_task_label(): + """Update the task label in Avalon menu to current session""" + + if IS_HEADLESS: + return + + object_name = "{}|currentContext".format(MENU_NAME) + if not cmds.menuItem(object_name, query=True, exists=True): + log.warning("Can't find menuItem: {}".format(object_name)) + return + + label = get_context_label() + cmds.menuItem(object_name, edit=True, label=label) diff --git a/client/ayon_core/hosts/maya/api/pipeline.py b/client/ayon_core/hosts/maya/api/pipeline.py new file mode 100644 index 0000000000..95617cb90a --- /dev/null +++ b/client/ayon_core/hosts/maya/api/pipeline.py @@ -0,0 +1,767 @@ +import json +import base64 +import os +import errno +import logging +import contextlib +import shutil + +from maya import utils, cmds, OpenMaya +import maya.api.OpenMaya as om + +import pyblish.api + +from ayon_core.settings import get_project_settings +from ayon_core.host import ( + HostBase, + IWorkfileHost, + ILoadHost, + IPublishHost, + HostDirmap, +) +from ayon_core.tools.utils import host_tools +from ayon_core.tools.workfiles.lock_dialog import WorkfileLockDialog +from ayon_core.lib import ( + register_event_callback, + emit_event +) +from ayon_core.pipeline import ( + legacy_io, + get_current_project_name, + register_loader_plugin_path, + register_inventory_action_path, + register_creator_plugin_path, + deregister_loader_plugin_path, + deregister_inventory_action_path, + deregister_creator_plugin_path, + AVALON_CONTAINER_ID, +) +from ayon_core.pipeline.load import any_outdated_containers +from ayon_core.pipeline.workfile.lock_workfile import ( + create_workfile_lock, + remove_workfile_lock, + is_workfile_locked, + is_workfile_lock_enabled +) +from ayon_core.hosts.maya import MAYA_ROOT_DIR +from ayon_core.hosts.maya.lib import create_workspace_mel + +from . 
import menu, lib +from .workfile_template_builder import MayaPlaceholderLoadPlugin +from .workio import ( + open_file, + save_file, + file_extensions, + has_unsaved_changes, + work_root, + current_file +) + +log = logging.getLogger("ayon_core.hosts.maya") + +PLUGINS_DIR = os.path.join(MAYA_ROOT_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + +AVALON_CONTAINERS = ":AVALON_CONTAINERS" + + +class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "maya" + + def __init__(self): + super(MayaHost, self).__init__() + self._op_events = {} + + def install(self): + project_name = get_current_project_name() + project_settings = get_project_settings(project_name) + # process path mapping + dirmap_processor = MayaDirmap("maya", project_name, project_settings) + dirmap_processor.process_dirmap() + + pyblish.api.register_plugin_path(PUBLISH_PATH) + pyblish.api.register_host("mayabatch") + pyblish.api.register_host("mayapy") + pyblish.api.register_host("maya") + + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) + self.log.info(PUBLISH_PATH) + + self.log.info("Installing callbacks ... ") + register_event_callback("init", on_init) + + _set_project() + + if lib.IS_HEADLESS: + self.log.info(( + "Running in headless mode, skipping Maya save/open/new" + " callback installation.." + )) + + return + + self._register_callbacks() + + menu.install(project_settings) + + register_event_callback("save", on_save) + register_event_callback("open", on_open) + register_event_callback("new", on_new) + register_event_callback("before.save", on_before_save) + register_event_callback("after.save", on_after_save) + register_event_callback("before.close", on_before_close) + register_event_callback("before.file.open", before_file_open) + register_event_callback("taskChanged", on_task_changed) + register_event_callback("workfile.open.before", before_workfile_open) + register_event_callback("workfile.save.before", before_workfile_save) + register_event_callback( + "workfile.save.before", workfile_save_before_xgen + ) + register_event_callback("workfile.save.after", after_workfile_save) + + def open_workfile(self, filepath): + return open_file(filepath) + + def save_workfile(self, filepath=None): + return save_file(filepath) + + def work_root(self, session): + return work_root(session) + + def get_current_workfile(self): + return current_file() + + def workfile_has_unsaved_changes(self): + return has_unsaved_changes() + + def get_workfile_extensions(self): + return file_extensions() + + def get_containers(self): + return ls() + + def get_workfile_build_placeholder_plugins(self): + return [ + MayaPlaceholderLoadPlugin + ] + + @contextlib.contextmanager + def maintained_selection(self): + with lib.maintained_selection(): + yield + + def get_context_data(self): + data = cmds.fileInfo("OpenPypeContext", query=True) + if not data: + return {} + + data = data[0] # Maya seems to return a list + decoded = base64.b64decode(data).decode("utf-8") + return json.loads(decoded) + + def update_context_data(self, data, changes): + json_str = json.dumps(data) + encoded = base64.b64encode(json_str.encode("utf-8")) + return cmds.fileInfo("OpenPypeContext", encoded) + + def _register_callbacks(self): + for handler, event in self._op_events.copy().items(): + if event is None: + 
continue + + try: + OpenMaya.MMessage.removeCallback(event) + self._op_events[handler] = None + except RuntimeError as exc: + self.log.info(exc) + + self._op_events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save + ) + + self._op_events[_after_scene_save] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterSave, + _after_scene_save + ) + ) + + self._op_events[_before_scene_save] = ( + OpenMaya.MSceneMessage.addCheckCallback( + OpenMaya.MSceneMessage.kBeforeSaveCheck, + _before_scene_save + ) + ) + + self._op_events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterNew, _on_scene_new + ) + + self._op_events[_on_maya_initialized] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kMayaInitialized, + _on_maya_initialized + ) + ) + + self._op_events[_on_scene_open] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kAfterOpen, + _on_scene_open + ) + ) + + self._op_events[_before_scene_open] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kBeforeOpen, + _before_scene_open + ) + ) + + self._op_events[_before_close_maya] = ( + OpenMaya.MSceneMessage.addCallback( + OpenMaya.MSceneMessage.kMayaExiting, + _before_close_maya + ) + ) + + self.log.info("Installed event handler _on_scene_save..") + self.log.info("Installed event handler _before_scene_save..") + self.log.info("Installed event handler _on_after_save..") + self.log.info("Installed event handler _on_scene_new..") + self.log.info("Installed event handler _on_maya_initialized..") + self.log.info("Installed event handler _on_scene_open..") + self.log.info("Installed event handler _check_lock_file..") + self.log.info("Installed event handler _before_close_maya..") + + +def _set_project(): + """Sets the maya project to the current Session's work directory. + + Returns: + None + + """ + workdir = legacy_io.Session["AVALON_WORKDIR"] + + try: + os.makedirs(workdir) + except OSError as e: + # An already existing working directory is fine. + if e.errno == errno.EEXIST: + pass + else: + raise + + cmds.workspace(workdir, openWorkspace=True) + + +def _on_maya_initialized(*args): + emit_event("init") + + if cmds.about(batch=True): + log.warning("Running batch mode ...") + return + + # Keep reference to the main Window, once a main window exists. + lib.get_main_window() + + +def _on_scene_new(*args): + emit_event("new") + + +def _after_scene_save(*arg): + emit_event("after.save") + + +def _on_scene_save(*args): + emit_event("save") + + +def _on_scene_open(*args): + emit_event("open") + + +def _before_close_maya(*args): + emit_event("before.close") + + +def _before_scene_open(*args): + emit_event("before.file.open") + + +def _before_scene_save(return_code, client_data): + + # Default to allowing the action. Registered + # callbacks can optionally set this to False + # in order to block the operation. 
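+    # e.g. (illustrative): a registered "before.save" handler could cancel
+    # saving with OpenMaya.MScriptUtil.setBool(return_code, False)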
+ OpenMaya.MScriptUtil.setBool(return_code, True) + + emit_event( + "before.save", + {"return_code": return_code} + ) + + +def _remove_workfile_lock(): + """Remove workfile lock on current file""" + if not handle_workfile_locks(): + return + filepath = current_file() + log.info("Removing lock on current file {}...".format(filepath)) + if filepath: + remove_workfile_lock(filepath) + + +def handle_workfile_locks(): + if lib.IS_HEADLESS: + return False + project_name = get_current_project_name() + return is_workfile_lock_enabled(MayaHost.name, project_name) + + +def uninstall(): + pyblish.api.deregister_plugin_path(PUBLISH_PATH) + pyblish.api.deregister_host("mayabatch") + pyblish.api.deregister_host("mayapy") + pyblish.api.deregister_host("maya") + + deregister_loader_plugin_path(LOAD_PATH) + deregister_creator_plugin_path(CREATE_PATH) + deregister_inventory_action_path(INVENTORY_PATH) + + menu.uninstall() + + +def parse_container(container): + """Return the container node's full container data. + + Args: + container (str): A container node name. + + Returns: + dict: The container schema data for this container node. + + """ + data = lib.read(container) + + # Backwards compatibility pre-schemas for containers + data["schema"] = data.get("schema", "openpype:container-1.0") + + # Append transient data + data["objectName"] = container + + return data + + +def _ls(): + """Yields Avalon container node names. + + Used by `ls()` to retrieve the nodes and then query the full container's + data. + + Yields: + str: Avalon container node name (objectSet) + + """ + + def _maya_iterate(iterator): + """Helper to iterate a maya iterator""" + while not iterator.isDone(): + yield iterator.thisNode() + iterator.next() + + ids = {AVALON_CONTAINER_ID, + # Backwards compatibility + "pyblish.mindbender.container"} + + # Iterate over all 'set' nodes in the scene to detect whether + # they have the avalon container ".id" attribute. + fn_dep = om.MFnDependencyNode() + iterator = om.MItDependencyNodes(om.MFn.kSet) + for mobject in _maya_iterate(iterator): + if mobject.apiTypeStr != "kSet": + # Only match by exact type + continue + + fn_dep.setObject(mobject) + if not fn_dep.hasAttribute("id"): + continue + + plug = fn_dep.findPlug("id", True) + value = plug.asString() + if value in ids: + yield fn_dep.name() + + +def ls(): + """Yields containers from active Maya scene + + This is the host-equivalent of api.ls(), but instead of listing + assets on disk, it lists assets already loaded in Maya; once loaded + they are called 'containers' + + Yields: + dict: container + + """ + container_names = _ls() + for container in sorted(container_names): + yield parse_container(container) + + +def containerise(name, + namespace, + nodes, + context, + loader=None, + suffix="CON"): + """Bundle `nodes` into an assembly and imprint it with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + nodes (list): Long names of nodes to containerise + context (dict): Asset information + loader (str, optional): Name of loader used to produce this container. + suffix (str, optional): Suffix of container, defaults to `_CON`. 
+ + Returns: + container (str): Name of container assembly + + """ + container = cmds.sets(nodes, name="%s_%s_%s" % (namespace, name, suffix)) + + data = [ + ("schema", "openpype:container-2.0"), + ("id", AVALON_CONTAINER_ID), + ("name", name), + ("namespace", namespace), + ("loader", loader), + ("representation", context["representation"]["_id"]), + ] + + for key, value in data: + cmds.addAttr(container, longName=key, dataType="string") + cmds.setAttr(container + "." + key, str(value), type="string") + + main_container = cmds.ls(AVALON_CONTAINERS, type="objectSet") + if not main_container: + main_container = cmds.sets(empty=True, name=AVALON_CONTAINERS) + + # Implement #399: Maya 2019+ hide AVALON_CONTAINERS on creation.. + if cmds.attributeQuery("hiddenInOutliner", + node=main_container, + exists=True): + cmds.setAttr(main_container + ".hiddenInOutliner", True) + else: + main_container = main_container[0] + + cmds.sets(container, addElement=main_container) + + # Implement #399: Maya 2019+ hide containers in outliner + if cmds.attributeQuery("hiddenInOutliner", + node=container, + exists=True): + cmds.setAttr(container + ".hiddenInOutliner", True) + + return container + + +def on_init(): + log.info("Running callback on init..") + + def safe_deferred(fn): + """Execute deferred the function in a try-except""" + + def _fn(): + """safely call in deferred callback""" + try: + fn() + except Exception as exc: + print(exc) + + try: + utils.executeDeferred(_fn) + except Exception as exc: + print(exc) + + # Force load Alembic so referenced alembics + # work correctly on scene open + cmds.loadPlugin("AbcImport", quiet=True) + cmds.loadPlugin("AbcExport", quiet=True) + + # Force load objExport plug-in (requested by artists) + cmds.loadPlugin("objExport", quiet=True) + + if not lib.IS_HEADLESS: + launch_workfiles = os.environ.get("WORKFILES_STARTUP") + if launch_workfiles: + safe_deferred(host_tools.show_workfiles) + + from .customize import ( + override_component_mask_commands, + override_toolbox_ui + ) + safe_deferred(override_component_mask_commands) + safe_deferred(override_toolbox_ui) + + +def on_before_save(): + """Run validation for scene's FPS prior to saving""" + return lib.validate_fps() + + +def on_after_save(): + """Check if there is a lockfile after save""" + check_lock_on_current_file() + + +def check_lock_on_current_file(): + + """Check if there is a user opening the file""" + if not handle_workfile_locks(): + return + log.info("Running callback on checking the lock file...") + + # add the lock file when opening the file + filepath = current_file() + # Skip if current file is 'untitled' + if not filepath: + return + + if is_workfile_locked(filepath): + # add lockfile dialog + workfile_dialog = WorkfileLockDialog(filepath) + if not workfile_dialog.exec_(): + cmds.file(new=True) + return + + create_workfile_lock(filepath) + + +def on_before_close(): + """Delete the lock file after user quitting the Maya Scene""" + log.info("Closing Maya...") + # delete the lock file + filepath = current_file() + if handle_workfile_locks(): + remove_workfile_lock(filepath) + + +def before_file_open(): + """check lock file when the file changed""" + # delete the lock file + _remove_workfile_lock() + + +def on_save(): + """Automatically add IDs to new nodes + + Any transform of a mesh, without an existing ID, is given one + automatically on file save. 
+ """ + log.info("Running callback on save..") + # remove lockfile if users jumps over from one scene to another + _remove_workfile_lock() + + # Generate ids of the current context on nodes in the scene + nodes = lib.get_id_required_nodes(referenced_nodes=False) + for node, new_id in lib.generate_ids(nodes): + lib.set_id(node, new_id, overwrite=False) + + +def on_open(): + """On scene open let's assume the containers have changed.""" + + from ayon_core.tools.utils import SimplePopup + + # Validate FPS after update_task_from_path to + # ensure it is using correct FPS for the asset + lib.validate_fps() + lib.fix_incompatible_containers() + + if any_outdated_containers(): + log.warning("Scene has outdated content.") + + # Find maya main window + parent = lib.get_main_window() + if parent is None: + log.info("Skipping outdated content pop-up " + "because Maya window can't be found.") + else: + + # Show outdated pop-up + def _on_show_inventory(): + host_tools.show_scene_inventory(parent=parent) + + dialog = SimplePopup(parent=parent) + dialog.setWindowTitle("Maya scene has outdated content") + dialog.set_message("There are outdated containers in " + "your Maya scene.") + dialog.on_clicked.connect(_on_show_inventory) + dialog.show() + + # create lock file for the maya scene + check_lock_on_current_file() + + +def on_new(): + """Set project resolution and fps when create a new file""" + log.info("Running callback on new..") + with lib.suspended_refresh(): + lib.set_context_settings() + + _remove_workfile_lock() + + +def on_task_changed(): + """Wrapped function of app initialize and maya's on task changed""" + # Run + menu.update_menu_task_label() + + workdir = legacy_io.Session["AVALON_WORKDIR"] + if os.path.exists(workdir): + log.info("Updating Maya workspace for task change to %s", workdir) + _set_project() + + # Set Maya fileDialog's start-dir to /scenes + frule_scene = cmds.workspace(fileRuleEntry="scene") + cmds.optionVar(stringValue=("browserLocationmayaBinaryscene", + workdir + "/" + frule_scene)) + + else: + log.warning(( + "Can't set project for new context because path does not exist: {}" + ).format(workdir)) + + with lib.suspended_refresh(): + lib.set_context_settings() + lib.update_content_on_context_change() + + +def before_workfile_open(): + if handle_workfile_locks(): + _remove_workfile_lock() + + +def before_workfile_save(event): + project_name = get_current_project_name() + if handle_workfile_locks(): + _remove_workfile_lock() + workdir_path = event["workdir_path"] + if workdir_path: + create_workspace_mel(workdir_path, project_name) + + +def workfile_save_before_xgen(event): + """Manage Xgen external files when switching context. + + Xgen has various external files that needs to be unique and relative to the + workfile, so we need to copy and potentially overwrite these files when + switching context. 
+ + Args: + event (Event) - openpype/lib/events.py + """ + if not cmds.pluginInfo("xgenToolkit", query=True, loaded=True): + return + + import xgenm + + current_work_dir = legacy_io.Session["AVALON_WORKDIR"].replace("\\", "/") + expected_work_dir = event.data["workdir_path"].replace("\\", "/") + if current_work_dir == expected_work_dir: + return + + palettes = cmds.ls(type="xgmPalette", long=True) + if not palettes: + return + + transfers = [] + overwrites = [] + attribute_changes = {} + attrs = ["xgFileName", "xgBaseFile"] + for palette in palettes: + sanitized_palette = palette.replace("|", "") + project_path = xgenm.getAttr("xgProjectPath", sanitized_palette) + _, maya_extension = os.path.splitext(event.data["filename"]) + + for attr in attrs: + node_attr = "{}.{}".format(palette, attr) + attr_value = cmds.getAttr(node_attr) + + if not attr_value: + continue + + source = os.path.join(project_path, attr_value) + + attr_value = event.data["filename"].replace( + maya_extension, + "__{}{}".format( + sanitized_palette.replace(":", "__"), + os.path.splitext(attr_value)[1] + ) + ) + target = os.path.join(expected_work_dir, attr_value) + + transfers.append((source, target)) + attribute_changes[node_attr] = attr_value + + relative_path = xgenm.getAttr( + "xgDataPath", sanitized_palette + ).split(os.pathsep)[0] + absolute_path = relative_path.replace("${PROJECT}", project_path) + for root, _, files in os.walk(absolute_path): + for f in files: + source = os.path.join(root, f).replace("\\", "/") + target = source.replace(project_path, expected_work_dir + "/") + transfers.append((source, target)) + if os.path.exists(target): + overwrites.append(target) + + # Ask user about overwriting files. + if overwrites: + log.warning( + "WARNING! Potential loss of data.\n\n" + "Found duplicate Xgen files in new context.\n{}".format( + "\n".join(overwrites) + ) + ) + return + + for source, destination in transfers: + if not os.path.exists(os.path.dirname(destination)): + os.makedirs(os.path.dirname(destination)) + shutil.copy(source, destination) + + for attribute, value in attribute_changes.items(): + cmds.setAttr(attribute, value, type="string") + + +def after_workfile_save(event): + workfile_name = event["filename"] + if ( + handle_workfile_locks() + and workfile_name + and not is_workfile_locked(workfile_name) + ): + create_workfile_lock(workfile_name) + + +class MayaDirmap(HostDirmap): + def on_enable_dirmap(self): + cmds.dirmap(en=True) + + def dirmap_routine(self, source_path, destination_path): + cmds.dirmap(m=(source_path, destination_path)) + cmds.dirmap(m=(destination_path, source_path)) diff --git a/client/ayon_core/hosts/maya/api/plugin.py b/client/ayon_core/hosts/maya/api/plugin.py new file mode 100644 index 0000000000..c5e3f42d10 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/plugin.py @@ -0,0 +1,957 @@ +import json +import os +from abc import ABCMeta + +import qargparse +import six +from maya import cmds +from maya.app.renderSetup.model import renderSetup + +from ayon_core.lib import BoolDef, Logger +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( + AVALON_CONTAINER_ID, + Anatomy, + + CreatedInstance, + Creator as NewCreator, + AutoCreator, + HiddenCreator, + + CreatorError, + LegacyCreator, + LoaderPlugin, + get_representation_path, +) +from ayon_core.pipeline.load import LoadError +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline.create import get_subset_name + +from . 
import lib
+from .lib import imprint, read
+from .pipeline import containerise
+
+log = Logger.get_logger()
+
+
+def _get_attr(node, attr, default=None):
+    """Helper to get an attribute, allowing the attribute to not exist."""
+    if not cmds.attributeQuery(attr, node=node, exists=True):
+        return default
+    return cmds.getAttr("{}.{}".format(node, attr))
+
+
+# Backwards compatibility: these functions have been moved to lib.
+def get_reference_node(*args, **kwargs):
+    """Get the reference node from the container members.
+
+    Deprecated:
+        This function was moved and will be removed in 3.16.x.
+    """
+    msg = "Function 'get_reference_node' has been moved."
+    log.warning(msg)
+    cmds.warning(msg)
+    return lib.get_reference_node(*args, **kwargs)
+
+
+def get_reference_node_parents(*args, **kwargs):
+    """
+    Deprecated:
+        This function was moved and will be removed in 3.16.x.
+    """
+    msg = "Function 'get_reference_node_parents' has been moved."
+    log.warning(msg)
+    cmds.warning(msg)
+    return lib.get_reference_node_parents(*args, **kwargs)
+
+
+class Creator(LegacyCreator):
+    defaults = ['Main']
+
+    def process(self):
+        nodes = list()
+
+        with lib.undo_chunk():
+            if (self.options or {}).get("useSelection"):
+                nodes = cmds.ls(selection=True)
+
+            instance = cmds.sets(nodes, name=self.name)
+            lib.imprint(instance, self.data)
+
+        return instance
+
+
+@six.add_metaclass(ABCMeta)
+class MayaCreatorBase(object):
+
+    @staticmethod
+    def cache_subsets(shared_data):
+        """Cache instances for Creators to shared data.
+
+        Create the `maya_cached_subsets` key in shared data when needed and
+        fill it with all collected instances from the scene, keyed by their
+        respective creator identifiers.
+
+        If legacy instances are detected in the scene, also create
+        `maya_cached_legacy_subsets` and fill it with all legacy subsets,
+        keyed by family.
+
+        Args:
+            shared_data (Dict[str, Any]): Shared data.
+
+        Returns:
+            Dict[str, Any]: Shared data dictionary.
+
+        """
+        if shared_data.get("maya_cached_subsets") is None:
+            cache = dict()
+            cache_legacy = dict()
+
+            for node in cmds.ls(type="objectSet"):
+
+                if _get_attr(node, attr="id") != "pyblish.avalon.instance":
+                    continue
+
+                creator_id = _get_attr(node, attr="creator_identifier")
+                if creator_id is not None:
+                    # creator instance
+                    cache.setdefault(creator_id, []).append(node)
+                else:
+                    # legacy instance
+                    family = _get_attr(node, attr="family")
+                    if family is None:
+                        # must be a broken instance
+                        continue
+
+                    cache_legacy.setdefault(family, []).append(node)
+
+            shared_data["maya_cached_subsets"] = cache
+            shared_data["maya_cached_legacy_subsets"] = cache_legacy
+        return shared_data
+
+    def get_publish_families(self):
+        """Return families for the instances of this creator.
+
+        Allow a Creator to define multiple families so that a creator can
+        e.g. specify `usd` and `usdMaya` and another USD creator can also
+        specify `usd` but apply different extractors like `usdMultiverse`.
+
+        There is no need to override this method if you only have the
+        primary family defined by the `family` property as that will always
+        be set.
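+
+        For example, an override of this method could return
+        ``["usd", "usdMaya"]`` to tag instances with both families.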
+ + Returns: + list: families for instances of this creator + + """ + return [] + + def imprint_instance_node(self, node, data): + + # We never store the instance_node as value on the node since + # it's the node name itself + data.pop("instance_node", None) + data.pop("instance_id", None) + + # Don't store `families` since it's up to the creator itself + # to define the initial publish families - not a stored attribute of + # `families` + data.pop("families", None) + + # We store creator attributes at the root level and assume they + # will not clash in names with `subset`, `task`, etc. and other + # default names. This is just so these attributes in many cases + # are still editable in the maya UI by artists. + # note: pop to move to end of dict to sort attributes last on the node + creator_attributes = data.pop("creator_attributes", {}) + + # We only flatten value types which `imprint` function supports + json_creator_attributes = {} + for key, value in dict(creator_attributes).items(): + if isinstance(value, (list, tuple, dict)): + creator_attributes.pop(key) + json_creator_attributes[key] = value + + # Flatten remaining creator attributes to the node itself + data.update(creator_attributes) + + # We know the "publish_attributes" will be complex data of + # settings per plugins, we'll store this as a flattened json structure + # pop to move to end of dict to sort attributes last on the node + data["publish_attributes"] = json.dumps( + data.pop("publish_attributes", {}) + ) + + # Persist the non-flattened creator attributes (special value types, + # like multiselection EnumDef) + data["creator_attributes"] = json.dumps(json_creator_attributes) + + # Since we flattened the data structure for creator attributes we want + # to correctly detect which flattened attributes should end back in the + # creator attributes when reading the data from the node, so we store + # the relevant keys as a string + data["__creator_attributes_keys"] = ",".join(creator_attributes.keys()) + + # Kill any existing attributes just so we can imprint cleanly again + for attr in data.keys(): + if cmds.attributeQuery(attr, node=node, exists=True): + cmds.deleteAttr("{}.{}".format(node, attr)) + + return imprint(node, data) + + def read_instance_node(self, node): + node_data = read(node) + + # Never care about a cbId attribute on the object set + # being read as 'data' + node_data.pop("cbId", None) + + # Make sure we convert any creator attributes from the json string + creator_attributes = node_data.get("creator_attributes") + if creator_attributes: + node_data["creator_attributes"] = json.loads(creator_attributes) + else: + node_data["creator_attributes"] = {} + + # Move the relevant attributes into "creator_attributes" that + # we flattened originally + creator_attribute_keys = node_data.pop("__creator_attributes_keys", + "").split(",") + for key in creator_attribute_keys: + if key in node_data: + node_data["creator_attributes"][key] = node_data.pop(key) + + # Make sure we convert any publish attributes from the json string + publish_attributes = node_data.get("publish_attributes") + if publish_attributes: + node_data["publish_attributes"] = json.loads(publish_attributes) + + # Explicitly re-parse the node name + node_data["instance_node"] = node + node_data["instance_id"] = node + + # If the creator plug-in specifies + families = self.get_publish_families() + if families: + node_data["families"] = families + + return node_data + + def _default_collect_instances(self): + 
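"""Collect this creator's instances from the cached scene subsets."""
+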
self.cache_subsets(self.collection_shared_data)
+        cached_subsets = self.collection_shared_data["maya_cached_subsets"]
+        for node in cached_subsets.get(self.identifier, []):
+            node_data = self.read_instance_node(node)
+
+            created_instance = CreatedInstance.from_existing(node_data, self)
+            self._add_instance_to_context(created_instance)
+
+    def _default_update_instances(self, update_list):
+        for created_inst, _changes in update_list:
+            data = created_inst.data_to_store()
+            node = data.get("instance_node")
+
+            self.imprint_instance_node(node, data)
+
+    def _default_remove_instances(self, instances):
+        """Remove specified instances from the scene.
+
+        This deletes the instance's objectSet node and removes the instance
+        from the create context.
+
+        """
+        for instance in instances:
+            node = instance.data.get("instance_node")
+            if node:
+                cmds.delete(node)
+
+            self._remove_instance_from_context(instance)
+
+
+@six.add_metaclass(ABCMeta)
+class MayaCreator(NewCreator, MayaCreatorBase):
+
+    settings_category = "maya"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        members = list()
+        if pre_create_data.get("use_selection"):
+            members = cmds.ls(selection=True)
+
+        # Allow a Creator to define multiple families
+        publish_families = self.get_publish_families()
+        if publish_families:
+            families = instance_data.setdefault("families", [])
+            for family in self.get_publish_families():
+                if family not in families:
+                    families.append(family)
+
+        with lib.undo_chunk():
+            instance_node = cmds.sets(members, name=subset_name)
+            instance_data["instance_node"] = instance_node
+            instance = CreatedInstance(
+                self.family,
+                subset_name,
+                instance_data,
+                self)
+            self._add_instance_to_context(instance)
+
+            self.imprint_instance_node(instance_node,
+                                       data=instance.data_to_store())
+            return instance
+
+    def collect_instances(self):
+        return self._default_collect_instances()
+
+    def update_instances(self, update_list):
+        return self._default_update_instances(update_list)
+
+    def remove_instances(self, instances):
+        return self._default_remove_instances(instances)
+
+    def get_pre_create_attr_defs(self):
+        return [
+            BoolDef("use_selection",
+                    label="Use selection",
+                    default=True)
+        ]
+
+
+class MayaAutoCreator(AutoCreator, MayaCreatorBase):
+    """Automatically triggered creator for Maya.
+
+    The plugin is not visible in the UI, and the 'create' method does not
+    expect any arguments.
+    """
+
+    settings_category = "maya"
+
+    def collect_instances(self):
+        return self._default_collect_instances()
+
+    def update_instances(self, update_list):
+        return self._default_update_instances(update_list)
+
+    def remove_instances(self, instances):
+        return self._default_remove_instances(instances)
+
+
+class MayaHiddenCreator(HiddenCreator, MayaCreatorBase):
+    """Hidden creator for Maya.
+
+    The plugin is not visible in the UI and does not have strictly defined
+    arguments for the 'create' method.
+    """
+
+    settings_category = "maya"
+
+    def create(self, *args, **kwargs):
+        return MayaCreator.create(self, *args, **kwargs)
+
+    def collect_instances(self):
+        return self._default_collect_instances()
+
+    def update_instances(self, update_list):
+        return self._default_update_instances(update_list)
+
+    def remove_instances(self, instances):
+        return self._default_remove_instances(instances)
+
+
+def ensure_namespace(namespace):
+    """Make sure the namespace exists.
+
+    Args:
+        namespace (str): The preferred namespace name.
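+
+    For example, ``ensure_namespace("_CreateRender")`` returns the existing
+    namespace when present, and otherwise creates it first.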
+
+    Returns:
+        str: The generated or existing namespace
+
+    """
+    exists = cmds.namespace(exists=namespace)
+    if exists:
+        return namespace
+    else:
+        return cmds.namespace(add=namespace)
+
+
+class RenderlayerCreator(NewCreator, MayaCreatorBase):
+    """Creator which creates an instance per renderlayer in the workfile.
+
+    Creates and manages a renderlayer subset per renderlayer in the workfile.
+    This generates a singleton node in the scene which, if it exists, tells
+    the Creator to collect Maya rendersetup renderlayers as individual
+    instances. As such, triggering create doesn't actually create the
+    instance node per layer but only the node which tells the Creator it
+    may now collect an instance per renderlayer.
+
+    """
+
+    # This is required to be overridden in a subclass
+    singleton_node_name = ""
+
+    # This is optional to be overridden in a subclass
+    layer_instance_prefix = None
+
+    def _get_singleton_node(self, return_all=False):
+        nodes = lib.lsattr("pre_creator_identifier", self.identifier)
+        if nodes:
+            return nodes if return_all else nodes[0]
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # A Renderlayer is never explicitly created using the create method.
+        # Instead, renderlayers from the scene are collected. Thus "create"
+        # would only ever be called to say, 'hey, please refresh collect'
+        self.create_singleton_node()
+
+        # If no render layers are present, create a default one with
+        # an asterisk selector
+        rs = renderSetup.instance()
+        if not rs.getRenderLayers():
+            render_layer = rs.createRenderLayer("Main")
+            collection = render_layer.createCollection("defaultCollection")
+            collection.getSelector().setPattern('*')
+
+        # Through RenderlayerCreator.create the renderlayer instances appear
+        # directly even though it just collects scene renderlayers. This
+        # doesn't actually 'create' any scene contents.
+        self.collect_instances()
+
+    def create_singleton_node(self):
+        if self._get_singleton_node():
+            raise CreatorError("A Render instance already exists - only "
+                               "one can be configured.")
+
+        with lib.undo_chunk():
+            node = cmds.sets(empty=True, name=self.singleton_node_name)
+            lib.imprint(node, data={
+                "pre_creator_identifier": self.identifier
+            })
+
+        return node
+
+    def collect_instances(self):
+
+        # We only collect if the global render instance exists
+        if not self._get_singleton_node():
+            return
+
+        rs = renderSetup.instance()
+        layers = rs.getRenderLayers()
+        for layer in layers:
+            layer_instance_node = self.find_layer_instance_node(layer)
+            if layer_instance_node:
+                data = self.read_instance_node(layer_instance_node)
+                instance = CreatedInstance.from_existing(data, creator=self)
+            else:
+                # No existing scene instance node for this layer. Note that
+                # this instance will not have the `instance_node` data yet
+                # until it's been saved/persisted at least once.
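+                # The layer itself is attached below via `transient_data` so
+                # that `update_instances` can create the node lazily on the
+                # first save.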
+ project_name = self.create_context.get_current_project_name() + asset_name = self.create_context.get_current_asset_name() + instance_data = { + "folderPath": asset_name, + "task": self.create_context.get_current_task_name(), + "variant": layer.name(), + } + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + layer.name(), + instance_data["task"], + asset_doc, + project_name) + + instance = CreatedInstance( + family=self.family, + subset_name=subset_name, + data=instance_data, + creator=self + ) + + instance.transient_data["layer"] = layer + self._add_instance_to_context(instance) + + def find_layer_instance_node(self, layer): + connected_sets = cmds.listConnections( + "{}.message".format(layer.name()), + source=False, + destination=True, + type="objectSet" + ) or [] + + for node in connected_sets: + if not cmds.attributeQuery("creator_identifier", + node=node, + exists=True): + continue + + creator_identifier = cmds.getAttr(node + ".creator_identifier") + if creator_identifier == self.identifier: + self.log.info("Found node: {}".format(node)) + return node + + def _create_layer_instance_node(self, layer): + + # We only collect if a CreateRender instance exists + create_render_set = self._get_singleton_node() + if not create_render_set: + raise CreatorError("Creating a renderlayer instance node is not " + "allowed if no 'CreateRender' instance exists") + + namespace = "_{}".format(self.singleton_node_name) + namespace = ensure_namespace(namespace) + + name = "{}:{}".format(namespace, layer.name()) + render_set = cmds.sets(name=name, empty=True) + + # Keep an active link with the renderlayer so we can retrieve it + # later by a physical maya connection instead of relying on the layer + # name + cmds.addAttr(render_set, longName="renderlayer", at="message") + cmds.connectAttr("{}.message".format(layer.name()), + "{}.renderlayer".format(render_set), force=True) + + # Add the set to the 'CreateRender' set. + cmds.sets(render_set, forceElement=create_render_set) + + return render_set + + def update_instances(self, update_list): + # We only generate the persisting layer data into the scene once + # we save with the UI on e.g. validate or publish + for instance, _changes in update_list: + instance_node = instance.data.get("instance_node") + + # Ensure a node exists to persist the data to + if not instance_node: + layer = instance.transient_data["layer"] + instance_node = self._create_layer_instance_node(layer) + instance.data["instance_node"] = instance_node + + self.imprint_instance_node(instance_node, + data=instance.data_to_store()) + + def imprint_instance_node(self, node, data): + # Do not ever try to update the `renderlayer` since it'll try + # to remove the attribute and recreate it but fail to keep it a + # message attribute link. We only ever imprint that on the initial + # node creation. + # TODO: Improve how this is handled + data.pop("renderlayer", None) + data.get("creator_attributes", {}).pop("renderlayer", None) + + return super(RenderlayerCreator, self).imprint_instance_node(node, + data=data) + + def remove_instances(self, instances): + """Remove specified instances from the scene. + + This is only removing `id` parameter so instance is no longer + instance, because it might contain valuable data for artist. + + """ + # Instead of removing the single instance or renderlayers we instead + # remove the CreateRender node this creator relies on to decide whether + # it should collect anything at all. 
+ nodes = self._get_singleton_node(return_all=True) + if nodes: + cmds.delete(nodes) + + # Remove ALL the instances even if only one gets deleted + for instance in list(self.create_context.instances): + if instance.get("creator_identifier") == self.identifier: + self._remove_instance_from_context(instance) + + # Remove the stored settings per renderlayer too + node = instance.data.get("instance_node") + if node and cmds.objExists(node): + cmds.delete(node) + + def get_subset_name( + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None + ): + # creator.family != 'render' as expected + return get_subset_name(self.layer_instance_prefix, + variant, + task_name, + asset_doc, + project_name) + + +class Loader(LoaderPlugin): + hosts = ["maya"] + + load_settings = {} # defined in settings + + @classmethod + def apply_settings(cls, project_settings, system_settings): + super(Loader, cls).apply_settings(project_settings, system_settings) + cls.load_settings = project_settings['maya']['load'] + + def get_custom_namespace_and_group(self, context, options, loader_key): + """Queries Settings to get custom template for namespace and group. + + Group template might be empty >> this forces to not wrap imported items + into separate group. + + Args: + context (dict) + options (dict): artist modifiable options from dialog + loader_key (str): key to get separate configuration from Settings + ('reference_loader'|'import_loader') + """ + + options["attach_to_root"] = True + custom_naming = self.load_settings[loader_key] + + if not custom_naming['namespace']: + raise LoadError("No namespace specified in " + "Maya ReferenceLoader settings") + elif not custom_naming['group_name']: + self.log.debug("No custom group_name, no group will be created.") + options["attach_to_root"] = False + + asset = context['asset'] + subset = context['subset'] + formatting_data = { + "asset_name": asset['name'], + "asset_type": asset['type'], + "folder": { + "name": asset["name"], + }, + "subset": subset['name'], + "family": ( + subset['data'].get('family') or + subset['data']['families'][0] + ) + } + + custom_namespace = custom_naming['namespace'].format( + **formatting_data + ) + + custom_group_name = custom_naming['group_name'].format( + **formatting_data + ) + + return custom_group_name, custom_namespace, options + + +class ReferenceLoader(Loader): + """A basic ReferenceLoader for Maya + + This will implement the basic behavior for a loader to inherit from that + will containerize the reference and will implement the `remove` and + `update` logic. + + """ + + options = [ + qargparse.Integer( + "count", + label="Count", + default=1, + min=1, + help="How many times to load?" + ), + qargparse.Double3( + "offset", + label="Position Offset", + help="Offset loaded models for easier selection." + ), + qargparse.Boolean( + "attach_to_root", + label="Group imported asset", + default=True, + help="Should a group be created to encapsulate" + " imported representation ?" + ) + ] + + def load( + self, + context, + name=None, + namespace=None, + options=None + ): + path = self.filepath_from_context(context) + assert os.path.exists(path), "%s does not exist." 
% path + + custom_group_name, custom_namespace, options = \ + self.get_custom_namespace_and_group(context, options, + "reference_loader") + + count = options.get("count") or 1 + + loaded_containers = [] + for c in range(0, count): + namespace = lib.get_custom_namespace(custom_namespace) + group_name = "{}:{}".format( + namespace, + custom_group_name + ) + + options['group_name'] = group_name + + # Offset loaded subset + if "offset" in options: + offset = [i * c for i in options["offset"]] + options["translate"] = offset + + self.log.info(options) + + self.process_reference( + context=context, + name=name, + namespace=namespace, + options=options + ) + + # Only containerize if any nodes were loaded by the Loader + nodes = self[:] + if not nodes: + return + + ref_node = lib.get_reference_node(nodes, self.log) + container = containerise( + name=name, + namespace=namespace, + nodes=[ref_node], + context=context, + loader=self.__class__.__name__ + ) + loaded_containers.append(container) + self._organize_containers(nodes, container) + c += 1 + + return loaded_containers + + def process_reference(self, context, name, namespace, options): + """To be implemented by subclass""" + raise NotImplementedError("Must be implemented by subclass") + + def update(self, container, representation): + from maya import cmds + + from ayon_core.hosts.maya.api.lib import get_container_members + + node = container["objectName"] + + path = get_representation_path(representation) + + # Get reference node from container members + members = get_container_members(node) + reference_node = lib.get_reference_node(members, self.log) + namespace = cmds.referenceQuery(reference_node, namespace=True) + + file_type = { + "ma": "mayaAscii", + "mb": "mayaBinary", + "abc": "Alembic", + "fbx": "FBX", + "usd": "USD Import" + }.get(representation["name"]) + + assert file_type, "Unsupported representation: %s" % representation + + assert os.path.exists(path), "%s does not exist." % path + + # Need to save alembic settings and reapply, cause referencing resets + # them to incoming data. + alembic_attrs = ["speed", "offset", "cycleType", "time"] + alembic_data = {} + if representation["name"] == "abc": + alembic_nodes = cmds.ls( + "{}:*".format(namespace), type="AlembicNode" + ) + if alembic_nodes: + for attr in alembic_attrs: + node_attr = "{}.{}".format(alembic_nodes[0], attr) + data = { + "input": lib.get_attribute_input(node_attr), + "value": cmds.getAttr(node_attr) + } + + alembic_data[attr] = data + else: + self.log.debug("No alembic nodes found in {}".format(members)) + + try: + path = self.prepare_root_value(path, + representation["context"] + ["project"] + ["name"]) + content = cmds.file(path, + loadReference=reference_node, + type=file_type, + returnNewNodes=True) + except RuntimeError as exc: + # When changing a reference to a file that has load errors the + # command will raise an error even if the file is still loaded + # correctly (e.g. when raising errors on Arnold attributes) + # When the file is loaded and has content, we consider it's fine. + if not cmds.referenceQuery(reference_node, isLoaded=True): + raise + + content = cmds.referenceQuery(reference_node, + nodes=True, + dagPath=True) + if not content: + raise + + self.log.warning("Ignoring file read error:\n%s", exc) + + self._organize_containers(content, container["objectName"]) + + # Reapply alembic settings. 
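+        # Restore the input connections and values captured before the
+        # reference was swapped.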
+ if representation["name"] == "abc" and alembic_data: + alembic_nodes = cmds.ls( + "{}:*".format(namespace), type="AlembicNode" + ) + if alembic_nodes: + alembic_node = alembic_nodes[0] # assume single AlembicNode + for attr, data in alembic_data.items(): + node_attr = "{}.{}".format(alembic_node, attr) + input = lib.get_attribute_input(node_attr) + if data["input"]: + if data["input"] != input: + cmds.connectAttr( + data["input"], node_attr, force=True + ) + else: + if input: + cmds.disconnectAttr(input, node_attr) + cmds.setAttr(node_attr, data["value"]) + + # Fix PLN-40 for older containers created with Avalon that had the + # `.verticesOnlySet` set to True. + if cmds.getAttr("{}.verticesOnlySet".format(node)): + self.log.info("Setting %s.verticesOnlySet to False", node) + cmds.setAttr("{}.verticesOnlySet".format(node), False) + + # Remove any placeHolderList attribute entries from the set that + # are remaining from nodes being removed from the referenced file. + members = cmds.sets(node, query=True) + invalid = [x for x in members if ".placeHolderList" in x] + if invalid: + cmds.sets(invalid, remove=node) + + # Update metadata + cmds.setAttr("{}.representation".format(node), + str(representation["_id"]), + type="string") + + # When an animation or pointcache gets connected to an Xgen container, + # the compound attribute "xgenContainers" gets created. When animation + # containers gets updated we also need to update the cacheFileName on + # the Xgen collection. + compound_name = "xgenContainers" + if cmds.objExists("{}.{}".format(node, compound_name)): + import xgenm + container_amount = cmds.getAttr( + "{}.{}".format(node, compound_name), size=True + ) + # loop through all compound children + for i in range(container_amount): + attr = "{}.{}[{}].container".format(node, compound_name, i) + objectset = cmds.listConnections(attr)[0] + reference_node = cmds.sets(objectset, query=True)[0] + palettes = cmds.ls( + cmds.referenceQuery(reference_node, nodes=True), + type="xgmPalette" + ) + for palette in palettes: + for description in xgenm.descriptions(palette): + xgenm.setAttr( + "cacheFileName", + path.replace("\\", "/"), + palette, + description, + "SplinePrimitive" + ) + + # Refresh UI and viewport. + de = xgenm.xgGlobal.DescriptionEditor + de.refresh("Full") + + def remove(self, container): + """Remove an existing `container` from Maya scene + + Deprecated; this functionality is replaced by `api.remove()` + + Arguments: + container (openpype:container-1.0): Which container + to remove from scene. + + """ + from maya import cmds + + node = container["objectName"] + + # Assume asset has been referenced + members = cmds.sets(node, query=True) + reference_node = lib.get_reference_node(members, self.log) + + assert reference_node, ("Imported container not supported; " + "container must be referenced.") + + self.log.info("Removing '%s' from Maya.." % container["name"]) + + namespace = cmds.referenceQuery(reference_node, namespace=True) + fname = cmds.referenceQuery(reference_node, filename=True) + cmds.file(fname, removeReference=True) + + try: + cmds.delete(node) + except ValueError: + # Already implicitly deleted by Maya upon removing reference + pass + + try: + # If container is not automatically cleaned up by May (issue #118) + cmds.namespace(removeNamespace=namespace, + deleteNamespaceContent=True) + except RuntimeError: + pass + + def prepare_root_value(self, file_url, project_name): + """Replace root value with env var placeholder. 
+ + Use ${AYON_PROJECT_ROOT_WORK} (or any other root) instead of proper + root value when storing referenced url into a workfile. + Useful for remote workflows with SiteSync. + + Args: + file_url (str) + project_name (dict) + Returns: + (str) + """ + settings = get_project_settings(project_name) + use_env_var_as_root = (settings["maya"] + ["maya-dirmap"] + ["use_env_var_as_root"]) + if use_env_var_as_root: + anatomy = Anatomy(project_name) + file_url = anatomy.replace_root_with_env_key(file_url, '${{{}}}') + + return file_url + + @staticmethod + def _organize_containers(nodes, container): + # type: (list, str) -> None + """Put containers in loaded data to correct hierarchy.""" + for node in nodes: + id_attr = "{}.id".format(node) + if not cmds.attributeQuery("id", node=node, exists=True): + continue + if cmds.getAttr(id_attr) == AVALON_CONTAINER_ID: + cmds.sets(node, forceElement=container) diff --git a/openpype/hosts/maya/api/render_setup_tools.py b/client/ayon_core/hosts/maya/api/render_setup_tools.py similarity index 100% rename from openpype/hosts/maya/api/render_setup_tools.py rename to client/ayon_core/hosts/maya/api/render_setup_tools.py diff --git a/openpype/hosts/maya/api/setdress.py b/client/ayon_core/hosts/maya/api/setdress.py similarity index 99% rename from openpype/hosts/maya/api/setdress.py rename to client/ayon_core/hosts/maya/api/setdress.py index 7624aacd0f..7a1054cc49 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/client/ayon_core/hosts/maya/api/setdress.py @@ -9,14 +9,14 @@ from maya import cmds -from openpype.client import ( +from ayon_core.client import ( get_version_by_name, get_last_version_by_subset_id, get_representation_by_id, get_representation_by_name, get_representation_parents, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( schema, discover_loader_plugins, loaders_from_representation, @@ -26,7 +26,7 @@ get_representation_path, get_current_project_name, ) -from openpype.hosts.maya.api.lib import ( +from ayon_core.hosts.maya.api.lib import ( matrix_equals, unique_namespace, get_container_transforms, diff --git a/openpype/hosts/maya/api/shader_definition_editor.py b/client/ayon_core/hosts/maya/api/shader_definition_editor.py similarity index 97% rename from openpype/hosts/maya/api/shader_definition_editor.py rename to client/ayon_core/hosts/maya/api/shader_definition_editor.py index 6edafb1f35..04e8dded6f 100644 --- a/openpype/hosts/maya/api/shader_definition_editor.py +++ b/client/ayon_core/hosts/maya/api/shader_definition_editor.py @@ -6,8 +6,8 @@ """ import os from qtpy import QtWidgets, QtCore, QtGui -from openpype.client.mongo import OpenPypeMongoConnection -from openpype import resources +from ayon_core.client.mongo import OpenPypeMongoConnection +from ayon_core import resources import gridfs @@ -31,7 +31,7 @@ def __init__(self, parent=None): self.setObjectName("shaderDefinitionEditor") self.setWindowTitle("OpenPype shader name definition editor") - icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) self.setWindowIcon(icon) self.setWindowFlags(QtCore.Qt.Window) self.setParent(parent) diff --git a/client/ayon_core/hosts/maya/api/workfile_template_builder.py b/client/ayon_core/hosts/maya/api/workfile_template_builder.py new file mode 100644 index 0000000000..c3c61e5444 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/workfile_template_builder.py @@ -0,0 +1,375 @@ +import json + +from maya import cmds + +from ayon_core.pipeline import registered_host, 
get_current_asset_name +from ayon_core.pipeline.workfile.workfile_template_builder import ( + TemplateAlreadyImported, + AbstractTemplateBuilder, + PlaceholderPlugin, + LoadPlaceholderItem, + PlaceholderLoadMixin, +) +from ayon_core.tools.workfile_template_build import ( + WorkfileBuildPlaceholderDialog, +) + +from .lib import read, imprint, get_reference_node, get_main_window + +PLACEHOLDER_SET = "PLACEHOLDERS_SET" + + +class MayaTemplateBuilder(AbstractTemplateBuilder): + """Concrete implementation of AbstractTemplateBuilder for maya""" + + use_legacy_creators = True + + def import_template(self, path): + """Import template into current scene. + Block if a template is already loaded. + + Args: + path (str): A path to current template (usually given by + get_template_preset implementation) + + Returns: + bool: Whether the template was successfully imported or not + """ + + if cmds.objExists(PLACEHOLDER_SET): + raise TemplateAlreadyImported(( + "Build template already loaded\n" + "Clean scene if needed (File > New Scene)" + )) + + cmds.sets(name=PLACEHOLDER_SET, empty=True) + new_nodes = cmds.file( + path, + i=True, + returnNewNodes=True, + preserveReferences=True, + loadReferenceDepth="all", + ) + + # make default cameras non-renderable + default_cameras = [cam for cam in cmds.ls(cameras=True) + if cmds.camera(cam, query=True, startupCamera=True)] + for cam in default_cameras: + if not cmds.attributeQuery("renderable", node=cam, exists=True): + self.log.debug( + "Camera {} has no attribute 'renderable'".format(cam) + ) + continue + cmds.setAttr("{}.renderable".format(cam), 0) + + cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True) + + imported_sets = cmds.ls(new_nodes, set=True) + if not imported_sets: + return True + + # update imported sets information + asset_name = get_current_asset_name() + for node in imported_sets: + if not cmds.attributeQuery("id", node=node, exists=True): + continue + if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance": + continue + if not cmds.attributeQuery("asset", node=node, exists=True): + continue + + cmds.setAttr( + "{}.asset".format(node), asset_name, type="string") + + return True + + +class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): + identifier = "maya.load" + label = "Maya load" + + def _collect_scene_placeholders(self): + # Cache placeholder data to shared data + placeholder_nodes = self.builder.get_shared_populate_data( + "placeholder_nodes" + ) + if placeholder_nodes is None: + attributes = cmds.ls("*.plugin_identifier", long=True) + placeholder_nodes = {} + for attribute in attributes: + node_name = attribute.rpartition(".")[0] + placeholder_nodes[node_name] = ( + self._parse_placeholder_node_data(node_name) + ) + + self.builder.set_shared_populate_data( + "placeholder_nodes", placeholder_nodes + ) + return placeholder_nodes + + def _parse_placeholder_node_data(self, node_name): + placeholder_data = read(node_name) + parent_name = ( + cmds.getAttr(node_name + ".parent", asString=True) + or node_name.rpartition("|")[0] + or "" + ) + if parent_name: + siblings = cmds.listRelatives(parent_name, children=True) + else: + siblings = cmds.ls(assemblies=True) + node_shortname = node_name.rpartition("|")[2] + current_index = cmds.getAttr(node_name + ".index", asString=True) + if current_index < 0: + current_index = siblings.index(node_shortname) + + placeholder_data.update({ + "parent": parent_name, + "index": current_index + }) + return placeholder_data + + def _create_placeholder_name(self, placeholder_data): 
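+        """Build a readable locator name from the placeholder data."""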
+        placeholder_name_parts = placeholder_data["builder_type"].split("_")
+
+        pos = 1
+        # add family if any
+        placeholder_family = placeholder_data["family"]
+        if placeholder_family:
+            placeholder_name_parts.insert(pos, placeholder_family)
+            pos += 1
+
+        # add loader arguments if any
+        loader_args = placeholder_data["loader_args"]
+        if loader_args:
+            loader_args = json.loads(loader_args.replace('\'', '\"'))
+            values = [v for v in loader_args.values()]
+            for value in values:
+                placeholder_name_parts.insert(pos, value)
+                pos += 1
+
+        placeholder_name = "_".join(placeholder_name_parts)
+
+        return placeholder_name.capitalize()
+
+    def _get_loaded_repre_ids(self):
+        loaded_representation_ids = self.builder.get_shared_populate_data(
+            "loaded_representation_ids"
+        )
+        if loaded_representation_ids is None:
+            try:
+                containers = cmds.sets("AVALON_CONTAINERS", q=True)
+            except ValueError:
+                containers = []
+
+            loaded_representation_ids = {
+                cmds.getAttr(container + ".representation")
+                for container in containers
+            }
+            self.builder.set_shared_populate_data(
+                "loaded_representation_ids", loaded_representation_ids
+            )
+        return loaded_representation_ids
+
+    def create_placeholder(self, placeholder_data):
+        selection = cmds.ls(selection=True)
+        if len(selection) > 1:
+            raise ValueError("More than one item is selected")
+
+        parent = selection[0] if selection else None
+
+        placeholder_data["plugin_identifier"] = self.identifier
+
+        placeholder_name = self._create_placeholder_name(placeholder_data)
+
+        placeholder = cmds.spaceLocator(name=placeholder_name)[0]
+        if parent:
+            placeholder = cmds.parent(placeholder, selection[0])[0]
+
+        imprint(placeholder, placeholder_data)
+
+        # Add helper attributes to keep placeholder info
+        cmds.addAttr(
+            placeholder,
+            longName="parent",
+            hidden=True,
+            dataType="string"
+        )
+        cmds.addAttr(
+            placeholder,
+            longName="index",
+            hidden=True,
+            attributeType="short",
+            defaultValue=-1
+        )
+
+        cmds.setAttr(placeholder + ".parent", "", type="string")
+
+    def update_placeholder(self, placeholder_item, placeholder_data):
+        node_name = placeholder_item.scene_identifier
+        new_values = {}
+        for key, value in placeholder_data.items():
+            placeholder_value = placeholder_item.data.get(key)
+            if value != placeholder_value:
+                new_values[key] = value
+                placeholder_item.data[key] = value
+
+        for key in new_values.keys():
+            cmds.deleteAttr(node_name + "." + key)
+
+        imprint(node_name, new_values)
+
+    def collect_placeholders(self):
+        output = []
+        scene_placeholders = self._collect_scene_placeholders()
+        for node_name, placeholder_data in scene_placeholders.items():
+            if placeholder_data.get("plugin_identifier") != self.identifier:
+                continue
+
+            # TODO: do data validations and maybe upgrades if they are invalid
+            output.append(
+                LoadPlaceholderItem(node_name, placeholder_data, self)
+            )
+
+        return output
+
+    def populate_placeholder(self, placeholder):
+        self.populate_load_placeholder(placeholder)
+
+    def repopulate_placeholder(self, placeholder):
+        repre_ids = self._get_loaded_repre_ids()
+        self.populate_load_placeholder(placeholder, repre_ids)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after load of its corresponding representations.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+ """ + # Hide placeholder and add them to placeholder set + node = placeholder.scene_identifier + + cmds.sets(node, addElement=PLACEHOLDER_SET) + cmds.hide(node) + cmds.setAttr(node + ".hiddenInOutliner", True) + + def delete_placeholder(self, placeholder): + """Remove placeholder if building was successful""" + cmds.delete(placeholder.scene_identifier) + + def load_succeed(self, placeholder, container): + self._parent_in_hierarchy(placeholder, container) + + def _parent_in_hierarchy(self, placeholder, container): + """Parent loaded container to placeholder's parent. + + ie : Set loaded content as placeholder's sibling + + Args: + container (str): Placeholder loaded containers + """ + + if not container: + return + + roots = cmds.sets(container, q=True) + ref_node = None + try: + ref_node = get_reference_node(roots) + except AssertionError as e: + self.log.info(e.args[0]) + + nodes_to_parent = [] + for root in roots: + if ref_node: + ref_root = cmds.referenceQuery(root, nodes=True)[0] + ref_root = ( + cmds.listRelatives(ref_root, parent=True, path=True) or + [ref_root] + ) + nodes_to_parent.extend(ref_root) + continue + if root.endswith("_RN"): + # Backwards compatibility for hardcoded reference names. + refRoot = cmds.referenceQuery(root, n=True)[0] + refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot] + nodes_to_parent.extend(refRoot) + elif root not in cmds.listSets(allSets=True): + nodes_to_parent.append(root) + + elif not cmds.sets(root, q=True): + return + + # Move loaded nodes to correct index in outliner hierarchy + placeholder_form = cmds.xform( + placeholder.scene_identifier, + q=True, + matrix=True, + worldSpace=True + ) + scene_parent = cmds.listRelatives( + placeholder.scene_identifier, parent=True, fullPath=True + ) + for node in set(nodes_to_parent): + cmds.reorder(node, front=True) + cmds.reorder(node, relative=placeholder.data["index"]) + cmds.xform(node, matrix=placeholder_form, ws=True) + if scene_parent: + cmds.parent(node, scene_parent) + else: + cmds.parent(node, world=True) + + holding_sets = cmds.listSets(object=placeholder.scene_identifier) + if not holding_sets: + return + for holding_set in holding_sets: + cmds.sets(roots, forceElement=holding_set) + + +def build_workfile_template(*args): + builder = MayaTemplateBuilder(registered_host()) + builder.build_template() + + +def update_workfile_template(*args): + builder = MayaTemplateBuilder(registered_host()) + builder.rebuild_template() + + +def create_placeholder(*args): + host = registered_host() + builder = MayaTemplateBuilder(host) + window = WorkfileBuildPlaceholderDialog(host, builder, + parent=get_main_window()) + window.show() + + +def update_placeholder(*args): + host = registered_host() + builder = MayaTemplateBuilder(host) + placeholder_items_by_id = { + placeholder_item.scene_identifier: placeholder_item + for placeholder_item in builder.get_placeholders() + } + placeholder_items = [] + for node_name in cmds.ls(selection=True, long=True): + if node_name in placeholder_items_by_id: + placeholder_items.append(placeholder_items_by_id[node_name]) + + # TODO show UI at least + if len(placeholder_items) == 0: + raise ValueError("No node selected") + + if len(placeholder_items) > 1: + raise ValueError("Too many selected nodes") + + placeholder_item = placeholder_items[0] + window = WorkfileBuildPlaceholderDialog(host, builder, + parent=get_main_window()) + window.set_update_mode(placeholder_item) + window.exec_() diff --git a/openpype/hosts/maya/api/workio.py 
b/client/ayon_core/hosts/maya/api/workio.py similarity index 100% rename from openpype/hosts/maya/api/workio.py rename to client/ayon_core/hosts/maya/api/workio.py diff --git a/openpype/hosts/maya/hooks/pre_auto_load_plugins.py b/client/ayon_core/hosts/maya/hooks/pre_auto_load_plugins.py similarity index 88% rename from openpype/hosts/maya/hooks/pre_auto_load_plugins.py rename to client/ayon_core/hosts/maya/hooks/pre_auto_load_plugins.py index 4b1ea698a6..ed294da125 100644 --- a/openpype/hosts/maya/hooks/pre_auto_load_plugins.py +++ b/client/ayon_core/hosts/maya/hooks/pre_auto_load_plugins.py @@ -1,4 +1,4 @@ -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class MayaPreAutoLoadPlugins(PreLaunchHook): @@ -23,7 +23,7 @@ def execute(self): # Force post initialization so our dedicated plug-in load can run # prior to Maya opening a scene file. - key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION" + key = "AYON_OPEN_WORKFILE_POST_INITIALIZATION" self.launch_context.env[key] = "1" self.log.debug("Explicit plugins loading.") diff --git a/openpype/hosts/maya/hooks/pre_copy_mel.py b/client/ayon_core/hosts/maya/hooks/pre_copy_mel.py similarity index 82% rename from openpype/hosts/maya/hooks/pre_copy_mel.py rename to client/ayon_core/hosts/maya/hooks/pre_copy_mel.py index 6cd2c69e20..7198f98131 100644 --- a/openpype/hosts/maya/hooks/pre_copy_mel.py +++ b/client/ayon_core/hosts/maya/hooks/pre_copy_mel.py @@ -1,5 +1,5 @@ -from openpype.lib.applications import PreLaunchHook, LaunchTypes -from openpype.hosts.maya.lib import create_workspace_mel +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.hosts.maya.lib import create_workspace_mel class PreCopyMel(PreLaunchHook): diff --git a/openpype/hosts/maya/hooks/pre_open_workfile_post_initialization.py b/client/ayon_core/hosts/maya/hooks/pre_open_workfile_post_initialization.py similarity index 86% rename from openpype/hosts/maya/hooks/pre_open_workfile_post_initialization.py rename to client/ayon_core/hosts/maya/hooks/pre_open_workfile_post_initialization.py index 1fe3c3ca2c..6bf678474f 100644 --- a/openpype/hosts/maya/hooks/pre_open_workfile_post_initialization.py +++ b/client/ayon_core/hosts/maya/hooks/pre_open_workfile_post_initialization.py @@ -1,4 +1,4 @@ -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class MayaPreOpenWorkfilePostInitialization(PreLaunchHook): @@ -22,5 +22,5 @@ def execute(self): self.data.pop("start_last_workfile") self.log.debug("Opening workfile post initialization.") - key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION" + key = "AYON_OPEN_WORKFILE_POST_INITIALIZATION" self.launch_context.env[key] = "1" diff --git a/client/ayon_core/hosts/maya/lib.py b/client/ayon_core/hosts/maya/lib.py new file mode 100644 index 0000000000..6fa8dfdce9 --- /dev/null +++ b/client/ayon_core/hosts/maya/lib.py @@ -0,0 +1,25 @@ +import os +from ayon_core.settings import get_project_settings +from ayon_core.lib import Logger + + +def create_workspace_mel(workdir, project_name, project_settings=None): + dst_filepath = os.path.join(workdir, "workspace.mel") + if os.path.exists(dst_filepath): + return + + if not os.path.exists(workdir): + os.makedirs(workdir) + + if not project_settings: + project_settings = get_project_settings(project_name) + mel_script = project_settings["maya"].get("mel_workspace") + + # Skip if mel script in settings is empty + if not 
mel_script:
+        log = Logger.get_logger("create_workspace_mel")
+        log.debug("File 'workspace.mel' not created. Settings value is empty.")
+        return
+
+    with open(dst_filepath, "w") as mel_file:
+        mel_file.write(mel_script)
diff --git a/openpype/hosts/max/plugins/__init__.py b/client/ayon_core/hosts/maya/plugins/__init__.py
similarity index 100%
rename from openpype/hosts/max/plugins/__init__.py
rename to client/ayon_core/hosts/maya/plugins/__init__.py
diff --git a/client/ayon_core/hosts/maya/plugins/create/convert_legacy.py b/client/ayon_core/hosts/maya/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..029ea25b40
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/create/convert_legacy.py
@@ -0,0 +1,178 @@
+from ayon_core.pipeline.create.creator_plugins import SubsetConvertorPlugin
+from ayon_core.hosts.maya.api import plugin
+from ayon_core.hosts.maya.api.lib import read
+
+from ayon_core.client import get_asset_by_name
+
+from maya import cmds
+from maya.app.renderSetup.model import renderSetup
+
+
+class MayaLegacyConvertor(SubsetConvertorPlugin,
+                          plugin.MayaCreatorBase):
+    """Find and convert any legacy subsets in the scene.
+
+    This Convertor will find all legacy subsets in the scene and will
+    transform them to the current system. Since the old subsets don't
+    retain any information about their original creators, the only mapping
+    we can do is based on their families.
+
+    Its limitation is that you can have multiple creators creating subsets
+    of the same family and there is no way to tell them apart. This code
+    should nevertheless cover all creators that came with OpenPype.
+
+    """
+    identifier = "io.openpype.creators.maya.legacy"
+
+    # Cases where the identifier or new family doesn't correspond to the
+    # original family on the legacy instances
+    special_family_conversions = {
+        "rendering": "io.openpype.creators.maya.renderlayer",
+    }
+
+    def find_instances(self):
+
+        self.cache_subsets(self.collection_shared_data)
+        legacy = self.collection_shared_data.get("maya_cached_legacy_subsets")
+        if not legacy:
+            return
+
+        self.add_convertor_item("Convert legacy instances")
+
+    def convert(self):
+        self.remove_convertor_item()
+
+        # We can't use the collected shared data cache here, so we
+        # re-query it directly to convert everything found.
+        cache = {}
+        self.cache_subsets(cache)
+        legacy = cache.get("maya_cached_legacy_subsets")
+        if not legacy:
+            return
+
+        # From all current new style manual creators find the mapping
+        # from family to identifier
+        family_to_id = {}
+        for identifier, creator in self.create_context.creators.items():
+            family = getattr(creator, "family", None)
+            if not family:
+                continue
+
+            if family in family_to_id:
+                # We have a clash of family -> identifier. Multiple
+                # new style creators use the same family
+                self.log.warning("Clash on family->identifier: "
+                                 "{}".format(identifier))
+            family_to_id[family] = identifier
+
+        family_to_id.update(self.special_family_conversions)
+
+        # We also embed the current 'task' into the instance since legacy
+        # instances didn't store that data on the instances. The old style
+        # logic was effectively always tied to the current task anyway.
+ data = dict() + data["task"] = self.create_context.get_current_task_name() + for family, instance_nodes in legacy.items(): + if family not in family_to_id: + self.log.warning( + "Unable to convert legacy instance with family '{}'" + " because there is no matching new creator's family" + "".format(family) + ) + continue + + creator_id = family_to_id[family] + creator = self.create_context.creators[creator_id] + data["creator_identifier"] = creator_id + + if isinstance(creator, plugin.RenderlayerCreator): + self._convert_per_renderlayer(instance_nodes, data, creator) + else: + self._convert_regular(instance_nodes, data) + + def _convert_regular(self, instance_nodes, data): + # We only imprint the creator identifier for it to identify + # as the new style creator + for instance_node in instance_nodes: + self.imprint_instance_node(instance_node, + data=data.copy()) + + def _convert_per_renderlayer(self, instance_nodes, data, creator): + # Split the instance into an instance per layer + rs = renderSetup.instance() + layers = rs.getRenderLayers() + if not layers: + self.log.error( + "Can't convert legacy renderlayer instance because no existing" + " renderSetup layers exist in the scene." + ) + return + + creator_attribute_names = { + attr_def.key for attr_def in creator.get_instance_attr_defs() + } + + for instance_node in instance_nodes: + + # Ensure we have the new style singleton node generated + # TODO: Make function public + singleton_node = creator._get_singleton_node() + if singleton_node: + self.log.error( + "Can't convert legacy renderlayer instance '{}' because" + " new style instance '{}' already exists".format( + instance_node, + singleton_node + ) + ) + continue + + creator.create_singleton_node() + + # We are creating new nodes to replace the original instance + # Copy the attributes of the original instance to the new node + original_data = read(instance_node) + + # The family gets converted to the new family (this is due to + # "rendering" family being converted to "renderlayer" family) + original_data["family"] = creator.family + + # recreate subset name as without it would be + # `renderingMain` vs correct `renderMain` + project_name = self.create_context.get_current_project_name() + asset_doc = get_asset_by_name(project_name, + original_data["asset"]) + subset_name = creator.get_subset_name( + original_data["variant"], + data["task"], + asset_doc, + project_name) + original_data["subset"] = subset_name + + # Convert to creator attributes when relevant + creator_attributes = {} + for key in list(original_data.keys()): + # Iterate in order of the original attributes to preserve order + # in the output creator attributes + if key in creator_attribute_names: + creator_attributes[key] = original_data.pop(key) + original_data["creator_attributes"] = creator_attributes + + # For layer in maya layers + for layer in layers: + layer_instance_node = creator.find_layer_instance_node(layer) + if not layer_instance_node: + # TODO: Make function public + layer_instance_node = creator._create_layer_instance_node( + layer + ) + + # Transfer the main attributes of the original instance + layer_data = original_data.copy() + layer_data.update(data) + + self.imprint_instance_node(layer_instance_node, + data=layer_data) + + # Delete the legacy instance node + cmds.delete(instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation.py b/client/ayon_core/hosts/maya/plugins/create/create_animation.py new file mode 100644 index 0000000000..e6849b4468 --- /dev/null +++ 
b/client/ayon_core/hosts/maya/plugins/create/create_animation.py @@ -0,0 +1,89 @@ +from ayon_core.hosts.maya.api import ( + lib, + plugin +) +from ayon_core.lib import ( + BoolDef, + TextDef +) + + +class CreateAnimation(plugin.MayaHiddenCreator): + """Animation output for character rigs + + We hide the animation creator from the UI since the creation of it is + automated upon loading a rig. There's an inventory action to recreate it + for loaded rigs if by chance someone deleted the animation instance. + """ + identifier = "io.openpype.creators.maya.animation" + name = "animationDefault" + label = "Animation" + family = "animation" + icon = "male" + + write_color_sets = False + write_face_sets = False + include_parent_hierarchy = False + include_user_defined_attributes = False + + def get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() + + defs.extend([ + BoolDef("writeColorSets", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=self.write_color_sets), + BoolDef("writeFaceSets", + label="Write face sets", + tooltip="Write face sets with the geometry", + default=self.write_face_sets), + BoolDef("writeNormals", + label="Write normals", + tooltip="Write normals with the deforming geometry", + default=True), + BoolDef("renderableOnly", + label="Renderable Only", + tooltip="Only export renderable visible shapes", + default=False), + BoolDef("visibleOnly", + label="Visible Only", + tooltip="Only export dag objects visible during " + "frame range", + default=False), + BoolDef("includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip="Whether to include parent hierarchy of nodes in " + "the publish instance", + default=self.include_parent_hierarchy), + BoolDef("worldSpace", + label="World-Space Export", + default=True), + BoolDef("includeUserDefinedAttributes", + label="Include User Defined Attributes", + default=self.include_user_defined_attributes), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1, prefix2") + ]) + + # TODO: Implement these on a Deadline plug-in instead? + """ + # Default to not send to farm. + self.data["farm"] = False + self.data["priority"] = 50 + """ + + return defs + + def apply_settings(self, project_settings): + super(CreateAnimation, self).apply_settings(project_settings) + # Hardcoding creator to be enabled due to existing settings would + # disable the creator causing the creator plugin to not be + # discoverable. 
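+        # Instances themselves are still only created automatically when a
+        # rig is loaded (see the class docstring above).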
+ self.enabled = True diff --git a/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py similarity index 97% rename from openpype/hosts/maya/plugins/create/create_arnold_scene_source.py rename to client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py index 1ef132725f..a9455620b8 100644 --- a/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -1,8 +1,8 @@ -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( lib, plugin ) -from openpype.lib import ( +from ayon_core.lib import ( NumberDef, BoolDef ) diff --git a/openpype/hosts/maya/plugins/create/create_assembly.py b/client/ayon_core/hosts/maya/plugins/create/create_assembly.py similarity index 82% rename from openpype/hosts/maya/plugins/create/create_assembly.py rename to client/ayon_core/hosts/maya/plugins/create/create_assembly.py index 813fe4da04..2b78271a49 100644 --- a/openpype/hosts/maya/plugins/create/create_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_assembly.py @@ -1,4 +1,4 @@ -from openpype.hosts.maya.api import plugin +from ayon_core.hosts.maya.api import plugin class CreateAssembly(plugin.MayaCreator): diff --git a/client/ayon_core/hosts/maya/plugins/create/create_camera.py b/client/ayon_core/hosts/maya/plugins/create/create_camera.py new file mode 100644 index 0000000000..37d5a817a5 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_camera.py @@ -0,0 +1,36 @@ +from ayon_core.hosts.maya.api import ( + lib, + plugin +) +from ayon_core.lib import BoolDef + + +class CreateCamera(plugin.MayaCreator): + """Single baked camera""" + + identifier = "io.openpype.creators.maya.camera" + label = "Camera" + family = "camera" + icon = "video-camera" + + def get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() + + defs.extend([ + BoolDef("bakeToWorldSpace", + label="Bake to World-Space", + tooltip="Bake to World-Space", + default=True), + ]) + + return defs + + +class CreateCameraRig(plugin.MayaCreator): + """Complex hierarchy with camera.""" + + identifier = "io.openpype.creators.maya.camerarig" + label = "Camera Rig" + family = "camerarig" + icon = "video-camera" diff --git a/client/ayon_core/hosts/maya/plugins/create/create_layout.py b/client/ayon_core/hosts/maya/plugins/create/create_layout.py new file mode 100644 index 0000000000..bd61fa44c6 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_layout.py @@ -0,0 +1,21 @@ +from ayon_core.hosts.maya.api import plugin +from ayon_core.lib import BoolDef + + +class CreateLayout(plugin.MayaCreator): + """A grouped package of loaded content""" + + identifier = "io.openpype.creators.maya.layout" + label = "Layout" + family = "layout" + icon = "cubes" + + def get_instance_attr_defs(self): + + return [ + BoolDef("groupLoadedAssets", + label="Group Loaded Assets", + tooltip="Enable this when you want to publish group of " + "loaded asset", + default=False) + ] diff --git a/client/ayon_core/hosts/maya/plugins/create/create_look.py b/client/ayon_core/hosts/maya/plugins/create/create_look.py new file mode 100644 index 0000000000..4655ec1377 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_look.py @@ -0,0 +1,47 @@ +from ayon_core.hosts.maya.api import ( + plugin, + lib +) +from ayon_core.lib import ( + BoolDef, + TextDef +) + + +class CreateLook(plugin.MayaCreator): + """Shader 
connections defining shape look""" + + identifier = "io.openpype.creators.maya.look" + label = "Look" + family = "look" + icon = "paint-brush" + + make_tx = True + rs_tex = False + + def get_instance_attr_defs(self): + + return [ + # TODO: This value should actually get set on create! + TextDef("renderLayer", + # TODO: Bug: Hidden attribute's label is still shown in UI? + hidden=True, + default=lib.get_current_renderlayer(), + label="Renderlayer", + tooltip="Renderlayer to extract the look from"), + BoolDef("maketx", + label="MakeTX", + tooltip="Whether to generate .tx files for your textures", + default=self.make_tx), + BoolDef("rstex", + label="Convert textures to .rstex", + tooltip="Whether to generate Redshift .rstex files for " + "your textures", + default=self.rs_tex) + ] + + def get_pre_create_attr_defs(self): + # Show same attributes on create but include use selection + defs = super(CreateLook, self).get_pre_create_attr_defs() + defs.extend(self.get_instance_attr_defs()) + return defs diff --git a/openpype/hosts/maya/plugins/create/create_matchmove.py b/client/ayon_core/hosts/maya/plugins/create/create_matchmove.py similarity index 90% rename from openpype/hosts/maya/plugins/create/create_matchmove.py rename to client/ayon_core/hosts/maya/plugins/create/create_matchmove.py index e64eb6a471..00de553404 100644 --- a/openpype/hosts/maya/plugins/create/create_matchmove.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_matchmove.py @@ -1,8 +1,8 @@ -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( lib, plugin ) -from openpype.lib import BoolDef +from ayon_core.lib import BoolDef class CreateMatchmove(plugin.MayaCreator): diff --git a/openpype/hosts/maya/plugins/create/create_maya_usd.py b/client/ayon_core/hosts/maya/plugins/create/create_maya_usd.py similarity index 98% rename from openpype/hosts/maya/plugins/create/create_maya_usd.py rename to client/ayon_core/hosts/maya/plugins/create/create_maya_usd.py index cc9a14bd3a..f6c8a55e68 100644 --- a/openpype/hosts/maya/plugins/create/create_maya_usd.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_maya_usd.py @@ -1,5 +1,5 @@ -from openpype.hosts.maya.api import plugin, lib -from openpype.lib import ( +from ayon_core.hosts.maya.api import plugin, lib +from ayon_core.lib import ( BoolDef, EnumDef, TextDef diff --git a/openpype/hosts/maya/plugins/create/create_mayascene.py b/client/ayon_core/hosts/maya/plugins/create/create_mayascene.py similarity index 84% rename from openpype/hosts/maya/plugins/create/create_mayascene.py rename to client/ayon_core/hosts/maya/plugins/create/create_mayascene.py index b61c97aebf..c4024d3710 100644 --- a/openpype/hosts/maya/plugins/create/create_mayascene.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_mayascene.py @@ -1,4 +1,4 @@ -from openpype.hosts.maya.api import plugin +from ayon_core.hosts.maya.api import plugin class CreateMayaScene(plugin.MayaCreator): diff --git a/client/ayon_core/hosts/maya/plugins/create/create_model.py b/client/ayon_core/hosts/maya/plugins/create/create_model.py new file mode 100644 index 0000000000..67e6b87190 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_model.py @@ -0,0 +1,43 @@ +from ayon_core.hosts.maya.api import plugin +from ayon_core.lib import ( + BoolDef, + TextDef +) + + +class CreateModel(plugin.MayaCreator): + """Polygonal static geometry""" + + identifier = "io.openpype.creators.maya.model" + label = "Model" + family = "model" + icon = "cube" + default_variants = ["Main", "Proxy", "_MD", 
"_HD", "_LD"] + + write_color_sets = False + write_face_sets = False + + def get_instance_attr_defs(self): + + return [ + BoolDef("writeColorSets", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=self.write_color_sets), + BoolDef("writeFaceSets", + label="Write face sets", + tooltip="Write face sets with the geometry", + default=self.write_face_sets), + BoolDef("includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip="Whether to include parent hierarchy of nodes in " + "the publish instance", + default=False), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1, prefix2") + ] diff --git a/openpype/hosts/maya/plugins/create/create_multishot_layout.py b/client/ayon_core/hosts/maya/plugins/create/create_multishot_layout.py similarity index 95% rename from openpype/hosts/maya/plugins/create/create_multishot_layout.py rename to client/ayon_core/hosts/maya/plugins/create/create_multishot_layout.py index 8f5c423202..d05c5ae9a1 100644 --- a/openpype/hosts/maya/plugins/create/create_multishot_layout.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_multishot_layout.py @@ -5,16 +5,15 @@ ) from maya import cmds # noqa: F401 -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_assets -from openpype.hosts.maya.api import plugin -from openpype.lib import BoolDef, EnumDef, TextDef -from openpype.pipeline import ( +from ayon_core.client import get_assets +from ayon_core.hosts.maya.api import plugin +from ayon_core.lib import BoolDef, EnumDef, TextDef +from ayon_core.pipeline import ( Creator, get_current_asset_name, get_current_project_name, ) -from openpype.pipeline.create import CreatorError +from ayon_core.pipeline.create import CreatorError class CreateMultishotLayout(plugin.MayaCreator): @@ -208,8 +207,3 @@ def get_related_shots(self, folder_path: str): "name", "label", "path", "folderType", "id" ] ) - - -# blast this creator if Ayon server is not enabled -if not AYON_SERVER_ENABLED: - del CreateMultishotLayout diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_look.py b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_look.py similarity index 89% rename from openpype/hosts/maya/plugins/create/create_multiverse_look.py rename to client/ayon_core/hosts/maya/plugins/create/create_multiverse_look.py index f27eb57fc1..11e13b2748 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_look.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_look.py @@ -1,5 +1,5 @@ -from openpype.hosts.maya.api import plugin -from openpype.lib import ( +from ayon_core.hosts.maya.api import plugin +from ayon_core.lib import ( BoolDef, EnumDef ) diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd.py similarity index 98% rename from openpype/hosts/maya/plugins/create/create_multiverse_usd.py rename to client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd.py index 2963d4d5b6..af0ffa9f23 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd.py @@ -1,5 +1,5 @@ -from openpype.hosts.maya.api import plugin, lib -from openpype.lib import ( +from ayon_core.hosts.maya.api import plugin, lib +from ayon_core.lib import ( BoolDef, NumberDef, TextDef, diff --git 
a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd_comp.py similarity index 95% rename from openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py rename to client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd_comp.py index 66ddd83eda..202fbbcbc8 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd_comp.py @@ -1,5 +1,5 @@ -from openpype.hosts.maya.api import plugin, lib -from openpype.lib import ( +from ayon_core.hosts.maya.api import plugin, lib +from ayon_core.lib import ( BoolDef, NumberDef, EnumDef diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd_over.py similarity index 96% rename from openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py rename to client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd_over.py index 166dbf6515..cca2b54392 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_multiverse_usd_over.py @@ -1,5 +1,5 @@ -from openpype.hosts.maya.api import plugin, lib -from openpype.lib import ( +from ayon_core.hosts.maya.api import plugin, lib +from ayon_core.lib import ( BoolDef, NumberDef, EnumDef diff --git a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py new file mode 100644 index 0000000000..832e0bfbc5 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py @@ -0,0 +1,88 @@ +from maya import cmds + +from ayon_core.hosts.maya.api import ( + lib, + plugin +) +from ayon_core.lib import ( + BoolDef, + TextDef +) + + +class CreatePointCache(plugin.MayaCreator): + """Alembic pointcache for animated data""" + + identifier = "io.openpype.creators.maya.pointcache" + label = "Pointcache" + family = "pointcache" + icon = "gears" + write_color_sets = False + write_face_sets = False + include_user_defined_attributes = False + + def get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() + + defs.extend([ + BoolDef("writeColorSets", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=False), + BoolDef("writeFaceSets", + label="Write face sets", + tooltip="Write face sets with the geometry", + default=False), + BoolDef("renderableOnly", + label="Renderable Only", + tooltip="Only export renderable visible shapes", + default=False), + BoolDef("visibleOnly", + label="Visible Only", + tooltip="Only export dag objects visible during " + "frame range", + default=False), + BoolDef("includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip="Whether to include parent hierarchy of nodes in " + "the publish instance", + default=False), + BoolDef("worldSpace", + label="World-Space Export", + default=True), + BoolDef("refresh", + label="Refresh viewport during export", + default=False), + BoolDef("includeUserDefinedAttributes", + label="Include User Defined Attributes", + default=self.include_user_defined_attributes), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + default="", + placeholder="prefix1, prefix2") + ]) + + # TODO: Implement these on a Deadline plug-in instead? + """ + # Default to not send to farm. 
+ self.data["farm"] = False + self.data["priority"] = 50 + """ + + return defs + + def create(self, subset_name, instance_data, pre_create_data): + + instance = super(CreatePointCache, self).create( + subset_name, instance_data, pre_create_data + ) + instance_node = instance.get("instance_node") + + # For Arnold standin proxy + proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) + cmds.sets(proxy_set, forceElement=instance_node) diff --git a/openpype/hosts/maya/plugins/create/create_proxy_abc.py b/client/ayon_core/hosts/maya/plugins/create/create_proxy_abc.py similarity index 95% rename from openpype/hosts/maya/plugins/create/create_proxy_abc.py rename to client/ayon_core/hosts/maya/plugins/create/create_proxy_abc.py index d89470ebee..8b8cedd7ab 100644 --- a/openpype/hosts/maya/plugins/create/create_proxy_abc.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_proxy_abc.py @@ -1,8 +1,8 @@ -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( lib, plugin ) -from openpype.lib import ( +from ayon_core.lib import ( BoolDef, TextDef ) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_redshift_proxy.py b/client/ayon_core/hosts/maya/plugins/create/create_redshift_proxy.py new file mode 100644 index 0000000000..72c86a0b74 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_redshift_proxy.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +"""Creator of Redshift proxy subset types.""" + +from ayon_core.hosts.maya.api import plugin, lib +from ayon_core.lib import BoolDef + + +class CreateRedshiftProxy(plugin.MayaCreator): + """Create instance of Redshift Proxy subset.""" + + identifier = "io.openpype.creators.maya.redshiftproxy" + label = "Redshift Proxy" + family = "redshiftproxy" + icon = "gears" + + def get_instance_attr_defs(self): + + defs = [ + BoolDef("animation", + label="Export animation", + default=False) + ] + + defs.extend(lib.collect_animation_defs()) + return defs diff --git a/client/ayon_core/hosts/maya/plugins/create/create_render.py b/client/ayon_core/hosts/maya/plugins/create/create_render.py new file mode 100644 index 0000000000..f537f249cd --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_render.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +"""Create ``Render`` instance in Maya.""" + +from ayon_core.hosts.maya.api import ( + lib_rendersettings, + plugin +) +from ayon_core.pipeline import CreatorError +from ayon_core.lib import ( + BoolDef, + NumberDef, +) + + +class CreateRenderlayer(plugin.RenderlayerCreator): + """Create and manages renderlayer subset per renderLayer in workfile. + + This generates a single node in the scene which tells the Creator to if + it exists collect Maya rendersetup renderlayers as individual instances. + As such, triggering create doesn't actually create the instance node per + layer but only the node which tells the Creator it may now collect + the renderlayers. 
+ + """ + + identifier = "io.openpype.creators.maya.renderlayer" + family = "renderlayer" + label = "Render" + icon = "eye" + + layer_instance_prefix = "render" + singleton_node_name = "renderingMain" + + render_settings = {} + + @classmethod + def apply_settings(cls, project_settings): + cls.render_settings = project_settings["maya"]["RenderSettings"] + + def create(self, subset_name, instance_data, pre_create_data): + # Only allow a single render instance to exist + if self._get_singleton_node(): + raise CreatorError("A Render instance already exists - only " + "one can be configured.") + + # Apply default project render settings on create + if self.render_settings.get("apply_render_settings"): + lib_rendersettings.RenderSettings().set_default_renderer_settings() + + super(CreateRenderlayer, self).create(subset_name, + instance_data, + pre_create_data) + + def get_instance_attr_defs(self): + """Create instance settings.""" + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + BoolDef("extendFrames", + label="Extend Frames", + tooltip="Extends the frames on top of the previous " + "publish.\nIf the previous was 1001-1050 and you " + "would now submit 1020-1070 only the new frames " + "1051-1070 would be rendered and published " + "together with the previously rendered frames.\n" + "If 'overrideExistingFrame' is enabled it *will* " + "render any existing frames.", + default=False), + BoolDef("overrideExistingFrame", + label="Override Existing Frame", + tooltip="Override existing rendered frames " + "(if they exist).", + default=True), + + # TODO: Should these move to submit_maya_deadline plugin? + # Tile rendering + BoolDef("tileRendering", + label="Enable tiled rendering", + default=False), + NumberDef("tilesX", + label="Tiles X", + default=2, + minimum=1, + decimals=0), + NumberDef("tilesY", + label="Tiles Y", + default=2, + minimum=1, + decimals=0), + + # Additional settings + BoolDef("convertToScanline", + label="Convert to Scanline", + tooltip="Convert the output images to scanline images", + default=False), + BoolDef("useReferencedAovs", + label="Use Referenced AOVs", + tooltip="Consider the AOVs from referenced scenes as well", + default=False), + + BoolDef("renderSetupIncludeLights", + label="Render Setup Include Lights", + default=self.render_settings.get("enable_all_lights", + False)) + ] diff --git a/openpype/hosts/maya/plugins/create/create_rendersetup.py b/client/ayon_core/hosts/maya/plugins/create/create_rendersetup.py similarity index 91% rename from openpype/hosts/maya/plugins/create/create_rendersetup.py rename to client/ayon_core/hosts/maya/plugins/create/create_rendersetup.py index dd64a0a842..dc47325a34 100644 --- a/openpype/hosts/maya/plugins/create/create_rendersetup.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_rendersetup.py @@ -1,5 +1,5 @@ -from openpype.hosts.maya.api import plugin -from openpype.pipeline import CreatorError +from ayon_core.hosts.maya.api import plugin +from ayon_core.pipeline import CreatorError class CreateRenderSetup(plugin.MayaCreator): diff --git a/client/ayon_core/hosts/maya/plugins/create/create_review.py b/client/ayon_core/hosts/maya/plugins/create/create_review.py new file mode 100644 index 0000000000..6f7c0ca802 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_review.py @@ -0,0 +1,143 @@ +import json + +from maya import cmds + +from ayon_core.hosts.maya.api import ( + lib, + plugin +) +from ayon_core.lib import ( + BoolDef, + NumberDef, + EnumDef +) +from 
ayon_core.pipeline import CreatedInstance +from ayon_core.client import get_asset_by_name + +TRANSPARENCIES = [ + "preset", + "simple", + "object sorting", + "weighted average", + "depth peeling", + "alpha cut" +] + + +class CreateReview(plugin.MayaCreator): + """Playblast reviewable""" + + identifier = "io.openpype.creators.maya.review" + label = "Review" + family = "review" + icon = "video-camera" + + useMayaTimeline = True + panZoom = False + + # Overriding "create" method to prefill values from settings. + def create(self, subset_name, instance_data, pre_create_data): + + members = list() + if pre_create_data.get("use_selection"): + members = cmds.ls(selection=True) + + project_name = self.project_name + asset_name = instance_data["folderPath"] + asset_doc = get_asset_by_name(project_name, asset_name) + task_name = instance_data["task"] + preset = lib.get_capture_preset( + task_name, + asset_doc["data"]["tasks"][task_name]["type"], + subset_name, + self.project_settings, + self.log + ) + self.log.debug( + "Using preset: {}".format( + json.dumps(preset, indent=4, sort_keys=True) + ) + ) + + with lib.undo_chunk(): + instance_node = cmds.sets(members, name=subset_name) + instance_data["instance_node"] = instance_node + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self) + + creator_attribute_defs_by_key = { + x.key: x for x in instance.creator_attribute_defs + } + mapping = { + "review_width": preset["Resolution"]["width"], + "review_height": preset["Resolution"]["height"], + "isolate": preset["Generic"]["isolate_view"], + "imagePlane": preset["Viewport Options"]["imagePlane"], + "panZoom": preset["Generic"]["pan_zoom"] + } + for key, value in mapping.items(): + creator_attribute_defs_by_key[key].default = value + + self._add_instance_to_context(instance) + + self.imprint_instance_node(instance_node, + data=instance.data_to_store()) + return instance + + def get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() + + # Option for using Maya or asset frame range in settings. 
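+ # Sketch of the assumed mapping returned by lib.get_frame_range(),
+ # whose keys are matched against the collected defs below:
+ # {"frameStart": 1001, "frameEnd": 1050,
+ # "handleStart": 0, "handleEnd": 0}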
+ if not self.useMayaTimeline: + # Update the defaults to be the asset frame range + frame_range = lib.get_frame_range() + defs_by_key = {attr_def.key: attr_def for attr_def in defs} + for key, value in frame_range.items(): + if key not in defs_by_key: + raise RuntimeError("Attribute definition not found to be " + "updated for key: {}".format(key)) + attr_def = defs_by_key[key] + attr_def.default = value + + defs.extend([ + NumberDef("review_width", + label="Review width", + tooltip="A value of zero will use the asset resolution.", + decimals=0, + minimum=0, + default=0), + NumberDef("review_height", + label="Review height", + tooltip="A value of zero will use the asset resolution.", + decimals=0, + minimum=0, + default=0), + BoolDef("keepImages", + label="Keep Images", + tooltip="Whether to also publish along the image sequence " + "next to the video reviewable.", + default=False), + BoolDef("isolate", + label="Isolate render members of instance", + tooltip="When enabled only the members of the instance " + "will be included in the playblast review.", + default=False), + BoolDef("imagePlane", + label="Show Image Plane", + default=True), + EnumDef("transparency", + label="Transparency", + items=TRANSPARENCIES), + BoolDef("panZoom", + label="Enable camera pan/zoom", + default=True), + EnumDef("displayLights", + label="Display Lights", + items=lib.DISPLAY_LIGHTS_ENUM), + ]) + + return defs diff --git a/client/ayon_core/hosts/maya/plugins/create/create_rig.py b/client/ayon_core/hosts/maya/plugins/create/create_rig.py new file mode 100644 index 0000000000..e49e3040ba --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_rig.py @@ -0,0 +1,32 @@ +from maya import cmds + +from ayon_core.hosts.maya.api import plugin + + +class CreateRig(plugin.MayaCreator): + """Artist-friendly rig with controls to direct motion""" + + identifier = "io.openpype.creators.maya.rig" + label = "Rig" + family = "rig" + icon = "wheelchair" + + def create(self, subset_name, instance_data, pre_create_data): + + instance = super(CreateRig, self).create(subset_name, + instance_data, + pre_create_data) + + instance_node = instance.get("instance_node") + + self.log.info("Creating Rig instance setup ...") + # TODO: change name (_controls_SET -> _rigs_SET) + controls = cmds.sets(name=subset_name + "_controls_SET", empty=True) + # TODO: change name (_out_SET -> _geo_SET) + pointcache = cmds.sets(name=subset_name + "_out_SET", empty=True) + skeleton = cmds.sets( + name=subset_name + "_skeletonAnim_SET", empty=True) + skeleton_mesh = cmds.sets( + name=subset_name + "_skeletonMesh_SET", empty=True) + cmds.sets([controls, pointcache, + skeleton, skeleton_mesh], forceElement=instance_node) diff --git a/openpype/hosts/maya/plugins/create/create_setdress.py b/client/ayon_core/hosts/maya/plugins/create/create_setdress.py similarity index 85% rename from openpype/hosts/maya/plugins/create/create_setdress.py rename to client/ayon_core/hosts/maya/plugins/create/create_setdress.py index 23a706380a..dfc38f5d76 100644 --- a/openpype/hosts/maya/plugins/create/create_setdress.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_setdress.py @@ -1,5 +1,5 @@ -from openpype.hosts.maya.api import plugin -from openpype.lib import BoolDef +from ayon_core.hosts.maya.api import plugin +from ayon_core.lib import BoolDef class CreateSetDress(plugin.MayaCreator): diff --git a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py b/client/ayon_core/hosts/maya/plugins/create/create_unreal_skeletalmesh.py similarity index
97% rename from openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py rename to client/ayon_core/hosts/maya/plugins/create/create_unreal_skeletalmesh.py index b4151bac99..550a9cdb0f 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_unreal_skeletalmesh.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Creator for Unreal Skeletal Meshes.""" -from openpype.hosts.maya.api import plugin, lib -from openpype.lib import ( +from ayon_core.hosts.maya.api import plugin, lib +from ayon_core.lib import ( BoolDef, TextDef ) diff --git a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py b/client/ayon_core/hosts/maya/plugins/create/create_unreal_staticmesh.py similarity index 98% rename from openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py rename to client/ayon_core/hosts/maya/plugins/create/create_unreal_staticmesh.py index 025b39fa55..d1fac03bdf 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_unreal_staticmesh.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Creator for Unreal Static Meshes.""" -from openpype.hosts.maya.api import plugin, lib +from ayon_core.hosts.maya.api import plugin, lib from maya import cmds # noqa diff --git a/openpype/hosts/maya/plugins/create/create_unreal_yeticache.py b/client/ayon_core/hosts/maya/plugins/create/create_unreal_yeticache.py similarity index 92% rename from openpype/hosts/maya/plugins/create/create_unreal_yeticache.py rename to client/ayon_core/hosts/maya/plugins/create/create_unreal_yeticache.py index c9f9cd9ba8..4cd7288cfc 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_yeticache.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_unreal_yeticache.py @@ -1,8 +1,8 @@ -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( lib, plugin ) -from openpype.lib import NumberDef +from ayon_core.lib import NumberDef class CreateYetiCache(plugin.MayaCreator): diff --git a/openpype/hosts/maya/plugins/create/create_vrayproxy.py b/client/ayon_core/hosts/maya/plugins/create/create_vrayproxy.py similarity index 95% rename from openpype/hosts/maya/plugins/create/create_vrayproxy.py rename to client/ayon_core/hosts/maya/plugins/create/create_vrayproxy.py index b0a95538e1..7d16c5bc2c 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayproxy.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_vrayproxy.py @@ -1,8 +1,8 @@ -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( plugin, lib ) -from openpype.lib import BoolDef +from ayon_core.lib import BoolDef class CreateVrayProxy(plugin.MayaCreator): diff --git a/openpype/hosts/maya/plugins/create/create_vrayscene.py b/client/ayon_core/hosts/maya/plugins/create/create_vrayscene.py similarity index 92% rename from openpype/hosts/maya/plugins/create/create_vrayscene.py rename to client/ayon_core/hosts/maya/plugins/create/create_vrayscene.py index 2726979d30..3642f5f689 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayscene.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_vrayscene.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- """Create instance of vrayscene.""" -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( lib_rendersettings, plugin ) -from openpype.pipeline import CreatorError -from openpype.lib import BoolDef +from ayon_core.pipeline import CreatorError +from ayon_core.lib import BoolDef class 
CreateVRayScene(plugin.RenderlayerCreator): diff --git a/client/ayon_core/hosts/maya/plugins/create/create_workfile.py b/client/ayon_core/hosts/maya/plugins/create/create_workfile.py new file mode 100644 index 0000000000..396ad6ffbb --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_workfile.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating workfiles.""" +from ayon_core.pipeline import CreatedInstance, AutoCreator +from ayon_core.client import get_asset_by_name, get_asset_name_identifier +from ayon_core.hosts.maya.api import plugin +from maya import cmds + + +class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator): + """Workfile auto-creator.""" + identifier = "io.openpype.creators.maya.workfile" + label = "Workfile" + family = "workfile" + icon = "fa5.file" + + default_variant = "Main" + + def create(self): + + variant = self.default_variant + current_instance = next( + ( + instance for instance in self.create_context.instances + if instance.creator_identifier == self.identifier + ), None) + + project_name = self.project_name + asset_name = self.create_context.get_current_asset_name() + task_name = self.create_context.get_current_task_name() + host_name = self.create_context.host_name + + if current_instance is None: + current_instance_asset = None + else: + current_instance_asset = current_instance["folderPath"] + + if current_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "folderPath": asset_name, + "task": task_name, + "variant": variant + } + data.update( + self.get_dynamic_data( + variant, task_name, asset_doc, + project_name, host_name, current_instance) + ) + self.log.info("Auto-creating workfile instance...") + current_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(current_instance) + elif ( + current_instance_asset != asset_name + or current_instance["task"] != task_name + ): + # Update instance context if is not the same + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + asset_name = get_asset_name_identifier(asset_doc) + + current_instance["folderPath"] = asset_name + current_instance["task"] = task_name + current_instance["subset"] = subset_name + + def collect_instances(self): + self.cache_subsets(self.collection_shared_data) + cached_subsets = self.collection_shared_data["maya_cached_subsets"] + for node in cached_subsets.get(self.identifier, []): + node_data = self.read_instance_node(node) + + created_instance = CreatedInstance.from_existing(node_data, self) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + data = created_inst.data_to_store() + node = data.get("instance_node") + if not node: + node = self.create_node() + created_inst["instance_node"] = node + data = created_inst.data_to_store() + + self.imprint_instance_node(node, data) + + def create_node(self): + node = cmds.sets(empty=True, name="workfileMain") + cmds.setAttr(node + ".hiddenInOutliner", True) + return node diff --git a/openpype/hosts/maya/plugins/create/create_xgen.py b/client/ayon_core/hosts/maya/plugins/create/create_xgen.py similarity index 79% rename from openpype/hosts/maya/plugins/create/create_xgen.py rename to 
client/ayon_core/hosts/maya/plugins/create/create_xgen.py index eaafb0959a..4e0d41b689 100644 --- a/openpype/hosts/maya/plugins/create/create_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_xgen.py @@ -1,4 +1,4 @@ -from openpype.hosts.maya.api import plugin +from ayon_core.hosts.maya.api import plugin class CreateXgen(plugin.MayaCreator): diff --git a/openpype/hosts/maya/plugins/create/create_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/create/create_yeti_cache.py similarity index 92% rename from openpype/hosts/maya/plugins/create/create_yeti_cache.py rename to client/ayon_core/hosts/maya/plugins/create/create_yeti_cache.py index ca002392d4..82b18f113a 100644 --- a/openpype/hosts/maya/plugins/create/create_yeti_cache.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_yeti_cache.py @@ -1,8 +1,8 @@ -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( lib, plugin ) -from openpype.lib import NumberDef +from ayon_core.lib import NumberDef class CreateYetiCache(plugin.MayaCreator): diff --git a/openpype/hosts/maya/plugins/create/create_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/create/create_yeti_rig.py similarity index 95% rename from openpype/hosts/maya/plugins/create/create_yeti_rig.py rename to client/ayon_core/hosts/maya/plugins/create/create_yeti_rig.py index 445bcf46d8..df3c89a64d 100644 --- a/openpype/hosts/maya/plugins/create/create_yeti_rig.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_yeti_rig.py @@ -1,6 +1,6 @@ from maya import cmds -from openpype.hosts.maya.api import ( +from ayon_core.hosts.maya.api import ( lib, plugin ) diff --git a/openpype/hosts/maya/plugins/inventory/connect_geometry.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py similarity index 97% rename from openpype/hosts/maya/plugins/inventory/connect_geometry.py rename to client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py index 03154b7afe..13c9de4693 100644 --- a/openpype/hosts/maya/plugins/inventory/connect_geometry.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py @@ -1,7 +1,7 @@ from maya import cmds -from openpype.pipeline import InventoryAction, get_representation_context -from openpype.hosts.maya.api.lib import get_id +from ayon_core.pipeline import InventoryAction, get_representation_context +from ayon_core.hosts.maya.api.lib import get_id class ConnectGeometry(InventoryAction): diff --git a/openpype/hosts/maya/plugins/inventory/connect_xgen.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py similarity index 99% rename from openpype/hosts/maya/plugins/inventory/connect_xgen.py rename to client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py index 177971f176..2a198addf2 100644 --- a/openpype/hosts/maya/plugins/inventory/connect_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py @@ -1,7 +1,7 @@ from maya import cmds import xgenm -from openpype.pipeline import ( +from ayon_core.pipeline import ( InventoryAction, get_representation_context, get_representation_path ) diff --git a/openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py similarity index 98% rename from openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py rename to client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py index 924a1a4627..19498e5c1c 100644 --- a/openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py +++ 
b/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py @@ -4,10 +4,10 @@ from maya import cmds -from openpype.pipeline import ( +from ayon_core.pipeline import ( InventoryAction, get_representation_context, get_representation_path ) -from openpype.hosts.maya.api.lib import get_container_members, get_id +from ayon_core.hosts.maya.api.lib import get_container_members, get_id class ConnectYetiRig(InventoryAction): diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/client/ayon_core/hosts/maya/plugins/inventory/import_modelrender.py similarity index 97% rename from openpype/hosts/maya/plugins/inventory/import_modelrender.py rename to client/ayon_core/hosts/maya/plugins/inventory/import_modelrender.py index 3b30695146..e2cac22836 100644 --- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/import_modelrender.py @@ -1,16 +1,16 @@ import re import json -from openpype.client import ( +from ayon_core.client import ( get_representation_by_id, get_representations ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( InventoryAction, get_representation_context, get_current_project_name, ) -from openpype.hosts.maya.api.lib import ( +from ayon_core.hosts.maya.api.lib import ( maintained_selection, apply_shaders ) diff --git a/openpype/hosts/maya/plugins/inventory/import_reference.py b/client/ayon_core/hosts/maya/plugins/inventory/import_reference.py similarity index 87% rename from openpype/hosts/maya/plugins/inventory/import_reference.py rename to client/ayon_core/hosts/maya/plugins/inventory/import_reference.py index 3f3b85ba6c..771cb96a57 100644 --- a/openpype/hosts/maya/plugins/inventory/import_reference.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/import_reference.py @@ -1,7 +1,7 @@ from maya import cmds -from openpype.pipeline import InventoryAction -from openpype.hosts.maya.api.lib import get_reference_node +from ayon_core.pipeline import InventoryAction +from ayon_core.hosts.maya.api.lib import get_reference_node class ImportReference(InventoryAction): diff --git a/openpype/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py b/client/ayon_core/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py similarity index 92% rename from openpype/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py rename to client/ayon_core/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py index 39bc59fbbf..36d9864e99 100644 --- a/openpype/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py @@ -1,8 +1,8 @@ -from openpype.pipeline import ( +from ayon_core.pipeline import ( InventoryAction, get_representation_context ) -from openpype.hosts.maya.api.lib import ( +from ayon_core.hosts.maya.api.lib import ( create_rig_animation_instance, get_container_members, ) diff --git a/client/ayon_core/hosts/maya/plugins/inventory/select_containers.py b/client/ayon_core/hosts/maya/plugins/inventory/select_containers.py new file mode 100644 index 0000000000..f0bb2fc376 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/inventory/select_containers.py @@ -0,0 +1,46 @@ +from maya import cmds + +from ayon_core.pipeline import InventoryAction, registered_host +from ayon_core.hosts.maya.api.lib import get_container_members + + +class SelectInScene(InventoryAction): + """Select nodes in the scene from selected containers in scene inventory""" + + label = "Select in scene" 
+ icon = "search" + color = "#888888" + order = 99 + + def process(self, containers): + + all_members = [] + for container in containers: + members = get_container_members(container) + all_members.extend(members) + cmds.select(all_members, replace=True, noExpand=True) + + +class HighlightBySceneSelection(InventoryAction): + """Select containers in scene inventory from the current scene selection""" + + label = "Highlight by scene selection" + icon = "search" + color = "#888888" + order = 100 + + def process(self, containers): + + selection = set(cmds.ls(selection=True, long=True, objectsOnly=True)) + host = registered_host() + + to_select = [] + for container in host.get_containers(): + members = get_container_members(container) + if any(member in selection for member in members): + to_select.append(container["objectName"]) + + return { + "objectNames": to_select, + "options": {"clear": True} + } diff --git a/openpype/hosts/maya/plugins/load/_load_animation.py b/client/ayon_core/hosts/maya/plugins/load/_load_animation.py similarity index 91% rename from openpype/hosts/maya/plugins/load/_load_animation.py rename to client/ayon_core/hosts/maya/plugins/load/_load_animation.py index 0781735bc4..bf7f3859e1 100644 --- a/openpype/hosts/maya/plugins/load/_load_animation.py +++ b/client/ayon_core/hosts/maya/plugins/load/_load_animation.py @@ -1,4 +1,4 @@ -import openpype.hosts.maya.api.plugin +import ayon_core.hosts.maya.api.plugin import maya.cmds as cmds @@ -14,7 +14,7 @@ def _process_reference(file_url, name, namespace, options): Returns: list: list of object nodes """ - from openpype.hosts.maya.api.lib import unique_namespace + from ayon_core.hosts.maya.api.lib import unique_namespace # Get name from asset being loaded # Assuming name is subset name from the animation, we split the number # suffix from the name to ensure the namespace is unique @@ -43,7 +43,7 @@ def _process_reference(file_url, name, namespace, options): return nodes -class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): +class AbcLoader(ayon_core.hosts.maya.api.plugin.ReferenceLoader): """Loader to reference an Alembic file""" families = ["animation", @@ -72,7 +72,7 @@ def process_reference(self, context, name, namespace, options): return nodes -class FbxLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): +class FbxLoader(ayon_core.hosts.maya.api.plugin.ReferenceLoader): """Loader to reference an Fbx files""" families = ["animation", diff --git a/client/ayon_core/hosts/maya/plugins/load/actions.py b/client/ayon_core/hosts/maya/plugins/load/actions.py new file mode 100644 index 0000000000..f979623544 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/load/actions.py @@ -0,0 +1,186 @@ +"""A module containing generic loader actions that will display in the Loader. 
+ +""" +import qargparse +from ayon_core.pipeline import load +from ayon_core.hosts.maya.api.lib import ( + maintained_selection, + get_custom_namespace +) +import ayon_core.hosts.maya.api.plugin + + +class SetFrameRangeLoader(load.LoaderPlugin): + """Set frame range excluding pre- and post-handles""" + + families = ["animation", + "camera", + "proxyAbc", + "pointcache"] + representations = ["abc"] + + label = "Set frame range" + order = 11 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + import maya.cmds as cmds + + version = context['version'] + version_data = version.get("data", {}) + + start = version_data.get("frameStart", None) + end = version_data.get("frameEnd", None) + + if start is None or end is None: + print("Skipping setting frame range because start or " + "end frame data is missing..") + return + + cmds.playbackOptions(minTime=start, + maxTime=end, + animationStartTime=start, + animationEndTime=end) + + +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): + """Set frame range including pre- and post-handles""" + + families = ["animation", + "camera", + "proxyAbc", + "pointcache"] + representations = ["abc"] + + label = "Set frame range (with handles)" + order = 12 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + import maya.cmds as cmds + + version = context['version'] + version_data = version.get("data", {}) + + start = version_data.get("frameStart", None) + end = version_data.get("frameEnd", None) + + if start is None or end is None: + print("Skipping setting frame range because start or " + "end frame data is missing..") + return + + # Include handles + start -= version_data.get("handleStart", 0) + end += version_data.get("handleEnd", 0) + + cmds.playbackOptions(minTime=start, + maxTime=end, + animationStartTime=start, + animationEndTime=end) + + +class ImportMayaLoader(ayon_core.hosts.maya.api.plugin.Loader): + """Import action for Maya (unmanaged) + + Warning: + The loaded content will be unmanaged and is *not* visible in the + scene inventory. It's purely intended to merge content into your scene + so you could also use it as a new base. + + """ + representations = ["ma", "mb", "obj"] + families = [ + "model", + "pointcache", + "proxyAbc", + "animation", + "mayaAscii", + "mayaScene", + "setdress", + "layout", + "camera", + "rig", + "camerarig", + "staticMesh", + "workfile" + ] + + label = "Import" + order = 10 + icon = "arrow-circle-down" + color = "#775555" + + options = [ + qargparse.Boolean( + "clean_import", + label="Clean import", + default=False, + help="Should all occurrences of cbId be purged?" 
+ ) + ] + + def load(self, context, name=None, namespace=None, data=None): + import maya.cmds as cmds + + choice = self.display_warning() + if choice is False: + return + + custom_group_name, custom_namespace, options = \ + self.get_custom_namespace_and_group(context, data, + "import_loader") + + namespace = get_custom_namespace(custom_namespace) + + if not options.get("attach_to_root", True): + custom_group_name = namespace + + path = self.filepath_from_context(context) + with maintained_selection(): + nodes = cmds.file(path, + i=True, + preserveReferences=True, + namespace=namespace, + returnNewNodes=True, + groupReference=options.get("attach_to_root", + True), + groupName=custom_group_name) + + if data.get("clean_import", False): + remove_attributes = ["cbId"] + for node in nodes: + for attr in remove_attributes: + if cmds.attributeQuery(attr, node=node, exists=True): + full_attr = "{}.{}".format(node, attr) + print("Removing {}".format(full_attr)) + cmds.deleteAttr(full_attr) + + # We do not containerize imported content, it remains unmanaged + return + + def display_warning(self): + """Show warning to ensure the user can't import models by accident + + Returns: + bool + + """ + + from qtpy import QtWidgets + + accept = QtWidgets.QMessageBox.Ok + buttons = accept | QtWidgets.QMessageBox.Cancel + + message = "Are you sure you want to import this?" + state = QtWidgets.QMessageBox.warning(None, + "Are you sure?", + message, + buttons=buttons, + defaultButton=accept) + + return state == accept diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py similarity index 97% rename from openpype/hosts/maya/plugins/load/load_arnold_standin.py rename to client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py index 2e1329f201..c690d1c205 100644 --- a/openpype/hosts/maya/plugins/load/load_arnold_standin.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py @@ -3,19 +3,19 @@ import maya.cmds as cmds -from openpype.settings import get_project_settings -from openpype.pipeline import ( +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( load, legacy_io, get_representation_path ) -from openpype.hosts.maya.api.lib import ( +from ayon_core.hosts.maya.api.lib import ( unique_namespace, get_attribute_input, maintained_selection, convert_to_maya_fps ) -from openpype.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.pipeline import containerise def is_sequence(files): diff --git a/openpype/hosts/maya/plugins/load/load_assembly.py b/client/ayon_core/hosts/maya/plugins/load/load_assembly.py similarity index 92% rename from openpype/hosts/maya/plugins/load/load_assembly.py rename to client/ayon_core/hosts/maya/plugins/load/load_assembly.py index 0a2733e03c..e119dfe1c3 100644 --- a/openpype/hosts/maya/plugins/load/load_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_assembly.py @@ -1,13 +1,13 @@ import maya.cmds as cmds -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, remove_container ) -from openpype.hosts.maya.api.pipeline import containerise -from openpype.hosts.maya.api.lib import unique_namespace -from openpype.hosts.maya.api import setdress +from ayon_core.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.lib import unique_namespace +from ayon_core.hosts.maya.api import setdress class AssemblyLoader(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/maya/plugins/load/load_audio.py
b/client/ayon_core/hosts/maya/plugins/load/load_audio.py new file mode 100644 index 0000000000..deeeac66f2 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/load/load_audio.py @@ -0,0 +1,113 @@ +from maya import cmds, mel + +from ayon_core.pipeline import ( + load, + get_representation_path, +) +from ayon_core.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.lib import unique_namespace, get_container_members + + +class AudioLoader(load.LoaderPlugin): + """Specific loader of audio.""" + + families = ["audio"] + label = "Load audio" + representations = ["wav"] + icon = "volume-up" + color = "orange" + + def load(self, context, name, namespace, data): + + start_frame = cmds.playbackOptions(query=True, min=True) + sound_node = cmds.sound( + file=self.filepath_from_context(context), offset=start_frame + ) + cmds.timeControl( + mel.eval("$gPlayBackSlider=$gPlayBackSlider"), + edit=True, + sound=sound_node, + displaySound=True + ) + + asset = context["asset"]["name"] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + return containerise( + name=name, + namespace=namespace, + nodes=[sound_node], + context=context, + loader=self.__class__.__name__ + ) + + def update(self, container, representation): + + members = get_container_members(container) + audio_nodes = cmds.ls(members, type="audio") + + assert audio_nodes is not None, "Audio node not found." + audio_node = audio_nodes[0] + + current_sound = cmds.timeControl( + mel.eval("$gPlayBackSlider=$gPlayBackSlider"), + query=True, + sound=True + ) + activate_sound = current_sound == audio_node + + path = get_representation_path(representation) + + cmds.sound( + audio_node, + edit=True, + file=path + ) + + # The source start + end does not automatically update itself to the + # length of the new audio file, even though Maya does do that when + # creating a new audio node. So to update we compute it manually. + # This would however override any source start and source end a user + # might have done on the original audio node after load.
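+ # Worked example with hypothetical values: 96000 sample frames at a
+ # 48000 Hz sample rate is 2.0 seconds of audio; in a 25 FPS workfile
+ # that gives sourceStart 0 and sourceEnd 2.0 * 25 = 50.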
+ audio_frame_count = cmds.getAttr("{}.frameCount".format(audio_node)) + audio_sample_rate = cmds.getAttr("{}.sampleRate".format(audio_node)) + duration_in_seconds = audio_frame_count / audio_sample_rate + fps = mel.eval('currentTimeUnitToFPS()') # workfile FPS + source_start = 0 + source_end = (duration_in_seconds * fps) + cmds.setAttr("{}.sourceStart".format(audio_node), source_start) + cmds.setAttr("{}.sourceEnd".format(audio_node), source_end) + + if activate_sound: + # maya by default deactivates it from timeline on file change + cmds.timeControl( + mel.eval("$gPlayBackSlider=$gPlayBackSlider"), + edit=True, + sound=audio_node, + displaySound=True + ) + + cmds.setAttr( + container["objectName"] + ".representation", + str(representation["_id"]), + type="string" + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/client/ayon_core/hosts/maya/plugins/load/load_gpucache.py similarity index 94% rename from openpype/hosts/maya/plugins/load/load_gpucache.py rename to client/ayon_core/hosts/maya/plugins/load/load_gpucache.py index 344f2fd060..00a76d374b 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_gpucache.py @@ -2,13 +2,13 @@ import maya.cmds as cmds -from openpype.hosts.maya.api.pipeline import containerise -from openpype.hosts.maya.api.lib import unique_namespace -from openpype.pipeline import ( +from ayon_core.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.lib import unique_namespace +from ayon_core.pipeline import ( load, get_representation_path ) -from openpype.settings import get_project_settings +from ayon_core.settings import get_project_settings class GpuCacheLoader(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/maya/plugins/load/load_image.py b/client/ayon_core/hosts/maya/plugins/load/load_image.py new file mode 100644 index 0000000000..aedeb63e3d --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/load/load_image.py @@ -0,0 +1,338 @@ +import os +import copy + +from ayon_core.lib import EnumDef +from ayon_core.pipeline import ( + load, + get_representation_context, + get_current_host_name, +) +from ayon_core.pipeline.load.utils import get_representation_path_from_context +from ayon_core.pipeline.colorspace import ( + get_imageio_file_rules_colorspace_from_filepath, + get_imageio_config, + get_imageio_file_rules +) +from ayon_core.settings import get_project_settings + +from ayon_core.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.lib import ( + unique_namespace, + namespaced +) + +from maya import cmds + + +def create_texture(): + """Create place2dTexture with file node with uv connections + + Mimics Maya "file [Texture]" creation. 
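+
+ Example usage (the texture path is hypothetical):
+ file_node, place2d = create_texture()
+ cmds.setAttr(file_node + ".fileTextureName",
+ "/path/to/texture.png", type="string")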
+ """ + + place = cmds.shadingNode("place2dTexture", asUtility=True, name="place2d") + file = cmds.shadingNode("file", asTexture=True, name="file") + + connections = ["coverage", "translateFrame", "rotateFrame", "rotateUV", + "mirrorU", "mirrorV", "stagger", "wrapV", "wrapU", + "repeatUV", "offset", "noiseUV", "vertexUvThree", + "vertexUvTwo", "vertexUvOne", "vertexCameraOne"] + for attr in connections: + src = "{}.{}".format(place, attr) + dest = "{}.{}".format(file, attr) + cmds.connectAttr(src, dest) + + cmds.connectAttr(place + '.outUV', file + '.uvCoord') + cmds.connectAttr(place + '.outUvFilterSize', file + '.uvFilterSize') + + return file, place + + +def create_projection(): + """Create texture with place3dTexture and projection + + Mimics Maya "file [Projection]" creation. + """ + + file, place = create_texture() + projection = cmds.shadingNode("projection", asTexture=True, + name="projection") + place3d = cmds.shadingNode("place3dTexture", asUtility=True, + name="place3d") + + cmds.connectAttr(place3d + '.worldInverseMatrix[0]', + projection + ".placementMatrix") + cmds.connectAttr(file + '.outColor', projection + ".image") + + return file, place, projection, place3d + + +def create_stencil(): + """Create texture with extra place2dTexture offset and stencil + + Mimics Maya "file [Stencil]" creation. + """ + + file, place = create_texture() + + place_stencil = cmds.shadingNode("place2dTexture", asUtility=True, + name="place2d_stencil") + stencil = cmds.shadingNode("stencil", asTexture=True, name="stencil") + + for src_attr, dest_attr in [ + ("outUV", "uvCoord"), + ("outUvFilterSize", "uvFilterSize") + ]: + src_plug = "{}.{}".format(place_stencil, src_attr) + cmds.connectAttr(src_plug, "{}.{}".format(place, dest_attr)) + cmds.connectAttr(src_plug, "{}.{}".format(stencil, dest_attr)) + + return file, place, stencil, place_stencil + + +class FileNodeLoader(load.LoaderPlugin): + """File node loader.""" + + families = ["image", "plate", "render"] + label = "Load file node" + representations = ["exr", "tif", "png", "jpg"] + icon = "image" + color = "orange" + order = 2 + + options = [ + EnumDef( + "mode", + items={ + "texture": "Texture", + "projection": "Projection", + "stencil": "Stencil" + }, + default="texture", + label="Texture Mode" + ) + ] + + def load(self, context, name, namespace, data): + + asset = context['asset']['name'] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + with namespaced(namespace, new=True) as namespace: + # Create the nodes within the namespace + nodes = { + "texture": create_texture, + "projection": create_projection, + "stencil": create_stencil + }[data.get("mode", "texture")]() + + file_node = cmds.ls(nodes, type="file")[0] + + self._apply_representation_context(context, file_node) + + # For ease of access for the user select all the nodes and select + # the file node last so that UI shows its attributes by default + cmds.select(list(nodes) + [file_node], replace=True) + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__ + ) + + def update(self, container, representation): + + members = cmds.sets(container['objectName'], query=True) + file_node = cmds.ls(members, type="file")[0] + + context = get_representation_context(representation) + self._apply_representation_context(context, file_node) + + # Update representation + cmds.setAttr( + container["objectName"] + ".representation", + 
str(representation["_id"]), + type="string" + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass + + def _apply_representation_context(self, context, file_node): + """Update the file node to match the context. + + This sets the file node's attributes for: + - file path + - udim tiling mode (if it is an udim tile) + - use frame extension (if it is a sequence) + - colorspace + + """ + + repre_context = context["representation"]["context"] + has_frames = repre_context.get("frame") is not None + has_udim = repre_context.get("udim") is not None + + # Set UV tiling mode if UDIM tiles + if has_udim: + cmds.setAttr(file_node + ".uvTilingMode", 3) # UDIM-tiles + else: + cmds.setAttr(file_node + ".uvTilingMode", 0) # off + + # Enable sequence if publish has `startFrame` and `endFrame` and + # `startFrame != endFrame` + if has_frames and self._is_sequence(context): + # When enabling useFrameExtension maya automatically + # connects an expression to .frameExtension to set + # the current frame. However, this expression is generated + # with some delay and thus it'll show a warning if frame 0 + # doesn't exist because we're explicitly setting the + # token. + cmds.setAttr(file_node + ".useFrameExtension", True) + else: + cmds.setAttr(file_node + ".useFrameExtension", False) + + # Set the file node path attribute + path = self._format_path(context) + cmds.setAttr(file_node + ".fileTextureName", path, type="string") + + # Set colorspace + colorspace = self._get_colorspace(context) + if colorspace: + cmds.setAttr(file_node + ".colorSpace", colorspace, type="string") + else: + self.log.debug("Unknown colorspace - setting colorspace skipped.") + + def _is_sequence(self, context): + """Check whether frameStart and frameEnd are not the same.""" + version = context.get("version", {}) + representation = context.get("representation", {}) + + for doc in [representation, version]: + # Frame range can be set on version or representation. + # When set on representation it overrides version data. + data = doc.get("data", {}) + start = data.get("frameStartHandle", data.get("frameStart", None)) + end = data.get("frameEndHandle", data.get("frameEnd", None)) + + if start is None or end is None: + continue + + if start != end: + return True + else: + return False + + return False + + def _get_colorspace(self, context): + """Return colorspace of the file to load. + + Retrieves the explicit colorspace from the publish. If no colorspace + data is stored with published content then project imageio settings + are used to make an assumption of the colorspace based on the file + rules. If no file rules match then None is returned. + + Returns: + str or None: The colorspace of the file or None if not detected. 
+ + """ + + # We can't apply color spaces if management is not enabled + if not cmds.colorManagementPrefs(query=True, cmEnabled=True): + return + + representation = context["representation"] + colorspace_data = representation.get("data", {}).get("colorspaceData") + if colorspace_data: + return colorspace_data["colorspace"] + + # Assume colorspace from filepath based on project settings + project_name = context["project"]["name"] + host_name = get_current_host_name() + project_settings = get_project_settings(project_name) + + config_data = get_imageio_config( + project_name, host_name, + project_settings=project_settings + ) + + # ignore if host imageio is not enabled + if not config_data: + return + + file_rules = get_imageio_file_rules( + project_name, host_name, + project_settings=project_settings + ) + + path = get_representation_path_from_context(context) + colorspace = get_imageio_file_rules_colorspace_from_filepath( + path, + host_name, + project_name, + config_data=config_data, + file_rules=file_rules, + project_settings=project_settings + ) + + return colorspace + + def _format_path(self, context): + """Format the path with correct tokens for frames and udim tiles.""" + + context = copy.deepcopy(context) + representation = context["representation"] + template = representation.get("data", {}).get("template") + if not template: + # No template to find token locations for + return get_representation_path_from_context(context) + + def _placeholder(key): + # Substitute with a long placeholder value so that potential + # custom formatting with padding doesn't find its way into + # our formatting, so that wouldn't be padded as 0 + return "___{}___".format(key) + + # We format UDIM and Frame numbers with their specific tokens. To do so + # we in-place change the representation context data to format the path + # with our own data + tokens = { + "frame": "", + "udim": "" + } + has_tokens = False + repre_context = representation["context"] + for key, _token in tokens.items(): + if key in repre_context: + repre_context[key] = _placeholder(key) + has_tokens = True + + # Replace with our custom template that has the tokens set + representation["data"]["template"] = template + path = get_representation_path_from_context(context) + + if has_tokens: + for key, token in tokens.items(): + if key in repre_context: + path = path.replace(_placeholder(key), token) + + return path diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/client/ayon_core/hosts/maya/plugins/load/load_image_plane.py similarity index 98% rename from openpype/hosts/maya/plugins/load/load_image_plane.py rename to client/ayon_core/hosts/maya/plugins/load/load_image_plane.py index 117f4f4202..fb27e6597a 100644 --- a/openpype/hosts/maya/plugins/load/load_image_plane.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_image_plane.py @@ -1,17 +1,17 @@ from qtpy import QtWidgets, QtCore -from openpype.client import ( +from ayon_core.client import ( get_asset_by_id, get_subset_by_id, get_version_by_id, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, get_current_project_name, ) -from openpype.hosts.maya.api.pipeline import containerise -from openpype.hosts.maya.api.lib import ( +from ayon_core.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.lib import ( unique_namespace, namespaced, pairwise, diff --git a/client/ayon_core/hosts/maya/plugins/load/load_look.py b/client/ayon_core/hosts/maya/plugins/load/load_look.py new file mode 100644 index 
0000000000..ba5891469d --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/load/load_look.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +"""Look loader.""" +import json +from collections import defaultdict + +from qtpy import QtWidgets + +from ayon_core.client import get_representation_by_name +from ayon_core.pipeline import ( + get_current_project_name, + get_representation_path, +) +import ayon_core.hosts.maya.api.plugin +from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.lib import get_reference_node + +from ayon_core.tools.utils import ScrollMessageBox + + +class LookLoader(ayon_core.hosts.maya.api.plugin.ReferenceLoader): + """Specific loader for lookdev""" + + families = ["look"] + representations = ["ma"] + + label = "Reference look" + order = -10 + icon = "code-fork" + color = "orange" + + def process_reference(self, context, name, namespace, options): + from maya import cmds + + with lib.maintained_selection(): + file_url = self.prepare_root_value( + file_url=self.filepath_from_context(context), + project_name=context["project"]["name"] + ) + nodes = cmds.file(file_url, + namespace=namespace, + reference=True, + returnNewNodes=True) + + self[:] = nodes + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """ + Called by Scene Inventory when look should be updated to current + version. + If any reference edits cannot be applied, eg. shader renamed and + material not present, reference is unloaded and cleaned. + All failed edits are highlighted to the user via message box. + + Args: + container: object that has look to be updated + representation: (dict): relationship data to get proper + representation from DB and persisted + data in .json + Returns: + None + """ + from maya import cmds + + # Get reference node from container members + members = lib.get_container_members(container) + reference_node = get_reference_node(members, log=self.log) + + shader_nodes = cmds.ls(members, type='shadingEngine') + orig_nodes = set(self._get_nodes_with_shader(shader_nodes)) + + # Trigger the regular reference update on the ReferenceLoader + super(LookLoader, self).update(container, representation) + + # get new applied shaders and nodes from new version + shader_nodes = cmds.ls(members, type='shadingEngine') + nodes = set(self._get_nodes_with_shader(shader_nodes)) + + project_name = get_current_project_name() + json_representation = get_representation_by_name( + project_name, "json", representation["parent"] + ) + + # Load relationships + shader_relation = get_representation_path(json_representation) + with open(shader_relation, "r") as f: + json_data = json.load(f) + + # update of reference could result in failed edits - material is not + # present because of renaming etc. 
If so, highlight the failed edits to the user.
+        failed_edits = cmds.referenceQuery(reference_node,
+                                           editStrings=True,
+                                           failedEdits=True,
+                                           successfulEdits=False)
+        if failed_edits:
+            # clean references - removes failed reference edits
+            cmds.file(cr=reference_node)  # cleanReference
+
+            # reapply shading groups from json representation on orig nodes
+            lib.apply_shaders(json_data, shader_nodes, orig_nodes)
+
+            msg = ["During reference update some edits failed.",
+                   "All successful edits were kept intact.\n",
+                   "Failed and removed edits:"]
+            msg.extend(failed_edits)
+
+            msg = ScrollMessageBox(QtWidgets.QMessageBox.Warning,
+                                   "Some reference edits failed",
+                                   msg)
+            msg.exec_()
+
+        attributes = json_data.get("attributes", [])
+
+        # region compute lookup
+        nodes_by_id = defaultdict(list)
+        for node in nodes:
+            nodes_by_id[lib.get_id(node)].append(node)
+        lib.apply_attributes(attributes, nodes_by_id)
+
+    def _get_nodes_with_shader(self, shader_nodes):
+        """Return shape nodes that have any of the given shaders assigned.
+
+        Args:
+            shader_nodes (list): Shading engine (shading group) nodes.
+        Returns:
+            list: Shape node names.
+        """
+        from maya import cmds
+
+        for shader in shader_nodes:
+            future = cmds.listHistory(shader, future=True)
+            connections = cmds.listConnections(future,
+                                               type='mesh')
+            if connections:
+                # Ensure unique entries only to optimize query and results
+                connections = list(set(connections))
+                return cmds.listRelatives(connections,
+                                          shapes=True,
+                                          fullPath=True) or []
+        return []
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_matchmove.py b/client/ayon_core/hosts/maya/plugins/load/load_matchmove.py
new file mode 100644
index 0000000000..885d2dbae1
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/load/load_matchmove.py
@@ -0,0 +1,32 @@
+from maya import mel
+from ayon_core.pipeline import load
+
+class MatchmoveLoader(load.LoaderPlugin):
+    """
+    This will run a matchmove script to create a track in the scene.
+
+    Supported script types are .py and .mel
+    """
+
+    families = ["matchmove"]
+    representations = ["py", "mel"]
+    defaults = ["Camera", "Object", "Mocap"]
+
+    label = "Run matchmove script"
+    icon = "empire"
+    color = "orange"
+
+    def load(self, context, name, namespace, data):
+        path = self.filepath_from_context(context)
+        if path.lower().endswith(".py"):
+            # Use a context manager so the file handle is closed again
+            with open(path) as f:
+                exec(f.read())
+
+        elif path.lower().endswith(".mel"):
+            mel.eval('source "{}"'.format(path))
+
+        else:
+            self.log.error("Unsupported script type: {}".format(path))
+
+        return True
diff --git a/openpype/hosts/maya/plugins/load/load_maya_usd.py b/client/ayon_core/hosts/maya/plugins/load/load_maya_usd.py
similarity index 94%
rename from openpype/hosts/maya/plugins/load/load_maya_usd.py
rename to client/ayon_core/hosts/maya/plugins/load/load_maya_usd.py
index 2fb1a625a5..c2bea1501c 100644
--- a/openpype/hosts/maya/plugins/load/load_maya_usd.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_maya_usd.py
@@ -1,16 +1,16 @@
 # -*- coding: utf-8 -*-
 import maya.cmds as cmds
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     load,
     get_representation_path,
 )
-from openpype.pipeline.load import get_representation_path_from_context
-from openpype.hosts.maya.api.lib import (
+from ayon_core.pipeline.load import get_representation_path_from_context
+from ayon_core.hosts.maya.api.lib import (
     namespaced,
     unique_namespace
 )
-from openpype.hosts.maya.api.pipeline import containerise
+from ayon_core.hosts.maya.api.pipeline import containerise
 
 
 class MayaUsdLoader(load.LoaderPlugin):
diff --git a/openpype/hosts/maya/plugins/load/load_multiverse_usd.py b/client/ayon_core/hosts/maya/plugins/load/load_multiverse_usd.py
similarity index 95%
rename from openpype/hosts/maya/plugins/load/load_multiverse_usd.py
rename to client/ayon_core/hosts/maya/plugins/load/load_multiverse_usd.py
index cad42b55f9..a9ba2b8773 100644
--- a/openpype/hosts/maya/plugins/load/load_multiverse_usd.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_multiverse_usd.py
@@ -3,17 +3,17 @@
 from maya import mel
 import os
 
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     load,
     get_representation_path
 )
-from openpype.hosts.maya.api.lib import (
+from ayon_core.hosts.maya.api.lib import (
     maintained_selection,
     namespaced,
     unique_namespace
 )
-from openpype.hosts.maya.api.pipeline import containerise
-from openpype.client import get_representation_by_id
+from ayon_core.hosts.maya.api.pipeline import containerise
+from ayon_core.client import get_representation_by_id
 
 
 class MultiverseUsdLoader(load.LoaderPlugin):
diff --git a/openpype/hosts/maya/plugins/load/load_multiverse_usd_over.py b/client/ayon_core/hosts/maya/plugins/load/load_multiverse_usd_over.py
similarity index 95%
rename from openpype/hosts/maya/plugins/load/load_multiverse_usd_over.py
rename to client/ayon_core/hosts/maya/plugins/load/load_multiverse_usd_over.py
index be8d78607b..d448dc74a8 100644
--- a/openpype/hosts/maya/plugins/load/load_multiverse_usd_over.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_multiverse_usd_over.py
@@ -5,15 +5,15 @@
 
 import qargparse
 
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     load,
     get_representation_path
 )
-from openpype.hosts.maya.api.lib import (
+from ayon_core.hosts.maya.api.lib import (
     maintained_selection
 )
-from openpype.hosts.maya.api.pipeline import containerise
-from openpype.client import get_representation_by_id
+from ayon_core.hosts.maya.api.pipeline import containerise
+from ayon_core.client import get_representation_by_id
class MultiverseUsdOverLoader(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/maya/plugins/load/load_redshift_proxy.py b/client/ayon_core/hosts/maya/plugins/load/load_redshift_proxy.py new file mode 100644 index 0000000000..dd378602c9 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/load/load_redshift_proxy.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +"""Loader for Redshift proxy.""" +import os +import clique + +import maya.cmds as cmds + +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( + load, + get_representation_path +) +from ayon_core.hosts.maya.api.lib import ( + namespaced, + maintained_selection, + unique_namespace +) +from ayon_core.hosts.maya.api.pipeline import containerise + + +class RedshiftProxyLoader(load.LoaderPlugin): + """Load Redshift proxy""" + + families = ["redshiftproxy"] + representations = ["rs"] + + label = "Import Redshift Proxy" + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, options=None): + """Plugin entry point.""" + try: + family = context["representation"]["context"]["family"] + except ValueError: + family = "redshiftproxy" + + asset_name = context['asset']["name"] + namespace = namespace or unique_namespace( + asset_name + "_", + prefix="_" if asset_name[0].isdigit() else "", + suffix="_", + ) + + # Ensure Redshift for Maya is loaded. + cmds.loadPlugin("redshift4maya", quiet=True) + + path = self.filepath_from_context(context) + with maintained_selection(): + cmds.namespace(addNamespace=namespace) + with namespaced(namespace, new=False): + nodes, group_node = self.create_rs_proxy(name, path) + + self[:] = nodes + if not nodes: + return + + # colour the group node + project_name = context["project"]["name"] + settings = get_project_settings(project_name) + colors = settings['maya']['load']['colors'] + c = colors.get(family) + if c is not None: + cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1) + cmds.setAttr("{0}.outlinerColor".format(group_node), + c[0], c[1], c[2]) + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__) + + def update(self, container, representation): + + node = container['objectName'] + assert cmds.objExists(node), "Missing container" + + members = cmds.sets(node, query=True) or [] + rs_meshes = cmds.ls(members, type="RedshiftProxyMesh") + assert rs_meshes, "Cannot find RedshiftProxyMesh in container" + + filename = get_representation_path(representation) + + for rs_mesh in rs_meshes: + cmds.setAttr("{}.fileName".format(rs_mesh), + filename, + type="string") + + # Update metadata + cmds.setAttr("{}.representation".format(node), + str(representation["_id"]), + type="string") + + def remove(self, container): + + # Delete container and its contents + if cmds.objExists(container['objectName']): + members = cmds.sets(container['objectName'], query=True) or [] + cmds.delete([container['objectName']] + members) + + # Remove the namespace, if empty + namespace = container['namespace'] + if cmds.namespace(exists=namespace): + members = cmds.namespaceInfo(namespace, listNamespace=True) + if not members: + cmds.namespace(removeNamespace=namespace) + else: + self.log.warning("Namespace not deleted because it " + "still has members: %s", namespace) + + def switch(self, container, representation): + self.update(container, representation) + + def create_rs_proxy(self, name, path): + """Creates Redshift Proxies showing a proxy object. 
+ + Args: + name (str): Proxy name. + path (str): Path to proxy file. + + Returns: + (str, str): Name of mesh with Redshift proxy and its parent + transform. + + """ + rs_mesh = cmds.createNode( + 'RedshiftProxyMesh', name="{}_RS".format(name)) + mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) + + cmds.setAttr("{}.fileName".format(rs_mesh), + path, + type="string") + + cmds.connectAttr("{}.outMesh".format(rs_mesh), + "{}.inMesh".format(mesh_shape)) + + # TODO: use the assigned shading group as shaders if existed + # assign default shader to redshift proxy + if cmds.ls("initialShadingGroup", type="shadingEngine"): + cmds.sets(mesh_shape, forceElement="initialShadingGroup") + + group_node = cmds.group(empty=True, name="{}_GRP".format(name)) + mesh_transform = cmds.listRelatives(mesh_shape, + parent=True, fullPath=True) + cmds.parent(mesh_transform, group_node) + nodes = [rs_mesh, mesh_shape, group_node] + + # determine if we need to enable animation support + files_in_folder = os.listdir(os.path.dirname(path)) + collections, remainder = clique.assemble(files_in_folder) + + if collections: + cmds.setAttr("{}.useFrameExtension".format(rs_mesh), 1) + + return nodes, group_node diff --git a/client/ayon_core/hosts/maya/plugins/load/load_reference.py b/client/ayon_core/hosts/maya/plugins/load/load_reference.py new file mode 100644 index 0000000000..36bd2e5969 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/load/load_reference.py @@ -0,0 +1,352 @@ +import os +import difflib +import contextlib + +from maya import cmds +import qargparse + +from ayon_core.settings import get_project_settings +import ayon_core.hosts.maya.api.plugin +from ayon_core.hosts.maya.api.lib import ( + maintained_selection, + get_container_members, + parent_nodes, + create_rig_animation_instance +) + + +@contextlib.contextmanager +def preserve_modelpanel_cameras(container, log=None): + """Preserve camera members of container in the modelPanels. + + This is used to ensure a camera remains in the modelPanels after updating + to a new version. + + """ + + # Get the modelPanels that used the old camera + members = get_container_members(container) + old_cameras = set(cmds.ls(members, type="camera", long=True)) + if not old_cameras: + # No need to manage anything + yield + return + + panel_cameras = {} + for panel in cmds.getPanel(type="modelPanel"): + cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True), + long=True)[0] + + # Often but not always maya returns the transform from the + # modelPanel as opposed to the camera shape, so we convert it + # to explicitly be the camera shape + if cmds.nodeType(cam) != "camera": + cam = cmds.listRelatives(cam, + children=True, + fullPath=True, + type="camera")[0] + if cam in old_cameras: + panel_cameras[panel] = cam + + if not panel_cameras: + # No need to manage anything + yield + return + + try: + yield + finally: + new_members = get_container_members(container) + new_cameras = set(cmds.ls(new_members, type="camera", long=True)) + if not new_cameras: + return + + for panel, cam_name in panel_cameras.items(): + new_camera = None + if cam_name in new_cameras: + new_camera = cam_name + elif len(new_cameras) == 1: + new_camera = next(iter(new_cameras)) + else: + # Multiple cameras in the updated container but not an exact + # match detected by name. 
Find the closest match + matches = difflib.get_close_matches(word=cam_name, + possibilities=new_cameras, + n=1) + if matches: + new_camera = matches[0] # best match + if log: + log.info("Camera in '{}' restored with " + "closest match camera: {} (before: {})" + .format(panel, new_camera, cam_name)) + + if not new_camera: + # Unable to find the camera to re-apply in the modelpanel + continue + + cmds.modelPanel(panel, edit=True, camera=new_camera) + + +class ReferenceLoader(ayon_core.hosts.maya.api.plugin.ReferenceLoader): + """Reference file""" + + families = ["model", + "pointcache", + "proxyAbc", + "animation", + "mayaAscii", + "mayaScene", + "setdress", + "layout", + "camera", + "rig", + "camerarig", + "staticMesh", + "skeletalMesh", + "mvLook", + "matchmove"] + + representations = ["ma", "abc", "fbx", "mb"] + + label = "Reference" + order = -10 + icon = "code-fork" + color = "orange" + + def process_reference(self, context, name, namespace, options): + import maya.cmds as cmds + + try: + family = context["representation"]["context"]["family"] + except ValueError: + family = "model" + + project_name = context["project"]["name"] + # True by default to keep legacy behaviours + attach_to_root = options.get("attach_to_root", True) + group_name = options["group_name"] + + # no group shall be created + if not attach_to_root: + group_name = namespace + + kwargs = {} + if "file_options" in options: + kwargs["options"] = options["file_options"] + if "file_type" in options: + kwargs["type"] = options["file_type"] + + path = self.filepath_from_context(context) + with maintained_selection(): + cmds.loadPlugin("AbcImport.mll", quiet=True) + + file_url = self.prepare_root_value(path, project_name) + nodes = cmds.file(file_url, + namespace=namespace, + sharedReferenceFile=False, + reference=True, + returnNewNodes=True, + groupReference=attach_to_root, + groupName=group_name, + **kwargs) + + shapes = cmds.ls(nodes, shapes=True, long=True) + + new_nodes = (list(set(nodes) - set(shapes))) + + # if there are cameras, try to lock their transforms + self._lock_camera_transforms(new_nodes) + + current_namespace = cmds.namespaceInfo(currentNamespace=True) + + if current_namespace != ":": + group_name = current_namespace + ":" + group_name + + self[:] = new_nodes + + if attach_to_root: + group_name = "|" + group_name + roots = cmds.listRelatives(group_name, + children=True, + fullPath=True) or [] + + if family not in {"layout", "setdress", + "mayaAscii", "mayaScene"}: + # QUESTION Why do we need to exclude these families? 
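+                # A rough sketch (illustrative only) of what the two lines
+                # below amount to: the group's children are temporarily
+                # parented to world so that zeroing the group's transform
+                # pivots does not shift them along:
+                #
+                #     world_roots = cmds.parent(roots, world=True)
+                #     cmds.xform(group_name, zeroTransformPivots=True)
+                #     cmds.parent(world_roots, group_name)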
+ with parent_nodes(roots, parent=None): + cmds.xform(group_name, zeroTransformPivots=True) + + settings = get_project_settings(project_name) + + display_handle = settings['maya']['load'].get( + 'reference_loader', {} + ).get('display_handle', True) + cmds.setAttr( + "{}.displayHandle".format(group_name), display_handle + ) + + colors = settings['maya']['load']['colors'] + c = colors.get(family) + if c is not None: + cmds.setAttr("{}.useOutlinerColor".format(group_name), 1) + cmds.setAttr("{}.outlinerColor".format(group_name), + (float(c[0]) / 255), + (float(c[1]) / 255), + (float(c[2]) / 255)) + + cmds.setAttr( + "{}.displayHandle".format(group_name), display_handle + ) + # get bounding box + bbox = cmds.exactWorldBoundingBox(group_name) + # get pivot position on world space + pivot = cmds.xform(group_name, q=True, sp=True, ws=True) + # center of bounding box + cx = (bbox[0] + bbox[3]) / 2 + cy = (bbox[1] + bbox[4]) / 2 + cz = (bbox[2] + bbox[5]) / 2 + # add pivot position to calculate offset + cx = cx + pivot[0] + cy = cy + pivot[1] + cz = cz + pivot[2] + # set selection handle offset to center of bounding box + cmds.setAttr("{}.selectHandleX".format(group_name), cx) + cmds.setAttr("{}.selectHandleY".format(group_name), cy) + cmds.setAttr("{}.selectHandleZ".format(group_name), cz) + + if family == "rig": + self._post_process_rig(namespace, context, options) + else: + if "translate" in options: + if not attach_to_root and new_nodes: + root_nodes = cmds.ls(new_nodes, assemblies=True, + long=True) + # we assume only a single root is ever loaded + group_name = root_nodes[0] + cmds.setAttr("{}.translate".format(group_name), + *options["translate"]) + return new_nodes + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + with preserve_modelpanel_cameras(container, log=self.log): + super(ReferenceLoader, self).update(container, representation) + + # We also want to lock camera transforms on any new cameras in the + # reference or for a camera which might have changed names. 
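+        # "Locking" below uses Maya's camera lockTransform flag, e.g.
+        # (hypothetical camera path):
+        #     cmds.camera("|shot_cam|shot_camShape",
+        #                 edit=True, lockTransform=True)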
+ members = get_container_members(container) + self._lock_camera_transforms(members) + + def _post_process_rig(self, namespace, context, options): + + nodes = self[:] + create_rig_animation_instance( + nodes, context, namespace, options=options, log=self.log + ) + + def _lock_camera_transforms(self, nodes): + cameras = cmds.ls(nodes, type="camera") + if not cameras: + return + + # Check the Maya version, lockTransform has been introduced since + # Maya 2016.5 Ext 2 + version = int(cmds.about(version=True)) + if version >= 2016: + for camera in cameras: + cmds.camera(camera, edit=True, lockTransform=True) + else: + self.log.warning("This version of Maya does not support locking of" + " transforms of cameras.") + + +class MayaUSDReferenceLoader(ReferenceLoader): + """Reference USD file to native Maya nodes using MayaUSDImport reference""" + + label = "Reference Maya USD" + families = ["usd"] + representations = ["usd"] + extensions = {"usd", "usda", "usdc"} + + options = ReferenceLoader.options + [ + qargparse.Boolean( + "readAnimData", + label="Load anim data", + default=True, + help="Load animation data from USD file" + ), + qargparse.Boolean( + "useAsAnimationCache", + label="Use as animation cache", + default=True, + help=( + "Imports geometry prims with time-sampled point data using a " + "point-based deformer that references the imported " + "USD file.\n" + "This provides better import and playback performance when " + "importing time-sampled geometry from USD, and should " + "reduce the weight of the resulting Maya scene." + ) + ), + qargparse.Boolean( + "importInstances", + label="Import instances", + default=True, + help=( + "Import USD instanced geometries as Maya instanced shapes. " + "Will flatten the scene otherwise." + ) + ), + qargparse.String( + "primPath", + label="Prim Path", + default="/", + help=( + "Name of the USD scope where traversing will begin.\n" + "The prim at the specified primPath (including the prim) will " + "be imported.\n" + "Specifying the pseudo-root (/) means you want " + "to import everything in the file.\n" + "If the passed prim path is empty, it will first try to " + "import the defaultPrim for the rootLayer if it exists.\n" + "Otherwise, it will behave as if the pseudo-root was passed " + "in." 
+ ) + ) + ] + + file_type = "USD Import" + + def process_reference(self, context, name, namespace, options): + cmds.loadPlugin("mayaUsdPlugin", quiet=True) + + def bool_option(key, default): + # Shorthand for getting optional boolean file option from options + value = int(bool(options.get(key, default))) + return "{}={}".format(key, value) + + def string_option(key, default): + # Shorthand for getting optional string file option from options + value = str(options.get(key, default)) + return "{}={}".format(key, value) + + options["file_options"] = ";".join([ + string_option("primPath", default="/"), + bool_option("importInstances", default=True), + bool_option("useAsAnimationCache", default=True), + bool_option("readAnimData", default=True), + # TODO: Expose more parameters + # "preferredMaterial=none", + # "importRelativeTextures=Automatic", + # "useCustomFrameRange=0", + # "startTime=0", + # "endTime=0", + # "importUSDZTextures=0" + ]) + options["file_type"] = self.file_type + + return super(MayaUSDReferenceLoader, self).process_reference( + context, name, namespace, options + ) diff --git a/openpype/hosts/maya/plugins/load/load_rendersetup.py b/client/ayon_core/hosts/maya/plugins/load/load_rendersetup.py similarity index 94% rename from openpype/hosts/maya/plugins/load/load_rendersetup.py rename to client/ayon_core/hosts/maya/plugins/load/load_rendersetup.py index 8b85f11958..e77e270663 100644 --- a/openpype/hosts/maya/plugins/load/load_rendersetup.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_rendersetup.py @@ -10,12 +10,12 @@ import sys import six -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path ) -from openpype.hosts.maya.api import lib -from openpype.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.pipeline import containerise from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -35,7 +35,7 @@ class RenderSetupLoader(load.LoaderPlugin): def load(self, context, name, namespace, data): """Load RenderSetup settings.""" - # from openpype.hosts.maya.api.lib import namespaced + # from ayon_core.hosts.maya.api.lib import namespaced asset = context['asset']['name'] namespace = namespace or lib.unique_namespace( diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py b/client/ayon_core/hosts/maya/plugins/load/load_vdb_to_arnold.py similarity index 95% rename from openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py rename to client/ayon_core/hosts/maya/plugins/load/load_vdb_to_arnold.py index 0f674a69c4..98f98330d7 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_arnold.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_vdb_to_arnold.py @@ -1,7 +1,7 @@ import os -from openpype.settings import get_project_settings -from openpype.pipeline import ( +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( load, get_representation_path ) @@ -21,8 +21,8 @@ class LoadVDBtoArnold(load.LoaderPlugin): def load(self, context, name, namespace, data): from maya import cmds - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace + from ayon_core.hosts.maya.api.pipeline import containerise + from ayon_core.hosts.maya.api.lib import unique_namespace try: family = context["representation"]["context"]["family"] diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py 
b/client/ayon_core/hosts/maya/plugins/load/load_vdb_to_redshift.py similarity index 95% rename from openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py rename to client/ayon_core/hosts/maya/plugins/load/load_vdb_to_redshift.py index 28cfdc7129..426e85cf7c 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_vdb_to_redshift.py @@ -1,7 +1,7 @@ import os -from openpype.settings import get_project_settings -from openpype.pipeline import ( +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( load, get_representation_path ) @@ -27,8 +27,8 @@ class LoadVDBtoRedShift(load.LoaderPlugin): def load(self, context, name=None, namespace=None, data=None): from maya import cmds - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace + from ayon_core.hosts.maya.api.pipeline import containerise + from ayon_core.hosts.maya.api.lib import unique_namespace try: family = context["representation"]["context"]["family"] diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py b/client/ayon_core/hosts/maya/plugins/load/load_vdb_to_vray.py similarity index 97% rename from openpype/hosts/maya/plugins/load/load_vdb_to_vray.py rename to client/ayon_core/hosts/maya/plugins/load/load_vdb_to_vray.py index 46f2dd674d..ca0519900b 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_vdb_to_vray.py @@ -1,7 +1,7 @@ import os -from openpype.settings import get_project_settings -from openpype.pipeline import ( +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( load, get_representation_path ) @@ -85,8 +85,8 @@ class LoadVDBtoVRay(load.LoaderPlugin): def load(self, context, name, namespace, data): - from openpype.hosts.maya.api.lib import unique_namespace - from openpype.hosts.maya.api.pipeline import containerise + from ayon_core.hosts.maya.api.lib import unique_namespace + from ayon_core.hosts.maya.api.pipeline import containerise path = self.filepath_from_context(context) assert os.path.exists(path), ( @@ -166,7 +166,7 @@ def load(self, context, name, namespace, data): def _set_path(self, grid_node, path, show_preset_popup=True): - from openpype.hosts.maya.api.lib import attribute_values + from ayon_core.hosts.maya.api.lib import attribute_values from maya import cmds def _get_filename_from_folder(path): diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/client/ayon_core/hosts/maya/plugins/load/load_vrayproxy.py similarity index 96% rename from openpype/hosts/maya/plugins/load/load_vrayproxy.py rename to client/ayon_core/hosts/maya/plugins/load/load_vrayproxy.py index 9d926a33ed..9b36303b64 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_vrayproxy.py @@ -9,19 +9,19 @@ import maya.cmds as cmds -from openpype.client import get_representation_by_name -from openpype.settings import get_project_settings -from openpype.pipeline import ( +from ayon_core.client import get_representation_by_name +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( load, get_current_project_name, get_representation_path, ) -from openpype.hosts.maya.api.lib import ( +from ayon_core.hosts.maya.api.lib import ( maintained_selection, namespaced, unique_namespace ) -from openpype.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.pipeline import 
containerise class VRayProxyLoader(load.LoaderPlugin): diff --git a/openpype/hosts/maya/plugins/load/load_vrayscene.py b/client/ayon_core/hosts/maya/plugins/load/load_vrayscene.py similarity index 96% rename from openpype/hosts/maya/plugins/load/load_vrayscene.py rename to client/ayon_core/hosts/maya/plugins/load/load_vrayscene.py index 3a2c3a47f2..92d2b32549 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayscene.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_vrayscene.py @@ -1,17 +1,17 @@ # -*- coding: utf-8 -*- import os import maya.cmds as cmds # noqa -from openpype.settings import get_project_settings -from openpype.pipeline import ( +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( load, get_representation_path ) -from openpype.hosts.maya.api.lib import ( +from ayon_core.hosts.maya.api.lib import ( maintained_selection, namespaced, unique_namespace ) -from openpype.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api.pipeline import containerise class VRaySceneLoader(load.LoaderPlugin): diff --git a/openpype/hosts/maya/plugins/load/load_xgen.py b/client/ayon_core/hosts/maya/plugins/load/load_xgen.py similarity index 96% rename from openpype/hosts/maya/plugins/load/load_xgen.py rename to client/ayon_core/hosts/maya/plugins/load/load_xgen.py index 2ad6ad55bc..4c38835350 100644 --- a/openpype/hosts/maya/plugins/load/load_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_xgen.py @@ -6,18 +6,18 @@ from qtpy import QtWidgets -import openpype.hosts.maya.api.plugin -from openpype.hosts.maya.api.lib import ( +import ayon_core.hosts.maya.api.plugin +from ayon_core.hosts.maya.api.lib import ( maintained_selection, get_container_members, attribute_values, write_xgen_file ) -from openpype.hosts.maya.api import current_file -from openpype.pipeline import get_representation_path +from ayon_core.hosts.maya.api import current_file +from ayon_core.pipeline import get_representation_path -class XgenLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): +class XgenLoader(ayon_core.hosts.maya.api.plugin.ReferenceLoader): """Load Xgen as reference""" families = ["xgen"] diff --git a/openpype/hosts/maya/plugins/load/load_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py similarity index 98% rename from openpype/hosts/maya/plugins/load/load_yeti_cache.py rename to client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py index 4a11ea9a2c..d2fc1c0ab0 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_cache.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py @@ -6,13 +6,13 @@ import clique from maya import cmds -from openpype.settings import get_project_settings -from openpype.pipeline import ( +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import ( load, get_representation_path ) -from openpype.hosts.maya.api import lib -from openpype.hosts.maya.api.pipeline import containerise +from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.pipeline import containerise # Do not reset these values on update but only apply on first load diff --git a/openpype/hosts/maya/plugins/load/load_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py similarity index 87% rename from openpype/hosts/maya/plugins/load/load_yeti_rig.py rename to client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py index 6cfcffe27d..2572e550e2 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_rig.py +++ 
b/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py @@ -1,11 +1,11 @@ import maya.cmds as cmds -from openpype.settings import get_current_project_settings -import openpype.hosts.maya.api.plugin -from openpype.hosts.maya.api import lib +from ayon_core.settings import get_current_project_settings +import ayon_core.hosts.maya.api.plugin +from ayon_core.hosts.maya.api import lib -class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): +class YetiRigLoader(ayon_core.hosts.maya.api.plugin.ReferenceLoader): """This loader will load Yeti rig.""" families = ["yetiRig"] diff --git a/openpype/hosts/maya/plugins/__init__.py b/client/ayon_core/hosts/maya/plugins/publish/__init__.py similarity index 100% rename from openpype/hosts/maya/plugins/__init__.py rename to client/ayon_core/hosts/maya/plugins/publish/__init__.py diff --git a/openpype/hosts/maya/plugins/publish/collect_animation.py b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_animation.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_animation.py diff --git a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py index 90079c715a..2d621353e6 100644 --- a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -1,7 +1,7 @@ from maya import cmds import pyblish.api -from openpype.hosts.maya.api.lib import get_all_children +from ayon_core.hosts.maya.api.lib import get_all_children class CollectArnoldSceneSource(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/collect_assembly.py b/client/ayon_core/hosts/maya/plugins/publish/collect_assembly.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/collect_assembly.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_assembly.py index f64d6bee44..eebbbd4447 100644 --- a/openpype/hosts/maya/plugins/publish/collect_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_assembly.py @@ -2,8 +2,8 @@ import pyblish.api from maya import cmds, mel -from openpype.hosts.maya import api -from openpype.hosts.maya.api import lib +from ayon_core.hosts.maya import api +from ayon_core.hosts.maya.api import lib # TODO : Publish of assembly: -unique namespace for all assets, VALIDATOR! 
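Most of the hunks in this file are the same mechanical rename: `openpype.*` imports and attribute references become `ayon_core.*`. A rewrite of that shape can be scripted; the sketch below is illustrative only, assuming a plain textual substitution of the package name is safe for every occurrence (the hunks elsewhere in this diff suggest it is, since even commented-out imports were renamed):

    import pathlib
    import re

    # Match "openpype" as a whole word, e.g. in "from openpype.pipeline
    # import load" or "openpype.hosts.maya.api.plugin.ReferenceLoader".
    PATTERN = re.compile(r"\bopenpype\b")

    def rewrite_package_references(root):
        # Rewrite every .py file under `root` in place.
        for path in pathlib.Path(root).rglob("*.py"):
            text = path.read_text()
            new_text = PATTERN.sub("ayon_core", text)
            if new_text != text:
                path.write_text(new_text)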
diff --git a/openpype/hosts/maya/plugins/publish/collect_current_file.py b/client/ayon_core/hosts/maya/plugins/publish/collect_current_file.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_current_file.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_current_file.py diff --git a/openpype/hosts/maya/plugins/publish/collect_fbx_animation.py b/client/ayon_core/hosts/maya/plugins/publish/collect_fbx_animation.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/collect_fbx_animation.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_fbx_animation.py index aef8765e9c..d8fd7a16e8 100644 --- a/openpype/hosts/maya/plugins/publish/collect_fbx_animation.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_fbx_animation.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from maya import cmds # noqa import pyblish.api -from openpype.pipeline import OptionalPyblishPluginMixin +from ayon_core.pipeline import OptionalPyblishPluginMixin class CollectFbxAnimation(pyblish.api.InstancePlugin, diff --git a/openpype/hosts/maya/plugins/publish/collect_fbx_camera.py b/client/ayon_core/hosts/maya/plugins/publish/collect_fbx_camera.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_fbx_camera.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_fbx_camera.py diff --git a/openpype/hosts/maya/plugins/publish/collect_file_dependencies.py b/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_file_dependencies.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py diff --git a/openpype/hosts/maya/plugins/publish/collect_gltf.py b/client/ayon_core/hosts/maya/plugins/publish/collect_gltf.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_gltf.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_gltf.py diff --git a/openpype/hosts/maya/plugins/publish/collect_history.py b/client/ayon_core/hosts/maya/plugins/publish/collect_history.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_history.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_history.py diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_inputs.py b/client/ayon_core/hosts/maya/plugins/publish/collect_inputs.py new file mode 100644 index 0000000000..d0b1029a03 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_inputs.py @@ -0,0 +1,213 @@ +import copy + +from maya import cmds +import maya.api.OpenMaya as om +import pyblish.api + +from ayon_core.pipeline import registered_host +from ayon_core.hosts.maya.api.lib import get_container_members +from ayon_core.hosts.maya.api.lib_rendersetup import get_shader_in_layer + + +def iter_history(nodes, + filter=om.MFn.kInvalid, + direction=om.MItDependencyGraph.kUpstream): + """Iterate unique upstream history for list of nodes. + + This acts as a replacement to maya.cmds.listHistory. + It's faster by about 2x-3x. It returns less than + maya.cmds.listHistory as it excludes the input nodes + from the output (unless an input node was history + for another input node). It also excludes duplicates. + + Args: + nodes (list): Maya node names to start search from. + filter (om.MFn.Type): Filter to only specific types. + e.g. to dag nodes using om.MFn.kDagNode + direction (om.MItDependencyGraph.Direction): Direction to traverse in. + Defaults to upstream. 
+ + Yields: + str: Node names in upstream history. + + """ + if not nodes: + return + + sel = om.MSelectionList() + for node in nodes: + sel.add(node) + + it = om.MItDependencyGraph(sel.getDependNode(0)) # init iterator + handle = om.MObjectHandle + + traversed = set() + fn_dep = om.MFnDependencyNode() + fn_dag = om.MFnDagNode() + for i in range(sel.length()): + + start_node = sel.getDependNode(i) + start_node_hash = handle(start_node).hashCode() + if start_node_hash in traversed: + continue + + it.resetTo(start_node, + filter=filter, + direction=direction) + while not it.isDone(): + + node = it.currentNode() + node_hash = handle(node).hashCode() + + if node_hash in traversed: + it.prune() + it.next() # noqa: B305 + continue + + traversed.add(node_hash) + + if node.hasFn(om.MFn.kDagNode): + fn_dag.setObject(node) + yield fn_dag.fullPathName() + else: + fn_dep.setObject(node) + yield fn_dep.name() + + it.next() # noqa: B305 + + +def collect_input_containers(containers, nodes): + """Collect containers that contain any of the node in `nodes`. + + This will return any loaded Avalon container that contains at least one of + the nodes. As such, the Avalon container is an input for it. Or in short, + there are member nodes of that container. + + Returns: + list: Input avalon containers + + """ + # Assume the containers have collected their cached '_members' data + # in the collector. + return [container for container in containers + if any(node in container["_members"] for node in nodes)] + + +class CollectUpstreamInputs(pyblish.api.InstancePlugin): + """Collect input source inputs for this publish. + + This will include `inputs` data of which loaded publishes were used in the + generation of this publish. This leaves an upstream trace to what was used + as input. + + """ + + label = "Collect Inputs" + order = pyblish.api.CollectorOrder + 0.34 + hosts = ["maya"] + + def process(self, instance): + + # For large scenes the querying of "host.ls()" can be relatively slow + # e.g. up to a second. Many instances calling it easily slows this + # down. As such, we cache it so we trigger it only once. + # todo: Instead of hidden cache make "CollectContainers" plug-in + cache_key = "__cache_containers" + scene_containers = instance.context.data.get(cache_key, None) + if scene_containers is None: + # Query the scenes' containers if there's no cache yet + host = registered_host() + scene_containers = list(host.ls()) + for container in scene_containers: + # Embed the members into the container dictionary + container_members = set(get_container_members(container)) + container["_members"] = container_members + instance.context.data["__cache_containers"] = scene_containers + + # Collect the relevant input containers for this instance + if "renderlayer" in set(instance.data.get("families", [])): + # Special behavior for renderlayers + self.log.debug("Collecting renderlayer inputs....") + containers = self._collect_renderlayer_inputs(scene_containers, + instance) + + else: + # Basic behavior + nodes = instance[:] + + # Include any input connections of history with long names + # For optimization purposes only trace upstream from shape nodes + # looking for used dag nodes. This way having just a constraint + # on a transform is also ignored which tended to give irrelevant + # inputs for the majority of our use cases. We tend to care more + # about geometry inputs. 
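+            # Example use of the `iter_history` helper above (illustrative;
+            # assumes the scene contains a deformed sphere):
+            #     shape = "|pSphere1|pSphereShape1"
+            #     upstream = list(iter_history([shape],
+            #                                  filter=om.MFn.kShape))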
+ shapes = cmds.ls(nodes, + type=("mesh", "nurbsSurface", "nurbsCurve"), + noIntermediate=True) + if shapes: + history = list(iter_history(shapes, filter=om.MFn.kShape)) + history = cmds.ls(history, long=True) + + # Include the transforms in the collected history as shapes + # are excluded from containers + transforms = cmds.listRelatives(cmds.ls(history, shapes=True), + parent=True, + fullPath=True, + type="transform") + if transforms: + history.extend(transforms) + + if history: + nodes = list(set(nodes + history)) + + # Collect containers for the given set of nodes + containers = collect_input_containers(scene_containers, + nodes) + + inputs = [c["representation"] for c in containers] + instance.data["inputRepresentations"] = inputs + self.log.debug("Collected inputs: %s" % inputs) + + def _collect_renderlayer_inputs(self, scene_containers, instance): + """Collects inputs from nodes in renderlayer, incl. shaders + camera""" + + # Get the renderlayer + renderlayer = instance.data.get("renderlayer") + + if renderlayer == "defaultRenderLayer": + # Assume all loaded containers in the scene are inputs + # for the masterlayer + return copy.deepcopy(scene_containers) + else: + # Get the members of the layer + members = cmds.editRenderLayerMembers(renderlayer, + query=True, + fullNames=True) or [] + + # In some cases invalid objects are returned from + # `editRenderLayerMembers` so we filter them out + members = cmds.ls(members, long=True) + + # Include all children + children = cmds.listRelatives(members, + allDescendents=True, + fullPath=True) or [] + members.extend(children) + + # Include assigned shaders in renderlayer + shapes = cmds.ls(members, shapes=True, long=True) + shaders = set() + for shape in shapes: + shape_shaders = get_shader_in_layer(shape, layer=renderlayer) + if not shape_shaders: + continue + shaders.update(shape_shaders) + members.extend(shaders) + + # Explicitly include the camera being rendered in renderlayer + cameras = instance.data.get("cameras") + members.extend(cameras) + + containers = collect_input_containers(scene_containers, members) + + return containers diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_instances.py b/client/ayon_core/hosts/maya/plugins/publish/collect_instances.py new file mode 100644 index 0000000000..0b29851db0 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_instances.py @@ -0,0 +1,110 @@ +from maya import cmds + +import pyblish.api +from ayon_core.hosts.maya.api.lib import get_all_children + + +class CollectNewInstances(pyblish.api.InstancePlugin): + """Gather members for instances and pre-defined attribute + + This collector takes into account assets that are associated with + an objectSet and marked with a unique identifier; + + Identifier: + id (str): "pyblish.avalon.instance" + + Limitations: + - Does not take into account nodes connected to those + within an objectSet. Extractors are assumed to export + with history preserved, but this limits what they will + be able to achieve and the amount of data available + to validators. An additional collector could also + append this input data into the instance, as we do + for `pype.rig` with collect_history. 
+ + """ + + label = "Collect New Instance Data" + order = pyblish.api.CollectorOrder + hosts = ["maya"] + + valid_empty_families = {"workfile", "renderlayer"} + + def process(self, instance): + + objset = instance.data.get("instance_node") + if not objset: + self.log.debug("Instance has no `instance_node` data") + + # TODO: We might not want to do this in the future + # Merge creator attributes into instance.data just backwards compatible + # code still runs as expected + creator_attributes = instance.data.get("creator_attributes", {}) + if creator_attributes: + instance.data.update(creator_attributes) + + members = cmds.sets(objset, query=True) or [] + if members: + # Collect members + members = cmds.ls(members, long=True) or [] + + dag_members = cmds.ls(members, type="dagNode", long=True) + children = get_all_children(dag_members) + children = cmds.ls(children, noIntermediate=True, long=True) + parents = ( + self.get_all_parents(members) + if creator_attributes.get("includeParentHierarchy", True) + else [] + ) + members_hierarchy = list(set(members + children + parents)) + + instance[:] = members_hierarchy + + elif instance.data["family"] not in self.valid_empty_families: + self.log.warning("Empty instance: \"%s\" " % objset) + # Store the exact members of the object set + instance.data["setMembers"] = members + + # TODO: This might make more sense as a separate collector + # Convert frame values to integers + for attr_name in ( + "handleStart", "handleEnd", "frameStart", "frameEnd", + ): + value = instance.data.get(attr_name) + if value is not None: + instance.data[attr_name] = int(value) + + # Append start frame and end frame to label if present + if "frameStart" in instance.data and "frameEnd" in instance.data: + # Take handles from context if not set locally on the instance + for key in ["handleStart", "handleEnd"]: + if key not in instance.data: + value = instance.context.data[key] + if value is not None: + value = int(value) + instance.data[key] = value + + instance.data["frameStartHandle"] = int( + instance.data["frameStart"] - instance.data["handleStart"] + ) + instance.data["frameEndHandle"] = int( + instance.data["frameEnd"] + instance.data["handleEnd"] + ) + + def get_all_parents(self, nodes): + """Get all parents by using string operations (optimization) + + Args: + nodes (list): the nodes which are found in the objectSet + + Returns: + list + """ + + parents = [] + for node in nodes: + splitted = node.split("|") + items = ["|".join(splitted[0:i]) for i in range(2, len(splitted))] + parents.extend(items) + + return list(set(parents)) diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/client/ayon_core/hosts/maya/plugins/publish/collect_look.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/collect_look.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_look.py index 72682f7800..00e1855b19 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_look.py @@ -6,7 +6,7 @@ from maya import cmds # noqa import pyblish.api -from openpype.hosts.maya.api import lib +from ayon_core.hosts.maya.api import lib SHAPE_ATTRS = ["castsShadows", "receiveShadows", diff --git a/openpype/hosts/maya/plugins/publish/collect_maya_scene_time.py b/client/ayon_core/hosts/maya/plugins/publish/collect_maya_scene_time.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_maya_scene_time.py rename to 
client/ayon_core/hosts/maya/plugins/publish/collect_maya_scene_time.py diff --git a/openpype/hosts/maya/plugins/publish/collect_maya_units.py b/client/ayon_core/hosts/maya/plugins/publish/collect_maya_units.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_maya_units.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_maya_units.py diff --git a/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py b/client/ayon_core/hosts/maya/plugins/publish/collect_maya_workspace.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_maya_workspace.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_maya_workspace.py diff --git a/openpype/hosts/maya/plugins/publish/collect_model.py b/client/ayon_core/hosts/maya/plugins/publish/collect_model.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_model.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_model.py diff --git a/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py b/client/ayon_core/hosts/maya/plugins/publish/collect_multiverse_look.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/collect_multiverse_look.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_multiverse_look.py index bcb979edfc..31c0d0eaa1 100644 --- a/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_multiverse_look.py @@ -4,7 +4,7 @@ from maya import cmds import pyblish.api -from openpype.hosts.maya.api import lib +from ayon_core.hosts.maya.api import lib SHAPE_ATTRS = ["castsShadows", "receiveShadows", diff --git a/openpype/hosts/maya/plugins/publish/collect_pointcache.py b/client/ayon_core/hosts/maya/plugins/publish/collect_pointcache.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_pointcache.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_pointcache.py diff --git a/openpype/hosts/maya/plugins/publish/collect_remove_marked.py b/client/ayon_core/hosts/maya/plugins/publish/collect_remove_marked.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_remove_marked.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_remove_marked.py diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_render.py b/client/ayon_core/hosts/maya/plugins/publish/collect_render.py new file mode 100644 index 0000000000..e4221a091c --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_render.py @@ -0,0 +1,340 @@ +# -*- coding: utf-8 -*- +"""Collect render data. + +This collector will go through render layers in maya and prepare all data +needed to create instances and their representations for submission and +publishing on farm. 
+ +Requires: + instance -> families + instance -> setMembers + + context -> currentFile + context -> workspaceDir + context -> user + + session -> AVALON_ASSET + +Optional: + +Provides: + instance -> label + instance -> subset + instance -> attachTo + instance -> setMembers + instance -> publish + instance -> frameStart + instance -> frameEnd + instance -> byFrameStep + instance -> renderer + instance -> family + instance -> families + instance -> asset + instance -> time + instance -> author + instance -> source + instance -> expectedFiles + instance -> resolutionWidth + instance -> resolutionHeight + instance -> pixelAspect +""" + +import os +import platform +import json + +from maya import cmds + +import pyblish.api + +from ayon_core.pipeline import KnownPublishError +from ayon_core.lib import get_formatted_current_time +from ayon_core.hosts.maya.api.lib_renderproducts import ( + get as get_layer_render_products, + UnsupportedRendererException +) +from ayon_core.hosts.maya.api import lib + + +class CollectMayaRender(pyblish.api.InstancePlugin): + """Gather all publishable render layers from renderSetup.""" + + order = pyblish.api.CollectorOrder + 0.01 + hosts = ["maya"] + families = ["renderlayer"] + label = "Collect Render Layers" + sync_workfile_version = False + + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + def process(self, instance): + + # TODO: Re-add force enable of workfile instance? + # TODO: Re-add legacy layer support with LAYER_ prefix but in Creator + # TODO: Set and collect active state of RenderLayer in Creator using + # renderlayer.isRenderable() + context = instance.context + + layer = instance.data["transientData"]["layer"] + objset = instance.data.get("instance_node") + filepath = context.data["currentFile"].replace("\\", "/") + workspace = context.data["workspaceDir"] + + # check if layer is renderable + if not layer.isRenderable(): + msg = "Render layer [ {} ] is not " "renderable".format( + layer.name() + ) + self.log.warning(msg) + + # detect if there are sets (subsets) to attach render to + sets = cmds.sets(objset, query=True) or [] + attach_to = [] + for s in sets: + if not cmds.attributeQuery("family", node=s, exists=True): + continue + + attach_to.append( + { + "version": None, # we need integrator for that + "subset": s, + "family": cmds.getAttr("{}.family".format(s)), + } + ) + self.log.debug(" -> attach render to: {}".format(s)) + + layer_name = layer.name() + + # collect all frames we are expecting to be rendered + # return all expected files for all cameras and aovs in given + # frame range + try: + layer_render_products = get_layer_render_products(layer.name()) + except UnsupportedRendererException as exc: + raise KnownPublishError(exc) + render_products = layer_render_products.layer_data.products + assert render_products, "no render products generated" + expected_files = [] + multipart = False + for product in render_products: + if product.multipart: + multipart = True + product_name = product.productName + if product.camera and layer_render_products.has_camera_token(): + product_name = "{}{}".format( + product.camera, + "_{}".format(product_name) if product_name else "") + expected_files.append( + { + product_name: layer_render_products.get_files( + product) + }) + + has_cameras = any(product.camera for product in render_products) + assert has_cameras, "No render cameras found." 
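+        # At this point `expected_files` holds one mapping per render
+        # product of product name to expected file names, e.g.
+        # (illustrative):
+        #     [{"persp_beauty": ["sh010_beauty.1001.exr", "..."]},
+        #      {"persp_specular": ["..."]}]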
+ + self.log.debug("multipart: {}".format( + multipart)) + assert expected_files, "no file names were generated, this is a bug" + self.log.debug( + "expected files: {}".format( + json.dumps(expected_files, indent=4, sort_keys=True) + ) + ) + + # if we want to attach render to subset, check if we have AOV's + # in expectedFiles. If so, raise error as we cannot attach AOV + # (considered to be subset on its own) to another subset + if attach_to: + assert isinstance(expected_files, list), ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported" + ) + + # append full path + aov_dict = {} + image_directory = os.path.join( + cmds.workspace(query=True, rootDirectory=True), + cmds.workspace(fileRuleEntry="images") + ) + # replace relative paths with absolute. Render products are + # returned as list of dictionaries. + publish_meta_path = None + for aov in expected_files: + full_paths = [] + aov_first_key = list(aov.keys())[0] + for file in aov[aov_first_key]: + full_path = os.path.join(image_directory, file) + full_path = full_path.replace("\\", "/") + full_paths.append(full_path) + publish_meta_path = os.path.dirname(full_path) + aov_dict[aov_first_key] = full_paths + full_exp_files = [aov_dict] + self.log.debug(full_exp_files) + + if publish_meta_path is None: + raise KnownPublishError("Unable to detect any expected output " + "images for: {}. Make sure you have a " + "renderable camera and a valid frame " + "range set for your renderlayer." + "".format(instance.name)) + + frame_start_render = int(self.get_render_attribute( + "startFrame", layer=layer_name)) + frame_end_render = int(self.get_render_attribute( + "endFrame", layer=layer_name)) + + if (int(context.data["frameStartHandle"]) == frame_start_render + and int(context.data["frameEndHandle"]) == frame_end_render): # noqa: W503, E501 + + handle_start = context.data["handleStart"] + handle_end = context.data["handleEnd"] + frame_start = context.data["frameStart"] + frame_end = context.data["frameEnd"] + frame_start_handle = context.data["frameStartHandle"] + frame_end_handle = context.data["frameEndHandle"] + else: + handle_start = 0 + handle_end = 0 + frame_start = frame_start_render + frame_end = frame_end_render + frame_start_handle = frame_start_render + frame_end_handle = frame_end_render + + # find common path to store metadata + # so if image prefix is branching to many directories + # metadata file will be located in top-most common + # directory. 
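+        # For example (paths invented): products written to
+        #   .../renders/maya/Layer_CHAR/beauty and
+        #   .../renders/maya/Layer_CHAR/aovZ
+        # share .../renders/maya/Layer_CHAR, which becomes the metadata
+        # folder because the walk below stops at the render layer name.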
+ # TODO: use `os.path.commonpath()` after switch to Python 3 + publish_meta_path = os.path.normpath(publish_meta_path) + common_publish_meta_path = os.path.splitdrive( + publish_meta_path)[0] + if common_publish_meta_path: + common_publish_meta_path += os.path.sep + for part in publish_meta_path.replace( + common_publish_meta_path, "").split(os.path.sep): + common_publish_meta_path = os.path.join( + common_publish_meta_path, part) + if part == layer_name: + break + + # TODO: replace this terrible linux hotfix with real solution :) + if platform.system().lower() in ["linux", "darwin"]: + common_publish_meta_path = "/" + common_publish_meta_path + + self.log.debug( + "Publish meta path: {}".format(common_publish_meta_path)) + + # Get layer specific settings, might be overrides + colorspace_data = lib.get_color_management_preferences() + data = { + "farm": True, + "attachTo": attach_to, + + "multipartExr": multipart, + "review": instance.data.get("review") or False, + + # Frame range + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartHandle": frame_start_handle, + "frameEndHandle": frame_end_handle, + "byFrameStep": int( + self.get_render_attribute("byFrameStep", + layer=layer_name)), + + # Renderlayer + "renderer": self.get_render_attribute( + "currentRenderer", layer=layer_name).lower(), + "setMembers": layer._getLegacyNodeName(), # legacy renderlayer + "renderlayer": layer_name, + + # todo: is `time` and `author` still needed? + "time": get_formatted_current_time(), + "author": context.data["user"], + + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": filepath, + "expectedFiles": full_exp_files, + "publishRenderMetadataFolder": common_publish_meta_path, + "renderProducts": layer_render_products, + "resolutionWidth": lib.get_attr_in_layer( + "defaultResolution.width", layer=layer_name + ), + "resolutionHeight": lib.get_attr_in_layer( + "defaultResolution.height", layer=layer_name + ), + "pixelAspect": lib.get_attr_in_layer( + "defaultResolution.pixelAspect", layer=layer_name + ), + + # todo: Following are likely not needed due to collecting from the + # instance itself if they are attribute definitions + "tileRendering": instance.data.get("tileRendering") or False, # noqa: E501 + "tilesX": instance.data.get("tilesX") or 2, + "tilesY": instance.data.get("tilesY") or 2, + "convertToScanline": instance.data.get( + "convertToScanline") or False, + "useReferencedAovs": instance.data.get( + "useReferencedAovs") or instance.data.get( + "vrayUseReferencedAovs") or False, + "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 + "renderSetupIncludeLights": instance.data.get( + "renderSetupIncludeLights" + ), + "colorspaceConfig": colorspace_data["config"], + "colorspaceDisplay": colorspace_data["display"], + "colorspaceView": colorspace_data["view"], + } + + rr_settings = ( + context.data["system_settings"]["modules"]["royalrender"] + ) + if rr_settings["enabled"]: + data["rrPathName"] = instance.data.get("rrPathName") + self.log.debug(data["rrPathName"]) + + if self.sync_workfile_version: + data["version"] = context.data["version"] + for _instance in context: + if _instance.data['family'] == "workfile": + _instance.data["version"] = context.data["version"] + + # Define nice label + label = "{0} ({1})".format(layer_name, instance.data["asset"]) + label += " [{0}-{1}]".format( + int(data["frameStartHandle"]), int(data["frameEndHandle"]) + ) + 
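+        # Illustrative label result (names invented):
+        #   "Layer_CHAR (sh010) [995-1024]"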
data["label"] = label + + # Override frames should be False if extendFrames is False. This is + # to ensure it doesn't go off doing crazy unpredictable things + extend_frames = instance.data.get("extendFrames", False) + if not extend_frames: + instance.data["overrideExistingFrame"] = False + + # Update the instace + instance.data.update(data) + + @staticmethod + def get_render_attribute(attr, layer): + """Get attribute from render options. + + Args: + attr (str): name of attribute to be looked up + layer (str): name of render layer + + Returns: + Attribute value + + """ + return lib.get_attr_in_layer( + "defaultRenderGlobals.{}".format(attr), layer=layer + ) diff --git a/openpype/hosts/maya/plugins/publish/collect_render_layer_aovs.py b/client/ayon_core/hosts/maya/plugins/publish/collect_render_layer_aovs.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/collect_render_layer_aovs.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_render_layer_aovs.py index 035c531a9b..585eca5dce 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render_layer_aovs.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_render_layer_aovs.py @@ -2,7 +2,7 @@ import pyblish.api -from openpype.hosts.maya.api import lib +from ayon_core.hosts.maya.api import lib class CollectRenderLayerAOVS(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/collect_renderable_camera.py b/client/ayon_core/hosts/maya/plugins/publish/collect_renderable_camera.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/collect_renderable_camera.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_renderable_camera.py index 4443e2e0db..97d857079b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_renderable_camera.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_renderable_camera.py @@ -2,7 +2,7 @@ from maya import cmds -from openpype.hosts.maya.api.lib_rendersetup import get_attr_in_layer +from ayon_core.hosts.maya.api.lib_rendersetup import get_attr_in_layer class CollectRenderableCamera(pyblish.api.InstancePlugin): diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_review.py b/client/ayon_core/hosts/maya/plugins/publish/collect_review.py new file mode 100644 index 0000000000..679a21243a --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_review.py @@ -0,0 +1,186 @@ +from maya import cmds, mel + +import pyblish.api + +from ayon_core.client import get_subset_by_name +from ayon_core.pipeline import KnownPublishError +from ayon_core.hosts.maya.api import lib + + +class CollectReview(pyblish.api.InstancePlugin): + """Collect Review data + + """ + + order = pyblish.api.CollectorOrder + 0.3 + label = 'Collect Review Data' + families = ["review"] + + def process(self, instance): + + # Get panel. + instance.data["panel"] = cmds.playblast( + activeEditor=True + ).rsplit("|", 1)[-1] + + # get cameras + members = instance.data['setMembers'] + self.log.debug('members: {}'.format(members)) + cameras = cmds.ls(members, long=True, dag=True, cameras=True) + camera = cameras[0] if cameras else None + + context = instance.context + objectset = { + i.data.get("instance_node") for i in context + } + + # Collect display lights. 
+ display_lights = instance.data.get("displayLights", "default") + if display_lights == "project_settings": + settings = instance.context.data["project_settings"] + settings = settings["maya"]["publish"]["ExtractPlayblast"] + settings = settings["capture_preset"]["Viewport Options"] + display_lights = settings["displayLights"] + + # Collect camera focal length. + burninDataMembers = instance.data.get("burninDataMembers", {}) + if camera is not None: + attr = camera + ".focalLength" + if lib.get_attribute_input(attr): + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + 1 + time_range = range(int(start), int(end)) + focal_length = [cmds.getAttr(attr, time=t) for t in time_range] + else: + focal_length = cmds.getAttr(attr) + + burninDataMembers["focalLength"] = focal_length + + # Account for nested instances like model. + reviewable_subsets = list(set(members) & objectset) + if reviewable_subsets: + if len(reviewable_subsets) > 1: + raise KnownPublishError( + "Multiple attached subsets for review are not supported. " + "Attached: {}".format(", ".join(reviewable_subsets)) + ) + + reviewable_subset = reviewable_subsets[0] + self.log.debug( + "Subset attached to review: {}".format(reviewable_subset) + ) + + # Find the relevant publishing instance in the current context + reviewable_inst = next(inst for inst in context + if inst.name == reviewable_subset) + data = reviewable_inst.data + + self.log.debug( + 'Adding review family to {}'.format(reviewable_subset) + ) + if data.get('families'): + data['families'].append('review') + else: + data['families'] = ['review'] + + data["cameras"] = cameras + data['review_camera'] = camera + data['frameStartFtrack'] = instance.data["frameStartHandle"] + data['frameEndFtrack'] = instance.data["frameEndHandle"] + data['frameStartHandle'] = instance.data["frameStartHandle"] + data['frameEndHandle'] = instance.data["frameEndHandle"] + data['handleStart'] = instance.data["handleStart"] + data['handleEnd'] = instance.data["handleEnd"] + data["frameStart"] = instance.data["frameStart"] + data["frameEnd"] = instance.data["frameEnd"] + data['step'] = instance.data['step'] + # this (with other time related data) should be set on + # representations. Once plugins like Extract Review start + # using representations, this should be removed from here + # as Extract Playblast is already adding fps to representation. 
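+            # Note: in this branch the review data is copied onto the
+            # nested publish instance found in the review set; the
+            # standalone review instance itself is discarded below via
+            # instance.data['remove'] = True.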
+            data['fps'] = context.data['fps'] + data['review_width'] = instance.data['review_width'] + data['review_height'] = instance.data['review_height'] + data["isolate"] = instance.data["isolate"] + data["panZoom"] = instance.data.get("panZoom", False) + data["panel"] = instance.data["panel"] + data["displayLights"] = display_lights + data["burninDataMembers"] = burninDataMembers + + for key, value in instance.data["publish_attributes"].items(): + data["publish_attributes"][key] = value + + # The review instance must be active + cmds.setAttr(str(instance) + '.active', 1) + + instance.data['remove'] = True + + else: + project_name = instance.context.data["projectName"] + asset_doc = instance.context.data['assetEntity'] + task = instance.context.data["task"] + legacy_subset_name = task + 'Review' + subset_doc = get_subset_by_name( + project_name, + legacy_subset_name, + asset_doc["_id"], + fields=["_id"] + ) + if subset_doc: + self.log.debug("Existing subsets found, keep legacy name.") + instance.data['subset'] = legacy_subset_name + + instance.data["cameras"] = cameras + instance.data['review_camera'] = camera + instance.data['frameStartFtrack'] = \ + instance.data["frameStartHandle"] + instance.data['frameEndFtrack'] = \ + instance.data["frameEndHandle"] + instance.data["displayLights"] = display_lights + instance.data["burninDataMembers"] = burninDataMembers + # this (with other time related data) should be set on + # representations. Once plugins like Extract Review start + # using representations, this should be removed from here + # as Extract Playblast is already adding fps to representation. + instance.data["fps"] = instance.context.data["fps"] + + # make ftrack publishable + instance.data.setdefault("families", []).append('ftrack') + + cmds.setAttr(str(instance) + '.active', 1) + + # Collect audio + playback_slider = mel.eval('$tmpVar=$gPlayBackSlider') + audio_name = cmds.timeControl(playback_slider, + query=True, + sound=True) + display_sounds = cmds.timeControl( + playback_slider, query=True, displaySound=True + ) + + def get_audio_node_data(node): + return { + "offset": cmds.getAttr("{}.offset".format(node)), + "filename": cmds.getAttr("{}.filename".format(node)) + } + + audio_data = [] + + if audio_name: + audio_data.append(get_audio_node_data(audio_name)) + + elif display_sounds: + start_frame = int(cmds.playbackOptions(query=True, min=True)) + end_frame = int(cmds.playbackOptions(query=True, max=True)) + + for node in cmds.ls(type="audio"): + # Check whether the frame range and the audio range + # intersect, to decide whether to include this audio node.
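+                # For example (numbers invented): an audio node with
+                # offset 10 and duration 50 spans frames [10, 60); with a
+                # playback range of 1-24 the check below passes and the
+                # node is included.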
+ duration = cmds.getAttr("{}.duration".format(node)) + start_audio = cmds.getAttr("{}.offset".format(node)) + end_audio = start_audio + duration + + if start_audio <= end_frame and end_audio > start_frame: + audio_data.append(get_audio_node_data(node)) + + instance.data["audio"] = audio_data diff --git a/openpype/hosts/maya/plugins/publish/collect_rig_sets.py b/client/ayon_core/hosts/maya/plugins/publish/collect_rig_sets.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_rig_sets.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_rig_sets.py diff --git a/openpype/hosts/maya/plugins/publish/collect_skeleton_mesh.py b/client/ayon_core/hosts/maya/plugins/publish/collect_skeleton_mesh.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_skeleton_mesh.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_skeleton_mesh.py diff --git a/openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py b/client/ayon_core/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_unreal_skeletalmesh.py diff --git a/openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py b/client/ayon_core/hosts/maya/plugins/publish/collect_unreal_staticmesh.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_unreal_staticmesh.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_unreal_staticmesh.py diff --git a/openpype/hosts/maya/plugins/publish/collect_user_defined_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_user_defined_attributes.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py b/client/ayon_core/hosts/maya/plugins/publish/collect_vrayproxy.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_vrayproxy.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_vrayproxy.py diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py b/client/ayon_core/hosts/maya/plugins/publish/collect_vrayscene.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/collect_vrayscene.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_vrayscene.py index b9181337a9..0efefe72c7 100644 --- a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_vrayscene.py @@ -7,9 +7,9 @@ import pyblish.api -from openpype.pipeline import legacy_io -from openpype.lib import get_formatted_current_time -from openpype.hosts.maya.api import lib +from ayon_core.pipeline import legacy_io +from ayon_core.lib import get_formatted_current_time +from ayon_core.hosts.maya.api import lib class CollectVrayScene(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/maya/plugins/publish/collect_workfile.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/collect_workfile.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_workfile.py diff --git a/openpype/hosts/maya/plugins/publish/collect_workscene_fps.py b/client/ayon_core/hosts/maya/plugins/publish/collect_workscene_fps.py similarity index 100% 
rename from openpype/hosts/maya/plugins/publish/collect_workscene_fps.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_workscene_fps.py diff --git a/openpype/hosts/maya/plugins/publish/collect_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/collect_xgen.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/collect_xgen.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_xgen.py index 45648e1776..f8f506376d 100644 --- a/openpype/hosts/maya/plugins/publish/collect_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_xgen.py @@ -3,7 +3,7 @@ from maya import cmds import pyblish.api -from openpype.hosts.maya.api.lib import get_attribute_input +from ayon_core.hosts.maya.api.lib import get_attribute_input class CollectXgen(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/collect_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/collect_yeti_cache.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py index 7a1516997a..067a7bc532 100644 --- a/openpype/hosts/maya/plugins/publish/collect_yeti_cache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py @@ -2,7 +2,7 @@ import pyblish.api -from openpype.hosts.maya.api import lib +from ayon_core.hosts.maya.api import lib SETTINGS = { diff --git a/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_rig.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/collect_yeti_rig.py rename to client/ayon_core/hosts/maya/plugins/publish/collect_yeti_rig.py index f82f7b69cd..8964e17f14 100644 --- a/openpype/hosts/maya/plugins/publish/collect_yeti_rig.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_rig.py @@ -5,8 +5,8 @@ import pyblish.api -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import KnownPublishError +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import KnownPublishError SETTINGS = {"renderDensity", diff --git a/openpype/hosts/maya/plugins/publish/determine_future_version.py b/client/ayon_core/hosts/maya/plugins/publish/determine_future_version.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/determine_future_version.py rename to client/ayon_core/hosts/maya/plugins/publish/determine_future_version.py diff --git a/openpype/hosts/maya/plugins/publish/extract_active_view_thumbnail.py b/client/ayon_core/hosts/maya/plugins/publish/extract_active_view_thumbnail.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/extract_active_view_thumbnail.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_active_view_thumbnail.py index 483ae6d9d3..b5054b4846 100644 --- a/openpype/hosts/maya/plugins/publish/extract_active_view_thumbnail.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_active_view_thumbnail.py @@ -4,7 +4,7 @@ import pyblish.api import tempfile -from openpype.hosts.maya.api.lib import IS_HEADLESS +from ayon_core.hosts.maya.api.lib import IS_HEADLESS class ExtractActiveViewThumbnail(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py rename to 
client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py index 46cc9090bb..ed8f2ad40c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -5,8 +5,8 @@ from maya import cmds import arnold -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib class ExtractArnoldSceneSource(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_assembly.py b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/extract_assembly.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py index 86ffdcef24..2c23f9b752 100644 --- a/openpype/hosts/maya/plugins/publish/extract_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py @@ -1,8 +1,8 @@ import os import json -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import extract_alembic +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import extract_alembic from maya import cmds diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py b/client/ayon_core/hosts/maya/plugins/publish/extract_camera_alembic.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_camera_alembic.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_camera_alembic.py index 43803743bc..b9561e299e 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_alembic.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_camera_alembic.py @@ -2,8 +2,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib class ExtractCameraAlembic(publish.Extractor, diff --git a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py b/client/ayon_core/hosts/maya/plugins/publish/extract_camera_mayaScene.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_camera_mayaScene.py index f67e9db14f..8ca1fd9d3a 100644 --- a/openpype/hosts/maya/plugins/publish/extract_camera_mayaScene.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_camera_mayaScene.py @@ -6,9 +6,9 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib -from openpype.lib import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib +from ayon_core.lib import ( BoolDef ) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx.py b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx.py new file mode 100644 index 0000000000..bb2a6dad07 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +import os + +from maya import cmds # noqa +import maya.mel as mel # noqa +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection +from ayon_core.hosts.maya.api import fbx + + +class ExtractFBX(publish.Extractor): + """Extract FBX from Maya. + + This extracts reproducible FBX exports ignoring any of the + settings set on the local machine in the FBX export options window. 
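+
+    In short: export options are applied from the publish instance via
+    FBXExtractor.set_options_from_instance() and the instance's
+    setMembers are exported as "<staging dir>/<instance name>.fbx".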
+ + """ + order = pyblish.api.ExtractorOrder + label = "Extract FBX" + families = ["fbx"] + + def process(self, instance): + fbx_exporter = fbx.FBXExtractor(log=self.log) + + # Define output path + staging_dir = self.staging_dir(instance) + filename = "{0}.fbx".format(instance.name) + path = os.path.join(staging_dir, filename) + + # The export requires forward slashes because we need + # to format it into a string in a mel expression + path = path.replace('\\', '/') + + self.log.debug("Extracting FBX to: {0}".format(path)) + + members = instance.data["setMembers"] + self.log.debug("Members: {0}".format(members)) + self.log.debug("Instance: {0}".format(instance[:])) + + fbx_exporter.set_options_from_instance(instance) + + # Export + with maintained_selection(): + fbx_exporter.export(members, path) + cmds.select(members, r=1, noExpand=True) + mel.eval('FBXExport -f "{}" -s'.format(path)) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) + + self.log.debug("Extract FBX successful to: {0}".format(path)) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py new file mode 100644 index 0000000000..ee66ed2fb7 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +import os + +from maya import cmds # noqa +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import fbx +from ayon_core.hosts.maya.api.lib import ( + namespaced, get_namespace, strip_namespace +) + + +class ExtractFBXAnimation(publish.Extractor): + """Extract Rig in FBX format from Maya. + + This extracts the rig in fbx with the constraints + and referenced asset content included. + This also optionally extract animated rig in fbx with + geometries included. 
+ + """ + order = pyblish.api.ExtractorOrder + label = "Extract Animation (FBX)" + hosts = ["maya"] + families = ["animation.fbx"] + + def process(self, instance): + # Define output path + staging_dir = self.staging_dir(instance) + filename = "{0}.fbx".format(instance.name) + path = os.path.join(staging_dir, filename) + path = path.replace("\\", "/") + + fbx_exporter = fbx.FBXExtractor(log=self.log) + out_members = instance.data.get("animated_skeleton", []) + # Export + instance.data["constraints"] = True + instance.data["skeletonDefinitions"] = True + instance.data["referencedAssetsContent"] = True + fbx_exporter.set_options_from_instance(instance) + # Export from the rig's namespace so that the exported + # FBX does not include the namespace but preserves the node + # names as existing in the rig workfile + if not out_members: + skeleton_set = [ + i for i in instance + if i.endswith("skeletonAnim_SET") + ] + self.log.debug( + "Top group of animated skeleton not found in " + "{}.\nSkipping fbx animation extraction.".format(skeleton_set)) + return + + namespace = get_namespace(out_members[0]) + relative_out_members = [ + strip_namespace(node, namespace) for node in out_members + ] + with namespaced( + ":" + namespace, + new=False, + relative_names=True + ) as namespace: + fbx_exporter.export(relative_out_members, path) + + representations = instance.data.setdefault("representations", []) + representations.append({ + 'name': 'fbx', + 'ext': 'fbx', + 'files': filename, + "stagingDir": staging_dir + }) + + self.log.debug( + "Extracted FBX animation to: {0}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_gltf.py b/client/ayon_core/hosts/maya/plugins/publish/extract_gltf.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/extract_gltf.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_gltf.py index 6d72d28525..ff11bf0c1f 100644 --- a/openpype/hosts/maya/plugins/publish/extract_gltf.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_gltf.py @@ -3,9 +3,9 @@ from maya import cmds, mel import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib -from openpype.hosts.maya.api.gltf import extract_gltf +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.gltf import extract_gltf class ExtractGLB(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_gpu_cache.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_gpu_cache.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_gpu_cache.py index 16436c6fe4..19825b769c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_gpu_cache.py @@ -2,7 +2,7 @@ from maya import cmds -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractGPUCache(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_import_reference.py b/client/ayon_core/hosts/maya/plugins/publish/extract_import_reference.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/extract_import_reference.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_import_reference.py index 1fdee28d0c..2a43a30b8d 100644 --- a/openpype/hosts/maya/plugins/publish/extract_import_reference.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_import_reference.py 
@@ -6,10 +6,10 @@ import pyblish.api import tempfile -from openpype.lib import run_subprocess -from openpype.pipeline import publish -from openpype.pipeline.publish import OptionalPyblishPluginMixin -from openpype.hosts.maya.api import lib +from ayon_core.lib import run_subprocess +from ayon_core.pipeline import publish +from ayon_core.pipeline.publish import OptionalPyblishPluginMixin +from ayon_core.hosts.maya.api import lib class ExtractImportReference(publish.Extractor, diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_layout.py b/client/ayon_core/hosts/maya/plugins/publish/extract_layout.py new file mode 100644 index 0000000000..f6e663174a --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_layout.py @@ -0,0 +1,163 @@ +import math +import os +import json + +from maya import cmds +from maya.api import OpenMaya as om + +from ayon_core.client import get_representation_by_id +from ayon_core.pipeline import publish + + +class ExtractLayout(publish.Extractor): + """Extract a layout.""" + + label = "Extract Layout" + hosts = ["maya"] + families = ["layout"] + project_container = "AVALON_CONTAINERS" + optional = True + + def process(self, instance): + # Define extract output file path + stagingdir = self.staging_dir(instance) + + # Perform extraction + self.log.debug("Performing extraction..") + + if "representations" not in instance.data: + instance.data["representations"] = [] + + json_data = [] + # TODO representation queries can be refactored to be faster + project_name = instance.context.data["projectName"] + + for asset in cmds.sets(str(instance), query=True): + # Find the container + project_container = self.project_container + container_list = cmds.ls(project_container) + if len(container_list) == 0: + self.log.warning("Project container is not found!") + self.log.warning("The asset(s) may not be properly loaded after published") # noqa + continue + + grp_loaded_ass = instance.data.get("groupLoadedAssets", False) + if grp_loaded_ass: + asset_list = cmds.listRelatives(asset, children=True) + for asset in asset_list: + grp_name = asset.split(':')[0] + else: + grp_name = asset.split(':')[0] + containers = cmds.ls("{}*_CON".format(grp_name)) + if len(containers) == 0: + self.log.warning("{} isn't from the loader".format(asset)) + self.log.warning("It may not be properly loaded after published") # noqa + continue + container = containers[0] + + representation_id = cmds.getAttr( + "{}.representation".format(container)) + + representation = get_representation_by_id( + project_name, + representation_id, + fields=["parent", "context.family"] + ) + + self.log.debug(representation) + + version_id = representation.get("parent") + family = representation.get("context").get("family") + + json_element = { + "family": family, + "instance_name": cmds.getAttr( + "{}.namespace".format(container)), + "representation": str(representation_id), + "version": str(version_id) + } + + loc = cmds.xform(asset, query=True, translation=True) + rot = cmds.xform(asset, query=True, rotation=True, euler=True) + scl = cmds.xform(asset, query=True, relative=True, scale=True) + + json_element["transform"] = { + "translation": { + "x": loc[0], + "y": loc[1], + "z": loc[2] + }, + "rotation": { + "x": math.radians(rot[0]), + "y": math.radians(rot[1]), + "z": math.radians(rot[2]) + }, + "scale": { + "x": scl[0], + "y": scl[1], + "z": scl[2] + } + } + + row_length = 4 + t_matrix_list = cmds.xform(asset, query=True, matrix=True) + + transform_mm = om.MMatrix(t_matrix_list) + transform = 
om.MTransformationMatrix(transform_mm) + + t = transform.translation(om.MSpace.kWorld) + t = om.MVector(t.x, t.z, -t.y) + transform.setTranslation(t, om.MSpace.kWorld) + transform.rotateBy( + om.MEulerRotation(math.radians(-90), 0, 0), om.MSpace.kWorld) + transform.scaleBy([1.0, 1.0, -1.0], om.MSpace.kObject) + + t_matrix_list = list(transform.asMatrix()) + + t_matrix = [] + for i in range(0, len(t_matrix_list), row_length): + t_matrix.append(t_matrix_list[i:i + row_length]) + + json_element["transform_matrix"] = [ + list(row) + for row in t_matrix + ] + + basis_list = [ + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, -1, 0, + 0, 0, 0, 1 + ] + + basis_mm = om.MMatrix(basis_list) + basis = om.MTransformationMatrix(basis_mm) + + b_matrix_list = list(basis.asMatrix()) + b_matrix = [] + + for i in range(0, len(b_matrix_list), row_length): + b_matrix.append(b_matrix_list[i:i + row_length]) + + json_element["basis"] = [] + for row in b_matrix: + json_element["basis"].append(list(row)) + + json_data.append(json_element) + + json_filename = "{}.json".format(instance.name) + json_path = os.path.join(stagingdir, json_filename) + + with open(json_path, "w+") as file: + json.dump(json_data, fp=file, indent=2) + + json_representation = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(json_representation) + + self.log.debug("Extracted instance '%s' to: %s", + instance.name, json_representation) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_look.py b/client/ayon_core/hosts/maya/plugins/publish/extract_look.py new file mode 100644 index 0000000000..7f97a7bf82 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_look.py @@ -0,0 +1,890 @@ +# -*- coding: utf-8 -*- +"""Maya look extractor.""" +import sys +from abc import ABCMeta, abstractmethod +from collections import OrderedDict +import contextlib +import json +import logging +import os +import tempfile +import six +import attr + +import pyblish.api + +from maya import cmds # noqa + +from ayon_core.lib import ( + find_executable, + source_hash, + run_subprocess, + get_oiio_tool_args, + ToolNotFoundError, +) + +from ayon_core.pipeline import publish, KnownPublishError +from ayon_core.hosts.maya.api import lib + +# Modes for transfer +COPY = 1 +HARDLINK = 2 + + +@attr.s +class TextureResult(object): + """The resulting texture of a processed file for a resource""" + # Path to the file + path = attr.ib() + # Colorspace of the resulting texture. This might not be the input + # colorspace of the texture if a TextureProcessor has processed the file. + colorspace = attr.ib() + # Hash generated for the texture using ayon_core.lib.source_hash + file_hash = attr.ib() + # The transfer mode, e.g. COPY or HARDLINK + transfer_mode = attr.ib() + + +def find_paths_by_hash(texture_hash): + """Find the texture hash key in the dictionary. + + All paths that originate from it. + + Args: + texture_hash (str): Hash of the texture. + + Return: + str: path to texture if found. + + """ + raise KnownPublishError( + "This is a bug. \"find_paths_by_hash\" is not compatible with AYON." + ) + + +@contextlib.contextmanager +def no_workspace_dir(): + """Force maya to a fake temporary workspace directory. + + Note: This is not maya.cmds.workspace 'rootDirectory' but the 'directory' + + This helps to avoid Maya automatically remapping image paths to files + relative to the currently set directory. 
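+
+    Illustrative usage (simplified):
+        with no_workspace_dir():
+            cmds.file(path, force=True, exportSelected=True)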
+ + """ + + # Store current workspace + original = cmds.workspace(query=True, directory=True) + + # Set a fake workspace + fake_workspace_dir = tempfile.mkdtemp() + cmds.workspace(directory=fake_workspace_dir) + + try: + yield + finally: + try: + cmds.workspace(directory=original) + except RuntimeError: + # If the original workspace directory didn't exist either + # ignore the fact that it fails to reset it to the old path + pass + + # Remove the temporary directory + os.rmdir(fake_workspace_dir) + + +@six.add_metaclass(ABCMeta) +class TextureProcessor: + + extension = None + + def __init__(self, log=None): + if log is None: + log = logging.getLogger(self.__class__.__name__) + self.log = log + + def apply_settings(self, system_settings, project_settings): + """Apply OpenPype system/project settings to the TextureProcessor + + Args: + system_settings (dict): OpenPype system settings + project_settings (dict): OpenPype project settings + + Returns: + None + + """ + pass + + @abstractmethod + def process(self, + source, + colorspace, + color_management, + staging_dir): + """Process the `source` texture. + + Must be implemented on inherited class. + + This must always return a TextureResult even when it does not generate + a texture. If it doesn't generate a texture then it should return a + TextureResult using the input path and colorspace. + + Args: + source (str): Path to source file. + colorspace (str): Colorspace of the source file. + color_management (dict): Maya Color management data from + `lib.get_color_management_preferences` + staging_dir (str): Output directory to write to. + + Returns: + TextureResult: The resulting texture information. + + """ + pass + + def __repr__(self): + # Log instance as class name + return self.__class__.__name__ + + +class MakeRSTexBin(TextureProcessor): + """Make `.rstexbin` using `redshiftTextureProcessor`""" + + extension = ".rstexbin" + + def process(self, + source, + colorspace, + color_management, + staging_dir): + + texture_processor_path = self.get_redshift_tool( + "redshiftTextureProcessor" + ) + if not texture_processor_path: + raise KnownPublishError("Must have Redshift available.") + + subprocess_args = [ + texture_processor_path, + source + ] + + # if color management is enabled we pass color space information + if color_management["enabled"]: + config_path = color_management["config"] + if not os.path.exists(config_path): + raise RuntimeError("OCIO config not found at: " + "{}".format(config_path)) + + if not os.getenv("OCIO"): + self.log.debug( + "OCIO environment variable not set." + "Setting it with OCIO config from Maya." + ) + os.environ["OCIO"] = config_path + + self.log.debug("converting colorspace {0} to redshift render " + "colorspace".format(colorspace)) + subprocess_args.extend(["-cs", colorspace]) + + hash_args = ["rstex"] + texture_hash = source_hash(source, *hash_args) + + # Redshift stores the output texture next to the input but with + # the extension replaced to `.rstexbin` + basename, ext = os.path.splitext(source) + destination = "{}{}".format(basename, self.extension) + + self.log.debug(" ".join(subprocess_args)) + try: + run_subprocess(subprocess_args, logger=self.log) + except Exception: + self.log.error("Texture .rstexbin conversion failed", + exc_info=True) + six.reraise(*sys.exc_info()) + + return TextureResult( + path=destination, + file_hash=texture_hash, + colorspace=colorspace, + transfer_mode=COPY + ) + + @staticmethod + def get_redshift_tool(tool_name): + """Path to redshift texture processor. 
+ + On Windows it adds .exe extension if missing from tool argument. + + Args: + tool_name (string): Tool name. + + Returns: + str: Full path to redshift texture processor executable. + """ + if "REDSHIFT_COREDATAPATH" not in os.environ: + raise RuntimeError("Must have Redshift available.") + + redshift_tool_path = os.path.join( + os.environ["REDSHIFT_COREDATAPATH"], + "bin", + tool_name + ) + + return find_executable(redshift_tool_path) + + +class MakeTX(TextureProcessor): + """Make `.tx` using `maketx` with some default settings. + + Some hardcoded arguments passed to `maketx` are based on the defaults used + in Arnold's txManager tool. + + """ + + extension = ".tx" + + def __init__(self, log=None): + super(MakeTX, self).__init__(log=log) + self.extra_args = [] + + def apply_settings(self, system_settings, project_settings): + # Allow extra maketx arguments from project settings + args_settings = ( + project_settings["maya"]["publish"] + .get("ExtractLook", {}).get("maketx_arguments", []) + ) + extra_args = [] + for arg_data in args_settings: + argument = arg_data["argument"] + parameters = arg_data["parameters"] + if not argument: + self.log.debug("Ignoring empty parameter from " + "`maketx_arguments` setting..") + continue + + extra_args.append(argument) + extra_args.extend(parameters) + + self.extra_args = extra_args + + def process(self, + source, + colorspace, + color_management, + staging_dir): + """Process the texture. + + This function requires the `maketx` executable to be available in an + OpenImageIO toolset detectable by OpenPype. + + Args: + source (str): Path to source file. + colorspace (str): Colorspace of the source file. + color_management (dict): Maya Color management data from + `lib.get_color_management_preferences` + staging_dir (str): Output directory to write to. + + Returns: + TextureResult: The resulting texture information. + + """ + + try: + maketx_args = get_oiio_tool_args("maketx") + except ToolNotFoundError: + raise KnownPublishError( + "OpenImageIO is not available on the machine") + + # Define .tx filepath in staging if source file is not .tx + fname, ext = os.path.splitext(os.path.basename(source)) + if ext == ".tx": + # Do nothing if the source file is already a .tx file. + return TextureResult( + path=source, + file_hash=source_hash(source), + colorspace=colorspace, + transfer_mode=COPY + ) + + # Hardcoded default arguments for maketx conversion based on Arnold's + # txManager in Maya + args = [ + # unpremultiply before conversion (recommended when alpha present) + "--unpremult", + # use oiio-optimized settings for tile-size, planarconfig, metadata + "--oiio", + "--filter", "lanczos3", + ] + if color_management["enabled"]: + config_path = color_management["config"] + if not os.path.exists(config_path): + raise RuntimeError("OCIO config not found at: " + "{}".format(config_path)) + + render_colorspace = color_management["rendering_space"] + + self.log.debug("tx: converting colorspace {0} " + "-> {1}".format(colorspace, + render_colorspace)) + args.extend(["--colorconvert", colorspace, render_colorspace]) + args.extend(["--colorconfig", config_path]) + + else: + # Maya Color management is disabled. We cannot rely on an OCIO + self.log.debug("tx: Maya color management is disabled. No color " + "conversion will be applied to .tx conversion for: " + "{}".format(source)) + # Assume linear + render_colorspace = "linear" + + # Note: The texture hash is only reliable if we include any potential + # conversion arguments provide to e.g. 
`maketx` + hash_args = ["maketx"] + args + self.extra_args + texture_hash = source_hash(source, *hash_args) + + # Ensure folder exists + resources_dir = os.path.join(staging_dir, "resources") + if not os.path.exists(resources_dir): + os.makedirs(resources_dir) + + self.log.debug("Generating .tx file for %s .." % source) + + subprocess_args = maketx_args + [ + "-v", # verbose + "-u", # update mode + # --checknan doesn't influence the output file but aborts the + # conversion if it finds any. So we can avoid it for the file hash + "--checknan", + source + ] + + subprocess_args.extend(args) + if self.extra_args: + subprocess_args.extend(self.extra_args) + + # Add source hash attribute after other arguments for log readability + # Note: argument is excluded from the hash since it is the hash itself + subprocess_args.extend([ + "--sattrib", + "sourceHash", + texture_hash + ]) + + destination = os.path.join(resources_dir, fname + ".tx") + subprocess_args.extend(["-o", destination]) + + # We want to make sure we are explicit about what OCIO config gets + # used. So when we supply no --colorconfig flag that no fallback to + # an OCIO env var occurs. + env = os.environ.copy() + env.pop("OCIO", None) + + self.log.debug(" ".join(subprocess_args)) + try: + run_subprocess(subprocess_args, env=env) + except Exception: + self.log.error("Texture maketx conversion failed", + exc_info=True) + raise + + return TextureResult( + path=destination, + file_hash=texture_hash, + colorspace=render_colorspace, + transfer_mode=COPY + ) + + @staticmethod + def _has_arnold(): + """Return whether the arnold package is available and importable.""" + try: + import arnold # noqa: F401 + return True + except (ImportError, ModuleNotFoundError): + return False + + +class ExtractLook(publish.Extractor): + """Extract Look (Maya Scene + JSON) + + Only extracts the sets (shadingEngines and alike) alongside a .json file + that stores it relationships for the sets and "attribute" data for the + instance members. + + """ + + label = "Extract Look (Maya Scene + JSON)" + hosts = ["maya"] + families = ["look", "mvLook"] + order = pyblish.api.ExtractorOrder + 0.2 + scene_type = "ma" + look_data_type = "json" + + def get_maya_scene_type(self, instance): + """Get Maya scene type from settings. + + Args: + instance (pyblish.api.Instance): Instance with collected + project settings. + + """ + ext_mapping = ( + instance.context.data["project_settings"]["maya"]["ext_mapping"] + ) + if ext_mapping: + self.log.debug("Looking in settings for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.debug( + "Using {} as scene type".format(self.scene_type)) + break + except KeyError: + # no preset found + pass + + return "mayaAscii" if self.scene_type == "ma" else "mayaBinary" + + def process(self, instance): + """Plugin entry point. + + Args: + instance: Instance to process. 
+ + """ + _scene_type = self.get_maya_scene_type(instance) + + # Define extract output file path + dir_path = self.staging_dir(instance) + maya_fname = "{0}.{1}".format(instance.name, self.scene_type) + json_fname = "{0}.{1}".format(instance.name, self.look_data_type) + maya_path = os.path.join(dir_path, maya_fname) + json_path = os.path.join(dir_path, json_fname) + + # Remove all members of the sets so they are not included in the + # exported file by accident + self.log.debug("Processing sets..") + lookdata = instance.data["lookData"] + relationships = lookdata["relationships"] + sets = list(relationships.keys()) + if not sets: + self.log.debug("No sets found for the look") + return + + # Specify texture processing executables to activate + # TODO: Load these more dynamically once we support more processors + processors = [] + context = instance.context + for key, Processor in { + # Instance data key to texture processor mapping + "maketx": MakeTX, + "rstex": MakeRSTexBin + }.items(): + if instance.data.get(key, False): + processor = Processor(log=self.log) + processor.apply_settings(context.data["system_settings"], + context.data["project_settings"]) + processors.append(processor) + + if processors: + self.log.debug("Collected texture processors: " + "{}".format(processors)) + + self.log.debug("Processing resources..") + results = self.process_resources(instance, + staging_dir=dir_path, + processors=processors) + transfers = results["fileTransfers"] + hardlinks = results["fileHardlinks"] + hashes = results["fileHashes"] + remap = results["attrRemap"] + + # Extract in correct render layer + self.log.debug("Extracting look maya scene file: {}".format(maya_path)) + layer = instance.data.get("renderlayer", "defaultRenderLayer") + with lib.renderlayer(layer): + # TODO: Ensure membership edits don't become renderlayer overrides + with lib.empty_sets(sets, force=True): + # To avoid Maya trying to automatically remap the file + # textures relative to the `workspace -directory` we force + # it to a fake temporary workspace. This fixes textures + # getting incorrectly remapped. 
with no_workspace_dir(): + with lib.attribute_values(remap): + with lib.maintained_selection(): + cmds.select(sets, noExpand=True) + cmds.file( + maya_path, + force=True, + typ=_scene_type, + exportSelected=True, + preserveReferences=False, + channels=True, + constraints=True, + expressions=True, + constructionHistory=True, + ) + + # Write the JSON data + data = { + "attributes": lookdata["attributes"], + "relationships": relationships + } + + self.log.debug("Extracting json file: {}".format(json_path)) + with open(json_path, "w") as f: + json.dump(data, f) + + if "files" not in instance.data: + instance.data["files"] = [] + if "hardlinks" not in instance.data: + instance.data["hardlinks"] = [] + if "transfers" not in instance.data: + instance.data["transfers"] = [] + + instance.data["files"].append(maya_fname) + instance.data["files"].append(json_fname) + + if instance.data.get("representations") is None: + instance.data["representations"] = [] + + instance.data["representations"].append( + { + "name": self.scene_type, + "ext": self.scene_type, + "files": os.path.basename(maya_fname), + "stagingDir": os.path.dirname(maya_fname), + } + ) + instance.data["representations"].append( + { + "name": self.look_data_type, + "ext": self.look_data_type, + "files": os.path.basename(json_fname), + "stagingDir": os.path.dirname(json_fname), + } + ) + + # Set up the resources transfers/links for the integrator + instance.data["transfers"].extend(transfers) + instance.data["hardlinks"].extend(hardlinks) + + # Source hash for the textures + instance.data["sourceHashes"] = hashes + + self.log.debug("Extracted instance '%s' to: %s" % (instance.name, + maya_path)) + + def _set_resource_result_colorspace(self, resource, colorspace): + """Update resource resulting colorspace after texture processing""" + if "result_color_space" in resource: + if resource["result_color_space"] == colorspace: + return + + self.log.warning( + "Resource already has a resulting colorspace but is now " + "being overridden to a new one: {} -> {}".format( + resource["result_color_space"], colorspace + ) + ) + resource["result_color_space"] = colorspace + + def process_resources(self, instance, staging_dir, processors): + """Process all resources in the instance. + + It is assumed that all resources are nodes using file textures. + + Extract the textures to transfer, possibly convert with maketx and + remap the node paths to the destination path. Note that a source + might be included more than once amongst the resources as they could + be the input file to multiple nodes. + + """ + + resources = instance.data["resources"] + color_management = lib.get_color_management_preferences() + + # TODO: Temporarily disable all hardlinking, due to the feature not being + # used or properly working. + self.log.info( + "Forcing copy instead of hardlink." + ) + force_copy = True + + # Note: use sys.platform here; the "platform" module is not imported + # in this file. + if not force_copy and sys.platform == "win32": + # Temporary fix to NOT create hardlinks on windows machines + self.log.warning( + "Forcing copy instead of hardlink due to issues on Windows..."
+ ) + force_copy = True + + destinations_cache = {} + + def get_resource_destination_cached(path): + """Get resource destination with cached result per filepath""" + if path not in destinations_cache: + destination = self.get_resource_destination( + path, instance.data["resourcesDir"], processors) + destinations_cache[path] = destination + return destinations_cache[path] + + # Process all resource's individual files + processed_files = {} + transfers = [] + hardlinks = [] + hashes = {} + remap = OrderedDict() + for resource in resources: + colorspace = resource["color_space"] + + for filepath in resource["files"]: + filepath = os.path.normpath(filepath) + + if filepath in processed_files: + # The file was already processed, likely due to usage by + # another resource in the scene. We confirm here it + # didn't do color spaces different than the current + # resource. + processed_file = processed_files[filepath] + self.log.debug( + "File was already processed. Likely used by another " + "resource too: {}".format(filepath) + ) + + if colorspace != processed_file["color_space"]: + self.log.warning( + "File '{}' was already processed using colorspace " + "'{}' instead of the current resource's " + "colorspace '{}'. The already processed texture " + "result's colorspace '{}' will be used." + "".format(filepath, + colorspace, + processed_file["color_space"], + processed_file["result_color_space"])) + + self._set_resource_result_colorspace( + resource, + colorspace=processed_file["result_color_space"] + ) + continue + + texture_result = self._process_texture( + filepath, + processors=processors, + staging_dir=staging_dir, + force_copy=force_copy, + color_management=color_management, + colorspace=colorspace + ) + + # Set the resulting color space on the resource + self._set_resource_result_colorspace( + resource, colorspace=texture_result.colorspace + ) + + processed_files[filepath] = { + "color_space": colorspace, + "result_color_space": texture_result.colorspace, + } + + source = texture_result.path + destination = get_resource_destination_cached(source) + if force_copy or texture_result.transfer_mode == COPY: + transfers.append((source, destination)) + self.log.debug('file will be copied {} -> {}'.format( + source, destination)) + elif texture_result.transfer_mode == HARDLINK: + hardlinks.append((source, destination)) + self.log.debug('file will be hardlinked {} -> {}'.format( + source, destination)) + + # Store the hashes from hash to destination to include in the + # database + hashes[texture_result.file_hash] = destination + + # Set up remapping attributes for the node during the publish + # The order of these can be important if one attribute directly + # affects another, e.g. we set colorspace after filepath because + # maya sometimes tries to guess the colorspace when changing + # filepaths (which is avoidable, but we don't want to have those + # attributes changed in the resulting publish) + # Remap filepath to publish destination + # TODO It would be much better if we could use the destination path + # from the actual processed texture results, but since the + # attribute will need to preserve tokens like , etc for + # now we will define the output path from the attribute value + # including the tokens to persist them. 
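+            # For example (names invented): a file node attribute
+            # "file1.fileTextureName" pointing at
+            # "sourceimages/diffuse.1001.exr" is remapped to the publish
+            # resources folder, e.g. "<resourcesDir>/diffuse.1001.tx"
+            # when the MakeTX processor is active.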
+ filepath_attr = resource["attribute"]
+ remap[filepath_attr] = get_resource_destination_cached(
+ resource["source"]
+ )
+
+ # Preserve color space values (force value after filepath change)
+ # This will also trigger in the same order at end of context to
+ # ensure after context it's still the original value.
+ node = resource["node"]
+ if cmds.attributeQuery("colorSpace", node=node, exists=True):
+ color_space_attr = "{}.colorSpace".format(node)
+ remap[color_space_attr] = resource["result_color_space"]
+
+ self.log.debug("Finished remapping destinations ...")
+
+ return {
+ "fileTransfers": transfers,
+ "fileHardlinks": hardlinks,
+ "fileHashes": hashes,
+ "attrRemap": remap,
+ }
+
+ def get_resource_destination(self, filepath, resources_dir, processors):
+ """Get resource destination path.
+
+ This is a utility function to change the path if the resource file
+ name is changed by some external tool like `maketx`.
+
+ Args:
+ filepath (str): Resource source path
+ resources_dir (str): Destination dir for resources in publish.
+ processors (list): Texture processors converting resource.
+
+ Returns:
+ str: Path to resource file
+
+ """
+ # Compute destination location
+ basename, ext = os.path.splitext(os.path.basename(filepath))
+
+ # Get extension from the last processor
+ for processor in reversed(processors):
+ processor_ext = processor.extension
+ if processor_ext and ext != processor_ext:
+ self.log.debug("Processor {} overrides extension to '{}' "
+ "for path: {}".format(processor,
+ processor_ext,
+ filepath))
+ ext = processor_ext
+ break
+
+ return os.path.join(
+ resources_dir, basename + ext
+ )
+
+ def _get_existing_hashed_texture(self, texture_hash):
+ """Return the first found filepath from a texture hash"""
+
+ # If source has been published before with the same settings,
+ # then don't reprocess but hardlink from the original
+ existing = find_paths_by_hash(texture_hash)
+ if existing:
+ source = next((p for p in existing if os.path.exists(p)), None)
+ if source:
+ return source
+ else:
+ self.log.warning(
+ "Paths not found on disk, "
+ "skipping hardlink: {}".format(existing)
+ )
+
+ def _process_texture(self,
+ filepath,
+ processors,
+ staging_dir,
+ force_copy,
+ color_management,
+ colorspace):
+ """Process a single texture file on disk for publishing.
+
+ This will:
+ 1. Check whether it's already published; if so, hardlink it
+ (if the texture hash is found and force copy is not enabled)
+ 2. Process the texture using the supplied texture
+ processors like MakeTX and MakeRSTexBin if enabled.
+ 3. Compute the destination path for the source file.
+
+ Args:
+ filepath (str): The source file path to process.
+ processors (list): List of TextureProcessor processing the texture
+ staging_dir (str): The staging directory to write to.
+ force_copy (bool): Whether to force a copy even if a file hash
+ might have existed already in the project, otherwise
+ hardlinking the existing file is allowed.
+ color_management (dict): Maya's Color Management settings from
+ `lib.get_color_management_preferences`
+ colorspace (str): The source colorspace of the resources this
+ texture belongs to.
+
+ Returns:
+ TextureResult: The texture result information.
+ """
+
+ if len(processors) > 1:
+ raise KnownPublishError(
+ "More than one texture processor is not supported. "
+ "Current processors enabled: {}".format(processors)
+ )
+
+ for processor in processors:
+ self.log.debug("Processing texture {} with processor {}".format(
+ filepath, processor
+ ))
+
+ processed_result = processor.process(filepath,
+ colorspace,
+ color_management,
+ staging_dir)
+ if not processed_result:
+ raise RuntimeError("Texture Processor {} returned "
+ "no result.".format(processor))
+ self.log.debug("Generated processed "
+ "texture: {}".format(processed_result.path))
+
+ # TODO: Currently all processors force copy instead of allowing
+ # hardlinks using source hashes. This should be refactored
+ return processed_result
+
+ # No texture processing for this file
+ texture_hash = source_hash(filepath)
+ if not force_copy:
+ # Look up earlier publishes by the computed hash, not the
+ # raw filepath
+ existing = self._get_existing_hashed_texture(texture_hash)
+ if existing:
+ self.log.debug("Found hash in database, preparing hardlink..")
+ return TextureResult(
+ path=filepath,
+ file_hash=texture_hash,
+ colorspace=colorspace,
+ transfer_mode=HARDLINK
+ )
+
+ return TextureResult(
+ path=filepath,
+ file_hash=texture_hash,
+ colorspace=colorspace,
+ transfer_mode=COPY
+ )
+
+
+class ExtractModelRenderSets(ExtractLook):
+ """Extract model render attribute sets as model metadata
+
+ Only extracts the render attrib sets (NO shadingEngines) alongside
+ a .json file that stores its relationships for the sets and "attribute"
+ data for the instance members.
+
+ """
+
+ label = "Model Render Sets"
+ hosts = ["maya"]
+ families = ["model"]
+ scene_type_prefix = "meta.render."
+ look_data_type = "meta.render.json"
+
+ def get_maya_scene_type(self, instance):
+ typ = super(ExtractModelRenderSets, self).get_maya_scene_type(instance)
+ # add prefix
+ self.scene_type = self.scene_type_prefix + self.scene_type
+
+ return typ
diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/client/ayon_core/hosts/maya/plugins/publish/extract_maya_scene_raw.py
similarity index 94%
rename from openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
rename to client/ayon_core/hosts/maya/plugins/publish/extract_maya_scene_raw.py
index a4f313bdf9..5045a8d252 100644
--- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_maya_scene_raw.py
@@ -4,13 +4,13 @@
 from maya import cmds
-from openpype.hosts.maya.api.lib import maintained_selection
-from openpype.pipeline import AVALON_CONTAINER_ID, publish
-from openpype.pipeline.publish import OpenPypePyblishPluginMixin
-from openpype.lib import BoolDef
+from ayon_core.hosts.maya.api.lib import maintained_selection
+from ayon_core.pipeline import AVALON_CONTAINER_ID, publish
+from ayon_core.pipeline.publish import AYONPyblishPluginMixin
+from ayon_core.lib import BoolDef
-class ExtractMayaSceneRaw(publish.Extractor, OpenPypePyblishPluginMixin):
+class ExtractMayaSceneRaw(publish.Extractor, AYONPyblishPluginMixin):
 """Extract as Maya Scene (raw).
 
 This will preserve all references, construction history, etc.
diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_usd.py b/client/ayon_core/hosts/maya/plugins/publish/extract_maya_usd.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/extract_maya_usd.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_maya_usd.py index 8c32ac1e39..cfaea8e479 100644 --- a/openpype/hosts/maya/plugins/publish/extract_maya_usd.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_maya_usd.py @@ -6,8 +6,8 @@ from maya import cmds import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection @contextlib.contextmanager diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_model.py b/client/ayon_core/hosts/maya/plugins/publish/extract_model.py new file mode 100644 index 0000000000..b6ae4d537a --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_model.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +"""Extract model as Maya Scene.""" +import os + +from maya import cmds + +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib + + +class ExtractModel(publish.Extractor, + publish.OptionalPyblishPluginMixin): + """Extract as Model (Maya Scene). + + Only extracts contents based on the original "setMembers" data to ensure + publishing the least amount of required shapes. From that it only takes + the shapes that are not intermediateObjects + + During export it sets a temporary context to perform a clean extraction. + The context ensures: + - Smooth preview is turned off for the geometry + - Default shader is assigned (no materials are exported) + - Remove display layers + + """ + + label = "Model (Maya Scene)" + hosts = ["maya"] + families = ["model"] + scene_type = "ma" + optional = True + + def process(self, instance): + """Plugin entry point.""" + if not self.is_active(instance.data): + return + + ext_mapping = ( + instance.context.data["project_settings"]["maya"]["ext_mapping"] + ) + if ext_mapping: + self.log.debug("Looking in settings for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.debug( + "Using {} as scene type".format(self.scene_type)) + break + except KeyError: + # no preset found + pass + # Define extract output file path + stagingdir = self.staging_dir(instance) + filename = "{0}.{1}".format(instance.name, self.scene_type) + path = os.path.join(stagingdir, filename) + + # Perform extraction + self.log.debug("Performing extraction ...") + + # Get only the shape contents we need in such a way that we avoid + # taking along intermediateObjects + members = instance.data("setMembers") + members = cmds.ls(members, + dag=True, + shapes=True, + type=("mesh", "nurbsCurve"), + noIntermediate=True, + long=True) + + with lib.no_display_layers(instance): + with lib.displaySmoothness(members, + divisionsU=0, + divisionsV=0, + pointsWire=4, + pointsShaded=1, + polygonObject=1): + with lib.shader(members, + shadingEngine="initialShadingGroup"): + with lib.maintained_selection(): + cmds.select(members, noExpand=True) + cmds.file(path, + force=True, + typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 + exportSelected=True, + preserveReferences=False, + channels=False, + constraints=False, + expressions=False, + constructionHistory=False) + + # Store reference for integration + + if 
"representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': self.scene_type, + 'ext': self.scene_type, + 'files': filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + + self.log.debug("Extracted instance '%s' to: %s" % (instance.name, + path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_look.py b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_look.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_multiverse_look.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_look.py index c2bebeaee6..2dd8821b3a 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_look.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_look.py @@ -2,8 +2,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection class ExtractMultiverseLook(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd.py index 60185bb152..8c195c25fd 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd.py @@ -5,8 +5,8 @@ from maya import mel import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection class ExtractMultiverseUsd(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py index 7966c4fa93..d31660d1b4 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd_comp.py @@ -2,8 +2,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection class ExtractMultiverseUsdComposition(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd_over.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd_over.py index e4a97db6e4..00303e604c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_multiverse_usd_over.py @@ -1,7 +1,7 @@ import os -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection from 
maya import cmds diff --git a/openpype/hosts/maya/plugins/publish/extract_obj.py b/client/ayon_core/hosts/maya/plugins/publish/extract_obj.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/extract_obj.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_obj.py index ca94130d09..6ce40a8728 100644 --- a/openpype/hosts/maya/plugins/publish/extract_obj.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_obj.py @@ -3,8 +3,8 @@ from maya import cmds import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib class ExtractObj(publish.Extractor): diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_playblast.py b/client/ayon_core/hosts/maya/plugins/publish/extract_playblast.py new file mode 100644 index 0000000000..c019d43b36 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_playblast.py @@ -0,0 +1,106 @@ +import os + +import clique + +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib + +from maya import cmds + + +class ExtractPlayblast(publish.Extractor): + """Extract viewport playblast. + + Takes review camera and creates review Quicktime video based on viewport + capture. + + """ + + label = "Extract Playblast" + hosts = ["maya"] + families = ["review"] + optional = True + capture_preset = {} + profiles = None + + def process(self, instance): + self.log.debug("Extracting playblast..") + + # get scene fps + fps = instance.data.get("fps") or instance.context.data.get("fps") + + # if start and end frames cannot be determined, get them + # from Maya timeline + start = instance.data.get("frameStartFtrack") + end = instance.data.get("frameEndFtrack") + if start is None: + start = cmds.playbackOptions(query=True, animationStartTime=True) + if end is None: + end = cmds.playbackOptions(query=True, animationEndTime=True) + + self.log.debug("start: {}, end: {}".format(start, end)) + task_data = instance.data["anatomyData"].get("task", {}) + capture_preset = lib.get_capture_preset( + task_data.get("name"), + task_data.get("type"), + instance.data["subset"], + instance.context.data["project_settings"], + self.log + ) + stagingdir = self.staging_dir(instance) + filename = instance.name + path = os.path.join(stagingdir, filename) + self.log.debug("Outputting images to %s" % path) + # get cameras + camera = instance.data["review_camera"] + preset = lib.generate_capture_preset( + instance, camera, path, + start=start, end=end, + capture_preset=capture_preset) + lib.render_capture_preset(preset) + + # Find playblast sequence + collected_files = os.listdir(stagingdir) + patterns = [clique.PATTERNS["frames"]] + collections, remainder = clique.assemble(collected_files, + minimum_items=1, + patterns=patterns) + + self.log.debug("Searching playblast collection for: %s", path) + frame_collection = None + for collection in collections: + filebase = collection.format("{head}").rstrip(".") + self.log.debug("Checking collection head: %s", filebase) + if filebase in path: + frame_collection = collection + self.log.debug( + "Found playblast collection: %s", frame_collection + ) + + tags = ["review"] + if not instance.data.get("keepImages"): + tags.append("delete") + + # Add camera node name to representation data + camera_node_name = cmds.listRelatives(camera, parent=True)[0] + + collected_files = list(frame_collection) + # single frame file shouldn't be in list, only as a string + if 
len(collected_files) == 1: + collected_files = collected_files[0] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": capture_preset["Codec"]["compression"], + "ext": capture_preset["Codec"]["compression"], + "files": collected_files, + "stagingDir": stagingdir, + "frameStart": int(start), + "frameEnd": int(end), + "fps": fps, + "tags": tags, + "camera_name": camera_node_name + } + instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_pointcache.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py index 0cc802fa7a..f2187063fc 100644 --- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py @@ -2,8 +2,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import ( extract_alembic, suspended_refresh, maintained_selection, diff --git a/openpype/hosts/maya/plugins/publish/extract_proxy_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/extract_proxy_abc.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py index d9bec87cfd..3637a58614 100644 --- a/openpype/hosts/maya/plugins/publish/extract_proxy_abc.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py @@ -2,8 +2,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import ( extract_alembic, suspended_refresh, maintained_selection, diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py b/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py new file mode 100644 index 0000000000..5600b980d9 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +"""Redshift Proxy extractor.""" +import os + +from maya import cmds + +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection + + +class ExtractRedshiftProxy(publish.Extractor): + """Extract the content of the instance to a redshift proxy file.""" + + label = "Redshift Proxy (.rs)" + hosts = ["maya"] + families = ["redshiftproxy"] + + def process(self, instance): + """Extractor entry point.""" + + staging_dir = self.staging_dir(instance) + file_name = "{}.rs".format(instance.name) + file_path = os.path.join(staging_dir, file_name) + + anim_on = instance.data["animation"] + rs_options = "exportConnectivity=0;enableCompression=1;keepUnused=0;" + repr_files = file_name + + if not anim_on: + # Remove animation information because it is not required for + # non-animated subsets + keys = ["frameStart", + "frameEnd", + "handleStart", + "handleEnd", + "frameStartHandle", + "frameEndHandle"] + for key in keys: + instance.data.pop(key, None) + + else: + start_frame = instance.data["frameStartHandle"] + end_frame = instance.data["frameEndHandle"] + rs_options = "{}startFrame={};endFrame={};frameStep={};".format( + rs_options, start_frame, + end_frame, instance.data["step"] + ) + 
+ root, ext = os.path.splitext(file_path) + # Padding is taken from number of digits of the end_frame. + # Not sure where Redshift is taking it. + repr_files = [ + "{}.{}{}".format(os.path.basename(root), str(frame).rjust(4, "0"), ext) # noqa: E501 + for frame in range( + int(start_frame), + int(end_frame) + 1, + int(instance.data["step"]) + )] + # vertex_colors = instance.data.get("vertexColors", False) + + # Write out rs file + self.log.debug("Writing: '%s'" % file_path) + with maintained_selection(): + cmds.select(instance.data["setMembers"], noExpand=True) + cmds.file(file_path, + pr=False, + force=True, + type="Redshift Proxy", + exportSelected=True, + options=rs_options) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + self.log.debug("Files: {}".format(repr_files)) + + representation = { + 'name': 'rs', + 'ext': 'rs', + 'files': repr_files, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) + + self.log.debug("Extracted instance '%s' to: %s" + % (instance.name, staging_dir)) diff --git a/openpype/hosts/maya/plugins/publish/extract_rendersetup.py b/client/ayon_core/hosts/maya/plugins/publish/extract_rendersetup.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/extract_rendersetup.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_rendersetup.py index 7e21f5282e..4815033777 100644 --- a/openpype/hosts/maya/plugins/publish/extract_rendersetup.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_rendersetup.py @@ -2,7 +2,7 @@ import json import maya.app.renderSetup.model.renderSetup as renderSetup -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractRenderSetup(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_rig.py b/client/ayon_core/hosts/maya/plugins/publish/extract_rig.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/extract_rig.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_rig.py index 1ffc9a7dae..13e3d7c6b4 100644 --- a/openpype/hosts/maya/plugins/publish/extract_rig.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_rig.py @@ -4,8 +4,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection class ExtractRig(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_skeleton_mesh.py b/client/ayon_core/hosts/maya/plugins/publish/extract_skeleton_mesh.py similarity index 90% rename from openpype/hosts/maya/plugins/publish/extract_skeleton_mesh.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_skeleton_mesh.py index 50c1fb3bde..a6811d6a6f 100644 --- a/openpype/hosts/maya/plugins/publish/extract_skeleton_mesh.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_skeleton_mesh.py @@ -4,9 +4,9 @@ from maya import cmds # noqa import pyblish.api -from openpype.pipeline import publish -from openpype.pipeline.publish import OptionalPyblishPluginMixin -from openpype.hosts.maya.api import fbx +from ayon_core.pipeline import publish +from ayon_core.pipeline.publish import OptionalPyblishPluginMixin +from ayon_core.hosts.maya.api import fbx class ExtractSkeletonMesh(publish.Extractor, diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_thumbnail.py b/client/ayon_core/hosts/maya/plugins/publish/extract_thumbnail.py new file mode 
100644
index 0000000000..db26422897
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_thumbnail.py
@@ -0,0 +1,120 @@
+import os
+import glob
+import tempfile
+
+from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api import lib
+
+
+class ExtractThumbnail(publish.Extractor):
+ """Extract viewport thumbnail.
+
+ Takes review camera and creates a thumbnail based on viewport
+ capture.
+
+ """
+
+ label = "Thumbnail"
+ hosts = ["maya"]
+ families = ["review"]
+
+ def process(self, instance):
+ self.log.debug("Extracting thumbnail..")
+
+ camera = instance.data["review_camera"]
+
+ task_data = instance.data["anatomyData"].get("task", {})
+ capture_preset = lib.get_capture_preset(
+ task_data.get("name"),
+ task_data.get("type"),
+ instance.data["subset"],
+ instance.context.data["project_settings"],
+ self.log
+ )
+
+ # Create temp directory for thumbnail
+ # - this is to avoid overwriting the source file
+ dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_thumbnail")
+ self.log.debug(
+ "Create temp directory {} for thumbnail".format(dst_staging)
+ )
+ # Store new staging to cleanup paths
+ filename = instance.name
+ path = os.path.join(dst_staging, filename)
+
+ self.log.debug("Outputting images to %s" % path)
+
+ preset = lib.generate_capture_preset(
+ instance, camera, path,
+ start=1, end=1,
+ capture_preset=capture_preset)
+
+ preset["camera_options"].update({
+ "displayGateMask": False,
+ "displayResolution": False,
+ "displayFilmGate": False,
+ "displayFieldChart": False,
+ "displaySafeAction": False,
+ "displaySafeTitle": False,
+ "displayFilmPivot": False,
+ "displayFilmOrigin": False,
+ "overscan": 1.0,
+ })
+ path = lib.render_capture_preset(preset)
+
+ playblast = self._fix_playblast_output_path(path)
+
+ _, thumbnail = os.path.split(playblast)
+
+ self.log.debug("file list {}".format(thumbnail))
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ "name": "thumbnail",
+ "ext": "jpg",
+ "files": thumbnail,
+ "stagingDir": dst_staging,
+ "thumbnail": True
+ }
+ instance.data["representations"].append(representation)
+
+ def _fix_playblast_output_path(self, filepath):
+ """Workaround a bug in maya.cmds.playblast to return correct filepath.
+
+ When the `viewer` argument is set to False and maya.cmds.playblast
+ does not automatically open the playblasted file, the returned
+ filepath does not have the file's extension added correctly.
+
+ To work around this we just glob.glob() for any file extensions and
+ assume the latest modified file is the correct file and return it.
+
+ """
+ # Catch cancelled playblast
+ if filepath is None:
+ self.log.warning("Playblast did not result in output path. "
+ "Playblast is probably interrupted.")
+ return None
+
+ # Fix: playblast not returning correct filename (with extension)
+ # Let's assume the most recently modified file is the correct one.
+ if not os.path.exists(filepath):
+ directory = os.path.dirname(filepath)
+ filename = os.path.basename(filepath)
+ # check if the filepath has a frame-based filename
+ # example: capture.####.png
+ parts = filename.split(".")
+ if len(parts) == 3:
+ query = os.path.join(directory, "{}.*.{}".format(parts[0],
+ parts[-1]))
+ files = glob.glob(query)
+ else:
+ files = glob.glob("{}.*".format(filepath))
+
+ if not files:
+ raise RuntimeError("Couldn't find playblast from: "
+ "{0}".format(filepath))
+ filepath = max(files, key=os.path.getmtime)
+
+ return filepath
diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
similarity index 97%
rename from openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
rename to client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
index 780ed2377c..8b88bfb9f8 100644
--- a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
@@ -5,8 +5,8 @@
 from maya import cmds # noqa
-from openpype.pipeline import publish
-from openpype.hosts.maya.api.lib import (
+from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.lib import (
 extract_alembic,
 suspended_refresh,
 maintained_selection
diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py
similarity index 97%
rename from openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py
rename to client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py
index 4b36134694..7b44c92194 100644
--- a/openpype/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_fbx.py
@@ -7,8 +7,8 @@
 import pyblish.api
-from openpype.pipeline import publish
-from openpype.hosts.maya.api import fbx
+from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api import fbx
 @contextmanager
diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_staticmesh.py
similarity index 93%
rename from openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py
rename to client/ayon_core/hosts/maya/plugins/publish/extract_unreal_staticmesh.py
index 26ab0827e4..9cf8a543f4 100644
--- a/openpype/hosts/maya/plugins/publish/extract_unreal_staticmesh.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_staticmesh.py
@@ -6,12 +6,12 @@
 import pyblish.api
-from openpype.pipeline import publish
-from openpype.hosts.maya.api.lib import (
+from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.lib import (
 parent_nodes,
 maintained_selection
 )
-from openpype.hosts.maya.api import fbx
+from ayon_core.hosts.maya.api import fbx
 class ExtractUnrealStaticMesh(publish.Extractor):
diff --git a/openpype/hosts/maya/plugins/publish/extract_unreal_yeticache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_yeticache.py
similarity index 97%
rename from openpype/hosts/maya/plugins/publish/extract_unreal_yeticache.py
rename to client/ayon_core/hosts/maya/plugins/publish/extract_unreal_yeticache.py
index 963285093e..9a264959d1 100644
--- a/openpype/hosts/maya/plugins/publish/extract_unreal_yeticache.py
+++ 
b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_yeticache.py @@ -2,7 +2,7 @@ from maya import cmds -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractYetiCache(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py b/client/ayon_core/hosts/maya/plugins/publish/extract_vrayproxy.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/extract_vrayproxy.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_vrayproxy.py index 21dfcfffc5..28c6e98c33 100644 --- a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_vrayproxy.py @@ -2,8 +2,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import maintained_selection class ExtractVRayProxy(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayscene.py b/client/ayon_core/hosts/maya/plugins/publish/extract_vrayscene.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/extract_vrayscene.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_vrayscene.py index b0615149a9..023a15e67a 100644 --- a/openpype/hosts/maya/plugins/publish/extract_vrayscene.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_vrayscene.py @@ -3,9 +3,9 @@ import os import re -from openpype.pipeline import publish -from openpype.hosts.maya.api.render_setup_tools import export_in_rs_layer -from openpype.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.render_setup_tools import export_in_rs_layer +from ayon_core.hosts.maya.api.lib import maintained_selection from maya import cmds diff --git a/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py index 4bd01c2df2..d8b352668a 100644 --- a/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py @@ -5,9 +5,9 @@ from maya import cmds import pyblish.api -from openpype.hosts.maya.api.lib import extract_alembic -from openpype.pipeline import publish -from openpype.lib import StringTemplate +from ayon_core.hosts.maya.api.lib import extract_alembic +from ayon_core.pipeline import publish +from ayon_core.lib import StringTemplate class ExtractWorkfileXgen(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/extract_xgen.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/extract_xgen.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_xgen.py index 8409330e49..ee864bd89b 100644 --- a/openpype/hosts/maya/plugins/publish/extract_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_xgen.py @@ -5,11 +5,11 @@ from maya import cmds import xgenm -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.lib import ( maintained_selection, attribute_values, write_xgen_file, delete_after ) -from openpype.lib import StringTemplate +from ayon_core.lib import 
StringTemplate class ExtractXgen(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_yeti_cache.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_yeti_cache.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_yeti_cache.py index b113e02219..b9cd7a1be5 100644 --- a/openpype/hosts/maya/plugins/publish/extract_yeti_cache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_yeti_cache.py @@ -3,7 +3,7 @@ from maya import cmds -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractYetiCache(publish.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/publish/extract_yeti_rig.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/extract_yeti_rig.py rename to client/ayon_core/hosts/maya/plugins/publish/extract_yeti_rig.py index da67cb911f..7387849736 100644 --- a/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_yeti_rig.py @@ -7,8 +7,8 @@ from maya import cmds -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib +from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api import lib @contextlib.contextmanager diff --git a/openpype/hosts/maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml b/client/ayon_core/hosts/maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml similarity index 100% rename from openpype/hosts/maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml rename to client/ayon_core/hosts/maya/plugins/publish/help/submit_maya_remote_publish_deadline.xml diff --git a/openpype/hosts/maya/plugins/publish/help/validate_maya_units.xml b/client/ayon_core/hosts/maya/plugins/publish/help/validate_maya_units.xml similarity index 100% rename from openpype/hosts/maya/plugins/publish/help/validate_maya_units.xml rename to client/ayon_core/hosts/maya/plugins/publish/help/validate_maya_units.xml diff --git a/openpype/hosts/maya/plugins/publish/help/validate_node_ids.xml b/client/ayon_core/hosts/maya/plugins/publish/help/validate_node_ids.xml similarity index 100% rename from openpype/hosts/maya/plugins/publish/help/validate_node_ids.xml rename to client/ayon_core/hosts/maya/plugins/publish/help/validate_node_ids.xml diff --git a/openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml b/client/ayon_core/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml similarity index 100% rename from openpype/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml rename to client/ayon_core/hosts/maya/plugins/publish/help/validate_skeletalmesh_hierarchy.xml diff --git a/openpype/hosts/maya/plugins/publish/increment_current_file_deadline.py b/client/ayon_core/hosts/maya/plugins/publish/increment_current_file_deadline.py similarity index 90% rename from openpype/hosts/maya/plugins/publish/increment_current_file_deadline.py rename to client/ayon_core/hosts/maya/plugins/publish/increment_current_file_deadline.py index b5d5847e9f..a9378df8e1 100644 --- a/openpype/hosts/maya/plugins/publish/increment_current_file_deadline.py +++ b/client/ayon_core/hosts/maya/plugins/publish/increment_current_file_deadline.py @@ -17,8 +17,8 @@ class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin): def process(self, context): from maya import cmds - from openpype.lib import version_up - 
from openpype.pipeline.publish import get_errored_plugins_from_context + from ayon_core.lib import version_up + from ayon_core.pipeline.publish import get_errored_plugins_from_context errored_plugins = get_errored_plugins_from_context(context) if any(plugin.__name__ == "MayaSubmitDeadline" diff --git a/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/reset_xgen_attributes.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py rename to client/ayon_core/hosts/maya/plugins/publish/reset_xgen_attributes.py diff --git a/client/ayon_core/hosts/maya/plugins/publish/save_scene.py b/client/ayon_core/hosts/maya/plugins/publish/save_scene.py new file mode 100644 index 0000000000..eb7c06a113 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/save_scene.py @@ -0,0 +1,35 @@ +import pyblish.api +from ayon_core.pipeline.workfile.lock_workfile import ( + is_workfile_lock_enabled, + remove_workfile_lock +) + + +class SaveCurrentScene(pyblish.api.ContextPlugin): + """Save current scene + + """ + + label = "Save current file" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["maya"] + families = ["renderlayer", "workfile"] + + def process(self, context): + import maya.cmds as cmds + + current = cmds.file(query=True, sceneName=True) + assert context.data['currentFile'] == current + + # If file has no modifications, skip forcing a file save + if not cmds.file(query=True, modified=True): + self.log.debug("Skipping file save as there " + "are no modifications..") + return + project_name = context.data["projectName"] + project_settings = context.data["project_settings"] + # remove lockfile before saving + if is_workfile_lock_enabled("maya", project_name, project_settings): + remove_workfile_lock(current) + self.log.info("Saving current file: {}".format(current)) + cmds.file(save=True, force=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_animated_reference.py b/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_animated_reference.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py index 4537892d6d..b7f115b38f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_animated_reference.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError, ValidateContentsOrder ) @@ -15,7 +15,7 @@ class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin): families = ["animation.fbx"] label = "Animated Reference Rig" accepted_controllers = ["transform", "locator"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): animated_sets = instance.data.get("animated_skeleton", []) diff --git a/openpype/hosts/maya/plugins/publish/validate_animation_content.py b/client/ayon_core/hosts/maya/plugins/publish/validate_animation_content.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_animation_content.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_animation_content.py index 99acdc7b8f..f33ee1a7e7 100644 --- 
a/openpype/hosts/maya/plugins/publish/validate_animation_content.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_animation_content.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError, ValidateContentsOrder ) @@ -18,7 +18,7 @@ class ValidateAnimationContent(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["animation"] label = "Animation Content" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @classmethod def get_invalid(cls, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py b/client/ayon_core/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py index 6f5f03ab39..0adb0a201c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py @@ -1,9 +1,9 @@ import maya.cmds as cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError @@ -25,7 +25,7 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): hosts = ['maya'] label = 'Animation Out Set Related Node Ids' actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py index 7055dc145e..92b4922492 100644 --- a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py index 8ce76c8d04..f50fa1ed41 100644 --- a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError, RepairAction ) diff --git 
a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py b/client/ayon_core/hosts/maya/plugins/publish/validate_ass_relative_paths.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_ass_relative_paths.py index 49913fa42b..669708d3a6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_ass_relative_paths.py @@ -5,7 +5,7 @@ from mtoa.core import createOptions import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError diff --git a/openpype/hosts/maya/plugins/publish/validate_assembly_name.py b/client/ayon_core/hosts/maya/plugins/publish/validate_assembly_name.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_assembly_name.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_assembly_name.py index 00588cd300..03fa0fd779 100644 --- a/openpype/hosts/maya/plugins/publish/validate_assembly_name.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_assembly_name.py @@ -1,7 +1,7 @@ import pyblish.api import maya.cmds as cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError ) @@ -15,7 +15,7 @@ class ValidateAssemblyName(pyblish.api.InstancePlugin): label = "Validate Assembly Name" order = pyblish.api.ValidatorOrder families = ["assembly"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] active = False @classmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_assembly_namespaces.py b/client/ayon_core/hosts/maya/plugins/publish/validate_assembly_namespaces.py similarity index 87% rename from openpype/hosts/maya/plugins/publish/validate_assembly_namespaces.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_assembly_namespaces.py index 06577f38f7..2d3d8e71ac 100644 --- a/openpype/hosts/maya/plugins/publish/validate_assembly_namespaces.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_assembly_namespaces.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError ) @@ -19,7 +19,7 @@ class ValidateAssemblyNamespaces(pyblish.api.InstancePlugin): label = "Validate Assembly Namespaces" order = pyblish.api.ValidatorOrder families = ["assembly"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py b/client/ayon_core/hosts/maya/plugins/publish/validate_assembly_transforms.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_assembly_transforms.py index a24455ebaa..5069feb4b6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_assembly_transforms.py @@ -1,8 +1,8 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from 
openpype.pipeline.publish import PublishValidationError, RepairAction
+import ayon_core.hosts.maya.api.action
+from ayon_core.pipeline.publish import PublishValidationError, RepairAction
 class ValidateAssemblyModelTransforms(pyblish.api.InstancePlugin):
@@ -27,7 +27,7 @@ class ValidateAssemblyModelTransforms(pyblish.api.InstancePlugin):
 order = pyblish.api.ValidatorOrder + 0.49
 label = "Assembly Model Transforms"
 families = ["assembly"]
- actions = [openpype.hosts.maya.api.action.SelectInvalidAction,
+ actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction,
 RepairAction]
 prompt_message = ("You are about to reset the matrix to the default values."
@@ -44,7 +44,7 @@ def process(self, instance):
 @classmethod
 def get_invalid(cls, instance):
- from openpype.hosts.maya.api import lib
+ from ayon_core.hosts.maya.api import lib
 # Get all transforms in the loaded containers
 container_roots = cmds.listRelatives(instance.data["nodesHierarchy"],
@@ -91,7 +91,7 @@ def repair(cls, instance):
 from qtpy import QtWidgets
- from openpype.hosts.maya.api import lib
+ from ayon_core.hosts.maya.api import lib
 # Store namespace in variable, cosmetics thingy
 choice = QtWidgets.QMessageBox.warning(
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/validate_attributes.py
new file mode 100644
index 0000000000..3dfe2f4f2d
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_attributes.py
@@ -0,0 +1,112 @@
+from collections import defaultdict
+
+import pyblish.api
+from maya import cmds
+
+from ayon_core.hosts.maya.api.lib import set_attribute
+from ayon_core.pipeline.publish import (
+ OptionalPyblishPluginMixin, PublishValidationError, RepairAction,
+ ValidateContentsOrder)
+
+
+class ValidateAttributes(pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin):
+ """Ensure attributes are consistent.
+
+ Attributes to validate and their values come from the
+ "maya/attributes.json" preset, which needs this structure:
+ {
+ "family": {
+ "node_name.attribute_name": attribute_value
+ }
+ }
+ """
+
+ order = ValidateContentsOrder
+ label = "Attributes"
+ hosts = ["maya"]
+ actions = [RepairAction]
+ optional = True
+
+ attributes = None
+
+ def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+
+ # Check for preset existence.
+ if not self.attributes:
+ return
+
+ invalid = self.get_invalid(instance, compute=True)
+ if invalid:
+ raise PublishValidationError(
+ "Found attributes with invalid values: {}".format(invalid)
+ )
+
+ @classmethod
+ def get_invalid(cls, instance, compute=False):
+ if compute:
+ return cls.get_invalid_attributes(instance)
+ else:
+ return instance.data.get("invalid_attributes", [])
+
+ @classmethod
+ def get_invalid_attributes(cls, instance):
+ invalid_attributes = []
+
+ # Filter families.
+ families = [instance.data["family"]]
+ families += instance.data.get("families", [])
+ families = set(families) & set(cls.attributes.keys())
+ if not families:
+ return []
+
+ # Get all attributes to validate.
+ attributes = defaultdict(dict)
+ for family in families:
+ if family not in cls.attributes:
+ # No attributes to validate for family
+ continue
+
+ for preset_attr, preset_value in cls.attributes[family].items():
+ node_name, attribute_name = preset_attr.split(".", 1)
+ attributes[node_name][attribute_name] = preset_value
+
+ if not attributes:
+ return []
+
+ # Get invalid attributes.
+ nodes = cmds.ls(long=True) + for node in nodes: + node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + if node_name not in attributes: + continue + + for attr_name, expected in attributes[node_name].items(): + + # Skip if attribute does not exist + if not cmds.attributeQuery(attr_name, node=node, exists=True): + continue + + plug = "{}.{}".format(node, attr_name) + value = cmds.getAttr(plug) + if value != expected: + invalid_attributes.append( + { + "attribute": plug, + "expected": expected, + "current": value + } + ) + + instance.data["invalid_attributes"] = invalid_attributes + return invalid_attributes + + @classmethod + def repair(cls, instance): + invalid = cls.get_invalid(instance) + for data in invalid: + node, attr = data["attribute"].split(".", 1) + value = data["expected"] + set_attribute(node=node, attribute=attr, value=value) diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/validate_camera_attributes.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_camera_attributes.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_camera_attributes.py index e5745612e9..5e940a48a9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_camera_attributes.py @@ -1,8 +1,8 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError, ValidateContentsOrder) @@ -19,7 +19,7 @@ class ValidateCameraAttributes(pyblish.api.InstancePlugin): families = ['camera'] hosts = ['maya'] label = 'Camera Attributes' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] DEFAULTS = [ ("filmFitOffset", 0.0), diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_camera_contents.py b/client/ayon_core/hosts/maya/plugins/publish/validate_camera_contents.py new file mode 100644 index 0000000000..7d4c4341fd --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_camera_contents.py @@ -0,0 +1,78 @@ +import pyblish.api +from maya import cmds + +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( + PublishValidationError, ValidateContentsOrder) + + +class ValidateCameraContents(pyblish.api.InstancePlugin): + """Validates Camera instance contents. + + A Camera instance may only hold a SINGLE camera's transform, nothing else. + + It may hold a "locator" as shape, but different shapes are down the + hierarchy. + + """ + + order = ValidateContentsOrder + families = ['camera'] + hosts = ['maya'] + label = 'Camera Contents' + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] + validate_shapes = True + + @classmethod + def get_invalid(cls, instance): + + # get cameras + members = instance.data['setMembers'] + shapes = cmds.ls(members, dag=True, shapes=True, long=True) + + # single camera + invalid = [] + cameras = cmds.ls(shapes, type='camera', long=True) + if len(cameras) != 1: + cls.log.error("Camera instance must have a single camera. 
" + "Found {0}: {1}".format(len(cameras), cameras)) + invalid.extend(cameras) + + # We need to check this edge case because returning an extended + # list when there are no actual cameras results in + # still an empty 'invalid' list + if len(cameras) < 1: + if members: + # If there are members in the instance return all of + # them as 'invalid' so the user can still select invalid + cls.log.error("No cameras found in instance " + "members: {}".format(members)) + return members + + raise PublishValidationError( + "No cameras found in empty instance.") + + if not cls.validate_shapes: + cls.log.debug("Not validating shapes in the camera content" + " because 'validate shapes' is disabled") + return invalid + + # non-camera shapes + valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True) + shapes = set(shapes) - set(valid_shapes) + if shapes: + shapes = list(shapes) + cls.log.error("Camera instance should only contain camera " + "shapes. Found: {0}".format(shapes)) + invalid.extend(shapes) + + invalid = list(set(invalid)) + return invalid + + def process(self, instance): + """Process all the nodes in the instance""" + + invalid = self.get_invalid(instance) + if invalid: + raise PublishValidationError("Invalid camera contents: " + "{0}".format(invalid)) diff --git a/openpype/hosts/maya/plugins/publish/validate_color_sets.py b/client/ayon_core/hosts/maya/plugins/publish/validate_color_sets.py similarity index 91% rename from openpype/hosts/maya/plugins/publish/validate_color_sets.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_color_sets.py index 173fee4179..e69717fad0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_color_sets.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_color_sets.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateMeshOrder, OptionalPyblishPluginMixin, PublishValidationError, @@ -24,7 +24,7 @@ class ValidateColorSets(pyblish.api.Validator, families = ['model'] label = 'Mesh ColorSets' actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] optional = True diff --git a/openpype/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py b/client/ayon_core/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py index f072e5e323..55c4973842 100644 --- a/openpype/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_current_renderlayer_renderable.py @@ -1,7 +1,7 @@ import pyblish.api from maya import cmds -from openpype.pipeline.publish import context_plugin_should_run +from ayon_core.pipeline.publish import context_plugin_should_run class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_cycle_error.py b/client/ayon_core/hosts/maya/plugins/publish/validate_cycle_error.py similarity index 83% rename from openpype/hosts/maya/plugins/publish/validate_cycle_error.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_cycle_error.py index 24da091246..f969ff533b 100644 --- 
a/openpype/hosts/maya/plugins/publish/validate_cycle_error.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_cycle_error.py @@ -1,9 +1,9 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib import maintained_selection -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api.lib import maintained_selection +from ayon_core.pipeline.publish import ( OptionalPyblishPluginMixin, PublishValidationError, ValidateContentsOrder) @@ -15,7 +15,7 @@ class ValidateCycleError(pyblish.api.InstancePlugin, label = "Cycle Errors" hosts = ["maya"] families = ["rig"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] optional = True def process(self, instance): diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_frame_range.py b/client/ayon_core/hosts/maya/plugins/publish/validate_frame_range.py new file mode 100644 index 0000000000..85cc606b25 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_frame_range.py @@ -0,0 +1,204 @@ +import pyblish.api + +from maya import cmds +from ayon_core.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, + PublishValidationError, + OptionalPyblishPluginMixin +) +from ayon_core.hosts.maya.api.lib_rendersetup import ( + get_attr_overrides, + get_attr_in_layer, +) +from maya.app.renderSetup.model.override import AbsOverride + + +class ValidateFrameRange(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): + """Validates the frame ranges. + + This is an optional validator checking if the frame range on instance + matches the frame range specified for the asset. + + It also validates render frame ranges of render layers. + + Repair action will change everything to match the asset frame range. + + This can be turned off by the artist to allow custom ranges. + """ + + label = "Validate Frame Range" + order = ValidateContentsOrder + families = ["animation", + "pointcache", + "camera", + "proxyAbc", + "renderlayer", + "review", + "yeticache"] + optional = True + actions = [RepairAction] + exclude_families = [] + + def process(self, instance): + if not self.is_active(instance.data): + return + + context = instance.context + if instance.data.get("tileRendering"): + self.log.debug( + "Skipping frame range validation because " + "tile rendering is enabled." 
+ ) + return + + frame_start_handle = int(context.data.get("frameStartHandle")) + frame_end_handle = int(context.data.get("frameEndHandle")) + handle_start = int(context.data.get("handleStart")) + handle_end = int(context.data.get("handleEnd")) + frame_start = int(context.data.get("frameStart")) + frame_end = int(context.data.get("frameEnd")) + + inst_start = int(instance.data.get("frameStartHandle")) + inst_end = int(instance.data.get("frameEndHandle")) + inst_frame_start = int(instance.data.get("frameStart")) + inst_frame_end = int(instance.data.get("frameEnd")) + inst_handle_start = int(instance.data.get("handleStart")) + inst_handle_end = int(instance.data.get("handleEnd")) + + # basic sanity checks + assert frame_start_handle <= frame_end_handle, ( + "start frame is higher than end frame") + + # compare with data on instance + errors = [] + if any(instance.data["family"] in ef + for ef in self.exclude_families): + return + if inst_start != frame_start_handle: + errors.append("Instance start frame [ {} ] doesn't " + "match the one set on asset [ {} ]: " + "{}/{}/{}/{} (handle/start/end/handle)".format( + inst_start, + frame_start_handle, + handle_start, frame_start, frame_end, handle_end + )) + + if inst_end != frame_end_handle: + errors.append("Instance end frame [ {} ] doesn't " + "match the one set on asset [ {} ]: " + "{}/{}/{}/{} (handle/start/end/handle)".format( + inst_end, + frame_end_handle, + handle_start, frame_start, frame_end, handle_end + )) + + checks = { + "frame start": (frame_start, inst_frame_start), + "frame end": (frame_end, inst_frame_end), + "handle start": (handle_start, inst_handle_start), + "handle end": (handle_end, inst_handle_end) + } + for label, values in checks.items(): + if values[0] != values[1]: + errors.append( + "{} on instance ({}) does not match with the asset " + "({}).".format(label.title(), values[1], values[0]) + ) + + if errors: + report = "Frame range settings are incorrect.\n\n" + for error in errors: + report += "- {}\n\n".format(error) + + raise PublishValidationError(report, title="Frame Range incorrect") + + @classmethod + def repair(cls, instance): + """ + Repair instance container to match asset data.
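A note on the arithmetic behind the checks above: "frameStartHandle" and "frameEndHandle" are the plain frame range extended by the handles on either side. A minimal sketch, assuming the usual AYON handle convention (the helper below is illustrative only and not part of the patch):

    # Sketch of the handle-inclusive range convention assumed above.
    def handle_inclusive_range(frame_start, frame_end, handle_start, handle_end):
        """Return (frameStartHandle, frameEndHandle) for a frame range."""
        return frame_start - handle_start, frame_end + handle_end

    # Example: a 1001-1100 shot with 10-frame handles spans 991-1110.
    assert handle_inclusive_range(1001, 1100, 10, 10) == (991, 1110)

The validator compares both the handle-inclusive values and the four components individually, so a mismatch in either direction is reported.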
+ """ + + if "renderlayer" in instance.data.get("families"): + # Special behavior for renderlayers + cls.repair_renderlayer(instance) + return + + node = instance.data["name"] + context = instance.context + + frame_start_handle = int(context.data.get("frameStartHandle")) + frame_end_handle = int(context.data.get("frameEndHandle")) + handle_start = int(context.data.get("handleStart")) + handle_end = int(context.data.get("handleEnd")) + frame_start = int(context.data.get("frameStart")) + frame_end = int(context.data.get("frameEnd")) + + # Start + if cmds.attributeQuery("handleStart", node=node, exists=True): + cmds.setAttr("{}.handleStart".format(node), handle_start) + cmds.setAttr("{}.frameStart".format(node), frame_start) + else: + # Include start handle in frame start if no separate handleStart + # attribute exists on the node + cmds.setAttr("{}.frameStart".format(node), frame_start_handle) + + # End + if cmds.attributeQuery("handleEnd", node=node, exists=True): + cmds.setAttr("{}.handleEnd".format(node), handle_end) + cmds.setAttr("{}.frameEnd".format(node), frame_end) + else: + # Include end handle in frame end if no separate handleEnd + # attribute exists on the node + cmds.setAttr("{}.frameEnd".format(node), frame_end_handle) + + @classmethod + def repair_renderlayer(cls, instance): + """Apply frame range in render settings""" + + layer = instance.data["renderlayer"] + context = instance.context + + start_attr = "defaultRenderGlobals.startFrame" + end_attr = "defaultRenderGlobals.endFrame" + + frame_start_handle = int(context.data.get("frameStartHandle")) + frame_end_handle = int(context.data.get("frameEndHandle")) + + cls._set_attr_in_layer(start_attr, layer, frame_start_handle) + cls._set_attr_in_layer(end_attr, layer, frame_end_handle) + + @classmethod + def _set_attr_in_layer(cls, node_attr, layer, value): + + if get_attr_in_layer(node_attr, layer=layer) == value: + # Already ok. This can happen if you have multiple renderlayers + # validated and there are no frame range overrides. 
The first + # layer's repair would have fixed the global value already + return + + overrides = list(get_attr_overrides(node_attr, layer=layer)) + if overrides: + # We set the last absolute override if it is an absolute override + # otherwise we'll add an Absolute override + last_override = overrides[-1][1] + if not isinstance(last_override, AbsOverride): + collection = last_override.parent() + node, attr = node_attr.split(".", 1) + last_override = collection.createAbsoluteOverride(node, attr) + + cls.log.debug("Setting {attr} absolute override in " + "layer '{layer}': {value}".format(layer=layer, + attr=node_attr, + value=value)) + cmds.setAttr(last_override.name() + ".attrValue", value) + + else: + # Set the attribute directly + # (Note that this will set the global attribute) + cls.log.debug("Setting global {attr}: {value}".format( + attr=node_attr, + value=value + )) + cmds.setAttr(node_attr, value) diff --git a/openpype/hosts/maya/plugins/publish/validate_glsl_material.py b/client/ayon_core/hosts/maya/plugins/publish/validate_glsl_material.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_glsl_material.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_glsl_material.py index 3b386c3def..e610a8118c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_glsl_material.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_glsl_material.py @@ -2,11 +2,11 @@ from maya import cmds import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder ) -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateGLSLMaterial(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_glsl_plugin.py b/client/ayon_core/hosts/maya/plugins/publish/validate_glsl_plugin.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_glsl_plugin.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_glsl_plugin.py index da065fcf94..e155315e4f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_glsl_plugin.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_glsl_plugin.py @@ -2,7 +2,7 @@ from maya import cmds import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_instance_has_members.py b/client/ayon_core/hosts/maya/plugins/publish/validate_instance_has_members.py new file mode 100644 index 0000000000..5a530236db --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_instance_has_members.py @@ -0,0 +1,39 @@ +import pyblish.api +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError +) + + +class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): + """Validates instance objectSet has *any* members.""" + + order = ValidateContentsOrder + hosts = ["maya"] + label = 'Instance has members' + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] + + @classmethod + def get_invalid(cls, instance): + invalid = list() + if not instance.data.get("setMembers"): + objectset_name = instance.data['name'] + invalid.append(objectset_name) + + return invalid + + def process(self, instance): + # Allow renderlayer, rendersetup and workfile to be empty + skip_families = {"workfile", 
"renderlayer", "rendersetup"} + if instance.data.get("family") in skip_families: + return + + invalid = self.get_invalid(instance) + if invalid: + # Invalid will always be a single entry, we log the single name + name = invalid[0] + raise PublishValidationError( + title="Empty instance", + message="Instance '{0}' is empty".format(name) + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_in_context.py b/client/ayon_core/hosts/maya/plugins/publish/validate_instance_in_context.py similarity index 86% rename from openpype/hosts/maya/plugins/publish/validate_instance_in_context.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_instance_in_context.py index 4222e63898..c683c1b30f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_in_context.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_instance_in_context.py @@ -3,9 +3,8 @@ from __future__ import absolute_import import pyblish.api -from openpype import AYON_SERVER_ENABLED -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError, @@ -31,7 +30,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin, optional = True hosts = ["maya"] actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] def process(self, instance): @@ -67,12 +66,8 @@ def get_invalid(cls, instance): def repair(cls, instance): context_asset = cls.get_context_asset(instance) instance_node = instance.data["instance_node"] - if AYON_SERVER_ENABLED: - asset_name_attr = "folderPath" - else: - asset_name_attr = "asset" cmds.setAttr( - "{}.{}".format(instance_node, asset_name_attr), + "{}.folderPath".format(instance_node), context_asset, type="string" ) diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_subset.py b/client/ayon_core/hosts/maya/plugins/publish/validate_instance_subset.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/validate_instance_subset.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_instance_subset.py index 69e16efe57..4229cfeb55 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_subset.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_instance_subset.py @@ -2,7 +2,7 @@ import string import six -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) diff --git a/openpype/hosts/maya/plugins/publish/validate_instancer_content.py b/client/ayon_core/hosts/maya/plugins/publish/validate_instancer_content.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_instancer_content.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_instancer_content.py index 236adfb03d..5f57b31868 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instancer_content.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_instancer_content.py @@ -1,8 +1,8 @@ import maya.cmds as cmds import pyblish.api -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import PublishValidationError +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import PublishValidationError class ValidateInstancerContent(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_instancer_frame_ranges.py 
b/client/ayon_core/hosts/maya/plugins/publish/validate_instancer_frame_ranges.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_instancer_frame_ranges.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_instancer_frame_ranges.py index 714c6229d6..be6724d7e9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instancer_frame_ranges.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_instancer_frame_ranges.py @@ -3,7 +3,7 @@ import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError def is_cache_resource(resource): diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_loaded_plugin.py b/client/ayon_core/hosts/maya/plugins/publish/validate_loaded_plugin.py new file mode 100644 index 0000000000..54a3e16111 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_loaded_plugin.py @@ -0,0 +1,51 @@ +import os +import pyblish.api +import maya.cmds as cmds + +from ayon_core.pipeline.publish import ( + RepairContextAction, + PublishValidationError +) + + +class ValidateLoadedPlugin(pyblish.api.ContextPlugin): + """Ensure there are no unauthorized loaded plugins""" + + label = "Loaded Plugin" + order = pyblish.api.ValidatorOrder + hosts = ["maya"] + actions = [RepairContextAction] + + @classmethod + def get_invalid(cls, context): + + invalid = [] + loaded_plugins = cmds.pluginInfo(query=True, listPlugins=True) + # allow lists coming from the project settings + whitelist_native_plugins = cls.whitelist_native_plugins + authorized_plugins = cls.authorized_plugins or [] + + for plugin in loaded_plugins: + if not whitelist_native_plugins and os.getenv('MAYA_LOCATION') \ + in cmds.pluginInfo(plugin, query=True, path=True): + continue + if plugin not in authorized_plugins: + invalid.append(plugin) + + return invalid + + def process(self, context): + + invalid = self.get_invalid(context) + if invalid: + raise PublishValidationError( + "Found forbidden plugins: {}".format(", ".join(invalid)) + ) + + @classmethod + def repair(cls, context): + """Unload forbidden plugins""" + + for plugin in cls.get_invalid(context): + cmds.pluginInfo(plugin, edit=True, autoload=False) + cmds.unloadPlugin(plugin, force=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_look_contents.py b/client/ayon_core/hosts/maya/plugins/publish/validate_look_contents.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_look_contents.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_look_contents.py index 433d997840..a8d8ec373a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_contents.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_look_contents.py @@ -1,6 +1,6 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError, ValidateContentsOrder ) @@ -27,7 +27,7 @@ class ValidateLookContents(pyblish.api.InstancePlugin): families = ['look'] hosts = ['maya'] label = 'Look Data Contents' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): """Process all the nodes in the instance""" diff --git a/openpype/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py
b/client/ayon_core/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py index c3edf5f1c3..d8a9222c36 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_look_default_shaders_connections.py @@ -1,7 +1,7 @@ from maya import cmds import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairContextAction, PublishValidationError diff --git a/openpype/hosts/maya/plugins/publish/validate_look_id_reference_edits.py b/client/ayon_core/hosts/maya/plugins/publish/validate_look_id_reference_edits.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_look_id_reference_edits.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_look_id_reference_edits.py index 5075d4050d..1d313bdae4 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_id_reference_edits.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_look_id_reference_edits.py @@ -2,8 +2,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError @@ -24,7 +24,7 @@ class ValidateLookIdReferenceEdits(pyblish.api.InstancePlugin): families = ['look'] hosts = ['maya'] label = 'Look Id Reference Edits' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_look_no_default_shaders.py b/client/ayon_core/hosts/maya/plugins/publish/validate_look_no_default_shaders.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_look_no_default_shaders.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_look_no_default_shaders.py index 231331411b..3a67bbd72b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_no_default_shaders.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_look_no_default_shaders.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) @@ -30,7 +30,7 @@ class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): families = ['look'] hosts = ['maya'] label = 'Look No Default Shaders' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] DEFAULT_SHADERS = {"lambert1", "initialShadingGroup", "initialParticleSE", "particleCloud1"} diff --git a/openpype/hosts/maya/plugins/publish/validate_look_sets.py b/client/ayon_core/hosts/maya/plugins/publish/validate_look_sets.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_look_sets.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_look_sets.py index 657bab0479..dd7515c1fb 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_sets.py +++ 
b/client/ayon_core/hosts/maya/plugins/publish/validate_look_sets.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) @@ -44,7 +44,7 @@ class ValidateLookSets(pyblish.api.InstancePlugin): families = ['look'] hosts = ['maya'] label = 'Look Sets' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): """Process all the nodes in the instance""" diff --git a/openpype/hosts/maya/plugins/publish/validate_look_shading_group.py b/client/ayon_core/hosts/maya/plugins/publish/validate_look_shading_group.py similarity index 91% rename from openpype/hosts/maya/plugins/publish/validate_look_shading_group.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_look_shading_group.py index dbe7a70e6a..656b91216b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_shading_group.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_look_shading_group.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError @@ -20,7 +20,7 @@ class ValidateShadingEngine(pyblish.api.InstancePlugin): hosts = ["maya"] label = "Look Shading Engine Naming" actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] # The default connections to check diff --git a/openpype/hosts/maya/plugins/publish/validate_look_single_shader.py b/client/ayon_core/hosts/maya/plugins/publish/validate_look_single_shader.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_look_single_shader.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_look_single_shader.py index acd761a944..c0ffaaf9c0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_single_shader.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_look_single_shader.py @@ -1,8 +1,8 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError, ValidateContentsOrder) @@ -17,7 +17,7 @@ class ValidateSingleShader(pyblish.api.InstancePlugin): families = ['look'] hosts = ['maya'] label = 'Look Single Shader Per Shape' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] # The default connections to check def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/client/ayon_core/hosts/maya/plugins/publish/validate_maya_units.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_maya_units.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_maya_units.py index ae6dc093a9..eca27d95da 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_maya_units.py @@ -2,9 +2,9 @@ import pyblish.api -import openpype.hosts.maya.api.lib as 
mayalib -from openpype.pipeline.context_tools import get_current_project_asset -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.lib as mayalib +from ayon_core.pipeline.context_tools import get_current_project_asset +from ayon_core.pipeline.publish import ( RepairContextAction, ValidateSceneOrder, PublishXmlValidationError diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py index bde78a98b8..e9d9a17ff4 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py @@ -1,15 +1,15 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api.lib import ( maintained_selection, delete_after, undo_chunk, get_attribute, set_attribute ) -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( OptionalPyblishPluginMixin, RepairAction, ValidateMeshOrder, @@ -30,7 +30,7 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin, families = ["model"] label = "Mesh Arnold Attributes" actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_empty.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_empty.py similarity index 89% rename from openpype/hosts/maya/plugins/publish/validate_mesh_empty.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_empty.py index c3264f3d98..934cbae327 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_empty.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_empty.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateMeshOrder, PublishValidationError @@ -21,7 +21,7 @@ class ValidateMeshEmpty(pyblish.api.InstancePlugin): families = ["model"] label = "Mesh Empty" actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] @classmethod diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_has_uv.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_has_uv.py new file mode 100644 index 0000000000..633fc29732 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_has_uv.py @@ -0,0 +1,89 @@ +from maya import cmds + +import pyblish.api +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( + ValidateMeshOrder, + OptionalPyblishPluginMixin, + PublishValidationError +) +from ayon_core.hosts.maya.api.lib import len_flattened + + +class ValidateMeshHasUVs(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): + """Validate the current mesh has UVs. + + It validates whether the current UV set has non-zero UVs and + at least as many UVs as there are vertices.
It's not really bulletproof, + but a simple quick validation to check if there are likely + UVs for every face. + """ + + order = ValidateMeshOrder + hosts = ['maya'] + families = ['model'] + label = 'Mesh Has UVs' + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] + optional = True + + @classmethod + def get_invalid(cls, instance): + invalid = [] + + for node in cmds.ls(instance, type='mesh'): + num_vertices = cmds.polyEvaluate(node, vertex=True) + + if num_vertices == 0: + cls.log.warning( + "Skipping \"{}\", cause it does not have any " + "vertices.".format(node) + ) + continue + + uv = cmds.polyEvaluate(node, uv=True) + + if uv == 0: + invalid.append(node) + continue + + vertex = cmds.polyEvaluate(node, vertex=True) + if uv < vertex: + # Workaround: + # Maya can have instanced UVs in a single mesh, for example + # imported from an Alembic. With instanced UVs the UV count + # from `maya.cmds.polyEvaluate(uv=True)` will only result in + # the unique UV count instead of for all vertices. + # + # Note: Maya can save instanced UVs to `mayaAscii` but cannot + # load this as instanced. So saving, opening and saving + # again will lose this information. + map_attr = "{}.map[*]".format(node) + uv_to_vertex = cmds.polyListComponentConversion(map_attr, + toVertex=True) + uv_vertex_count = len_flattened(uv_to_vertex) + if uv_vertex_count < vertex: + invalid.append(node) + else: + cls.log.warning("Node has instanced UV points: " + "{0}".format(node)) + + return invalid + + def process(self, instance): + if not self.is_active(instance.data): + return + + invalid = self.get_invalid(instance) + if invalid: + + names = "
".join( + " - {}".format(node) for node in invalid + ) + + raise PublishValidationError( + title="Mesh has missing UVs", + message="Model meshes are required to have UVs.

" + "Meshes detected with invalid or missing UVs:
" + "{0}".format(names) + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py similarity index 83% rename from openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py index f120361583..e76553629f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateMeshOrder +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ValidateMeshOrder class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin): @@ -16,7 +16,7 @@ class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['model'] label = 'Mesh Lamina Faces' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_ngons.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_ngons.py similarity index 84% rename from openpype/hosts/maya/plugins/publish/validate_mesh_ngons.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_ngons.py index 5b67db3307..f8dfe65b32 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_ngons.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_ngons.py @@ -1,9 +1,9 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ValidateContentsOrder +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ValidateContentsOrder class ValidateMeshNgons(pyblish.api.Validator): @@ -20,7 +20,7 @@ class ValidateMeshNgons(pyblish.api.Validator): hosts = ["maya"] families = ["model"] label = "Mesh ngons" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py new file mode 100644 index 0000000000..0e9147d978 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py @@ -0,0 +1,64 @@ +from maya import cmds + +import pyblish.api +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( + ValidateMeshOrder, + PublishValidationError +) + + +def _as_report_list(values, prefix="- ", suffix="\n"): + """Return list as bullet point list for a report""" + if not values: + return "" + return prefix + (suffix + prefix).join(values) + + +class ValidateMeshNoNegativeScale(pyblish.api.Validator): + """Ensure that meshes don't have a negative scale. + + Using negatively scaled proxies in a VRayMesh results in inverted + normals. As such we want to avoid this. + + We also avoid this on the rig or model because these are often the + previous steps for those that are cached to proxies so we can catch this + issue early. 
+ + """ + + order = ValidateMeshOrder + hosts = ['maya'] + families = ['model'] + label = 'Mesh No Negative Scale' + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] + + @staticmethod + def get_invalid(instance): + meshes = cmds.ls(instance, + type='mesh', + long=True, + noIntermediate=True) + + invalid = [] + for mesh in meshes: + transform = cmds.listRelatives(mesh, parent=True, fullPath=True)[0] + scale = cmds.getAttr("{0}.scale".format(transform))[0] + + if any(x < 0 for x in scale): + invalid.append(mesh) + + return invalid + + def process(self, instance): + """Process all the nodes in the instance 'objectSet'""" + + invalid = self.get_invalid(instance) + + if invalid: + raise PublishValidationError( + "Meshes found with negative scale:\n\n{0}".format( + _as_report_list(sorted(invalid)) + ), + title="Negative scale" + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_non_manifold.py similarity index 90% rename from openpype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_non_manifold.py index 6fd63fb29f..1c7ea10a50 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_non_manifold.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateMeshOrder, PublishValidationError ) @@ -27,7 +27,7 @@ class ValidateMeshNonManifold(pyblish.api.Validator): hosts = ['maya'] families = ['model'] label = 'Mesh Non-Manifold Edges/Vertices' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py similarity index 91% rename from openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py index 5ec6e5779b..0a8d6cf159 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py @@ -1,9 +1,9 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( ValidateMeshOrder, OptionalPyblishPluginMixin, PublishValidationError @@ -25,7 +25,7 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin, families = ['model'] hosts = ['maya'] label = 'Mesh Edge Length Non Zero' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] optional = True __tolerance = 1e-5 diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py index 
7855e79119..1790a94580 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py @@ -2,8 +2,8 @@ import maya.api.OpenMaya as om2 import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateMeshOrder, OptionalPyblishPluginMixin, @@ -31,7 +31,7 @@ class ValidateMeshNormalsUnlocked(pyblish.api.Validator, hosts = ['maya'] families = ['model'] label = 'Mesh Normals Unlocked' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] optional = True diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py index 88e1507dd3..a13d16e849 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py @@ -5,8 +5,8 @@ import maya.api.OpenMaya as om import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateMeshOrder, OptionalPyblishPluginMixin, PublishValidationError @@ -248,7 +248,7 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin, hosts = ['maya'] families = ['model'] label = 'Mesh Has Overlapping UVs' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] optional = True @classmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_shader_connections.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_shader_connections.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_mesh_shader_connections.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_shader_connections.py index 1db7613999..d55b58cd0d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_shader_connections.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_shader_connections.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateMeshOrder, PublishValidationError @@ -94,7 +94,7 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['model'] label = "Mesh Shader Connections" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py similarity index 91% rename from openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py index 46364735b9..8dbd0ca264 100644 --- 
a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py @@ -1,9 +1,9 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( RepairAction, ValidateMeshOrder, OptionalPyblishPluginMixin @@ -25,7 +25,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin, families = ['model', 'pointcache'] optional = True label = "Mesh Single UV Set" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py index 116fecbcba..c7f405b0cf 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateMeshOrder, OptionalPyblishPluginMixin @@ -25,7 +25,7 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin, families = ['model'] optional = True label = "Mesh has map1 UV Set" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py index 7167859444..10b5d77cf3 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py @@ -1,9 +1,9 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib import len_flattened -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api.lib import len_flattened +from ayon_core.pipeline.publish import ( PublishValidationError, RepairAction, ValidateMeshOrder) @@ -30,7 +30,7 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['model'] label = 'Mesh Vertices Have Edges' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] @classmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_model_content.py b/client/ayon_core/hosts/maya/plugins/publish/validate_model_content.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_model_content.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_model_content.py index 19373efad9..8cc2675dc7 100644 
--- a/openpype/hosts/maya/plugins/publish/validate_model_content.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_model_content.py @@ -1,9 +1,9 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) @@ -21,7 +21,7 @@ class ValidateModelContent(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["model"] label = "Model Content" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] validate_top_group = True diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/client/ayon_core/hosts/maya/plugins/publish/validate_model_name.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_model_name.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_model_name.py index 11f59bb439..7812877fd3 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_name.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_model_name.py @@ -8,12 +8,12 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.client.mongo import OpenPypeMongoConnection -from openpype.hosts.maya.api.shader_definition_editor import ( +import ayon_core.hosts.maya.api.action +from ayon_core.client.mongo import OpenPypeMongoConnection +from ayon_core.hosts.maya.api.shader_definition_editor import ( DEFINITION_FILENAME) -from openpype.pipeline import legacy_io -from openpype.pipeline.publish import ( +from ayon_core.pipeline import legacy_io +from ayon_core.pipeline.publish import ( OptionalPyblishPluginMixin, PublishValidationError, ValidateContentsOrder) @@ -31,7 +31,7 @@ class ValidateModelName(pyblish.api.InstancePlugin, hosts = ["maya"] families = ["model"] label = "Model Name" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] material_file = None database_file = DEFINITION_FILENAME diff --git a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py b/client/ayon_core/hosts/maya/plugins/publish/validate_mvlook_contents.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_mvlook_contents.py index ad0fcafc56..8c2bbf2a36 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_mvlook_contents.py @@ -1,7 +1,7 @@ import os import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, OptionalPyblishPluginMixin, PublishValidationError @@ -18,7 +18,7 @@ class ValidateMvLookContents(pyblish.api.InstancePlugin, families = ['mvLook'] hosts = ['maya'] label = 'Validate mvLook Data' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] # Allow this validation step to be skipped when you just need to # get things pushed through. 
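Several validators in this patch (frame range, mesh has UVs, color sets, and others) mix in OptionalPyblishPluginMixin and begin process() with an is_active() early-out so artists can toggle them per instance. The mixin itself lives in ayon_core.pipeline.publish and is not part of this diff; a rough stand-in for the contract, with assumed attribute names, might be:

    class OptionalMixinSketch(object):
        """Stand-in for OptionalPyblishPluginMixin (assumed behavior)."""
        enabled = True
        optional = True

        def is_active(self, instance_data):
            # Disabled plugins never run; non-optional plugins always run.
            if not self.enabled:
                return False
            if not self.optional:
                return True
            # Assumed storage: per-instance publish attributes keyed by
            # the plugin class name, with an "active" toggle.
            attr_values = instance_data.get("publish_attributes", {})
            return attr_values.get(type(self).__name__, {}).get("active", True)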
diff --git a/openpype/hosts/maya/plugins/publish/validate_no_animation.py b/client/ayon_core/hosts/maya/plugins/publish/validate_no_animation.py similarity index 91% rename from openpype/hosts/maya/plugins/publish/validate_no_animation.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_no_animation.py index 9ff189cf83..6e0719628f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_animation.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_no_animation.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, OptionalPyblishPluginMixin, PublishValidationError @@ -31,7 +31,7 @@ class ValidateNoAnimation(pyblish.api.Validator, hosts = ["maya"] families = ["model"] optional = True - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): if not self.is_active(instance.data): diff --git a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py b/client/ayon_core/hosts/maya/plugins/publish/validate_no_default_camera.py similarity index 90% rename from openpype/hosts/maya/plugins/publish/validate_no_default_camera.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_no_default_camera.py index f0aa9261f7..9977562ca3 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_no_default_camera.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) @@ -27,7 +27,7 @@ class ValidateNoDefaultCameras(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['camera'] label = "No Default Cameras" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py b/client/ayon_core/hosts/maya/plugins/publish/validate_no_namespace.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_no_namespace.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_no_namespace.py index 13eeae5859..b9b8aa2708 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_no_namespace.py @@ -1,13 +1,13 @@ import maya.cmds as cmds import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError ) -import openpype.hosts.maya.api.action +import ayon_core.hosts.maya.api.action def _as_report_list(values, prefix="- ", suffix="\n"): @@ -31,7 +31,7 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['model'] label = 'No Namespaces' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py b/client/ayon_core/hosts/maya/plugins/publish/validate_no_null_transforms.py similarity index 94% rename from 
openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_no_null_transforms.py index 187135fdf3..9899768dc0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_no_null_transforms.py @@ -1,8 +1,8 @@ import maya.cmds as cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError @@ -53,7 +53,7 @@ class ValidateNoNullTransforms(pyblish.api.InstancePlugin): families = ['model'] label = 'No Empty/Null Transforms' actions = [RepairAction, - openpype.hosts.maya.api.action.SelectInvalidAction] + ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py b/client/ayon_core/hosts/maya/plugins/publish/validate_no_unknown_nodes.py similarity index 90% rename from openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_no_unknown_nodes.py index 6ae634be24..5cc3e95593 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_no_unknown_nodes.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, OptionalPyblishPluginMixin, PublishValidationError @@ -33,7 +33,7 @@ class ValidateNoUnknownNodes(pyblish.api.InstancePlugin, families = ['model', 'rig'] optional = True label = "Unknown Nodes" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_no_vraymesh.py b/client/ayon_core/hosts/maya/plugins/publish/validate_no_vraymesh.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_no_vraymesh.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_no_vraymesh.py index 22fd1edc29..be8296a820 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_vraymesh.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_no_vraymesh.py @@ -1,6 +1,6 @@ import pyblish.api from maya import cmds -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError def _as_report_list(values, prefix="- ", suffix="\n"): diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids.py b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids.py similarity index 85% rename from openpype/hosts/maya/plugins/publish/validate_node_ids.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_node_ids.py index 0c7d647014..f40db988c6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids.py @@ -1,11 +1,11 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidatePipelineOrder, PublishXmlValidationError ) -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib +import ayon_core.hosts.maya.api.action 
+from ayon_core.hosts.maya.api import lib class ValidateNodeIDs(pyblish.api.InstancePlugin): @@ -28,8 +28,8 @@ class ValidateNodeIDs(pyblish.api.InstancePlugin): "yetiRig", "assembly"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] def process(self, instance): """Process all meshes""" diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py index 643c970463..912311cc8d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py @@ -1,9 +1,9 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( PublishValidationError, RepairAction, ValidateContentsOrder) @@ -22,7 +22,7 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): hosts = ['maya'] label = 'Deformed shape ids' actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_in_database.py similarity index 81% rename from openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_in_database.py index f15aa2efa8..bf12def5e9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_in_database.py @@ -1,10 +1,10 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.client import get_assets -from openpype.hosts.maya.api import lib -from openpype.pipeline import legacy_io -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.client import get_assets +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline import legacy_io +from ayon_core.pipeline.publish import ( PublishValidationError, ValidatePipelineOrder) @@ -24,8 +24,8 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): hosts = ['maya'] families = ["*"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] def process(self, instance): invalid = self.get_invalid(instance) @@ -44,7 +44,7 @@ def get_invalid(cls, instance): nodes=instance[:]) # check ids against database ids - project_name = legacy_io.active_project() + project_name = instance.context.data["projectName"] asset_docs = get_assets(project_name, fields=["_id"]) db_asset_ids = { str(asset_doc["_id"]) diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py 
b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_related.py similarity index 84% rename from openpype/hosts/maya/plugins/publish/validate_node_ids_related.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_related.py index 52e706fec9..b2db535fa6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_related.py @@ -1,8 +1,8 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( OptionalPyblishPluginMixin, PublishValidationError, ValidatePipelineOrder) @@ -20,8 +20,8 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin, "rig"] optional = True - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] def process(self, instance): """Process all nodes in instance (including hierarchy)""" diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_unique.py similarity index 88% rename from openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_unique.py index 61386fc939..eeede82caf 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_node_ids_unique.py @@ -1,12 +1,12 @@ from collections import defaultdict import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidatePipelineOrder, PublishValidationError ) -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib class ValidateNodeIdsUnique(pyblish.api.InstancePlugin): @@ -23,8 +23,8 @@ class ValidateNodeIdsUnique(pyblish.api.InstancePlugin): "rig", "yetiRig"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, - openpype.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.GenerateUUIDsOnInvalidAction] def process(self, instance): """Process all meshes""" diff --git a/openpype/hosts/maya/plugins/publish/validate_node_no_ghosting.py b/client/ayon_core/hosts/maya/plugins/publish/validate_node_no_ghosting.py similarity index 90% rename from openpype/hosts/maya/plugins/publish/validate_node_no_ghosting.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_node_no_ghosting.py index 0f608dab2c..297618fd4f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_no_ghosting.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_node_no_ghosting.py @@ -2,8 +2,8 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ValidateContentsOrder class ValidateNodeNoGhosting(pyblish.api.InstancePlugin): @@ -22,7 +22,7 @@ class ValidateNodeNoGhosting(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['model', 'rig'] label = "No Ghosting" - actions = 
[openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] _attributes = {'ghosting': 0} diff --git a/openpype/hosts/maya/plugins/publish/validate_plugin_path_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/validate_plugin_path_attributes.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_plugin_path_attributes.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_plugin_path_attributes.py index cb5c68e4ab..d672be6fa0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_plugin_path_attributes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_plugin_path_attributes.py @@ -4,9 +4,9 @@ import pyblish.api -from openpype.hosts.maya.api.lib import pairwise -from openpype.hosts.maya.api.action import SelectInvalidAction -from openpype.pipeline.publish import ( +from ayon_core.hosts.maya.api.lib import pairwise +from ayon_core.hosts.maya.api.action import SelectInvalidAction +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) diff --git a/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py b/client/ayon_core/hosts/maya/plugins/publish/validate_render_image_rule.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_render_image_rule.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_render_image_rule.py index 030e41ca1f..576886072d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_image_rule.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_render_image_rule.py @@ -4,7 +4,7 @@ from maya import cmds -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( PublishValidationError, RepairAction, ValidateContentsOrder) diff --git a/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py b/client/ayon_core/hosts/maya/plugins/publish/validate_render_no_default_cameras.py similarity index 88% rename from openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_render_no_default_cameras.py index 9d4410186b..32d0470b7f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_render_no_default_cameras.py @@ -2,8 +2,8 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError, ) @@ -16,7 +16,7 @@ class ValidateRenderNoDefaultCameras(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['renderlayer'] label = "No Default Cameras Renderable" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py b/client/ayon_core/hosts/maya/plugins/publish/validate_render_single_camera.py similarity index 89% rename from openpype/hosts/maya/plugins/publish/validate_render_single_camera.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_render_single_camera.py index 2c0d604175..f31059f594 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_single_camera.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_render_single_camera.py @@ -3,9 +3,9 @@ import pyblish.api from 
maya import cmds -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib_rendersettings import RenderSettings -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) @@ -23,7 +23,7 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): hosts = ['maya'] families = ["renderlayer", "vrayscene"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] R_CAMERA_TOKEN = re.compile(r'%c|<Camera>', re.IGNORECASE) diff --git a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py b/client/ayon_core/hosts/maya/plugins/publish/validate_renderlayer_aovs.py similarity index 82% rename from openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_renderlayer_aovs.py index f8de983e06..71cd6d7112 100644 --- a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_renderlayer_aovs.py @@ -1,9 +1,9 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.client import get_subset_by_name -from openpype.pipeline import legacy_io -from openpype.pipeline.publish import PublishValidationError +import ayon_core.hosts.maya.api.action +from ayon_core.client import get_subset_by_name +from ayon_core.pipeline import legacy_io +from ayon_core.pipeline.publish import PublishValidationError class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): @@ -26,7 +26,7 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): label = "Render Passes / AOVs Are Registered" hosts = ["maya"] families = ["renderlayer"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): invalid = self.get_invalid(instance) @@ -37,7 +37,7 @@ def process(self, instance): def get_invalid(self, instance): invalid = [] - project_name = legacy_io.active_project() + project_name = instance.context.data["projectName"] asset_doc = instance.data["assetEntity"] render_passes = instance.data.get("renderPasses", []) for render_pass in render_passes: diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py similarity index 99% rename from openpype/hosts/maya/plugins/publish/validate_rendersettings.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py index f2a98eab32..ed70d81b63 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py @@ -6,13 +6,13 @@ from maya import cmds, mel import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError, ) -from openpype.hosts.maya.api import lib -from openpype.hosts.maya.api.lib_rendersettings import RenderSettings +from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings def convert_to_int_or_float(string_value): diff --git a/openpype/hosts/maya/plugins/publish/validate_resolution.py b/client/ayon_core/hosts/maya/plugins/publish/validate_resolution.py similarity index 95% rename from 
openpype/hosts/maya/plugins/publish/validate_resolution.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_resolution.py index 91b473b250..ff552f566d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_resolution.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_resolution.py @@ -1,12 +1,12 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin ) from maya import cmds -from openpype.pipeline.publish import RepairAction -from openpype.hosts.maya.api import lib -from openpype.hosts.maya.api.lib import reset_scene_resolution +from ayon_core.pipeline.publish import RepairAction +from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.lib import reset_scene_resolution class ValidateResolution(pyblish.api.InstancePlugin, diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_resources.py b/client/ayon_core/hosts/maya/plugins/publish/validate_resources.py new file mode 100644 index 0000000000..725e86450d --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_resources.py @@ -0,0 +1,60 @@ +import os +from collections import defaultdict + +import pyblish.api +from ayon_core.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError +) + + +class ValidateResources(pyblish.api.InstancePlugin): + """Validates mapped resources. + + These are external files to the current application, for example + these could be textures, image planes, cache files or other linked + media. + + This validates: + - The resources have unique filenames (without extension) + + """ + + order = ValidateContentsOrder + label = "Resources Unique" + + def process(self, instance): + + resources = instance.data.get("resources", []) + if not resources: + self.log.debug("No resources to validate..") + return + + basenames = defaultdict(set) + + for resource in resources: + files = resource.get("files", []) + for filename in files: + + # Use normalized paths in comparison and ignore case + # sensitivity + filename = os.path.normpath(filename).lower() + + basename = os.path.splitext(os.path.basename(filename))[0] + basenames[basename].add(filename) + + invalid_resources = list() + for basename, sources in basenames.items(): + if len(sources) > 1: + invalid_resources.extend(sources) + + self.log.error( + "Non-unique resource name: {0} (sources: {1})".format( + basename, + list(sources) + ) + ) + + if invalid_resources: + raise PublishValidationError("Invalid resources in instance.") diff --git a/openpype/hosts/maya/plugins/publish/validate_review.py b/client/ayon_core/hosts/maya/plugins/publish/validate_review.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_review.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_review.py index 12a2e7f86f..fcfd851368 100644 --- a/openpype/hosts/maya/plugins/publish/validate_review.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_review.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_contents.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_contents.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_rig_contents.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_rig_contents.py index 106b4024e2..be495a8fb9 100644 --- 
a/openpype/hosts/maya/plugins/publish/validate_rig_contents.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_contents.py @@ -1,7 +1,7 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError, ValidateContentsOrder ) @@ -20,7 +20,7 @@ class ValidateRigContents(pyblish.api.InstancePlugin): label = "Rig Contents" hosts = ["maya"] families = ["rig"] - action = [openpype.hosts.maya.api.action.SelectInvalidAction] + action = [ayon_core.hosts.maya.api.action.SelectInvalidAction] accepted_output = ["mesh", "transform"] accepted_controllers = ["transform"] diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_controllers.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_controllers.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/validate_rig_controllers.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_rig_controllers.py index 82248c57b3..469412dd1a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_controllers.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_controllers.py @@ -2,13 +2,13 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairAction, PublishValidationError ) -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib import undo_chunk +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api.lib import undo_chunk class ValidateRigControllers(pyblish.api.InstancePlugin): @@ -34,7 +34,7 @@ class ValidateRigControllers(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["rig"] actions = [RepairAction, - openpype.hosts.maya.api.action.SelectInvalidAction] + ayon_core.hosts.maya.api.action.SelectInvalidAction] # Default controller values CONTROLLER_DEFAULTS = { diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py index 03f6a5f1ab..2227899a5b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_controllers_arnold_attributes.py @@ -2,14 +2,14 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairAction, PublishValidationError ) -from openpype.hosts.maya.api import lib -import openpype.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +import ayon_core.hosts.maya.api.action class ValidateRigControllersArnoldAttributes(pyblish.api.InstancePlugin): @@ -36,7 +36,7 @@ class ValidateRigControllersArnoldAttributes(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["rig"] actions = [RepairAction, - openpype.hosts.maya.api.action.SelectInvalidAction] + ayon_core.hosts.maya.api.action.SelectInvalidAction] attributes = [ "rcurve", diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_joints_hidden.py similarity index 84% rename from 
openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_rig_joints_hidden.py index 30d95128a2..c6b9d23574 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_joints_hidden.py @@ -2,9 +2,9 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, ) @@ -25,7 +25,7 @@ class ValidateRigJointsHidden(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['rig'] label = "Joints Hidden" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py index 80ac0f27e6..dccf9cc47b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py @@ -2,9 +2,9 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError @@ -26,7 +26,7 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): hosts = ['maya'] label = 'Rig Out Set Node Ids' actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] allow_history_only = False diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_output_ids.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_rig_output_ids.py index 343d8e6924..93552ccce0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rig_output_ids.py @@ -4,9 +4,9 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib import get_id, set_id -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api.lib import get_id, set_id +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError @@ -30,7 +30,7 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["rig"] actions = [RepairAction, - openpype.hosts.maya.api.action.SelectInvalidAction] + ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): invalid = self.get_invalid(instance, compute=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py b/client/ayon_core/hosts/maya/plugins/publish/validate_scene_set_workspace.py similarity index 97% rename from 
openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_scene_set_workspace.py index ddcbab8931..6e68cf5d14 100644 --- a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_scene_set_workspace.py @@ -3,7 +3,7 @@ import maya.cmds as cmds import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( PublishValidationError, ValidatePipelineOrder) diff --git a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py b/client/ayon_core/hosts/maya/plugins/publish/validate_setdress_root.py similarity index 91% rename from openpype/hosts/maya/plugins/publish/validate_setdress_root.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_setdress_root.py index 5fd971f8c4..906f6fbd1a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_setdress_root.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_setdress_root.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from ayon_core.pipeline.publish import ValidateContentsOrder class ValidateSetdressRoot(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_shader_name.py b/client/ayon_core/hosts/maya/plugins/publish/validate_shader_name.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_shader_name.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_shader_name.py index d6486dea7f..cb7f975535 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shader_name.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_shader_name.py @@ -3,8 +3,8 @@ import pyblish.api from maya import cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( OptionalPyblishPluginMixin, PublishValidationError, ValidateContentsOrder) @@ -20,7 +20,7 @@ class ValidateShaderName(pyblish.api.InstancePlugin, families = ["look"] hosts = ['maya'] label = 'Validate Shaders Name' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] regex = r'(?P<asset>.*)_(.*)_SHD' # The default connections to check diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py b/client/ayon_core/hosts/maya/plugins/publish/validate_shape_default_names.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_shape_default_names.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_shape_default_names.py index d8ad366ed8..2f0811a73e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_shape_default_names.py @@ -4,8 +4,8 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairAction, OptionalPyblishPluginMixin @@ -42,7 +42,7 @@ class ValidateShapeDefaultNames(pyblish.api.InstancePlugin, families = ['model'] optional = True label = "Shape Default Naming" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] @staticmethod diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_render_stats.py 
b/client/ayon_core/hosts/maya/plugins/publish/validate_shape_render_stats.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_shape_render_stats.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_shape_render_stats.py index f58c0aaf81..ffdb43ef55 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_render_stats.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_shape_render_stats.py @@ -2,8 +2,8 @@ from maya import cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, ValidateMeshOrder, ) @@ -16,7 +16,7 @@ class ValidateShapeRenderStats(pyblish.api.Validator): hosts = ['maya'] families = ['model'] label = 'Shape Default Render Stats' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction, + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction] defaults = {'castsShadows': 1, diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_zero.py b/client/ayon_core/hosts/maya/plugins/publish/validate_shape_zero.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_shape_zero.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_shape_zero.py index c7af6a60db..6cf3edf472 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_zero.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_shape_zero.py @@ -2,9 +2,9 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairAction, PublishValidationError @@ -23,7 +23,7 @@ class ValidateShapeZero(pyblish.api.Validator): families = ["model"] label = "Shape Zero (Freeze)" actions = [ - openpype.hosts.maya.api.action.SelectInvalidAction, + ayon_core.hosts.maya.api.action.SelectInvalidAction, RepairAction ] diff --git a/openpype/hosts/maya/plugins/publish/validate_single_assembly.py b/client/ayon_core/hosts/maya/plugins/publish/validate_single_assembly.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_single_assembly.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_single_assembly.py index b768c9c4e8..1987f93e32 100644 --- a/openpype/hosts/maya/plugins/publish/validate_single_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_single_assembly.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from ayon_core.pipeline.publish import ValidateContentsOrder class ValidateSingleAssembly(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py b/client/ayon_core/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py index 9084374c76..ff2ad822b6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_skeletalmesh_hierarchy.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, 
PublishXmlValidationError, ) diff --git a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_triangulated.py b/client/ayon_core/hosts/maya/plugins/publish/validate_skeletalmesh_triangulated.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_skeletalmesh_triangulated.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_skeletalmesh_triangulated.py index 701c80a8af..cc25e769e6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_skeletalmesh_triangulated.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_skeletalmesh_triangulated.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.hosts.maya.api.action import ( +from ayon_core.hosts.maya.api.action import ( SelectInvalidAction, ) -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError diff --git a/openpype/hosts/maya/plugins/publish/validate_skeleton_top_group_hierarchy.py b/client/ayon_core/hosts/maya/plugins/publish/validate_skeleton_top_group_hierarchy.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_skeleton_top_group_hierarchy.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_skeleton_top_group_hierarchy.py index 1dbe1c454c..7c876240ae 100644 --- a/openpype/hosts/maya/plugins/publish/validate_skeleton_top_group_hierarchy.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_skeleton_top_group_hierarchy.py @@ -4,7 +4,7 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, OptionalPyblishPluginMixin, PublishValidationError diff --git a/openpype/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py b/client/ayon_core/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py similarity index 93% rename from openpype/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py index b45d2b120a..c104f0477f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_skinCluster_deformer_set.py @@ -2,8 +2,8 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ValidateContentsOrder class ValidateSkinclusterDeformerSet(pyblish.api.InstancePlugin): @@ -19,7 +19,7 @@ class ValidateSkinclusterDeformerSet(pyblish.api.InstancePlugin): hosts = ['maya'] families = ['fbx'] label = "Skincluster Deformer Relationships" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): """Process all the transform nodes in the instance""" diff --git a/openpype/hosts/maya/plugins/publish/validate_step_size.py b/client/ayon_core/hosts/maya/plugins/publish/validate_step_size.py similarity index 88% rename from openpype/hosts/maya/plugins/publish/validate_step_size.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_step_size.py index 493a6ee65c..524c7b29ba 100644 --- a/openpype/hosts/maya/plugins/publish/validate_step_size.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_step_size.py @@ -1,7 +1,7 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( 
+import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError, ValidateContentsOrder ) @@ -19,7 +19,7 @@ class ValidateStepSize(pyblish.api.InstancePlugin): families = ['camera', 'pointcache', 'animation'] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] MIN = 0.01 MAX = 1.0 diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py b/client/ayon_core/hosts/maya/plugins/publish/validate_transform_naming_suffix.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_transform_naming_suffix.py index cbc7ee9d5c..1f8d6b7470 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_transform_naming_suffix.py @@ -4,8 +4,8 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, OptionalPyblishPluginMixin, PublishValidationError @@ -39,7 +39,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin, families = ['model'] optional = True label = 'Suffix Naming Conventions' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] SUFFIX_NAMING_TABLE = {"mesh": ["_GEO", "_GES", "_GEP", "_OSD"], "nurbsCurve": ["_CRV"], "nurbsSurface": ["_NRB"], diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_transform_zero.py b/client/ayon_core/hosts/maya/plugins/publish/validate_transform_zero.py new file mode 100644 index 0000000000..ddf9d3867d --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_transform_zero.py @@ -0,0 +1,78 @@ +from maya import cmds + +import pyblish.api + +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError +) + + +class ValidateTransformZero(pyblish.api.Validator): + """Transforms can't have any values + + To solve this issue, try freezing the transforms. So long + as the transforms, rotation and scale values are zero, + you're all good. + + """ + + order = ValidateContentsOrder + hosts = ["maya"] + families = ["model"] + label = "Transform Zero (Freeze)" + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] + + _identity = [1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0] + _tolerance = 1e-30 + + @classmethod + def get_invalid(cls, instance): + """Returns the invalid transforms in the instance. + + This is the same as checking: + - translate == [0, 0, 0] and rotate == [0, 0, 0] and + scale == [1, 1, 1] and shear == [0, 0, 0] + + .. note:: + This will also catch camera transforms if those + are in the instances. 
+ + Returns: + list: Transforms that are not identity matrix + + """ + + transforms = cmds.ls(instance, type="transform") + + invalid = [] + for transform in transforms: + if ('_LOC' in transform) or ('_loc' in transform): + continue + mat = cmds.xform(transform, q=1, matrix=True, objectSpace=True) + if not all(abs(x-y) < cls._tolerance + for x, y in zip(cls._identity, mat)): + invalid.append(transform) + + return invalid + + def process(self, instance): + """Process all the nodes in the instance "objectSet""" + + invalid = self.get_invalid(instance) + if invalid: + + names = "\n".join( + " - {}".format(node) for node in invalid + ) + + raise PublishValidationError( + title="Transform Zero", + message="The model publish allows no transformations. You must" + " freeze transformations to continue.\n\n"
" + "Nodes found with transform values: " + "{0}".format(names)) diff --git a/openpype/hosts/maya/plugins/publish/validate_unique_names.py b/client/ayon_core/hosts/maya/plugins/publish/validate_unique_names.py similarity index 84% rename from openpype/hosts/maya/plugins/publish/validate_unique_names.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_unique_names.py index 05776ee0f3..55f8933fff 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unique_names.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_unique_names.py @@ -1,8 +1,8 @@ from maya import cmds import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ValidateContentsOrder class ValidateUniqueNames(pyblish.api.Validator): @@ -16,7 +16,7 @@ class ValidateUniqueNames(pyblish.api.Validator): hosts = ["maya"] families = ["model"] label = "Unique transform name" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] @staticmethod def get_invalid(instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py similarity index 83% rename from openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py index e78962bf97..a39ba7c4cc 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py @@ -3,8 +3,8 @@ from maya import cmds import pyblish.api -from openpype.pipeline.publish import ValidateMeshOrder -import openpype.hosts.maya.api.action +from ayon_core.pipeline.publish import ValidateMeshOrder +import ayon_core.hosts.maya.api.action class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin): @@ -14,7 +14,7 @@ class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["staticMesh"] label = "Mesh is Triangulated" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] active = False @classmethod diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py new file mode 100644 index 0000000000..d43e04da60 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +"""Validator for correct naming of Static Meshes.""" +import re + +import pyblish.api + +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline import legacy_io +from ayon_core.settings import get_project_settings +from ayon_core.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin, + PublishValidationError +) + + +class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): + """Validate name of Unreal Static Mesh + + Unreals naming convention states that staticMesh should start with `SM` + prefix - SM_[Name]_## (Eg. SM_sube_01).These prefixes can be configured + in Settings UI. 
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py new file mode 100644 index 0000000000..d43e04da60 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +"""Validator for correct naming of Static Meshes.""" +import re + +import pyblish.api + +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline import legacy_io +from ayon_core.settings import get_project_settings +from ayon_core.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin, + PublishValidationError +) + + +class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): + """Validate name of Unreal Static Mesh + + Unreal's naming convention states that a staticMesh should start with the + `SM` prefix - SM_[Name]_## (Eg. SM_cube_01). These prefixes can be + configured in Settings UI. This plugin also validates other types of + meshes - collision meshes: + + UBX_[RenderMeshName]*: + Boxes are created with the Box objects type in + Max or with the Cube polygonal primitive in Maya. + You cannot move the vertices around or deform it + in any way to make it something other than a + rectangular prism, or else it will not work. + + UCP_[RenderMeshName]*: + Capsules are created with the Capsule object type. + The capsule does not need to have many segments + (8 is a good number) at all because it is + converted into a true capsule for collision. Like + boxes, you should not move the individual + vertices around. + + USP_[RenderMeshName]*: + Spheres are created with the Sphere object type. + The sphere does not need to have many segments + (8 is a good number) at all because it is + converted into a true sphere for collision. Like + boxes, you should not move the individual + vertices around. + + UCX_[RenderMeshName]*: + Convex objects can be any completely closed + convex 3D shape. For example, a box can also be + a convex object. + + This validator also checks if collision mesh [RenderMeshName] matches one + of SM_[RenderMeshName]. + + """ + optional = True + order = ValidateContentsOrder + hosts = ["maya"] + families = ["staticMesh"] + label = "Unreal Static Mesh Name" + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] + regex_mesh = r"(?P<renderName>.*)" + regex_collision = r"(?P<renderName>.*)" + + @classmethod + def get_invalid(cls, instance): + + invalid = [] + + collision_prefixes = ( + instance.context.data["project_settings"] + ["maya"] + ["create"] + ["CreateUnrealStaticMesh"] + ["collision_prefixes"] + ) + + if cls.validate_mesh: + # compile regex for testing names + regex_mesh = "{}{}".format( + ("_" + cls.static_mesh_prefix) or "", cls.regex_mesh + ) + sm_r = re.compile(regex_mesh) + if not sm_r.match(instance.data.get("subset")): + cls.log.error("Mesh doesn't comply with name validation.") + return True + + if cls.validate_collision: + collision_set = instance.data.get("collisionMembers", None) + # soft-fail if there are no collision objects + if not collision_set: + cls.log.warning("No collision objects to validate.") + return False + + regex_collision = "{}{}_(\\d+)".format( + "(?P<prefix>({}))_".format( + "|".join("{0}".format(p) for p in collision_prefixes) + ) or "", cls.regex_collision + ) + + cl_r = re.compile(regex_collision) + + asset_name = instance.data["assetEntity"]["name"] + mesh_name = "{}{}".format(asset_name, + instance.data.get("variant", [])) + + for obj in collision_set: + cl_m = cl_r.match(obj) + if not cl_m: + cls.log.error("{} is invalid".format(obj)) + invalid.append(obj) + else: + expected_collision = "{}_{}".format( + cl_m.group("prefix"), + mesh_name + ) + + if not obj.startswith(expected_collision): + + cls.log.error( + "Collision object name doesn't match " + "static mesh name" + ) + cls.log.error("{}_{} != {}_{}*".format( + cl_m.group("prefix"), + cl_m.group("renderName"), + cl_m.group("prefix"), + mesh_name, + )) + invalid.append(obj) + + return invalid + + def process(self, instance): + if not self.is_active(instance.data): + return + + if not self.validate_mesh and not self.validate_collision: + self.log.debug("Validation of both mesh and collision names " + "is disabled.") + return + + if not instance.data.get("collisionMembers", None): + self.log.debug("There are no collision objects to validate") + return + + invalid = self.get_invalid(instance) + + if invalid: + raise PublishValidationError("Model naming is invalid.\nSee log.")
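The collision check above hinges on the `prefix` and `renderName` named groups of the assembled pattern. A standalone sketch of how that pattern behaves, using made-up node names (the prefix list mirrors the prefixes documented in the docstring; actual values come from project settings):

```python
import re

# Mirrors the pattern assembled in get_invalid(): a configured collision
# prefix, the render-mesh name, then a numeric suffix (e.g. UCX_SM_Rock_01).
collision_prefixes = ["UBX", "UCP", "USP", "UCX"]
pattern = re.compile(
    "(?P<prefix>({}))_(?P<renderName>.*)_(\\d+)".format(
        "|".join(collision_prefixes)
    )
)

for name in ("UCX_SM_Rock_01", "SM_Rock_01", "UBX_SM_Crate_02"):
    match = pattern.match(name)
    if match:
        print(name, "->", match.group("prefix"), match.group("renderName"))
    else:
        print(name, "-> no match")
```
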
See log.") diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_up_axis.py similarity index 95% rename from openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_unreal_up_axis.py index a420dcb900..ef7296e628 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_unreal_up_axis.py @@ -3,7 +3,7 @@ from maya import cmds import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, RepairAction, OptionalPyblishPluginMixin diff --git a/openpype/hosts/maya/plugins/publish/validate_visible_only.py b/client/ayon_core/hosts/maya/plugins/publish/validate_visible_only.py similarity index 88% rename from openpype/hosts/maya/plugins/publish/validate_visible_only.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_visible_only.py index e72782e552..f9e4c9212a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_visible_only.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_visible_only.py @@ -1,8 +1,8 @@ import pyblish.api -from openpype.hosts.maya.api.lib import iter_visible_nodes_in_range -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +from ayon_core.hosts.maya.api.lib import iter_visible_nodes_in_range +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) @@ -19,7 +19,7 @@ class ValidateAlembicVisibleOnly(pyblish.api.InstancePlugin): label = "Alembic Visible Only" hosts = ["maya"] families = ["pointcache", "animation"] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_vray.py b/client/ayon_core/hosts/maya/plugins/publish/validate_vray.py similarity index 88% rename from openpype/hosts/maya/plugins/publish/validate_vray.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_vray.py index bef5967cc9..db78212658 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_vray.py @@ -1,7 +1,7 @@ from maya import cmds import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError class ValidateVray(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py b/client/ayon_core/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py index 14571203ea..54eaa58e74 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py @@ -1,8 +1,8 @@ import pyblish.api from maya import cmds -from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ( +from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline.publish import ( PublishValidationError, RepairAction, ValidateContentsOrder) diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py 
b/client/ayon_core/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py index 39c721e717..d4e53d69dc 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_vray_referenced_aovs.py @@ -4,7 +4,7 @@ import types from maya import cmds -from openpype.pipeline.publish import RepairContextAction +from ayon_core.pipeline.publish import RepairContextAction class ValidateVrayReferencedAOVs(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_translator_settings.py b/client/ayon_core/hosts/maya/plugins/publish/validate_vray_translator_settings.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_vray_translator_settings.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_vray_translator_settings.py index 4474f08ba4..f366ee60cf 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray_translator_settings.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_vray_translator_settings.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Validate VRay Translator settings.""" import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( context_plugin_should_run, RepairContextAction, ValidateContentsOrder, diff --git a/openpype/hosts/maya/plugins/publish/validate_vrayproxy.py b/client/ayon_core/hosts/maya/plugins/publish/validate_vrayproxy.py similarity index 94% rename from openpype/hosts/maya/plugins/publish/validate_vrayproxy.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_vrayproxy.py index a106b970b4..7e16006f97 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vrayproxy.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_vrayproxy.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline import KnownPublishError +from ayon_core.pipeline import KnownPublishError class ValidateVrayProxy(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_vrayproxy_members.py b/client/ayon_core/hosts/maya/plugins/publish/validate_vrayproxy_members.py similarity index 86% rename from openpype/hosts/maya/plugins/publish/validate_vrayproxy_members.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_vrayproxy_members.py index 7b726de3a8..1a52771ee6 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vrayproxy_members.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_vrayproxy_members.py @@ -2,8 +2,8 @@ from maya import cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( PublishValidationError ) @@ -16,7 +16,7 @@ class ValidateVrayProxyMembers(pyblish.api.InstancePlugin): label = 'VRay Proxy Members' hosts = ['maya'] families = ['vrayproxy'] - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/validate_xgen.py similarity index 97% rename from openpype/hosts/maya/plugins/publish/validate_xgen.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_xgen.py index 
a44fa56308..e2c006be9f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_xgen.py @@ -4,7 +4,7 @@ import xgenm import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError class ValidateXgen(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py similarity index 98% rename from openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py index a8085418e7..a72d930339 100644 --- a/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py @@ -1,7 +1,7 @@ from maya import cmds import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder +from ayon_core.pipeline.publish import ValidateContentsOrder class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py similarity index 92% rename from openpype/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py index 2b7249ad94..22545d07fb 100644 --- a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_cache_state.py @@ -1,7 +1,7 @@ import pyblish.api import maya.cmds as cmds -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( RepairAction, PublishValidationError ) @@ -22,7 +22,7 @@ class ValidateYetiRigCacheState(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["yetiRig"] actions = [RepairAction, - openpype.hosts.maya.api.action.SelectInvalidAction] + ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): invalid = self.get_invalid(instance) diff --git a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py similarity index 90% rename from openpype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py index 50a27589ad..3d9d8faca8 100644 --- a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py @@ -2,8 +2,8 @@ import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( +import ayon_core.hosts.maya.api.action +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) @@ -16,7 +16,7 @@ class ValidateYetiRigInputShapesInInstance(pyblish.api.Validator): hosts = ["maya"] families = ["yetiRig"] label = "Yeti Rig Input Shapes In Instance" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] + actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): diff --git 
a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_settings.py b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_settings.py similarity index 96% rename from openpype/hosts/maya/plugins/publish/validate_yeti_rig_settings.py rename to client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_settings.py index 455bf5291a..22a5ccbaca 100644 --- a/openpype/hosts/maya/plugins/publish/validate_yeti_rig_settings.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_yeti_rig_settings.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError class ValidateYetiRigSettings(pyblish.api.InstancePlugin): diff --git a/client/ayon_core/hosts/maya/startup/userSetup.py b/client/ayon_core/hosts/maya/startup/userSetup.py new file mode 100644 index 0000000000..882f2df27c --- /dev/null +++ b/client/ayon_core/hosts/maya/startup/userSetup.py @@ -0,0 +1,69 @@ +import os + +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import install_host, get_current_project_name +from ayon_core.hosts.maya.api import MayaHost + +from maya import cmds + + +host = MayaHost() +install_host(host) + +print("Starting OpenPype usersetup...") + +project_name = get_current_project_name() +settings = get_project_settings(project_name) + +# Loading plugins explicitly. +explicit_plugins_loading = settings["maya"]["explicit_plugins_loading"] +if explicit_plugins_loading["enabled"]: + def _explicit_load_plugins(): + for plugin in explicit_plugins_loading["plugins_to_load"]: + if plugin["enabled"]: + print("Loading plug-in: " + plugin["name"]) + try: + cmds.loadPlugin(plugin["name"], quiet=True) + except RuntimeError as e: + print(e) + + # We need to load plugins deferred as loading them directly does not work + # correctly due to Maya's initialization. + cmds.evalDeferred( + _explicit_load_plugins, + lowestPriority=True + ) + +# Open Workfile Post Initialization. +key = "AYON_OPEN_WORKFILE_POST_INITIALIZATION" +if bool(int(os.environ.get(key, "0"))): + def _log_and_open(): + path = os.environ["AVALON_LAST_WORKFILE"] + print("Opening \"{}\"".format(path)) + cmds.file(path, open=True, force=True) + cmds.evalDeferred( + _log_and_open, + lowestPriority=True + ) + +# Build a shelf. 
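+# NOTE (assumption): `shelf_preset` below comes from the project settings,
+# `OPENPYPE_PROJECT_SCRIPTS` must point at the root of the per-project
+# script packages, and the `mlib` name used in the deferred call is not
+# imported in this file - it is expected to be one of the names pulled in
+# by the `shelf_preset['imports']` loop.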
+shelf_preset = settings['maya'].get('project_shelf') +if shelf_preset: + icon_path = os.path.join( + os.environ['OPENPYPE_PROJECT_SCRIPTS'], + project_name, + "icons") + icon_path = os.path.abspath(icon_path) + + for i in shelf_preset['imports']: + import_string = "from {} import {}".format(project_name, i) + print(import_string) + exec(import_string) + + cmds.evalDeferred( + "mlib.shelf(name=shelf_preset['name'], iconPath=icon_path," + " preset=shelf_preset)" + ) + + +print("Finished OpenPype usersetup.") diff --git a/client/ayon_core/hosts/maya/tools/__init__.py b/client/ayon_core/hosts/maya/tools/__init__.py new file mode 100644 index 0000000000..0dd6de2342 --- /dev/null +++ b/client/ayon_core/hosts/maya/tools/__init__.py @@ -0,0 +1,27 @@ +from ayon_core.tools.utils.host_tools import qt_app_context + + +class MayaToolsSingleton: + _look_assigner = None + + +def get_look_assigner_tool(parent): + """Create, cache and return look assigner tool window.""" + if MayaToolsSingleton._look_assigner is None: + from .mayalookassigner import MayaLookAssignerWindow + mayalookassigner_window = MayaLookAssignerWindow(parent) + MayaToolsSingleton._look_assigner = mayalookassigner_window + return MayaToolsSingleton._look_assigner + + +def show_look_assigner(parent=None): + """Look manager is Maya specific tool for look management.""" + + with qt_app_context(): + look_assigner_tool = get_look_assigner_tool(parent) + look_assigner_tool.show() + + # Pull window to the front. + look_assigner_tool.raise_() + look_assigner_tool.activateWindow() + look_assigner_tool.showNormal() diff --git a/openpype/hosts/maya/tools/mayalookassigner/LICENSE b/client/ayon_core/hosts/maya/tools/mayalookassigner/LICENSE similarity index 100% rename from openpype/hosts/maya/tools/mayalookassigner/LICENSE rename to client/ayon_core/hosts/maya/tools/mayalookassigner/LICENSE diff --git a/openpype/hosts/maya/tools/mayalookassigner/__init__.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/__init__.py similarity index 100% rename from openpype/hosts/maya/tools/mayalookassigner/__init__.py rename to client/ayon_core/hosts/maya/tools/mayalookassigner/__init__.py diff --git a/openpype/hosts/maya/tools/mayalookassigner/alembic.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/alembic.py similarity index 100% rename from openpype/hosts/maya/tools/mayalookassigner/alembic.py rename to client/ayon_core/hosts/maya/tools/mayalookassigner/alembic.py diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/app.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/app.py new file mode 100644 index 0000000000..d73c5e318f --- /dev/null +++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/app.py @@ -0,0 +1,309 @@ +import sys +import time +import logging + +from qtpy import QtWidgets, QtCore + +from ayon_core import style +from ayon_core.client import get_last_version_by_subset_id +from ayon_core.pipeline import get_current_project_name +from ayon_core.tools.utils.lib import qt_app_context +from ayon_core.hosts.maya.api.lib import ( + assign_look_by_version, + get_main_window +) + +from maya import cmds +# old api for MFileIO +import maya.OpenMaya +import maya.api.OpenMaya as om + +from .widgets import ( + AssetOutliner, + LookOutliner +) +from .commands import ( + get_workfile, + remove_unused_looks +) +from .vray_proxies import vrayproxy_assign_look +from . 
import arnold_standin + +module = sys.modules[__name__] +module.window = None + + +class MayaLookAssignerWindow(QtWidgets.QWidget): + + def __init__(self, parent=None): + super(MayaLookAssignerWindow, self).__init__(parent=parent) + + self.log = logging.getLogger(__name__) + + # Store callback references + self._callbacks = [] + self._connections_set_up = False + + filename = get_workfile() + + self.setObjectName("lookManager") + self.setWindowTitle("Look Manager 1.4.0 - [{}]".format(filename)) + self.setWindowFlags(QtCore.Qt.Window) + self.setParent(parent) + + self.resize(750, 500) + + self.setup_ui() + + # Force refresh check on initialization + self._on_renderlayer_switch() + + def setup_ui(self): + """Build the UI""" + + main_splitter = QtWidgets.QSplitter(self) + + # Assets (left) + asset_outliner = AssetOutliner(main_splitter) + + # Looks (right) + looks_widget = QtWidgets.QWidget(main_splitter) + + look_outliner = LookOutliner(looks_widget) # Database look overview + + assign_selected = QtWidgets.QCheckBox( + "Assign to selected only", looks_widget + ) + assign_selected.setToolTip("Whether to assign only to selected nodes " + "or to the full asset") + remove_unused_btn = QtWidgets.QPushButton( + "Remove Unused Looks", looks_widget + ) + + looks_layout = QtWidgets.QVBoxLayout(looks_widget) + looks_layout.addWidget(look_outliner) + looks_layout.addWidget(assign_selected) + looks_layout.addWidget(remove_unused_btn) + + main_splitter.addWidget(asset_outliner) + main_splitter.addWidget(looks_widget) + main_splitter.setSizes([350, 200]) + + # Footer + status = QtWidgets.QStatusBar(self) + status.setSizeGripEnabled(False) + status.setFixedHeight(25) + warn_layer = QtWidgets.QLabel( + "Current Layer is not defaultRenderLayer", self + ) + warn_layer.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + warn_layer.setStyleSheet("color: #DD5555; font-weight: bold;") + warn_layer.setFixedHeight(25) + + footer = QtWidgets.QHBoxLayout() + footer.setContentsMargins(0, 0, 0, 0) + footer.addWidget(status) + footer.addWidget(warn_layer) + + # Build up widgets + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setSpacing(0) + main_layout.addWidget(main_splitter) + main_layout.addLayout(footer) + + # Set column width + asset_outliner.view.setColumnWidth(0, 200) + look_outliner.view.setColumnWidth(0, 150) + + asset_outliner.selection_changed.connect( + self.on_asset_selection_changed) + + asset_outliner.refreshed.connect( + lambda: self.echo("Loaded assets..") + ) + + look_outliner.menu_apply_action.connect(self.on_process_selected) + remove_unused_btn.clicked.connect(remove_unused_looks) + + # Open widgets + self.asset_outliner = asset_outliner + self.look_outliner = look_outliner + self.status = status + self.warn_layer = warn_layer + + # Buttons + self.remove_unused = remove_unused_btn + self.assign_selected = assign_selected + + self._first_show = True + + def setup_connections(self): + """Connect interactive widgets with actions""" + if self._connections_set_up: + return + + # Maya renderlayer switch callback + callback = om.MEventMessage.addEventCallback( + "renderLayerManagerChange", + self._on_renderlayer_switch + ) + self._callbacks.append(callback) + self._connections_set_up = True + + def remove_connection(self): + # Delete callbacks + for callback in self._callbacks: + om.MMessage.removeCallback(callback) + + self._callbacks = [] + self._connections_set_up = False + + def showEvent(self, event): + self.setup_connections() + super(MayaLookAssignerWindow, self).showEvent(event) 
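+ # The event callbacks registered in setup_connections() are removed
+ # again in closeEvent(), so showing the window repeatedly does not
+ # stack duplicate Maya renderlayer callbacks.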
+ if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) + + def closeEvent(self, event): + self.remove_connection() + super(MayaLookAssignerWindow, self).closeEvent(event) + + def _on_renderlayer_switch(self, *args): + """Callback that updates on Maya renderlayer switch""" + + if maya.OpenMaya.MFileIO.isNewingFile(): + # Don't perform a check during file open or file new as + # the renderlayers will not be in a valid state yet. + return + + layer = cmds.editRenderLayerGlobals(query=True, + currentRenderLayer=True) + if layer != "defaultRenderLayer": + self.warn_layer.show() + else: + self.warn_layer.hide() + + def echo(self, message): + self.status.showMessage(message, 1500) + + def refresh(self): + """Refresh the content""" + + # Get all containers and information + self.asset_outliner.clear() + found_items = self.asset_outliner.get_all_assets() + if not found_items: + self.look_outliner.clear() + + def on_asset_selection_changed(self): + """Get selected items from asset loader and fill look outliner""" + + items = self.asset_outliner.get_selected_items() + self.look_outliner.clear() + self.look_outliner.add_items(items) + + def on_process_selected(self): + """Process all selected looks for the selected assets""" + + assets = self.asset_outliner.get_selected_items() + assert assets, "No asset selected" + + # Collect the looks we want to apply (by name) + look_items = self.look_outliner.get_selected_items() + looks = {look["subset"] for look in look_items} + + selection = self.assign_selected.isChecked() + asset_nodes = self.asset_outliner.get_nodes(selection=selection) + + project_name = get_current_project_name() + start = time.time() + for i, (asset, item) in enumerate(asset_nodes.items()): + + # Label prefix + prefix = "({}/{})".format(i + 1, len(asset_nodes)) + + # Assign the first matching look relevant for this asset + # (since assigning multiple to the same nodes makes no sense) + assign_look = next((subset for subset in item["looks"] + if subset["name"] in looks), None) + if not assign_look: + self.echo( + "{} No matching selected look for {}".format(prefix, asset) + ) + continue + + # Get the latest version of this asset's look subset + version = get_last_version_by_subset_id( + project_name, assign_look["_id"], fields=["_id"] + ) + + subset_name = assign_look["name"] + self.echo("{} Assigning {} to {}\t".format( + prefix, subset_name, asset + )) + nodes = item["nodes"] + + # Assign Vray Proxy look. + if cmds.pluginInfo('vrayformaya', query=True, loaded=True): + self.echo("Getting vray proxy nodes ...") + vray_proxies = set(cmds.ls(type="VRayProxy", long=True)) + + for vp in vray_proxies: + if vp in nodes: + vrayproxy_assign_look(vp, subset_name) + + nodes = list(set(nodes).difference(vray_proxies)) + else: + self.echo( + "Could not assign to VRayProxy because vrayformaya plugin " + "is not loaded." + ) + + # Assign Arnold Standin look. + if cmds.pluginInfo("mtoa", query=True, loaded=True): + arnold_standins = set(cmds.ls(type="aiStandIn", long=True)) + + for standin in arnold_standins: + if standin in nodes: + arnold_standin.assign_look(standin, subset_name) + + nodes = list(set(nodes).difference(arnold_standins)) + else: + self.echo( + "Could not assign to aiStandIn because mtoa plugin is not " + "loaded." + ) + + # Assign look + if nodes: + assign_look_by_version(nodes, version_id=version["_id"]) + + end = time.time() + + self.echo("Finished assigning.. 
({0:.3f}s)".format(end - start))
+
+
+def show():
+    """Display the Look Assigner GUI.
+
+    Closes and deletes any previously opened instance of the window
+    before creating a new one.
+    """
+
+    try:
+        module.window.close()
+        del module.window
+    except (RuntimeError, AttributeError):
+        pass
+
+    # Get Maya main window
+    mainwindow = get_main_window()
+
+    with qt_app_context():
+        window = MayaLookAssignerWindow(parent=mainwindow)
+        window.show()
+
+        module.window = window
diff --git a/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/arnold_standin.py
similarity index 97%
rename from openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py
rename to client/ayon_core/hosts/maya/tools/mayalookassigner/arnold_standin.py
index 076b0047bb..9c6877fed8 100644
--- a/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py
+++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/arnold_standin.py
@@ -5,9 +5,9 @@
 
 from maya import cmds
 
-from openpype.pipeline import legacy_io
-from openpype.client import get_last_version_by_subset_name
-from openpype.hosts.maya import api
+from ayon_core.pipeline import get_current_project_name
+from ayon_core.client import get_last_version_by_subset_name
+from ayon_core.hosts.maya import api
 from . import lib
 from .alembic import get_alembic_ids_cache
 from .usd import is_usd_lib_supported, get_usd_ids_cache
@@ -142,7 +142,7 @@ def assign_look(standin, subset):
         asset_id = node_id.split(":", 1)[0]
         node_ids_by_asset_id[asset_id].add(node_id)
 
-    project_name = legacy_io.active_project()
+    project_name = get_current_project_name()
 
     for asset_id, node_ids in node_ids_by_asset_id.items():
         # Get latest look version
diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/commands.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/commands.py
new file mode 100644
index 0000000000..4d2f1a8443
--- /dev/null
+++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/commands.py
@@ -0,0 +1,197 @@
+import os
+import logging
+from collections import defaultdict
+
+import maya.cmds as cmds
+
+from ayon_core.client import get_assets, get_asset_name_identifier
+from ayon_core.pipeline import (
+    remove_container,
+    registered_host,
+    get_current_project_name,
+)
+from ayon_core.hosts.maya.api import lib
+
+from .vray_proxies import get_alembic_ids_cache
+from . import arnold_standin
+
+log = logging.getLogger(__name__)
+
+
+def get_workfile():
+    path = cmds.file(query=True, sceneName=True) or "untitled"
+    return os.path.basename(path)
+
+
+def get_workfolder():
+    return os.path.dirname(cmds.file(query=True, sceneName=True))
+
+
+def select(nodes):
+    cmds.select(nodes)
+
+
+def get_namespace_from_node(node):
+    """Get the namespace from the given node
+
+    Args:
+        node (str): name of the node
+
+    Returns:
+        namespace (str)
+
+    """
+    parts = node.rsplit("|", 1)[-1].rsplit(":", 1)
+    return parts[0] if len(parts) > 1 else u":"
+
+
+def get_selected_nodes():
+    """Get the current selection, including all children of the selection"""
+
+    selection = cmds.ls(selection=True, long=True)
+    hierarchy = lib.get_all_children(selection)
+    return list(set(selection + hierarchy))
+
+
+def get_all_asset_nodes():
+    """Get all DAG nodes in the scene to search for assets
+
+    Returns:
+        list: list of long node names
+    """
+    return cmds.ls(dag=True, noIntermediate=True, long=True)
+
+
+def create_asset_id_hash(nodes):
+    """Create a mapping of asset id to nodes, based on the cbId attribute
+    Args:
+        nodes (list): a list of nodes
+
+    Returns:
+        dict
+    """
+    node_id_hash = defaultdict(list)
+    for node in nodes:
+        # iterate over content of reference node
+        if cmds.nodeType(node) == "reference":
+            ref_hashes = create_asset_id_hash(
+                list(set(cmds.referenceQuery(node, nodes=True, dp=True))))
+            for asset_id, ref_nodes in ref_hashes.items():
+                node_id_hash[asset_id] += ref_nodes
+        elif (cmds.pluginInfo("vrayformaya", query=True, loaded=True)
+                and cmds.nodeType(node) == "VRayProxy"):
+            path = cmds.getAttr("{}.fileName".format(node))
+            ids = get_alembic_ids_cache(path)
+            for node_id in ids:
+                asset_id = node_id.split(":")[0]
+                node_id_hash[asset_id].append(node)
+        elif cmds.nodeType(node) == "aiStandIn":
+            for node_id in arnold_standin.get_nodes_by_id(node):
+                asset_id = node_id.split(":")[0]
+                node_id_hash[asset_id].append(node)
+        else:
+            value = lib.get_id(node)
+            if value is None:
+                continue
+
+            asset_id = value.split(":")[0]
+            node_id_hash[asset_id].append(node)
+
+    return dict(node_id_hash)
+
+
+def create_items_from_nodes(nodes):
+    """Create an item for the view based on the container and its content
+
+    It fetches the look document based on the asset ID found in the content.
+    The item will contain all important information for the tool to work.
+
+    If there is an asset ID which is not registered in the project's
+    collection it will log a warning message.
+
+    Args:
+        nodes (list): list of maya nodes
+
+    Returns:
+        list of dicts
+
+    """
+
+    asset_view_items = []
+
+    id_hashes = create_asset_id_hash(nodes)
+
+    if not id_hashes:
+        log.warning("No id hashes")
+        return asset_view_items
+
+    project_name = get_current_project_name()
+    asset_ids = set(id_hashes.keys())
+    fields = {"_id", "name", "data.parents"}
+    asset_docs = get_assets(project_name, asset_ids, fields=fields)
+    asset_docs_by_id = {
+        str(asset_doc["_id"]): asset_doc
+        for asset_doc in asset_docs
+    }
+
+    for asset_id, id_nodes in id_hashes.items():
+        asset_doc = asset_docs_by_id.get(asset_id)
+        # Skip if asset id is not found
+        if not asset_doc:
+            log.warning(
+                "Id found on {num} nodes for which no asset is found"
+                " in the database, skipping '{asset_id}'".format(
+                    num=len(id_nodes),
+                    asset_id=asset_id
+                )
+            )
+            continue
+
+        # Collect available look subsets for this asset
+        looks = lib.list_looks(project_name, asset_doc["_id"])
+
+        # Collect namespaces the asset is found in
+        namespaces = set()
+        for node in id_nodes:
+            namespace = get_namespace_from_node(node)
+            namespaces.add(namespace)
+
+        label = get_asset_name_identifier(asset_doc)
+        asset_view_items.append({
+            "label": label,
+            "asset": asset_doc,
+            "looks": looks,
+            "namespaces": namespaces
+        })
+
+    return asset_view_items
+
+
+def remove_unused_looks():
+    """Remove all loaded looks for which none of the shaders are used.
+
+    This will clean up all loaded "LookLoader" containers that are unused in
+    the current scene.
+
+    """
+
+    host = registered_host()
+
+    unused = []
+    for container in host.ls():
+        if container['loader'] == "LookLoader":
+            members = lib.get_container_members(container['objectName'])
+            look_sets = cmds.ls(members, type="objectSet")
+            for look_set in look_sets:
+                # If the set is used then we consider this look *in use*
+                if cmds.sets(look_set, query=True):
+                    break
+            else:
+                unused.append(container)
+
+    for container in unused:
+        log.info("Removing unused look container: %s", container['objectName'])
+        remove_container(container)
+
+    log.info("Finished removing unused looks. (see log for details)")
diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/lib.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/lib.py
new file mode 100644
index 0000000000..e3ebddb7d4
--- /dev/null
+++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/lib.py
@@ -0,0 +1,87 @@
+import json
+import logging
+
+from ayon_core.pipeline import (
+    get_current_project_name,
+    get_representation_path,
+    registered_host,
+    discover_loader_plugins,
+    loaders_from_representation,
+    load_container
+)
+from ayon_core.client import get_representation_by_name
+from ayon_core.hosts.maya.api import lib
+
+
+log = logging.getLogger(__name__)
+
+
+def get_look_relationships(version_id):
+    # type: (str) -> dict
+    """Get relations for the look.
+
+    Args:
+        version_id (str): Parent version Id.
+
+    Returns:
+        dict: Dictionary of relations.
+    """
+
+    project_name = get_current_project_name()
+    json_representation = get_representation_by_name(
+        project_name, representation_name="json", version_id=version_id
+    )
+
+    # Load relationships
+    shader_relation = get_representation_path(json_representation)
+    with open(shader_relation, "r") as f:
+        relationships = json.load(f)
+
+    return relationships
+
+
+def load_look(version_id):
+    # type: (str) -> tuple
+    """Load look from version.
+
+    Get look from version and invoke Loader for it.
+
+    Args:
+        version_id (str): Version ID
+
+    Returns:
+        tuple: (list of shader nodes, container node)
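+
+    Example:
+        Hypothetical usage sketch; ``version_id`` comes from a version
+        document queried elsewhere:
+
+        >>> members, container_node = load_look(version_id)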
+ + """ + + project_name = get_current_project_name() + # Get representations of shader file and relationships + look_representation = get_representation_by_name( + project_name, representation_name="ma", version_id=version_id + ) + + # See if representation is already loaded, if so reuse it. + host = registered_host() + representation_id = str(look_representation['_id']) + for container in host.ls(): + if (container['loader'] == "LookLoader" and + container['representation'] == representation_id): + log.info("Reusing loaded look ...") + container_node = container['objectName'] + break + else: + log.info("Using look for the first time ...") + + # Load file + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation(all_loaders, representation_id) + loader = next( + (i for i in loaders if i.__name__ == "LookLoader"), None) + if loader is None: + raise RuntimeError("Could not find LookLoader, this is a bug") + + # Reference the look file + with lib.maintained_selection(): + container_node = load_container(loader, look_representation)[0] + + return lib.get_container_members(container_node), container_node diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/models.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/models.py new file mode 100644 index 0000000000..a252f103ec --- /dev/null +++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/models.py @@ -0,0 +1,129 @@ +from collections import defaultdict + +from qtpy import QtCore +import qtawesome + +from ayon_core.tools.utils import models +from ayon_core.style import get_default_entity_icon_color + + +class AssetModel(models.TreeModel): + + Columns = ["label"] + + def __init__(self, *args, **kwargs): + super(AssetModel, self).__init__(*args, **kwargs) + + self._icon_color = get_default_entity_icon_color() + + def add_items(self, items): + """ + Add items to model with needed data + Args: + items(list): collection of item data + + Returns: + None + """ + + self.beginResetModel() + + # Add the items sorted by label + sorter = lambda x: x["label"] + + for item in sorted(items, key=sorter): + + asset_item = models.Item() + asset_item.update(item) + asset_item["icon"] = "folder" + + # Add namespace children + namespaces = item["namespaces"] + for namespace in sorted(namespaces): + child = models.Item() + child.update(item) + child.update({ + "label": (namespace if namespace != ":" + else "(no namespace)"), + "namespace": namespace, + "looks": item["looks"], + "icon": "folder-o" + }) + asset_item.add_child(child) + + self.add_child(asset_item) + + self.endResetModel() + + def data(self, index, role): + + if not index.isValid(): + return + + if role == models.TreeModel.ItemRole: + node = index.internalPointer() + return node + + # Add icon + if role == QtCore.Qt.DecorationRole: + if index.column() == 0: + node = index.internalPointer() + icon = node.get("icon") + if icon: + return qtawesome.icon( + "fa.{0}".format(icon), + color=self._icon_color + ) + + return super(AssetModel, self).data(index, role) + + +class LookModel(models.TreeModel): + """Model displaying a list of looks and matches for assets""" + + Columns = ["label", "match"] + + def add_items(self, items): + """Add items to model with needed data + + An item exists of: + { + "subset": 'name of subset', + "asset": asset_document + } + + Args: + items(list): collection of item data + + Returns: + None + """ + + self.beginResetModel() + + # Collect the assets per look name (from the items of the AssetModel) + look_subsets = defaultdict(list) + for 
asset_item in items: + asset = asset_item["asset"] + for look in asset_item["looks"]: + look_subsets[look["name"]].append(asset) + + for subset in sorted(look_subsets.keys()): + assets = look_subsets[subset] + + # Define nice label without "look" prefix for readability + label = subset if not subset.startswith("look") else subset[4:] + + item_node = models.Item() + item_node["label"] = label + item_node["subset"] = subset + + # Amount of matching assets for this look + item_node["match"] = len(assets) + + # Store the assets that have this subset available + item_node["assets"] = assets + + self.add_child(item_node) + + self.endResetModel() diff --git a/openpype/hosts/maya/tools/mayalookassigner/usd.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/usd.py similarity index 100% rename from openpype/hosts/maya/tools/mayalookassigner/usd.py rename to client/ayon_core/hosts/maya/tools/mayalookassigner/usd.py diff --git a/openpype/hosts/maya/tools/mayalookassigner/views.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/views.py similarity index 100% rename from openpype/hosts/maya/tools/mayalookassigner/views.py rename to client/ayon_core/hosts/maya/tools/mayalookassigner/views.py diff --git a/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py similarity index 96% rename from openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py rename to client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py index 97fb832f71..cbd5f7fd5c 100644 --- a/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py +++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py @@ -5,9 +5,9 @@ from maya import cmds -from openpype.client import get_last_version_by_subset_name -from openpype.pipeline import get_current_project_name -import openpype.hosts.maya.lib as maya_lib +from ayon_core.client import get_last_version_by_subset_name +from ayon_core.pipeline import get_current_project_name +import ayon_core.hosts.maya.lib as maya_lib from . import lib from .alembic import get_alembic_ids_cache diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/widgets.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/widgets.py new file mode 100644 index 0000000000..234a1c149e --- /dev/null +++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/widgets.py @@ -0,0 +1,256 @@ +import logging +from collections import defaultdict + +from qtpy import QtWidgets, QtCore + +from ayon_core.client import get_asset_name_identifier +from ayon_core.tools.utils.models import TreeModel +from ayon_core.tools.utils.lib import ( + preserve_expanded_rows, + preserve_selection, +) + +from .models import ( + AssetModel, + LookModel +) +from . 
import commands +from .views import View + +from maya import cmds + + +class AssetOutliner(QtWidgets.QWidget): + refreshed = QtCore.Signal() + selection_changed = QtCore.Signal() + + def __init__(self, parent=None): + super(AssetOutliner, self).__init__(parent) + + title = QtWidgets.QLabel("Assets", self) + title.setAlignment(QtCore.Qt.AlignCenter) + title.setStyleSheet("font-weight: bold; font-size: 12px") + + model = AssetModel() + view = View(self) + view.setModel(model) + view.customContextMenuRequested.connect(self.right_mouse_menu) + view.setSortingEnabled(False) + view.setHeaderHidden(True) + view.setIndentation(10) + + from_all_asset_btn = QtWidgets.QPushButton( + "Get All Assets", self + ) + from_selection_btn = QtWidgets.QPushButton( + "Get Assets From Selection", self + ) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(title) + layout.addWidget(from_all_asset_btn) + layout.addWidget(from_selection_btn) + layout.addWidget(view) + + # Build connections + from_selection_btn.clicked.connect(self.get_selected_assets) + from_all_asset_btn.clicked.connect(self.get_all_assets) + + selection_model = view.selectionModel() + selection_model.selectionChanged.connect(self.selection_changed) + + self.view = view + self.model = model + + self.log = logging.getLogger(__name__) + + def clear(self): + self.model.clear() + + # fix looks remaining visible when no items present after "refresh" + # todo: figure out why this workaround is needed. + self.selection_changed.emit() + + def add_items(self, items): + """Add new items to the outliner""" + + self.model.add_items(items) + self.refreshed.emit() + + def get_selected_items(self): + """Get current selected items from view + + Returns: + list: list of dictionaries + """ + + selection_model = self.view.selectionModel() + return [row.data(TreeModel.ItemRole) + for row in selection_model.selectedRows(0)] + + def get_all_assets(self): + """Add all items from the current scene""" + + with preserve_expanded_rows(self.view): + with preserve_selection(self.view): + self.clear() + nodes = commands.get_all_asset_nodes() + items = commands.create_items_from_nodes(nodes) + self.add_items(items) + return len(items) > 0 + + def get_selected_assets(self): + """Add all selected items from the current scene""" + + with preserve_expanded_rows(self.view): + with preserve_selection(self.view): + self.clear() + nodes = commands.get_selected_nodes() + items = commands.create_items_from_nodes(nodes) + self.add_items(items) + + def get_nodes(self, selection=False): + """Find the nodes in the current scene per asset.""" + + items = self.get_selected_items() + + # Collect all nodes by hash (optimization) + if not selection: + nodes = cmds.ls(dag=True, long=True) + else: + nodes = commands.get_selected_nodes() + id_nodes = commands.create_asset_id_hash(nodes) + + # Collect the asset item entries per asset + # and collect the namespaces we'd like to apply + assets = {} + asset_namespaces = defaultdict(set) + for item in items: + asset_id = str(item["asset"]["_id"]) + asset_name = get_asset_name_identifier(item["asset"]) + asset_namespaces[asset_name].add(item.get("namespace")) + + if asset_name in assets: + continue + + assets[asset_name] = item + assets[asset_name]["nodes"] = id_nodes.get(asset_id, []) + + # Filter nodes to namespace (if only namespaces were selected) + for asset_name in assets: + namespaces = asset_namespaces[asset_name] + + # When None is present there should be no filtering + if None in namespaces: + continue + + # Else only namespaces are 
selected and *not* the top entry so + # we should filter to only those namespaces. + nodes = assets[asset_name]["nodes"] + nodes = [node for node in nodes if + commands.get_namespace_from_node(node) in namespaces] + assets[asset_name]["nodes"] = nodes + + return assets + + def select_asset_from_items(self): + """Select nodes from listed asset""" + + items = self.get_nodes(selection=False) + nodes = [] + for item in items.values(): + nodes.extend(item["nodes"]) + + commands.select(nodes) + + def right_mouse_menu(self, pos): + """Build RMB menu for asset outliner""" + + active = self.view.currentIndex() # index under mouse + active = active.sibling(active.row(), 0) # get first column + globalpos = self.view.viewport().mapToGlobal(pos) + + menu = QtWidgets.QMenu(self.view) + + # Direct assignment + apply_action = QtWidgets.QAction(menu, text="Select nodes") + apply_action.triggered.connect(self.select_asset_from_items) + + if not active.isValid(): + apply_action.setEnabled(False) + + menu.addAction(apply_action) + + menu.exec_(globalpos) + + +class LookOutliner(QtWidgets.QWidget): + menu_apply_action = QtCore.Signal() + + def __init__(self, parent=None): + super(LookOutliner, self).__init__(parent) + + # Looks from database + title = QtWidgets.QLabel("Looks", self) + title.setAlignment(QtCore.Qt.AlignCenter) + title.setStyleSheet("font-weight: bold; font-size: 12px") + title.setAlignment(QtCore.Qt.AlignCenter) + + model = LookModel() + + # Proxy for dynamic sorting + proxy = QtCore.QSortFilterProxyModel() + proxy.setSourceModel(model) + + view = View(self) + view.setModel(proxy) + view.setMinimumHeight(180) + view.setToolTip("Use right mouse button menu for direct actions") + view.customContextMenuRequested.connect(self.right_mouse_menu) + view.sortByColumn(0, QtCore.Qt.AscendingOrder) + + # look manager layout + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(10) + layout.addWidget(title) + layout.addWidget(view) + + self.view = view + self.model = model + + def clear(self): + self.model.clear() + + def add_items(self, items): + self.model.add_items(items) + + def get_selected_items(self): + """Get current selected items from view + + Returns: + list: list of dictionaries + """ + + items = [i.data(TreeModel.ItemRole) for i in self.view.get_indices()] + return [item for item in items if item is not None] + + def right_mouse_menu(self, pos): + """Build RMB menu for look view""" + + active = self.view.currentIndex() # index under mouse + active = active.sibling(active.row(), 0) # get first column + globalpos = self.view.viewport().mapToGlobal(pos) + + if not active.isValid(): + return + + menu = QtWidgets.QMenu(self.view) + + # Direct assignment + apply_action = QtWidgets.QAction(menu, text="Assign looks..") + apply_action.triggered.connect(self.menu_apply_action) + + menu.addAction(apply_action) + + menu.exec_(globalpos) diff --git a/openpype/hosts/nuke/__init__.py b/client/ayon_core/hosts/nuke/__init__.py similarity index 100% rename from openpype/hosts/nuke/__init__.py rename to client/ayon_core/hosts/nuke/__init__.py diff --git a/client/ayon_core/hosts/nuke/addon.py b/client/ayon_core/hosts/nuke/addon.py new file mode 100644 index 0000000000..4ca4408271 --- /dev/null +++ b/client/ayon_core/hosts/nuke/addon.py @@ -0,0 +1,74 @@ +import os +import platform +from ayon_core.modules import OpenPypeModule, IHostAddon + +NUKE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class NukeAddon(OpenPypeModule, IHostAddon): + name = "nuke" + 
host_name = "nuke"
+
+    def initialize(self, module_settings):
+        self.enabled = True
+
+    def add_implementation_envs(self, env, _app):
+        # Add requirements to NUKE_PATH
+        new_nuke_paths = [
+            os.path.join(NUKE_ROOT_DIR, "startup")
+        ]
+        old_nuke_path = env.get("NUKE_PATH") or ""
+        for path in old_nuke_path.split(os.pathsep):
+            if not path:
+                continue
+
+            norm_path = os.path.normpath(path)
+            if norm_path not in new_nuke_paths:
+                new_nuke_paths.append(norm_path)
+
+        env["NUKE_PATH"] = os.pathsep.join(new_nuke_paths)
+        # Remove auto screen scale factor for Qt
+        # - let Nuke decide its value
+        env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
+        # Remove tkinter library paths if they are set
+        env.pop("TK_LIBRARY", None)
+        env.pop("TCL_LIBRARY", None)
+
+        # Add vendor to PYTHONPATH
+        python_path = env["PYTHONPATH"]
+        python_path_parts = []
+        if python_path:
+            python_path_parts = python_path.split(os.pathsep)
+        vendor_path = os.path.join(NUKE_ROOT_DIR, "vendor")
+        python_path_parts.insert(0, vendor_path)
+        env["PYTHONPATH"] = os.pathsep.join(python_path_parts)
+
+        # Set default values if they are not already set via settings
+        defaults = {
+            "LOGLEVEL": "DEBUG"
+        }
+        for key, value in defaults.items():
+            if not env.get(key):
+                env[key] = value
+
+        # Try to add QuickTime to PATH
+        quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem"
+        if (platform.system().lower() == "windows"
+                and os.path.exists(quick_time_path)):
+            path_value = env.get("PATH") or ""
+            path_paths = [
+                path
+                for path in path_value.split(os.pathsep)
+                if path
+            ]
+            path_paths.append(quick_time_path)
+            env["PATH"] = os.pathsep.join(path_paths)
+
+    def get_launch_hook_paths(self, app):
+        if app.host_name != self.host_name:
+            return []
+        return [
+            os.path.join(NUKE_ROOT_DIR, "hooks")
+        ]
+
+    def get_workfile_extensions(self):
+        return [".nk"]
diff --git a/openpype/hosts/nuke/api/__init__.py b/client/ayon_core/hosts/nuke/api/__init__.py
similarity index 100%
rename from openpype/hosts/nuke/api/__init__.py
rename to client/ayon_core/hosts/nuke/api/__init__.py
diff --git a/client/ayon_core/hosts/nuke/api/actions.py b/client/ayon_core/hosts/nuke/api/actions.py
new file mode 100644
index 0000000000..a7bcb5b44f
--- /dev/null
+++ b/client/ayon_core/hosts/nuke/api/actions.py
@@ -0,0 +1,77 @@
+import pyblish.api
+
+from ayon_core.pipeline.publish import get_errored_instances_from_context
+from .lib import (
+    reset_selection,
+    select_nodes
+)
+
+
+class SelectInvalidAction(pyblish.api.Action):
+    """Select invalid nodes in Nuke when plug-in failed.
+
+    To retrieve the invalid nodes this assumes a static `get_invalid()`
+    method is available on the plugin.
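+
+    A compatible plug-in would look roughly like this (sketch only, the
+    validator name and the body are illustrative):
+
+        class ValidateExample(pyblish.api.InstancePlugin):
+            actions = [SelectInvalidAction]
+
+            @classmethod
+            def get_invalid(cls, instance):
+                # return the nuke nodes that failed validation
+                return instance.data.get("invalid_nodes", [])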
+ + """ + label = "Select invalid nodes" + on = "failed" # This action is only available on a failed plug-in + icon = "search" # Icon from Awesome Icon + + def process(self, context, plugin): + + errored_instances = get_errored_instances_from_context(context, + plugin=plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid nodes..") + invalid = set() + for instance in errored_instances: + invalid_nodes = plugin.get_invalid(instance) + + if invalid_nodes: + if isinstance(invalid_nodes, (list, tuple)): + invalid.update(invalid_nodes) + else: + self.log.warning("Plug-in returned to be invalid, " + "but has no selectable nodes.") + + if invalid: + self.log.info("Selecting invalid nodes: {}".format(invalid)) + reset_selection() + select_nodes(invalid) + else: + self.log.info("No invalid nodes found.") + + +class SelectInstanceNodeAction(pyblish.api.Action): + """Select instance node for failed plugin.""" + label = "Select instance node" + on = "failed" # This action is only available on a failed plug-in + icon = "mdi.cursor-default-click" + + def process(self, context, plugin): + + # Get the errored instances for the plug-in + errored_instances = get_errored_instances_from_context( + context, plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding instance nodes..") + nodes = set() + for instance in errored_instances: + instance_node = instance.data.get("transientData", {}).get("node") + if not instance_node: + raise RuntimeError( + "No transientData['node'] found on instance: {}".format( + instance + ) + ) + nodes.add(instance_node) + + if nodes: + self.log.info("Selecting instance nodes: {}".format(nodes)) + reset_selection() + select_nodes(nodes) + else: + self.log.info("No instance nodes found.") diff --git a/openpype/hosts/nuke/api/command.py b/client/ayon_core/hosts/nuke/api/command.py similarity index 100% rename from openpype/hosts/nuke/api/command.py rename to client/ayon_core/hosts/nuke/api/command.py diff --git a/openpype/hosts/nuke/api/constants.py b/client/ayon_core/hosts/nuke/api/constants.py similarity index 100% rename from openpype/hosts/nuke/api/constants.py rename to client/ayon_core/hosts/nuke/api/constants.py diff --git a/openpype/hosts/nuke/api/gizmo_menu.py b/client/ayon_core/hosts/nuke/api/gizmo_menu.py similarity index 98% rename from openpype/hosts/nuke/api/gizmo_menu.py rename to client/ayon_core/hosts/nuke/api/gizmo_menu.py index 5838ee8a8a..435e4a5806 100644 --- a/openpype/hosts/nuke/api/gizmo_menu.py +++ b/client/ayon_core/hosts/nuke/api/gizmo_menu.py @@ -2,7 +2,7 @@ import re import nuke -from openpype.lib import Logger +from ayon_core.lib import Logger log = Logger.get_logger(__name__) diff --git a/client/ayon_core/hosts/nuke/api/lib.py b/client/ayon_core/hosts/nuke/api/lib.py new file mode 100644 index 0000000000..2ac33de68e --- /dev/null +++ b/client/ayon_core/hosts/nuke/api/lib.py @@ -0,0 +1,3519 @@ +import os +from pprint import pformat +import re +import json +import six +import functools +import warnings +import platform +import tempfile +import contextlib +from collections import OrderedDict + +import nuke +from qtpy import QtCore, QtWidgets + +from ayon_core.client import ( + get_project, + get_asset_by_name, + get_versions, + get_last_versions, + get_representations, +) + +from ayon_core.host import HostDirmap +from ayon_core.tools.utils import host_tools +from ayon_core.pipeline.workfile.workfile_template_builder import ( + TemplateProfileNotFound +) +from ayon_core.lib import ( + env_value_to_bool, + 
Logger, + get_version_from_path, + StringTemplate, +) + +from ayon_core.settings import ( + get_project_settings, + get_current_project_settings, +) +from ayon_core.addon import AddonsManager +from ayon_core.pipeline.template_data import get_template_data_with_names +from ayon_core.pipeline import ( + discover_legacy_creator_plugins, + Anatomy, + get_current_host_name, + get_current_project_name, + get_current_asset_name, +) +from ayon_core.pipeline.context_tools import ( + get_custom_workfile_template_from_session +) +from ayon_core.pipeline.colorspace import get_imageio_config +from ayon_core.pipeline.workfile import BuildWorkfile +from . import gizmo_menu +from .constants import ASSIST + +from .workio import save_file +from .utils import get_node_outputs + +log = Logger.get_logger(__name__) + +MENU_LABEL = os.getenv("AYON_MENU_LABEL") or "AYON" +NODE_TAB_NAME = MENU_LABEL +DATA_GROUP_KEY = "{}DataGroup".format(MENU_LABEL.capitalize()) +EXCLUDED_KNOB_TYPE_ON_READ = ( + 20, # Tab Knob + 26, # Text Knob (But for backward compatibility, still be read + # if value is not an empty string.) +) +JSON_PREFIX = "JSON:::" +ROOT_DATA_KNOB = "publish_context" +INSTANCE_DATA_KNOB = "publish_instance" + + +class DeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", DeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=DeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +class Context: + main_window = None + context_action_item = None + project_name = os.getenv("AVALON_PROJECT") + # Workfile related code + workfiles_launched = False + workfiles_tool_timer = None + + # Seems unused + _project_doc = None + + +def get_main_window(): + """Acquire Nuke's main window""" + if Context.main_window is None: + + top_widgets = QtWidgets.QApplication.topLevelWidgets() + name = "Foundry::UI::DockMainWindow" + for widget in top_widgets: + if ( + widget.inherits("QMainWindow") + and widget.metaObject().className() == name + ): + Context.main_window = widget + break + return Context.main_window + + +def set_node_data(node, knobname, data): + """Write data to node invisible knob + + Will create new in case it doesn't exists + or update the one already created. 
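+
+    For example (sketch): set_node_data(node, "publish_instance",
+    {"family": "render"}) stores the dict JSON-serialized behind the
+    "JSON:::" prefix on an invisible String_Knob named "publish_instance".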
+ + Args: + node (nuke.Node): node object + knobname (str): knob name + data (dict): data to be stored in knob + """ + # if exists then update data + if knobname in node.knobs(): + log.debug("Updating knobname `{}` on node `{}`".format( + knobname, node.name() + )) + update_node_data(node, knobname, data) + return + + log.debug("Creating knobname `{}` on node `{}`".format( + knobname, node.name() + )) + # else create new + knob_value = JSON_PREFIX + json.dumps(data) + knob = nuke.String_Knob(knobname) + knob.setValue(knob_value) + knob.setFlag(nuke.INVISIBLE) + node.addKnob(knob) + + +def get_node_data(node, knobname): + """Read data from node. + + Args: + node (nuke.Node): node object + knobname (str): knob name + + Returns: + dict: data stored in knob + """ + if knobname not in node.knobs(): + return + + rawdata = node[knobname].getValue() + if ( + isinstance(rawdata, six.string_types) + and rawdata.startswith(JSON_PREFIX) + ): + try: + return json.loads(rawdata[len(JSON_PREFIX):]) + except json.JSONDecodeError: + return + + +def update_node_data(node, knobname, data): + """Update already present data. + + Args: + node (nuke.Node): node object + knobname (str): knob name + data (dict): data to update knob value + """ + knob = node[knobname] + node_data = get_node_data(node, knobname) or {} + node_data.update(data) + knob_value = JSON_PREFIX + json.dumps(node_data) + knob.setValue(knob_value) + + +class Knobby(object): + """[DEPRECATED] For creating knob which it's type isn't + mapped in `create_knobs` + + Args: + type (string): Nuke knob type name + value: Value to be set with `Knob.setValue`, put `None` if not required + flags (list, optional): Knob flags to be set with `Knob.setFlag` + *args: Args other than knob name for initializing knob class + + """ + + def __init__(self, type, value, flags=None, *args): + self.type = type + self.value = value + self.flags = flags or [] + self.args = args + + def create(self, name, nice=None): + knob_cls = getattr(nuke, self.type) + knob = knob_cls(name, nice, *self.args) + if self.value is not None: + knob.setValue(self.value) + for flag in self.flags: + knob.setFlag(flag) + return knob + + @staticmethod + def nice_naming(key): + """Convert camelCase name into UI Display Name""" + words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]) + return " ".join(words) + + +def create_knobs(data, tab=None): + """Create knobs by data + + Depending on the type of each dict value and creates the correct Knob. + + Mapped types: + bool: nuke.Boolean_Knob + int: nuke.Int_Knob + float: nuke.Double_Knob + list: nuke.Enumeration_Knob + six.string_types: nuke.String_Knob + + dict: If it's a nested dict (all values are dict), will turn into + A tabs group. Or just a knobs group. 
+ + Args: + data (dict): collection of attributes and their value + tab (string, optional): Knobs' tab name + + Returns: + list: A list of `nuke.Knob` objects + + """ + def nice_naming(key): + """Convert camelCase name into UI Display Name""" + words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]) + return " ".join(words) + + # Turn key-value pairs into knobs + knobs = list() + + if tab: + knobs.append(nuke.Tab_Knob(tab)) + + for key, value in data.items(): + # Knob name + if isinstance(key, tuple): + name, nice = key + else: + name, nice = key, nice_naming(key) + + # Create knob by value type + if isinstance(value, Knobby): + knobby = value + knob = knobby.create(name, nice) + + elif isinstance(value, float): + knob = nuke.Double_Knob(name, nice) + knob.setValue(value) + + elif isinstance(value, bool): + knob = nuke.Boolean_Knob(name, nice) + knob.setValue(value) + knob.setFlag(nuke.STARTLINE) + + elif isinstance(value, int): + knob = nuke.Int_Knob(name, nice) + knob.setValue(value) + + elif isinstance(value, six.string_types): + knob = nuke.String_Knob(name, nice) + knob.setValue(value) + + elif isinstance(value, list): + knob = nuke.Enumeration_Knob(name, nice, value) + + elif isinstance(value, dict): + if all(isinstance(v, dict) for v in value.values()): + # Create a group of tabs + begain = nuke.BeginTabGroup_Knob() + end = nuke.EndTabGroup_Knob() + begain.setName(name) + end.setName(name + "_End") + knobs.append(begain) + for k, v in value.items(): + knobs += create_knobs(v, tab=k) + knobs.append(end) + else: + # Create a group of knobs + knobs.append(nuke.Tab_Knob( + name, nice, nuke.TABBEGINCLOSEDGROUP)) + knobs += create_knobs(value) + knobs.append( + nuke.Tab_Knob(name + "_End", nice, nuke.TABENDGROUP)) + continue + + else: + raise TypeError("Unsupported type: %r" % type(value)) + + knobs.append(knob) + + return knobs + + +def imprint(node, data, tab=None): + """Store attributes with value on node + + Parse user data into Node knobs. + Use `collections.OrderedDict` to ensure knob order. 
+ + Args: + node(nuke.Node): node object from Nuke + data(dict): collection of attributes and their value + + Returns: + None + + Examples: + ``` + import nuke + from ayon_core.hosts.nuke.api import lib + + node = nuke.createNode("NoOp") + data = { + # Regular type of attributes + "myList": ["x", "y", "z"], + "myBool": True, + "myFloat": 0.1, + "myInt": 5, + + # Creating non-default imprint type of knob + "MyFilePath": lib.Knobby("File_Knob", "/file/path"), + "divider": lib.Knobby("Text_Knob", ""), + + # Manual nice knob naming + ("my_knob", "Nice Knob Name"): "some text", + + # dict type will be created as knob group + "KnobGroup": { + "knob1": 5, + "knob2": "hello", + "knob3": ["a", "b"], + }, + + # Nested dict will be created as tab group + "TabGroup": { + "tab1": {"count": 5}, + "tab2": {"isGood": True}, + "tab3": {"direction": ["Left", "Right"]}, + }, + } + lib.imprint(node, data, tab="Demo") + + ``` + + """ + for knob in create_knobs(data, tab): + node.addKnob(knob) + + +@deprecated +def add_publish_knob(node): + """[DEPRECATED] Add Publish knob to node + + Arguments: + node (nuke.Node): nuke node to be processed + + Returns: + node (nuke.Node): processed nuke node + + """ + if "publish" not in node.knobs(): + body = OrderedDict() + body[("divd", "Publishing")] = Knobby("Text_Knob", '') + body["publish"] = True + imprint(node, body) + return node + + +@deprecated("ayon_core.hosts.nuke.api.lib.set_node_data") +def set_avalon_knob_data(node, data=None, prefix="avalon:"): + """[DEPRECATED] Sets data into nodes's avalon knob + + This function is still used but soon will be deprecated. + Use `set_node_data` instead. + + Arguments: + node (nuke.Node): Nuke node to imprint with data, + data (dict, optional): Data to be imprinted into AvalonTab + prefix (str, optional): filtering prefix + + Returns: + node (nuke.Node) + + Examples: + data = { + 'asset': 'sq020sh0280', + 'family': 'render', + 'subset': 'subsetMain' + } + """ + data = data or dict() + create = OrderedDict() + + tab_name = NODE_TAB_NAME + editable = ["asset", "subset", "name", "namespace"] + + existed_knobs = node.knobs() + + for key, value in data.items(): + knob_name = prefix + key + gui_name = key + + if knob_name in existed_knobs: + # Set value + try: + node[knob_name].setValue(value) + except TypeError: + node[knob_name].setValue(str(value)) + else: + # New knob + name = (knob_name, gui_name) # Hide prefix on GUI + if key in editable: + create[name] = value + else: + create[name] = Knobby("String_Knob", + str(value), + flags=[nuke.READ_ONLY]) + if tab_name in existed_knobs: + tab_name = None + else: + tab = OrderedDict() + warn = Knobby("Text_Knob", "Warning! Do not change following data!") + divd = Knobby("Text_Knob", "") + head = [ + (("warn", ""), warn), + (("divd", ""), divd), + ] + tab[DATA_GROUP_KEY] = OrderedDict(head + list(create.items())) + create = tab + + imprint(node, create, tab=tab_name) + return node + + +@deprecated("ayon_core.hosts.nuke.api.lib.get_node_data") +def get_avalon_knob_data(node, prefix="avalon:", create=True): + """[DEPRECATED] Gets a data from nodes's avalon knob + + This function is still used but soon will be deprecated. + Use `get_node_data` instead. 
+ + Arguments: + node (obj): Nuke node to search for data, + prefix (str, optional): filtering prefix + + Returns: + data (dict) + """ + + data = {} + if NODE_TAB_NAME not in node.knobs(): + return data + + # check if lists + if not isinstance(prefix, list): + prefix = [prefix] + + # loop prefix + for p in prefix: + # check if the node is avalon tracked + try: + # check if data available on the node + test = node[DATA_GROUP_KEY].value() + log.debug("Only testing if data available: `{}`".format(test)) + except NameError as e: + # if it doesn't then create it + log.debug("Creating avalon knob: `{}`".format(e)) + if create: + node = set_avalon_knob_data(node) + return get_avalon_knob_data(node) + return {} + + # get data from filtered knobs + data.update({k.replace(p, ''): node[k].value() + for k in node.knobs().keys() + if p in k}) + + return data + + +@deprecated +def fix_data_for_node_create(data): + """[DEPRECATED] Fixing data to be used for nuke knobs + """ + for k, v in data.items(): + if isinstance(v, six.text_type): + data[k] = str(v) + if str(v).startswith("0x"): + data[k] = int(v, 16) + return data + + +@deprecated +def add_write_node_legacy(name, **kwarg): + """[DEPRECATED] Adding nuke write node + Arguments: + name (str): nuke node name + kwarg (attrs): data for nuke knobs + Returns: + node (obj): nuke write node + """ + use_range_limit = kwarg.get("use_range_limit", None) + + w = nuke.createNode( + "Write", + "name {}".format(name), + inpanel=False + ) + + w["file"].setValue(kwarg["file"]) + + for k, v in kwarg.items(): + if "frame_range" in k: + continue + log.info([k, v]) + try: + w[k].setValue(v) + except KeyError as e: + log.debug(e) + continue + + if use_range_limit: + w["use_limit"].setValue(True) + w["first"].setValue(kwarg["frame_range"][0]) + w["last"].setValue(kwarg["frame_range"][1]) + + return w + + +def add_write_node(name, file_path, knobs, **kwarg): + """Adding nuke write node + + Arguments: + name (str): nuke node name + kwarg (attrs): data for nuke knobs + + Returns: + node (obj): nuke write node + """ + use_range_limit = kwarg.get("use_range_limit", None) + + w = nuke.createNode( + "Write", + "name {}".format(name), + inpanel=False + ) + + w["file"].setValue(file_path) + + # finally add knob overrides + set_node_knobs_from_settings(w, knobs, **kwarg) + + if use_range_limit: + w["use_limit"].setValue(True) + w["first"].setValue(kwarg["frame_range"][0]) + w["last"].setValue(kwarg["frame_range"][1]) + + return w + + +def read_avalon_data(node): + """Return user-defined knobs from given `node` + + Args: + node (nuke.Node): Nuke node object + + Returns: + list: A list of nuke.Knob object + + """ + def compat_prefixed(knob_name): + if knob_name.startswith("avalon:"): + return knob_name[len("avalon:"):] + elif knob_name.startswith("ak:"): + return knob_name[len("ak:"):] + + data = dict() + + pattern = ("(?<=addUserKnob {)" + "([0-9]*) (\\S*)" # Matching knob type and knob name + "(?=[ |}])") + tcl_script = node.writeKnobs(nuke.WRITE_USER_KNOB_DEFS) + result = re.search(pattern, tcl_script) + + if result: + first_user_knob = result.group(2) + # Collect user knobs from the end of the knob list + for knob in reversed(node.allKnobs()): + knob_name = knob.name() + if not knob_name: + # Ignore unnamed knob + continue + + knob_type = nuke.knob(knob.fullyQualifiedName(), type=True) + value = knob.value() + + if ( + knob_type not in EXCLUDED_KNOB_TYPE_ON_READ or + # For compating read-only string data that imprinted + # by `nuke.Text_Knob`. 
+                (knob_type == 26 and value)
+            ):
+                key = compat_prefixed(knob_name)
+                if key is not None:
+                    data[key] = value
+
+            if knob_name == first_user_knob:
+                break
+
+    return data
+
+
+def get_node_path(path, padding=4):
+    """Get filename for the Nuke write with padded number as '#'
+
+    Arguments:
+        path (str): The path to render to.
+
+    Returns:
+        tuple: head, padding, tail (extension)
+
+    Examples:
+        >>> get_node_path("test.exr")
+        ('test', 4, '.exr')
+
+        >>> get_node_path("filename.#####.tif")
+        ('filename.', 5, '.tif')
+
+        >>> get_node_path("foobar##.tif")
+        ('foobar', 2, '.tif')
+
+        >>> get_node_path("foobar_%08d.tif")
+        ('foobar_', 8, '.tif')
+    """
+    filename, ext = os.path.splitext(path)
+
+    # Find a final number group
+    if '%' in filename:
+        match = re.match('.*?(%[0-9]+d)$', filename)
+        if match:
+            padding = int(match.group(1).replace('%', '').replace('d', ''))
+            # remove the number from the end; it will be
+            # replaced with the frame number
+            filename = filename.replace(match.group(1), '')
+    elif '#' in filename:
+        match = re.match('.*?(#+)$', filename)
+
+        if match:
+            padding = len(match.group(1))
+            # remove the number from the end; it will be
+            # replaced with the frame number
+            filename = filename.replace(match.group(1), '')
+
+    return filename, padding, ext
+
+
+def get_nuke_imageio_settings():
+    return get_project_settings(Context.project_name)["nuke"]["imageio"]
+
+
+@deprecated("ayon_core.hosts.nuke.api.lib.get_nuke_imageio_settings")
+def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
+    '''[DEPRECATED] Get preset data for dataflow (fileType, compression, bitDepth)
+    '''
+
+    assert any([creator, nodeclass]), nuke.message(
+        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
+
+    imageio_nodes = get_nuke_imageio_settings()["nodes"]
+    required_nodes = imageio_nodes["requiredNodes"]
+
+    # HACK: for backward compatibility this needs to be optional
+    override_nodes = imageio_nodes.get("overrideNodes", [])
+
+    imageio_node = None
+    for node in required_nodes:
+        log.info(node)
+        if (
+            nodeclass in node["nukeNodeClass"]
+            and creator in node["plugins"]
+        ):
+            imageio_node = node
+            break
+
+    log.debug("__ imageio_node: {}".format(imageio_node))
+
+    # find matching override node
+    override_imageio_node = None
+    for onode in override_nodes:
+        log.info(onode)
+        if nodeclass not in onode["nukeNodeClass"]:
+            continue
+
+        if creator not in onode["plugins"]:
+            continue
+
+        if (
+            onode["subsets"]
+            and not any(
+                re.search(s.lower(), subset.lower())
+                for s in onode["subsets"]
+            )
+        ):
+            continue
+
+        override_imageio_node = onode
+        break
+
+    log.debug("__ override_imageio_node: {}".format(override_imageio_node))
+    # add overrides to imageio_node
+    if override_imageio_node:
+        # get all knob names in imageio_node
+        knob_names = [k["name"] for k in imageio_node["knobs"]]
+
+        for oknob in override_imageio_node["knobs"]:
+            for knob in imageio_node["knobs"]:
+                # override matching knob name
+                if oknob["name"] == knob["name"]:
+                    log.debug(
+                        "_ overriding knob: `{}` > `{}`".format(
+                            knob, oknob
+                        ))
+                    if not oknob["value"]:
+                        # remove original knob if no value found in oknob
+                        imageio_node["knobs"].remove(knob)
+                    else:
+                        # override knob value with oknob's
+                        knob["value"] = oknob["value"]
+
+            # add missing knobs into imageio_node
+            if oknob["name"] not in knob_names:
+                log.debug(
+                    "_ adding knob: `{}`".format(oknob))
+                imageio_node["knobs"].append(oknob)
+                knob_names.append(oknob["name"])
+
+    log.info("ImageIO node: {}".format(imageio_node))
+    return imageio_node
+
+
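+# Usage sketch for the settings lookup below (the plugin and subset names
+# are hypothetical; real values come from the project settings):
+#
+#   imageio_node = get_imageio_node_setting(
+#       node_class="Write",
+#       plugin_name="CreateWriteRender",
+#       subset="renderMain",
+#   )
+#   # imageio_node["knobs"] then holds the required knobs with matching
+#   # "overrideNodes" values merged in by get_imageio_node_override_setting.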
+def get_imageio_node_setting(node_class, plugin_name, subset): + ''' Get preset data for dataflow (fileType, compression, bitDepth) + ''' + imageio_nodes = get_nuke_imageio_settings()["nodes"] + required_nodes = imageio_nodes["requiredNodes"] + + imageio_node = None + for node in required_nodes: + log.info(node) + if ( + node_class in node["nukeNodeClass"] + and plugin_name in node["plugins"] + ): + imageio_node = node + break + + log.debug("__ imageio_node: {}".format(imageio_node)) + + if not imageio_node: + return + + # find overrides and update knobs with them + get_imageio_node_override_setting( + node_class, + plugin_name, + subset, + imageio_node["knobs"] + ) + + log.info("ImageIO node: {}".format(imageio_node)) + return imageio_node + + +def get_imageio_node_override_setting( + node_class, plugin_name, subset, knobs_settings +): + ''' Get imageio node overrides from settings + ''' + imageio_nodes = get_nuke_imageio_settings()["nodes"] + override_nodes = imageio_nodes["overrideNodes"] + + # find matching override node + override_imageio_node = None + for onode in override_nodes: + log.debug("__ onode: {}".format(onode)) + log.debug("__ subset: {}".format(subset)) + if node_class not in onode["nukeNodeClass"]: + continue + + if plugin_name not in onode["plugins"]: + continue + + if ( + onode["subsets"] + and not any( + re.search(s.lower(), subset.lower()) + for s in onode["subsets"] + ) + ): + continue + + override_imageio_node = onode + break + + log.debug("__ override_imageio_node: {}".format(override_imageio_node)) + # add overrides to imageio_node + if override_imageio_node: + # get all knob names in imageio_node + knob_names = [k["name"] for k in knobs_settings] + + for oknob in override_imageio_node["knobs"]: + for knob in knobs_settings: + # override matching knob name + if oknob["name"] == knob["name"]: + log.debug( + "_ overriding knob: `{}` > `{}`".format( + knob, oknob + )) + if not oknob["value"]: + # remove original knob if no value found in oknob + knobs_settings.remove(knob) + else: + # override knob value with oknob's + knob["value"] = oknob["value"] + + # add missing knobs into imageio_node + if oknob["name"] not in knob_names: + log.debug( + "_ adding knob: `{}`".format(oknob)) + knobs_settings.append(oknob) + knob_names.append(oknob["name"]) + + return knobs_settings + + +def get_imageio_input_colorspace(filename): + ''' Get input file colorspace based on regex in settings. 
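+
+    Example (illustrative values only): with a settings entry
+    {"regex": "linear", "colorspace": "linear"}, a filename such as
+    "plate_linear.exr" resolves to the "linear" colorspace.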
+ ''' + imageio_regex_inputs = ( + get_nuke_imageio_settings()["regexInputs"]["inputs"]) + + preset_clrsp = None + for regexInput in imageio_regex_inputs: + if bool(re.search(regexInput["regex"], filename)): + preset_clrsp = str(regexInput["colorspace"]) + + return preset_clrsp + + +def get_view_process_node(): + reset_selection() + + ipn_node = None + for v_ in nuke.allNodes(filter="Viewer"): + ipn = v_['input_process_node'].getValue() + ipn_node = nuke.toNode(ipn) + + # skip if no input node is set + if not ipn: + continue + + if ipn == "VIEWER_INPUT" and not ipn_node: + # since it is set by default we can ignore it + # nobody usually use this but use it if + # it exists in nodes + continue + + if not ipn_node: + # in case a Viewer node is transferred from + # different workfile with old values + raise NameError(( + "Input process node name '{}' set in " + "Viewer '{}' is doesn't exists in nodes" + ).format(ipn, v_.name())) + + ipn_node.setSelected(True) + + if ipn_node: + return duplicate_node(ipn_node) + + +def on_script_load(): + ''' Callback for ffmpeg support + ''' + if nuke.env["LINUX"]: + nuke.tcl('load ffmpegReader') + nuke.tcl('load ffmpegWriter') + else: + nuke.tcl('load movReader') + nuke.tcl('load movWriter') + + +def check_inventory_versions(): + """ + Actual version idetifier of Loaded containers + + Any time this function is run it will check all nodes and filter only + Loader nodes for its version. It will get all versions from database + and check if the node is having actual version. If not then it will color + it to red. + """ + from .pipeline import parse_container + + # get all Loader nodes by avalon attribute metadata + node_with_repre_id = [] + repre_ids = set() + # Find all containers and collect it's node and representation ids + for node in nuke.allNodes(): + container = parse_container(node) + + if container: + node = nuke.toNode(container["objectName"]) + avalon_knob_data = read_avalon_data(node) + repre_id = avalon_knob_data["representation"] + + repre_ids.add(repre_id) + node_with_repre_id.append((node, repre_id)) + + # Skip if nothing was found + if not repre_ids: + return + + project_name = get_current_project_name() + # Find representations based on found containers + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["_id", "parent"] + ) + # Store representations by id and collect version ids + repre_docs_by_id = {} + version_ids = set() + for repre_doc in repre_docs: + # Use stringed representation id to match value in containers + repre_id = str(repre_doc["_id"]) + repre_docs_by_id[repre_id] = repre_doc + version_ids.add(repre_doc["parent"]) + + version_docs = get_versions( + project_name, version_ids, fields=["_id", "name", "parent"] + ) + # Store versions by id and collect subset ids + version_docs_by_id = {} + subset_ids = set() + for version_doc in version_docs: + version_docs_by_id[version_doc["_id"]] = version_doc + subset_ids.add(version_doc["parent"]) + + # Query last versions based on subset ids + last_versions_by_subset_id = get_last_versions( + project_name, subset_ids=subset_ids, fields=["_id", "parent"] + ) + + # Loop through collected container nodes and their representation ids + for item in node_with_repre_id: + # Some python versions of nuke can't unfold tuple in for loop + node, repre_id = item + repre_doc = repre_docs_by_id.get(repre_id) + # Failsafe for not finding the representation. 
+        if not repre_doc:
+            log.warning((
+                "Could not find the representation on node \"{}\""
+            ).format(node.name()))
+            continue
+
+        version_id = repre_doc["parent"]
+        version_doc = version_docs_by_id.get(version_id)
+        if not version_doc:
+            log.warning((
+                "Could not find the version on node \"{}\""
+            ).format(node.name()))
+            continue
+
+        # Get last version based on subset id
+        subset_id = version_doc["parent"]
+        last_version = last_versions_by_subset_id[subset_id]
+        # Check if last version is same as current version
+        if last_version["_id"] == version_doc["_id"]:
+            color_value = "0x4ecd25ff"
+        else:
+            color_value = "0xd84f20ff"
+        node["tile_color"].setValue(int(color_value, 16))
+
+
+def writes_version_sync():
+    ''' Callback synchronizing version of publishable write nodes
+    '''
+    try:
+        rootVersion = get_version_from_path(nuke.root().name())
+        padding = len(rootVersion)
+        new_version = "v" + str("{" + ":0>{}".format(padding) + "}").format(
+            int(rootVersion)
+        )
+        log.debug("new_version: {}".format(new_version))
+    except Exception:
+        return
+
+    for each in nuke.allNodes(filter="Write"):
+        # check if the node is avalon tracked
+        if NODE_TAB_NAME not in each.knobs():
+            continue
+
+        avalon_knob_data = read_avalon_data(each)
+
+        try:
+            if avalon_knob_data["families"] not in ["render"]:
+                log.debug(avalon_knob_data["families"])
+                continue
+
+            node_file = each["file"].value()
+
+            node_version = "v" + get_version_from_path(node_file)
+            log.debug("node_version: {}".format(node_version))
+
+            node_new_file = node_file.replace(node_version, new_version)
+            each["file"].setValue(node_new_file)
+            if not os.path.isdir(os.path.dirname(node_new_file)):
+                log.warning("Path does not exist! I am creating it.")
+                os.makedirs(os.path.dirname(node_new_file))
+        except Exception as e:
+            log.warning(
+                "Write node: `{}` has no version in path: {}".format(
+                    each.name(), e))
+
+
+def version_up_script():
+    ''' Raise the working script's version
+    '''
+    import nukescripts
+    nukescripts.script_and_write_nodes_version_up()
+
+
+def check_subsetname_exists(nodes, subset_name):
+    """
+    Check whether a node with the given subset name was already created,
+    to make sure there is no duplicity
+
+    Arguments:
+        nodes (list): list of nuke.Node objects
+        subset_name (str): name we try to find
+
+    Returns:
+        bool: True or False
+    """
+    return next((True for n in nodes
+                 if subset_name in read_avalon_data(n).get("subset", "")),
+                False)
+
+
+def format_anatomy(data):
+    ''' Helper function for formatting anatomy paths
+
+    Arguments:
+        data (dict): dictionary with attributes used for formatting
+
+    Return:
+        path (str)
+    '''
+
+    project_name = get_current_project_name()
+    anatomy = Anatomy(project_name)
+    log.debug("__ anatomy.templates: {}".format(anatomy.templates))
+
+    padding = None
+    if "frame_padding" in anatomy.templates.keys():
+        padding = int(anatomy.templates["frame_padding"])
+    elif "render" in anatomy.templates.keys():
+        padding = int(
+            anatomy.templates["render"].get(
+                "frame_padding"
+            )
+        )
+
+    version = data.get("version", None)
+    if not version:
+        file = script_name()
+        data["version"] = get_version_from_path(file)
+
+    asset_name = data["folderPath"]
+    task_name = data["task"]
+    host_name = get_current_host_name()
+    context_data = get_template_data_with_names(
+        project_name, asset_name, task_name, host_name
+    )
+    data.update(context_data)
+    data.update({
+        "subset": data["subset"],
+        "family": data["family"],
+        "frame": "#" * padding,
+    })
+    return anatomy.format(data)
+
+
+def script_name():
+    ''' Returns nuke script path
+    '''
+    return 
nuke.root().knob("name").value() + + +def add_button_write_to_read(node): + name = "createReadNode" + label = "Read From Rendered" + value = "import write_to_read;\ + write_to_read.write_to_read(nuke.thisNode(), allow_relative=False)" + knob = nuke.PyScript_Knob(name, label, value) + knob.clearFlag(nuke.STARTLINE) + node.addKnob(knob) + + +def add_button_clear_rendered(node, path): + name = "clearRendered" + label = "Clear Rendered" + value = "import clear_rendered;\ + clear_rendered.clear_rendered(\"{}\")".format(path) + knob = nuke.PyScript_Knob(name, label, value) + node.addKnob(knob) + + +def create_prenodes( + prev_node, + nodes_setting, + plugin_name=None, + subset=None, + **kwargs +): + last_node = None + for_dependency = {} + for name, node in nodes_setting.items(): + # get attributes + nodeclass = node["nodeclass"] + knobs = node["knobs"] + + # create node + now_node = nuke.createNode( + nodeclass, + "name {}".format(name), + inpanel=False + ) + + # add for dependency linking + for_dependency[name] = { + "node": now_node, + "dependent": node["dependent"] + } + + if all([plugin_name, subset]): + # find imageio overrides + get_imageio_node_override_setting( + now_node.Class(), + plugin_name, + subset, + knobs + ) + + # add data to knob + set_node_knobs_from_settings(now_node, knobs, **kwargs) + + # switch actual node to previous + last_node = now_node + + for _node_name, node_prop in for_dependency.items(): + if not node_prop["dependent"]: + node_prop["node"].setInput( + 0, prev_node) + elif node_prop["dependent"] in for_dependency: + _prev_node = for_dependency[node_prop["dependent"]]["node"] + node_prop["node"].setInput( + 0, _prev_node) + else: + log.warning("Dependency has wrong name of node: {}".format( + node_prop + )) + + return last_node + + +def create_write_node( + name, + data, + input=None, + prenodes=None, + linked_knobs=None, + **kwargs +): + ''' Creating write node which is group node + + Arguments: + name (str): name of node + data (dict): creator write instance data + input (node)[optional]: selected node to connect to + prenodes (dict)[optional]: + nodes to be created before write with dependency + review (bool)[optional]: adding review knob + farm (bool)[optional]: rendering workflow target + kwargs (dict)[optional]: additional key arguments for formatting + + Example: + prenodes = { + "nodeName": { + "nodeclass": "Reformat", + "dependent": [ + following_node_01, + ... + ], + "knobs": [ + { + "type": "text", + "name": "knobname", + "value": "knob value" + }, + ... + ] + }, + ... + } + + + Return: + node (obj): group node with avalon data as Knobs + ''' + prenodes = prenodes or {} + + # filtering variables + plugin_name = data["creator"] + subset = data["subset"] + + # get knob settings for write node + imageio_writes = get_imageio_node_setting( + node_class="Write", + plugin_name=plugin_name, + subset=subset + ) + + for knob in imageio_writes["knobs"]: + if knob["name"] == "file_type": + ext = knob["value"] + + data.update({ + "imageio_writes": imageio_writes, + "ext": ext + }) + anatomy_filled = format_anatomy(data) + + # build file path to workfiles + fdir = str(anatomy_filled["work"]["folder"]).replace("\\", "/") + data["work"] = fdir + fpath = StringTemplate(data["fpath_template"]).format_strict(data) + + # create directory + if not os.path.isdir(os.path.dirname(fpath)): + log.warning("Path does not exist! 
I am creating it.") + os.makedirs(os.path.dirname(fpath)) + + GN = nuke.createNode("Group", "name {}".format(name)) + + prev_node = None + with GN: + if input: + input_name = str(input.name()).replace(" ", "") + # if connected input node was defined + prev_node = nuke.createNode( + "Input", + "name {}".format(input_name), + inpanel=False + ) + else: + # generic input node connected to nothing + prev_node = nuke.createNode( + "Input", + "name {}".format("rgba"), + inpanel=False + ) + + # creating pre-write nodes `prenodes` + last_prenode = create_prenodes( + prev_node, + prenodes, + plugin_name, + subset, + **kwargs + ) + if last_prenode: + prev_node = last_prenode + + # creating write node + write_node = now_node = add_write_node( + "inside_{}".format(name), + fpath, + imageio_writes["knobs"], + **data + ) + # connect to previous node + now_node.setInput(0, prev_node) + + # switch actual node to previous + prev_node = now_node + + now_node = nuke.createNode("Output", "name Output1", inpanel=False) + + # connect to previous node + now_node.setInput(0, prev_node) + + # add divider + GN.addKnob(nuke.Text_Knob('', 'Rendering')) + + # Add linked knobs. + linked_knob_names = [] + + # add input linked knobs and create group only if any input + if linked_knobs: + linked_knob_names.append("_grp-start_") + linked_knob_names.extend(linked_knobs) + linked_knob_names.append("_grp-end_") + + linked_knob_names.append("Render") + + for _k_name in linked_knob_names: + if "_grp-start_" in _k_name: + knob = nuke.Tab_Knob( + "rnd_attr", "Rendering attributes", nuke.TABBEGINCLOSEDGROUP) + GN.addKnob(knob) + elif "_grp-end_" in _k_name: + knob = nuke.Tab_Knob( + "rnd_attr_end", "Rendering attributes", nuke.TABENDGROUP) + GN.addKnob(knob) + else: + if "___" in _k_name: + # add divider + GN.addKnob(nuke.Text_Knob("")) + else: + # add linked knob by _k_name + link = nuke.Link_Knob("") + link.makeLink(write_node.name(), _k_name) + link.setName(_k_name) + + # make render + if "Render" in _k_name: + link.setLabel("Render Local") + link.setFlag(0x1000) + GN.addKnob(link) + + # adding write to read button + add_button_write_to_read(GN) + + # adding write to read button + add_button_clear_rendered(GN, os.path.dirname(fpath)) + + # set tile color + tile_color = next( + iter( + k["value"] for k in imageio_writes["knobs"] + if "tile_color" in k["name"] + ), [255, 0, 0, 255] + ) + GN["tile_color"].setValue( + color_gui_to_int(tile_color)) + + return GN + + +@deprecated("ayon_core.hosts.nuke.api.lib.create_write_node") +def create_write_node_legacy( + name, data, input=None, prenodes=None, + review=True, linked_knobs=None, farm=True +): + ''' Creating write node which is group node + + Arguments: + name (str): name of node + data (dict): data to be imprinted + input (node): selected node to connect to + prenodes (list, optional): list of lists, definitions for nodes + to be created before write + review (bool): adding review knob + + Example: + prenodes = [ + { + "nodeName": { + "class": "" # string + "knobs": [ + ("knobName": value), + ... + ], + "dependent": [ + following_node_01, + ... + ] + } + }, + ... 
+    ]
+
+    Return:
+        node (obj): group node with avalon data as Knobs
+    '''
+    knob_overrides = data.get("knobs", [])
+    nodeclass = data["nodeclass"]
+    creator = data["creator"]
+    subset = data["subset"]
+
+    imageio_writes = get_created_node_imageio_setting_legacy(
+        nodeclass, creator, subset
+    )
+    for knob in imageio_writes["knobs"]:
+        if knob["name"] == "file_type":
+            representation = knob["value"]
+
+    host_name = get_current_host_name()
+    try:
+        data.update({
+            "app": host_name,
+            "imageio_writes": imageio_writes,
+            "representation": representation,
+        })
+        anatomy_filled = format_anatomy(data)
+
+    except Exception as e:
+        msg = "problem with resolving anatomy template: {}".format(e)
+        log.error(msg)
+        nuke.message(msg)
+
+    # build file path to workfiles
+    fdir = str(anatomy_filled["work"]["folder"]).replace("\\", "/")
+    fpath = data["fpath_template"].format(
+        work=fdir, version=data["version"], subset=data["subset"],
+        frame=data["frame"],
+        ext=representation
+    )
+
+    # create directory
+    if not os.path.isdir(os.path.dirname(fpath)):
+        log.warning("Path does not exist! I am creating it.")
+        os.makedirs(os.path.dirname(fpath))
+
+    _data = OrderedDict({
+        "file": fpath
+    })
+
+    # adding dataflow template
+    log.debug("imageio_writes: `{}`".format(imageio_writes))
+    for knob in imageio_writes["knobs"]:
+        _data[knob["name"]] = knob["value"]
+
+    _data = fix_data_for_node_create(_data)
+
+    log.debug("_data: `{}`".format(_data))
+
+    if "frame_range" in data.keys():
+        _data["frame_range"] = data.get("frame_range", None)
+        log.debug("_data[frame_range]: `{}`".format(_data["frame_range"]))
+
+    GN = nuke.createNode("Group", "name {}".format(name))
+
+    prev_node = None
+    with GN:
+        if input:
+            input_name = str(input.name()).replace(" ", "")
+            # if connected input node was defined
+            prev_node = nuke.createNode(
+                "Input", "name {}".format(input_name))
+        else:
+            # generic input node connected to nothing
+            prev_node = nuke.createNode(
+                "Input",
+                "name {}".format("rgba"),
+                inpanel=False
+            )
+        # creating pre-write nodes `prenodes`
+        if prenodes:
+            for node in prenodes:
+                # get attributes
+                pre_node_name = node["name"]
+                klass = node["class"]
+                knobs = node["knobs"]
+                dependent = node["dependent"]
+
+                # create node
+                now_node = nuke.createNode(
+                    klass,
+                    "name {}".format(pre_node_name),
+                    inpanel=False
+                )
+
+                # add data to knob
+                for _knob in knobs:
+                    knob, value = _knob
+                    try:
+                        now_node[knob].value()
+                    except NameError:
+                        log.warning(
+                            "knob `{}` does not exist on node `{}`".format(
+                                knob, now_node["name"].value()
+                            ))
+                        continue
+
+                    if not knob and not value:
+                        continue
+
+                    log.info((knob, value))
+
+                    if isinstance(value, str) and "[" in value:
+                        now_node[knob].setExpression(value)
+                    else:
+                        now_node[knob].setValue(value)
+
+                # connect to previous node
+                if dependent:
+                    if isinstance(dependent, (tuple, list)):
+                        for i, node_name in enumerate(dependent):
+                            input_node = nuke.createNode(
+                                "Input",
+                                "name {}".format(node_name),
+                                inpanel=False
+                            )
+                            now_node.setInput(1, input_node)
+
+                    elif isinstance(dependent, str):
+                        input_node = nuke.createNode(
+                            "Input",
+                            "name {}".format(dependent),
+                            inpanel=False
+                        )
+                        now_node.setInput(0, input_node)
+
+                else:
+                    now_node.setInput(0, prev_node)
+
+                # switch actual node to previous
+                prev_node = now_node
+
+        # creating write node
+
+        write_node = now_node = add_write_node_legacy(
+            "inside_{}".format(name),
+            **_data
+        )
+        # connect to previous node
+        now_node.setInput(0, prev_node)
+
+        # switch actual node to previous
+        prev_node = now_node
+
+
now_node = nuke.createNode("Output", "name Output1", inpanel=False) + + # connect to previous node + now_node.setInput(0, prev_node) + + # imprinting group node + set_avalon_knob_data(GN, data["avalon"]) + add_publish_knob(GN) + add_rendering_knobs(GN, farm) + + if review: + add_review_knob(GN) + + # add divider + GN.addKnob(nuke.Text_Knob('', 'Rendering')) + + # Add linked knobs. + linked_knob_names = [] + + # add input linked knobs and create group only if any input + if linked_knobs: + linked_knob_names.append("_grp-start_") + linked_knob_names.extend(linked_knobs) + linked_knob_names.append("_grp-end_") + + linked_knob_names.append("Render") + + for _k_name in linked_knob_names: + if "_grp-start_" in _k_name: + knob = nuke.Tab_Knob( + "rnd_attr", "Rendering attributes", nuke.TABBEGINCLOSEDGROUP) + GN.addKnob(knob) + elif "_grp-end_" in _k_name: + knob = nuke.Tab_Knob( + "rnd_attr_end", "Rendering attributes", nuke.TABENDGROUP) + GN.addKnob(knob) + else: + if "___" in _k_name: + # add divider + GN.addKnob(nuke.Text_Knob("")) + else: + # add linked knob by _k_name + link = nuke.Link_Knob("") + link.makeLink(write_node.name(), _k_name) + link.setName(_k_name) + + # make render + if "Render" in _k_name: + link.setLabel("Render Local") + link.setFlag(0x1000) + GN.addKnob(link) + + # adding write to read button + add_button_write_to_read(GN) + + # adding write to read button + add_button_clear_rendered(GN, os.path.dirname(fpath)) + + # Deadline tab. + add_deadline_tab(GN) + + # open the our Tab as default + GN[_NODE_TAB_NAME].setFlag(0) + + # set tile color + tile_color = _data.get("tile_color", "0xff0000ff") + GN["tile_color"].setValue(tile_color) + + # override knob values from settings + for knob in knob_overrides: + knob_type = knob["type"] + knob_name = knob["name"] + knob_value = knob["value"] + if knob_name not in GN.knobs(): + continue + if not knob_value: + continue + + # set correctly knob types + if knob_type == "string": + knob_value = str(knob_value) + if knob_type == "number": + knob_value = int(knob_value) + if knob_type == "decimal_number": + knob_value = float(knob_value) + if knob_type == "bool": + knob_value = bool(knob_value) + if knob_type in ["2d_vector", "3d_vector", "color", "box"]: + knob_value = list(knob_value) + + GN[knob_name].setValue(knob_value) + + return GN + + +def set_node_knobs_from_settings(node, knob_settings, **kwargs): + """ Overriding knob values from settings + + Using `schema_nuke_knob_inputs` for knob type definitions. + + Args: + node (nuke.Node): nuke node + knob_settings (list): list of dict. 
Keys are `type`, `name`, `value`
+        kwargs (dict)[optional]: keys for formattable knob settings
+    """
+    for knob in knob_settings:
+        log.debug("__ knob: {}".format(pformat(knob)))
+        knob_type = knob["type"]
+        knob_name = knob["name"]
+
+        if knob_name not in node.knobs():
+            continue
+
+        if knob_type == "expression":
+            knob_expression = knob["expression"]
+            node[knob_name].setExpression(
+                knob_expression
+            )
+            continue
+
+        # first deal with formattable knob settings
+        if knob_type == "formatable":
+            template = knob["template"]
+            to_type = knob["to_type"]
+            try:
+                _knob_value = template.format(
+                    **kwargs
+                )
+            except KeyError as msg:
+                raise KeyError(
+                    "Not able to format expression: {}".format(msg))
+
+            # convert value to correct type
+            if to_type == "2d_vector":
+                # accept both ";" and "," as separators
+                knob_value = _knob_value.replace(",", ";").split(";")
+            else:
+                knob_value = _knob_value
+
+            knob_type = to_type
+
+        else:
+            knob_value = knob["value"]
+
+        if not knob_value:
+            continue
+
+        knob_value = convert_knob_value_to_correct_type(
+            knob_type, knob_value)
+
+        node[knob_name].setValue(knob_value)
+
+
+def convert_knob_value_to_correct_type(knob_type, knob_value):
+    # first convert string types to string
+    # just to ditch unicode
+    if isinstance(knob_value, six.text_type):
+        knob_value = str(knob_value)
+
+    # set knob types correctly
+    if knob_type == "bool":
+        knob_value = bool(knob_value)
+    elif knob_type == "decimal_number":
+        knob_value = float(knob_value)
+    elif knob_type == "number":
+        knob_value = int(knob_value)
+    elif knob_type == "text":
+        knob_value = knob_value
+    elif knob_type == "color_gui":
+        knob_value = color_gui_to_int(knob_value)
+    elif knob_type in ["2d_vector", "3d_vector", "color", "box"]:
+        knob_value = [float(val_) for val_ in knob_value]
+
+    return knob_value
+
+
+def color_gui_to_int(color_gui):
+    hex_value = (
+        "0x{0:0>2x}{1:0>2x}{2:0>2x}{3:0>2x}").format(*color_gui)
+    return int(hex_value, 16)
+
+
+@deprecated
+def add_rendering_knobs(node, farm=True):
+    ''' Adds additional rendering knobs to given node
+
+    Arguments:
+        node (obj): nuke node object to be fixed
+
+    Return:
+        node (obj): with added knobs
+    '''
+    knob_options = ["Use existing frames", "Local"]
+    if farm:
+        knob_options.append("On farm")
+
+    if "render" not in node.knobs():
+        knob = nuke.Enumeration_Knob("render", "", knob_options)
+        knob.clearFlag(nuke.STARTLINE)
+        node.addKnob(knob)
+    return node
+
+
+@deprecated
+def add_review_knob(node):
+    ''' Adds additional review knob to given node
+
+    Arguments:
+        node (obj): nuke node object to be fixed
+
+    Return:
+        node (obj): with added knob
+    '''
+    if "review" not in node.knobs():
+        knob = nuke.Boolean_Knob("review", "Review")
+        knob.setValue(True)
+        node.addKnob(knob)
+    return node
+
+
+@deprecated
+def add_deadline_tab(node):
+    # TODO: remove this as it is only linked to legacy create
+    node.addKnob(nuke.Tab_Knob("Deadline"))
+
+    knob = nuke.Int_Knob("deadlinePriority", "Priority")
+    knob.setValue(50)
+    node.addKnob(knob)
+
+    knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
+    knob.setValue(0)
+    node.addKnob(knob)
+
+    knob = nuke.Int_Knob("deadlineConcurrentTasks", "Concurrent tasks")
+    # zero as default will get value from Settings during collection
+    # instead of being an explicit user override, see precollect_write.py
+    knob.setValue(0)
+    node.addKnob(knob)
+
+    knob = nuke.Text_Knob("divd", '')
+    knob.setValue('')
+    node.addKnob(knob)
+
+    knob = nuke.Boolean_Knob("suspend_publish", "Suspend publish")
+    knob.setValue(False)
+    node.addKnob(knob)
+
+
+@deprecated
+def
get_deadline_knob_names(): + # TODO: remove this as it is only linked to legacy + # validate_write_deadline_tab + return [ + "Deadline", + "deadlineChunkSize", + "deadlinePriority", + "deadlineConcurrentTasks" + ] + + +def create_backdrop(label="", color=None, layer=0, + nodes=None): + """ + Create Backdrop node + + Arguments: + color (str): nuke compatible string with color code + layer (int): layer of node usually used (self.pos_layer - 1) + label (str): the message + nodes (list): list of nodes to be wrapped into backdrop + + """ + assert isinstance(nodes, list), "`nodes` should be a list of nodes" + + # Calculate bounds for the backdrop node. + bdX = min([node.xpos() for node in nodes]) + bdY = min([node.ypos() for node in nodes]) + bdW = max([node.xpos() + node.screenWidth() for node in nodes]) - bdX + bdH = max([node.ypos() + node.screenHeight() for node in nodes]) - bdY + + # Expand the bounds to leave a little border. Elements are offsets + # for left, top, right and bottom edges respectively + left, top, right, bottom = (-20, -65, 20, 60) + bdX += left + bdY += top + bdW += (right - left) + bdH += (bottom - top) + + bdn = nuke.createNode("BackdropNode") + bdn["z_order"].setValue(layer) + + if color: + bdn["tile_color"].setValue(int(color, 16)) + + bdn["xpos"].setValue(bdX) + bdn["ypos"].setValue(bdY) + bdn["bdwidth"].setValue(bdW) + bdn["bdheight"].setValue(bdH) + + if label: + bdn["label"].setValue(label) + + bdn["note_font_size"].setValue(20) + return bdn + + +class WorkfileSettings(object): + """ + All settings for workfile will be set + + This object is setting all possible root settings to the workfile. + Including Colorspace, Frame ranges, Resolution format. It can set it + to Root node or to any given node. + + Arguments: + root (node): nuke's root node + nodes (list): list of nuke's nodes + nodes_filter (list): filtering classes for nodes + + """ + + def __init__(self, root_node=None, nodes=None, **kwargs): + project_doc = kwargs.get("project") + if project_doc is None: + project_name = get_current_project_name() + project_doc = get_project(project_name) + else: + project_name = project_doc["name"] + + Context._project_doc = project_doc + self._project_name = project_name + self._asset = ( + kwargs.get("asset_name") + or get_current_asset_name() + ) + self._asset_entity = get_asset_by_name(project_name, self._asset) + self._root_node = root_node or nuke.root() + self._nodes = self.get_nodes(nodes=nodes) + + self.data = kwargs + + def get_nodes(self, nodes=None, nodes_filter=None): + + if not isinstance(nodes, list) and not isinstance(nodes_filter, list): + return [n for n in nuke.allNodes()] + elif not isinstance(nodes, list) and isinstance(nodes_filter, list): + nodes = list() + for filter in nodes_filter: + [nodes.append(n) for n in nuke.allNodes(filter=filter)] + return nodes + elif isinstance(nodes, list) and not isinstance(nodes_filter, list): + return [n for n in self._nodes] + elif isinstance(nodes, list) and isinstance(nodes_filter, list): + for filter in nodes_filter: + return [n for n in self._nodes if filter in n.Class()] + + def set_viewers_colorspace(self, viewer_dict): + ''' Adds correct colorspace to viewer + + Arguments: + viewer_dict (dict): adjustments from presets + + ''' + if not isinstance(viewer_dict, dict): + msg = "set_viewers_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) + return + + filter_knobs = [ + "viewerProcess", + "wipe_position" + ] + + erased_viewers = [] + for v in nuke.allNodes(filter="Viewer"): + # 
set viewProcess to preset from settings + v["viewerProcess"].setValue( + str(viewer_dict["viewerProcess"]) + ) + + if str(viewer_dict["viewerProcess"]) \ + not in v["viewerProcess"].value(): + copy_inputs = v.dependencies() + copy_knobs = {k: v[k].value() for k in v.knobs() + if k not in filter_knobs} + + # delete viewer with wrong settings + erased_viewers.append(v["name"].value()) + nuke.delete(v) + + # create new viewer + nv = nuke.createNode("Viewer") + + # connect to original inputs + for i, n in enumerate(copy_inputs): + nv.setInput(i, n) + + # set copied knobs + for k, v in copy_knobs.items(): + print(k, v) + nv[k].setValue(v) + + # set viewerProcess + nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"])) + + if erased_viewers: + log.warning( + "Attention! Viewer nodes {} were erased." + "It had wrong color profile".format(erased_viewers)) + + def set_root_colorspace(self, imageio_host): + ''' Adds correct colorspace to root + + Arguments: + imageio_host (dict): host colorspace configurations + + ''' + config_data = get_imageio_config( + project_name=get_current_project_name(), + host_name="nuke" + ) + + workfile_settings = imageio_host["workfile"] + viewer_process_settings = imageio_host["viewer"]["viewerProcess"] + + if not config_data: + # TODO: backward compatibility for old projects - remove later + # perhaps old project overrides is having it set to older version + # with use of `customOCIOConfigPath` + resolved_path = None + if workfile_settings.get("customOCIOConfigPath"): + unresolved_path = workfile_settings["customOCIOConfigPath"] + ocio_paths = unresolved_path[platform.system().lower()] + + for ocio_p in ocio_paths: + resolved_path = str(ocio_p).format(**os.environ) + if not os.path.exists(resolved_path): + continue + + if resolved_path: + # set values to root + self._root_node["colorManagement"].setValue("OCIO") + self._root_node["OCIO_config"].setValue("custom") + self._root_node["customOCIOConfigPath"].setValue( + resolved_path) + else: + # no ocio config found and no custom path used + if self._root_node["colorManagement"].value() \ + not in str(workfile_settings["colorManagement"]): + self._root_node["colorManagement"].setValue( + str(workfile_settings["colorManagement"])) + + # second set ocio version + if self._root_node["OCIO_config"].value() \ + not in str(workfile_settings["OCIO_config"]): + self._root_node["OCIO_config"].setValue( + str(workfile_settings["OCIO_config"])) + + else: + # OCIO config path is defined from prelaunch hook + self._root_node["colorManagement"].setValue("OCIO") + + # print previous settings in case some were found in workfile + residual_path = self._root_node["customOCIOConfigPath"].value() + if residual_path: + log.info("Residual OCIO config path found: `{}`".format( + residual_path + )) + + # we dont need the key anymore + workfile_settings.pop("customOCIOConfigPath", None) + workfile_settings.pop("colorManagement", None) + workfile_settings.pop("OCIO_config", None) + + # get monitor lut from settings respecting Nuke version differences + monitor_lut = workfile_settings.pop("monitorLut", None) + monitor_lut_data = self._get_monitor_settings( + viewer_process_settings, monitor_lut) + + # set monitor related knobs luts (MonitorOut, Thumbnails) + for knob, value_ in monitor_lut_data.items(): + workfile_settings[knob] = value_ + + # then set the rest + for knob, value_ in workfile_settings.items(): + # skip unfilled ocio config path + # it will be dict in value + if isinstance(value_, dict): + continue + # skip empty values + if 
not value_: + continue + if self._root_node[knob].value() not in value_: + self._root_node[knob].setValue(str(value_)) + log.debug("nuke.root()['{}'] changed to: {}".format( + knob, value_)) + + # set ocio config path + if config_data: + config_path = config_data["path"].replace("\\", "/") + log.info("OCIO config path found: `{}`".format( + config_path)) + + # check if there's a mismatch between environment and settings + correct_settings = self._is_settings_matching_environment( + config_data) + + # if there's no mismatch between environment and settings + if correct_settings: + self._set_ocio_config_path_to_workfile(config_data) + + def _get_monitor_settings(self, viewer_lut, monitor_lut): + """ Get monitor settings from viewer and monitor lut + + Args: + viewer_lut (str): viewer lut string + monitor_lut (str): monitor lut string + + Returns: + dict: monitor settings + """ + output_data = {} + m_display, m_viewer = get_viewer_config_from_string(monitor_lut) + v_display, v_viewer = get_viewer_config_from_string(viewer_lut) + + # set monitor lut differently for nuke version 14 + if nuke.NUKE_VERSION_MAJOR >= 14: + output_data["monitorOutLUT"] = create_viewer_profile_string( + m_viewer, m_display, path_like=False) + # monitorLut=thumbnails - viewerProcess makes more sense + output_data["monitorLut"] = create_viewer_profile_string( + v_viewer, v_display, path_like=False) + + if nuke.NUKE_VERSION_MAJOR == 13: + output_data["monitorOutLUT"] = create_viewer_profile_string( + m_viewer, m_display, path_like=False) + # monitorLut=thumbnails - viewerProcess makes more sense + output_data["monitorLut"] = create_viewer_profile_string( + v_viewer, v_display, path_like=True) + if nuke.NUKE_VERSION_MAJOR <= 12: + output_data["monitorLut"] = create_viewer_profile_string( + m_viewer, m_display, path_like=True) + + return output_data + + def _is_settings_matching_environment(self, config_data): + """ Check if OCIO config path is different from environment + + Args: + config_data (dict): OCIO config data from settings + + Returns: + bool: True if settings are matching environment, False otherwise + """ + current_ocio_path = os.environ["OCIO"] + settings_ocio_path = config_data["path"] + + # normalize all paths to forward slashes + current_ocio_path = current_ocio_path.replace("\\", "/") + settings_ocio_path = settings_ocio_path.replace("\\", "/") + + if current_ocio_path != settings_ocio_path: + message = """ +It seems like there's a mismatch between the OCIO config path set in your Nuke +settings and the actual path set in your OCIO environment. + +To resolve this, please follow these steps: +1. Close Nuke if it's currently open. +2. Reopen Nuke. + +Please note the paths for your reference: + +- The OCIO environment path currently set: + `{env_path}` + +- The path in your current Nuke settings: + `{settings_path}` + +Reopening Nuke should synchronize these paths and resolve any discrepancies. +""" + nuke.message( + message.format( + env_path=current_ocio_path, + settings_path=settings_ocio_path + ) + ) + return False + + return True + + def _set_ocio_config_path_to_workfile(self, config_data): + """ Set OCIO config path to workfile + + Path set into nuke workfile. It is trying to replace path with + environment variable if possible. If not, it will set it as it is. + It also saves the script to apply the change, but only if it's not + empty Untitled script. 
+ + Args: + config_data (dict): OCIO config data from settings + + """ + # replace path with env var if possible + ocio_path = self._replace_ocio_path_with_env_var(config_data) + + log.info("Setting OCIO config path to: `{}`".format( + ocio_path)) + + self._root_node["customOCIOConfigPath"].setValue( + ocio_path + ) + self._root_node["OCIO_config"].setValue("custom") + + # only save script if it's not empty + if self._root_node["name"].value() != "": + log.info("Saving script to apply OCIO config path change.") + nuke.scriptSave() + + def _get_included_vars(self, config_template): + """ Get all environment variables included in template + + Args: + config_template (str): OCIO config template from settings + + Returns: + list: list of environment variables included in template + """ + # resolve all environments for whitelist variables + included_vars = [ + "BUILTIN_OCIO_ROOT", + ] + + # include all project root related env vars + for env_var in os.environ: + if env_var.startswith("AYON_PROJECT_ROOT_"): + included_vars.append(env_var) + + # use regex to find env var in template with format {ENV_VAR} + # this way we make sure only template used env vars are included + env_var_regex = r"\{([A-Z0-9_]+)\}" + env_var = re.findall(env_var_regex, config_template) + if env_var: + included_vars.append(env_var[0]) + + return included_vars + + def _replace_ocio_path_with_env_var(self, config_data): + """ Replace OCIO config path with environment variable + + Environment variable is added as TCL expression to path. TCL expression + is also replacing backward slashes found in path for windows + formatted values. + + Args: + config_data (str): OCIO config dict from settings + + Returns: + str: OCIO config path with environment variable TCL expression + """ + config_path = config_data["path"].replace("\\", "/") + config_template = config_data["template"] + + included_vars = self._get_included_vars(config_template) + + # make sure we return original path if no env var is included + new_path = config_path + + for env_var in included_vars: + env_path = os.getenv(env_var) + if not env_path: + continue + + # it has to be directory current process can see + if not os.path.isdir(env_path): + continue + + # make sure paths are in same format + env_path = env_path.replace("\\", "/") + path = config_path.replace("\\", "/") + + # check if env_path is in path and replace to first found positive + if env_path in path: + # with regsub we make sure path format of slashes is correct + resub_expr = ( + "[regsub -all {{\\\\}} [getenv {}] \"/\"]").format(env_var) + + new_path = path.replace( + env_path, resub_expr + ) + break + + return new_path + + def set_writes_colorspace(self): + ''' Adds correct colorspace to write node dict + + ''' + for node in nuke.allNodes(filter="Group", group=self._root_node): + log.info("Setting colorspace to `{}`".format(node.name())) + + # get data from avalon knob + avalon_knob_data = read_avalon_data(node) + node_data = get_node_data(node, INSTANCE_DATA_KNOB) + + if ( + # backward compatibility + # TODO: remove this once old avalon data api will be removed + avalon_knob_data + and avalon_knob_data.get("id") != "pyblish.avalon.instance" + ): + continue + elif ( + node_data + and node_data.get("id") != "pyblish.avalon.instance" + ): + continue + + if ( + # backward compatibility + # TODO: remove this once old avalon data api will be removed + avalon_knob_data + and "creator" not in avalon_knob_data + ): + continue + elif ( + node_data + and "creator_identifier" not in node_data + ): + continue 
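+
+            # Rough illustration of the two metadata sources handled below
+            # (key names follow the checks above; values are hypothetical):
+            #   avalon_knob_data -> {"id": "pyblish.avalon.instance",
+            #                        "family": "render",
+            #                        "creator": "CreateWriteRender",
+            #                        "subset": "renderMain"}
+            #   node_data        -> {"id": "pyblish.avalon.instance",
+            #                        "creator_identifier": "create_write_render",
+            #                        "subset": "renderMain"}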
+ + nuke_imageio_writes = None + if avalon_knob_data: + # establish families + families = [avalon_knob_data["family"]] + if avalon_knob_data.get("families"): + families.append(avalon_knob_data.get("families")) + + nuke_imageio_writes = get_imageio_node_setting( + node_class=avalon_knob_data["families"], + plugin_name=avalon_knob_data["creator"], + subset=avalon_knob_data["subset"] + ) + elif node_data: + nuke_imageio_writes = get_write_node_template_attr(node) + + log.debug("nuke_imageio_writes: `{}`".format(nuke_imageio_writes)) + + if not nuke_imageio_writes: + return + + write_node = None + + # get into the group node + node.begin() + for x in nuke.allNodes(): + if x.Class() == "Write": + write_node = x + node.end() + + if not write_node: + return + + try: + # write all knobs to node + for knob in nuke_imageio_writes["knobs"]: + value = knob["value"] + if isinstance(value, six.text_type): + value = str(value) + if str(value).startswith("0x"): + value = int(value, 16) + + log.debug("knob: {}| value: {}".format( + knob["name"], value + )) + write_node[knob["name"]].setValue(value) + except TypeError: + log.warning( + "Legacy workflow didn't work, switching to current") + + set_node_knobs_from_settings( + write_node, nuke_imageio_writes["knobs"]) + + def set_reads_colorspace(self, read_clrs_inputs): + """ Setting colorspace to Read nodes + + Looping through all read nodes and tries to set colorspace based + on regex rules in presets + """ + changes = {} + for n in nuke.allNodes(): + file = nuke.filename(n) + if n.Class() != "Read": + continue + + # check if any colorspace presets for read is matching + preset_clrsp = None + + for input in read_clrs_inputs: + if not bool(re.search(input["regex"], file)): + continue + preset_clrsp = input["colorspace"] + + if preset_clrsp is not None: + current = n["colorspace"].value() + future = str(preset_clrsp) + if current != future: + changes[n.name()] = { + "from": current, + "to": future + } + + log.debug(changes) + if changes: + msg = "Read nodes are not set to correct colorspace:\n\n" + for nname, knobs in changes.items(): + msg += ( + " - node: '{0}' is now '{1}' but should be '{2}'\n" + ).format(nname, knobs["from"], knobs["to"]) + + msg += "\nWould you like to change it?" 
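+
+            # For example, the assembled prompt could read (node and
+            # colorspace names are hypothetical):
+            #
+            #   Read nodes are not set to correct colorspace:
+            #
+            #    - node: 'Read1' is now 'sRGB' but should be 'ACES - ACEScg'
+            #
+            #   Would you like to change it?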
+
+            if nuke.ask(msg):
+                for nname, knobs in changes.items():
+                    n = nuke.toNode(nname)
+                    n["colorspace"].setValue(knobs["to"])
+                    log.info(
+                        "Setting `{0}` to `{1}`".format(
+                            nname,
+                            knobs["to"]))
+
+    def set_colorspace(self):
+        ''' Setting colorspace following presets
+        '''
+        # get imageio
+        nuke_colorspace = get_nuke_imageio_settings()
+
+        log.info("Setting colorspace to workfile...")
+        try:
+            self.set_root_colorspace(nuke_colorspace)
+        except AttributeError as _error:
+            msg = "Set Colorspace to workfile error: {}".format(_error)
+            nuke.message(msg)
+
+        log.info("Setting colorspace to viewers...")
+        try:
+            self.set_viewers_colorspace(nuke_colorspace["viewer"])
+        except AttributeError as _error:
+            msg = "Set Colorspace to viewer error: {}".format(_error)
+            nuke.message(msg)
+
+        log.info("Setting colorspace to write nodes...")
+        try:
+            self.set_writes_colorspace()
+        except AttributeError as _error:
+            nuke.message(_error)
+            log.error(_error)
+
+        log.info("Setting colorspace to read nodes...")
+        read_clrs_inputs = nuke_colorspace["regexInputs"].get("inputs", [])
+        if read_clrs_inputs:
+            self.set_reads_colorspace(read_clrs_inputs)
+
+    def reset_frame_range_handles(self):
+        """Set frame range to current asset"""
+
+        if "data" not in self._asset_entity:
+            msg = "Asset '{}' has no 'data' key set".format(self._asset)
+            log.warning(msg)
+            nuke.message(msg)
+            return
+
+        asset_data = self._asset_entity["data"]
+
+        missing_cols = []
+        check_cols = ["fps", "frameStart", "frameEnd",
+                      "handleStart", "handleEnd"]
+
+        for col in check_cols:
+            if col not in asset_data:
+                missing_cols.append(col)
+
+        if len(missing_cols) > 0:
+            missing = ", ".join(missing_cols)
+            msg = "'{}' are not set for asset '{}'!".format(
+                missing, self._asset)
+            log.warning(msg)
+            nuke.message(msg)
+            return
+
+        # get handles values
+        handle_start = asset_data["handleStart"]
+        handle_end = asset_data["handleEnd"]
+
+        fps = float(asset_data["fps"])
+        frame_start_handle = int(asset_data["frameStart"]) - handle_start
+        frame_end_handle = int(asset_data["frameEnd"]) + handle_end
+
+        self._root_node["lock_range"].setValue(False)
+        self._root_node["fps"].setValue(fps)
+        self._root_node["first_frame"].setValue(frame_start_handle)
+        self._root_node["last_frame"].setValue(frame_end_handle)
+        self._root_node["lock_range"].setValue(True)
+
+        # update node graph so knobs are updated
+        update_node_graph()
+
+        frame_range = '{0}-{1}'.format(
+            int(asset_data["frameStart"]),
+            int(asset_data["frameEnd"])
+        )
+
+        for node in nuke.allNodes(filter="Viewer"):
+            node['frame_range'].setValue(frame_range)
+            node['frame_range_lock'].setValue(True)
+
+        if not ASSIST:
+            set_node_data(
+                self._root_node,
+                INSTANCE_DATA_KNOB,
+                {
+                    "handleStart": int(handle_start),
+                    "handleEnd": int(handle_end)
+                }
+            )
+        else:
+            log.warning(
+                "NukeAssist mode does not allow "
+                "updating custom knobs..."
+ ) + + def reset_resolution(self): + """Set resolution to project resolution.""" + log.info("Resetting resolution") + project_name = get_current_project_name() + asset_data = self._asset_entity["data"] + + format_data = { + "width": int(asset_data.get( + 'resolutionWidth', + asset_data.get('resolution_width'))), + "height": int(asset_data.get( + 'resolutionHeight', + asset_data.get('resolution_height'))), + "pixel_aspect": asset_data.get( + 'pixelAspect', + asset_data.get('pixel_aspect', 1)), + "name": project_name + } + + if any(x_ for x_ in format_data.values() if x_ is None): + msg = ("Missing set shot attributes in DB." + "\nContact your supervisor!." + "\n\nWidth: `{width}`" + "\nHeight: `{height}`" + "\nPixel Aspect: `{pixel_aspect}`").format(**format_data) + log.error(msg) + nuke.message(msg) + + existing_format = None + for format in nuke.formats(): + if format_data["name"] == format.name(): + existing_format = format + break + + if existing_format: + # Enforce existing format to be correct. + existing_format.setWidth(format_data["width"]) + existing_format.setHeight(format_data["height"]) + existing_format.setPixelAspect(format_data["pixel_aspect"]) + else: + format_string = self.make_format_string(**format_data) + log.info("Creating new format: {}".format(format_string)) + nuke.addFormat(format_string) + + nuke.root()["format"].setValue(format_data["name"]) + log.info("Format is set.") + + # update node graph so knobs are updated + update_node_graph() + + def make_format_string(self, **kwargs): + if kwargs.get("r"): + return ( + "{width} " + "{height} " + "{x} " + "{y} " + "{r} " + "{t} " + "{pixel_aspect:.2f} " + "{name}".format(**kwargs) + ) + else: + return ( + "{width} " + "{height} " + "{pixel_aspect:.2f} " + "{name}".format(**kwargs) + ) + + def set_context_settings(self): + # replace reset resolution from avalon core to pype's + self.reset_resolution() + # replace reset resolution from avalon core to pype's + self.reset_frame_range_handles() + # add colorspace menu item + self.set_colorspace() + + def set_favorites(self): + from .utils import set_context_favorites + + work_dir = os.getenv("AVALON_WORKDIR") + asset = get_current_asset_name() + favorite_items = OrderedDict() + + # project + # get project's root and split to parts + projects_root = os.path.normpath(work_dir.split( + Context.project_name)[0]) + # add project name + project_dir = os.path.join(projects_root, Context.project_name) + "/" + # add to favorites + favorite_items.update({"Project dir": project_dir.replace("\\", "/")}) + + # asset + asset_root = os.path.normpath(work_dir.split( + asset)[0]) + # add asset name + asset_dir = os.path.join(asset_root, asset) + "/" + # add to favorites + favorite_items.update({"Shot dir": asset_dir.replace("\\", "/")}) + + # workdir + favorite_items.update({"Work dir": work_dir.replace("\\", "/")}) + + set_context_favorites(favorite_items) + + +def get_write_node_template_attr(node): + ''' Gets all defined data from presets + + ''' + + # TODO: add identifiers to settings and rename settings key + plugin_names_mapping = { + "create_write_image": "CreateWriteImage", + "create_write_prerender": "CreateWritePrerender", + "create_write_render": "CreateWriteRender" + } + # get avalon data from node + node_data = get_node_data(node, INSTANCE_DATA_KNOB) + identifier = node_data["creator_identifier"] + + # return template data + return get_imageio_node_setting( + node_class="Write", + plugin_name=plugin_names_mapping[identifier], + subset=node_data["subset"] + ) + + +def 
get_dependent_nodes(nodes):
+    """Get all dependent nodes connected to the list of nodes.
+
+    Looking for connections outside of the nodes in the incoming argument.
+
+    Arguments:
+        nodes (list): list of nuke.Node objects
+
+    Returns:
+        connections_in: dictionary of nodes and their dependencies
+        connections_out: dictionary of nodes and their dependency
+    """
+
+    connections_in = dict()
+    connections_out = dict()
+    node_names = [n.name() for n in nodes]
+    for node in nodes:
+        inputs = node.dependencies()
+        outputs = node.dependent()
+        # collect all inputs outside
+        test_in = [(i, n) for i, n in enumerate(inputs)
+                   if n.name() not in node_names]
+        if test_in:
+            connections_in.update({
+                node: test_in
+            })
+        # collect all outputs outside
+        test_out = [i for i in outputs if i.name() not in node_names]
+        if test_out:
+            # only one dependent node is allowed
+            connections_out.update({
+                node: test_out[-1]
+            })
+
+    return connections_in, connections_out
+
+
+def update_node_graph():
+    # Resetting frame will update knob values
+    try:
+        root_node_lock = nuke.root()["lock_range"].value()
+        nuke.root()["lock_range"].setValue(not root_node_lock)
+        nuke.root()["lock_range"].setValue(root_node_lock)
+
+        current_frame = nuke.frame()
+        nuke.frame(1)
+        nuke.frame(int(current_frame))
+    except Exception as error:
+        log.warning(error)
+
+
+def find_free_space_to_paste_nodes(
+    nodes,
+    group=nuke.root(),
+    direction="right",
+    offset=300
+):
+    """
+    Get coordinates in the DAG (node graph) for placing new nodes
+
+    Arguments:
+        nodes (list): list of nuke.Node objects
+        group (nuke.Node) [optional]: group in whose context the
+            placement happens
+        direction (str) [optional]: where we want it to be placed
+            [left, right, top, bottom]
+        offset (int) [optional]: offset from the rest of the nodes
+
+    Returns:
+        xpos (int): x coordinate in DAG
+        ypos (int): y coordinate in DAG
+    """
+    if len(nodes) == 0:
+        return 0, 0
+
+    group_xpos = list()
+    group_ypos = list()
+
+    # get local coordinates of all nodes
+    nodes_xpos = [n.xpos() for n in nodes] + \
+        [n.xpos() + n.screenWidth() for n in nodes]
+
+    nodes_ypos = [n.ypos() for n in nodes] + \
+        [n.ypos() + n.screenHeight() for n in nodes]
+
+    # get complete screen size of all nodes to be placed in
+    nodes_screen_width = max(nodes_xpos) - min(nodes_xpos)
+    nodes_screen_height = max(nodes_ypos) - min(nodes_ypos)
+
+    # get screen size (r,l,t,b) of all nodes in `group`
+    with group:
+        group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \
+            [n.xpos() + n.screenWidth() for n in nuke.allNodes()
+             if n not in nodes]
+        group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \
+            [n.ypos() + n.screenHeight() for n in nuke.allNodes()
+             if n not in nodes]
+
+    # calc output left
+    if direction == "left":
+        xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset)
+        ypos = min(group_ypos)
+        return xpos, ypos
+    # calc output right
+    if direction == "right":
+        xpos = max(group_xpos) + abs(offset)
+        ypos = min(group_ypos)
+        return xpos, ypos
+    # calc output top
+    if direction == "top":
+        xpos = min(group_xpos)
+        ypos = min(group_ypos) - abs(nodes_screen_height) - abs(offset)
+        return xpos, ypos
+    # calc output bottom
+    if direction == "bottom":
+        xpos = min(group_xpos)
+        ypos = max(group_ypos) + abs(offset)
+        return xpos, ypos
+
+
+@contextlib.contextmanager
+def maintained_selection(exclude_nodes=None):
+    """Maintain selection during context
+
+    Maintain selection during context and unselect
+    all nodes after the context is done.
+ + Arguments: + exclude_nodes (list[nuke.Node]): list of nodes to be unselected + before context is done + + Example: + >>> with maintained_selection(): + ... node["selected"].setValue(True) + >>> print(node["selected"].value()) + False + """ + if exclude_nodes: + for node in exclude_nodes: + node["selected"].setValue(False) + + previous_selection = nuke.selectedNodes() + + try: + yield + finally: + # unselect all selection in case there is some + reset_selection() + + # and select all previously selected nodes + if previous_selection: + select_nodes(previous_selection) + + +@contextlib.contextmanager +def swap_node_with_dependency(old_node, new_node): + """ Swap node with dependency + + Swap node with dependency and reconnect all inputs and outputs. + It removes old node. + + Arguments: + old_node (nuke.Node): node to be replaced + new_node (nuke.Node): node to replace with + + Example: + >>> old_node_name = old_node["name"].value() + >>> print(old_node_name) + old_node_name_01 + >>> with swap_node_with_dependency(old_node, new_node) as node_name: + ... new_node["name"].setValue(node_name) + >>> print(new_node["name"].value()) + old_node_name_01 + """ + # preserve position + xpos, ypos = old_node.xpos(), old_node.ypos() + # preserve selection after all is done + outputs = get_node_outputs(old_node) + inputs = old_node.dependencies() + node_name = old_node["name"].value() + + try: + nuke.delete(old_node) + + yield node_name + finally: + + # Reconnect inputs + for i, node in enumerate(inputs): + new_node.setInput(i, node) + # Reconnect outputs + if outputs: + for n, pipes in outputs.items(): + for i in pipes: + n.setInput(i, new_node) + # return to original position + new_node.setXYpos(xpos, ypos) + + +def reset_selection(): + """Deselect all selected nodes""" + for node in nuke.selectedNodes(): + node["selected"].setValue(False) + + +def select_nodes(nodes): + """Selects all inputted nodes + + Arguments: + nodes (Union[list, tuple, set]): nuke nodes to be selected + """ + assert isinstance(nodes, (list, tuple, set)), \ + "nodes has to be list, tuple or set" + + for node in nodes: + node["selected"].setValue(True) + + +def launch_workfiles_app(): + """Show workfiles tool on nuke launch. + + Trigger to show workfiles tool on application launch. Can be executed only + once all other calls are ignored. + + Workfiles tool show is deferred after application initialization using + QTimer. + """ + + if Context.workfiles_launched: + return + + Context.workfiles_launched = True + + # get all imortant settings + open_at_start = env_value_to_bool( + env_key="AYON_WORKFILE_TOOL_ON_START", + default=None) + + # return if none is defined + if not open_at_start: + return + + # Show workfiles tool using timer + # - this will be probably triggered during initialization in that case + # the application is not be able to show uis so it must be + # deferred using timer + # - timer should be processed when initialization ends + # When applications starts to process events. + timer = QtCore.QTimer() + timer.timeout.connect(_launch_workfile_app) + timer.setInterval(100) + Context.workfiles_tool_timer = timer + timer.start() + + +def _launch_workfile_app(): + # Safeguard to not show window when application is still starting up + # or is already closing down. 
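+    # Assumed lifecycle, based on the two checks below:
+    #   startingUp() is True  -> the Qt event loop is not running yet,
+    #                            keep the timer waiting
+    #   both are False        -> the application is up, show the tool once
+    #   closingDown() is True -> shutting down, only stop the timer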
+    closing_down = QtWidgets.QApplication.closingDown()
+    starting_up = QtWidgets.QApplication.startingUp()
+
+    # Stop the timer if application finished start up or is closing down
+    if closing_down or not starting_up:
+        Context.workfiles_tool_timer.stop()
+        Context.workfiles_tool_timer = None
+
+    # Skip if application is starting up or closing down
+    if starting_up or closing_down:
+        return
+
+    # Make sure on top is enabled on first show so the window is not hidden
+    # under main nuke window
+    # - this happened on CentOS 7: Nuke's focus switches to the main window
+    #   during initialization, which moves the workfiles tool under it
+    host_tools.show_workfiles(parent=None, on_top=True)
+
+
+@deprecated("ayon_core.hosts.nuke.api.lib.start_workfile_template_builder")
+def process_workfile_builder():
+    """ [DEPRECATED] Process workfile builder on nuke start
+
+    This function is deprecated and will be removed in future versions.
+    Use the settings in `project_settings/nuke/templated_workfile_build`,
+    which are supported by the api `start_workfile_template_builder()`.
+    """
+
+    # to avoid looping of the callback, remove it!
+    nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")
+
+    # get state from settings
+    project_settings = get_current_project_settings()
+    workfile_builder = project_settings["nuke"].get(
+        "workfile_builder", {})
+
+    # get settings
+    create_fv_on = workfile_builder.get("create_first_version") or None
+    builder_on = workfile_builder.get("builder_on_start") or None
+
+    last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")
+
+    # generate first version if the file does not exist
+    # and the feature is enabled
+    if create_fv_on and not os.path.exists(last_workfile_path):
+        # get custom template path if any
+        custom_template_path = get_custom_workfile_template_from_session(
+            project_settings=project_settings
+        )
+
+        # if custom template is defined
+        if custom_template_path:
+            log.info("Adding nodes from `{}`...".format(
+                custom_template_path
+            ))
+            try:
+                # import nodes into current script
+                nuke.nodePaste(custom_template_path)
+            except RuntimeError:
+                raise RuntimeError((
+                    "Template defined for project: {} is not working. "
+                    "Talk to your manager for advice").format(
+                        custom_template_path))
+
+        # if builder at start is defined
+        if builder_on:
+            log.info("Building nodes from presets...")
+            # build nodes by defined presets
+            BuildWorkfile().process()
+
+        log.info("Saving script as version `{}`...".format(
+            last_workfile_path
+        ))
+        # save file as version
+        save_file(last_workfile_path)
+        return
+
+
+def start_workfile_template_builder():
+    from .workfile_template_builder import (
+        build_workfile_template
+    )
+
+    # remove the callback to avoid looping and duplicating the workfile
+    nuke.removeOnCreate(start_workfile_template_builder, nodeClass="Root")
+
+    log.info("Starting workfile template builder...")
+    try:
+        build_workfile_template(workfile_creation_enabled=True)
+    except TemplateProfileNotFound:
+        log.warning("Template profile not found. 
Skipping...") + + +@deprecated +def recreate_instance(origin_node, avalon_data=None): + """Recreate input instance to different data + + Args: + origin_node (nuke.Node): Nuke node to be recreating from + avalon_data (dict, optional): data to be used in new node avalon_data + + Returns: + nuke.Node: newly created node + """ + knobs_wl = ["render", "publish", "review", "ypos", + "use_limit", "first", "last"] + # get data from avalon knobs + data = get_avalon_knob_data( + origin_node) + + # add input data to avalon data + if avalon_data: + data.update(avalon_data) + + # capture all node knobs allowed in op_knobs + knobs_data = {k: origin_node[k].value() + for k in origin_node.knobs() + for key in knobs_wl + if key in k} + + # get node dependencies + inputs = origin_node.dependencies() + outputs = origin_node.dependent() + + # remove the node + nuke.delete(origin_node) + + # create new node + # get appropriate plugin class + creator_plugin = None + for Creator in discover_legacy_creator_plugins(): + if Creator.__name__ == data["creator"]: + creator_plugin = Creator + break + + # create write node with creator + new_node_name = data["subset"] + new_node = creator_plugin(new_node_name, data["asset"]).process() + + # white listed knobs to the new node + for _k, _v in knobs_data.items(): + try: + print(_k, _v) + new_node[_k].setValue(_v) + except Exception as e: + print(e) + + # connect to original inputs + for i, n in enumerate(inputs): + new_node.setInput(i, n) + + # connect to outputs + if len(outputs) > 0: + for dn in outputs: + dn.setInput(0, new_node) + + return new_node + + +def add_scripts_menu(): + try: + from scriptsmenu import launchfornuke + except ImportError: + log.warning( + "Skipping studio.menu install, because " + "'scriptsmenu' module seems unavailable." 
+        )
+        return
+
+    # load configuration of custom menu
+    project_name = get_current_project_name()
+    project_settings = get_project_settings(project_name)
+    config = project_settings["nuke"]["scriptsmenu"]["definition"]
+    _menu = project_settings["nuke"]["scriptsmenu"]["name"]
+
+    if not config:
+        log.warning("Skipping studio menu, no definition found.")
+        return
+
+    # run the launcher for the Nuke menu
+    studio_menu = launchfornuke.main(title=_menu.title())
+
+    # apply configuration
+    studio_menu.build_from_configuration(studio_menu, config)
+
+
+def add_scripts_gizmo():
+
+    # load configuration of custom menu
+    project_name = get_current_project_name()
+    project_settings = get_project_settings(project_name)
+    platform_name = platform.system().lower()
+
+    for gizmo_settings in project_settings["nuke"]["gizmo"]:
+        gizmo_list_definition = gizmo_settings["gizmo_definition"]
+        toolbar_name = gizmo_settings["toolbar_menu_name"]
+        # gizmo_toolbar_path = gizmo_settings["gizmo_toolbar_path"]
+        gizmo_source_dir = gizmo_settings.get(
+            "gizmo_source_dir", {}).get(platform_name)
+        toolbar_icon_path = gizmo_settings.get(
+            "toolbar_icon_path", {}).get(platform_name)
+
+        if not gizmo_source_dir:
+            log.debug("Skipping studio gizmo `{}`, "
+                      "no gizmo path found.".format(toolbar_name)
+                      )
+            continue
+
+        if not gizmo_list_definition:
+            log.debug("Skipping studio gizmo `{}`, "
+                      "no definition found.".format(toolbar_name)
+                      )
+            continue
+
+        if toolbar_icon_path:
+            try:
+                toolbar_icon_path = toolbar_icon_path.format(**os.environ)
+            except KeyError as e:
+                log.error(
+                    "This environment variable doesn't exist: {}".format(e)
+                )
+
+        existing_gizmo_path = []
+        for source_dir in gizmo_source_dir:
+            try:
+                resolve_source_dir = source_dir.format(**os.environ)
+            except KeyError as e:
+                log.error(
+                    "This environment variable doesn't exist: {}".format(e)
+                )
+                continue
+            if not os.path.exists(resolve_source_dir):
+                log.warning(
+                    "The source of gizmo `{}` does not exist".format(
+                        resolve_source_dir
+                    )
+                )
+                continue
+            existing_gizmo_path.append(resolve_source_dir)
+
+        # run the launcher for Nuke toolbar
+        toolbar_menu = gizmo_menu.GizmoMenu(
+            title=toolbar_name,
+            icon=toolbar_icon_path
+        )
+
+        # apply configuration
+        toolbar_menu.add_gizmo_path(existing_gizmo_path)
+        toolbar_menu.build_from_configuration(gizmo_list_definition)
+
+
+class NukeDirmap(HostDirmap):
+    def __init__(self, file_name, *args, **kwargs):
+        """
+        Args:
+            file_name (str): full path of referenced file from workfiles
+            *args (tuple): Positional arguments for 'HostDirmap' class
+            **kwargs (dict): Keyword arguments for 'HostDirmap' class
+        """
+
+        self.file_name = file_name
+        super(NukeDirmap, self).__init__(*args, **kwargs)
+
+    def on_enable_dirmap(self):
+        pass
+
+    def dirmap_routine(self, source_path, destination_path):
+        source_path = source_path.lower().replace(os.sep, '/')
+        destination_path = destination_path.lower().replace(os.sep, '/')
+        log.debug("Map: {} with: {}->{}".format(
+            self.file_name, source_path, destination_path))
+        if platform.system().lower() == "windows":
+            self.file_name = self.file_name.lower().replace(
+                source_path, destination_path)
+        else:
+            self.file_name = self.file_name.replace(
+                source_path, destination_path)
+
+
+class DirmapCache:
+    """Caching class to get settings and sync_module easily and only once."""
+    _project_name = None
+    _project_settings = None
+    _sync_module_discovered = False
+    _sync_module = None
+    _mapping = None
+
+    @classmethod
+    def project_name(cls):
+        if cls._project_name is None:
+            
cls._project_name = os.getenv("AVALON_PROJECT")
+        return cls._project_name
+
+    @classmethod
+    def project_settings(cls):
+        if cls._project_settings is None:
+            cls._project_settings = get_project_settings(cls.project_name())
+        return cls._project_settings
+
+    @classmethod
+    def sync_module(cls):
+        if not cls._sync_module_discovered:
+            cls._sync_module_discovered = True
+            cls._sync_module = AddonsManager().get("sync_server")
+        return cls._sync_module
+
+    @classmethod
+    def mapping(cls):
+        return cls._mapping
+
+    @classmethod
+    def set_mapping(cls, mapping):
+        cls._mapping = mapping
+
+
+def dirmap_file_name_filter(file_name):
+    """Nuke callback function with single full path argument.
+
+    Checks project settings for potential mapping from source to dest.
+    """
+
+    dirmap_processor = NukeDirmap(
+        file_name,
+        "nuke",
+        DirmapCache.project_name(),
+        DirmapCache.project_settings(),
+        DirmapCache.sync_module(),
+    )
+    if not DirmapCache.mapping():
+        DirmapCache.set_mapping(dirmap_processor.get_mappings())
+
+    dirmap_processor.process_dirmap(DirmapCache.mapping())
+    if os.path.exists(dirmap_processor.file_name):
+        return dirmap_processor.file_name
+    return file_name
+
+
+@contextlib.contextmanager
+def node_tempfile():
+    """Create a temp file where a node is pasted during duplication.
+
+    This is to avoid using the clipboard for node duplication.
+    """
+
+    tmp_file = tempfile.NamedTemporaryFile(
+        mode="w", prefix="openpype_nuke_temp_", suffix=".nk", delete=False
+    )
+    tmp_file.close()
+    node_tempfile_path = tmp_file.name
+
+    try:
+        # Yield the path where node can be copied
+        yield node_tempfile_path
+
+    finally:
+        # Remove the file at the end
+        os.remove(node_tempfile_path)
+
+
+def duplicate_node(node):
+    reset_selection()
+
+    # select required node for duplication
+    node.setSelected(True)
+
+    with node_tempfile() as filepath:
+        # copy selected to temp filepath
+        nuke.nodeCopy(filepath)
+
+        # reset selection
+        reset_selection()
+
+        # paste node and selection is on it only
+        dupli_node = nuke.nodePaste(filepath)
+
+        # reset selection
+        reset_selection()
+
+    return dupli_node
+
+
+def get_group_io_nodes(nodes):
+    """Get the input and the output of a group of nodes."""
+
+    if not nodes:
+        raise ValueError("there are no nodes in the list")
+
+    input_node = None
+    output_node = None
+
+    if len(nodes) == 1:
+        input_node = output_node = nodes[0]
+
+    else:
+        for node in nodes:
+            if "Input" in node.name():
+                input_node = node
+
+            if "Output" in node.name():
+                output_node = node
+
+            if input_node is not None and output_node is not None:
+                break
+
+        if input_node is None:
+            log.warning("No Input found")
+
+        if output_node is None:
+            log.warning("No Output found")
+
+    return input_node, output_node
+
+
+def get_extreme_positions(nodes):
+    """Get the 4 numbers that represent the bounding box of a group of nodes."""
+
+    if not nodes:
+        raise ValueError("there are no nodes in the list")
+
+    nodes_xpos = [n.xpos() for n in nodes] + \
+        [n.xpos() + n.screenWidth() for n in nodes]
+
+    nodes_ypos = [n.ypos() for n in nodes] + \
+        [n.ypos() + n.screenHeight() for n in nodes]
+
+    min_x, min_y = (min(nodes_xpos), min(nodes_ypos))
+    max_x, max_y = (max(nodes_xpos), max(nodes_ypos))
+    return min_x, min_y, max_x, max_y
+
+
+def refresh_node(node):
+    """Correct a bug caused by the multi-threading of nuke.
+
+    Refresh the node to make sure that it takes the desired attributes.
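+
+    A hypothetical call ``refresh_node(nuke.toNode("Read1"))`` snaps the
+    node with ``nuke.autoplaceSnap`` and then restores its original
+    position.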
+ """ + + x = node.xpos() + y = node.ypos() + nuke.autoplaceSnap(node) + node.setXYpos(x, y) + + +def refresh_nodes(nodes): + for node in nodes: + refresh_node(node) + + +def get_names_from_nodes(nodes): + """Get list of nodes names. + + Args: + nodes(List[nuke.Node]): List of nodes to convert into names. + + Returns: + List[str]: Name of passed nodes. + """ + + return [ + node.name() + for node in nodes + ] + + +def get_nodes_by_names(names): + """Get list of nuke nodes based on their names. + + Args: + names (List[str]): List of node names to be found. + + Returns: + List[nuke.Node]: List of nodes found by name. + """ + + return [ + nuke.toNode(name) + for name in names + ] + + +def get_viewer_config_from_string(input_string): + """Convert string to display and viewer string + + Args: + input_string (str): string with viewer + + Raises: + IndexError: if more then one slash in input string + IndexError: if missing closing bracket + + Returns: + tuple[str]: display, viewer + """ + display = None + viewer = input_string + # check if () or / or \ in name + if "/" in viewer: + split = viewer.split("/") + + # rise if more then one column + if len(split) > 2: + raise IndexError(( + "Viewer Input string is not correct. " + "more then two `/` slashes! {}" + ).format(input_string)) + + viewer = split[1] + display = split[0] + elif "(" in viewer: + pattern = r"([\w\d\s\.\-]+).*[(](.*)[)]" + result_ = re.findall(pattern, viewer) + try: + result_ = result_.pop() + display = str(result_[1]).rstrip() + viewer = str(result_[0]).rstrip() + except IndexError: + raise IndexError(( + "Viewer Input string is not correct. " + "Missing bracket! {}" + ).format(input_string)) + + return (display, viewer) + + +def create_viewer_profile_string(viewer, display=None, path_like=False): + """Convert viewer and display to string + + Args: + viewer (str): viewer name + display (Optional[str]): display name + path_like (Optional[bool]): if True, return path like string + + Returns: + str: viewer config string + """ + if not display: + return viewer + + if path_like: + return "{}/{}".format(display, viewer) + return "{} ({})".format(viewer, display) + + +def get_filenames_without_hash(filename, frame_start, frame_end): + """Get filenames without frame hash + i.e. 
"renderCompositingMain.baking.0001.exr" + + Args: + filename (str): filename with frame hash + frame_start (str): start of the frame + frame_end (str): end of the frame + + Returns: + list: filename per frame of the sequence + """ + filenames = [] + for frame in range(int(frame_start), (int(frame_end) + 1)): + if "#" in filename: + # use regex to convert #### to {:0>4} + def replace(match): + return "{{:0>{}}}".format(len(match.group())) + filename_without_hashes = re.sub("#+", replace, filename) + new_filename = filename_without_hashes.format(frame) + filenames.append(new_filename) + return filenames + + +def create_camera_node_by_version(): + """Function to create the camera with the latest node class + For Nuke version 14.0 or later, the Camera4 camera node class + would be used + For the version before, the Camera2 camera node class + would be used + Returns: + Node: camera node + """ + nuke_number_version = nuke.NUKE_VERSION_MAJOR + if nuke_number_version >= 14: + return nuke.createNode("Camera4") + else: + return nuke.createNode("Camera2") + + +def link_knobs(knobs, node, group_node): + """Link knobs from inside `group_node`""" + + missing_knobs = [] + for knob in knobs: + if knob in group_node.knobs(): + continue + + if knob not in node.knobs().keys(): + missing_knobs.append(knob) + + link = nuke.Link_Knob("") + link.makeLink(node.name(), knob) + link.setName(knob) + link.setFlag(0x1000) + group_node.addKnob(link) + + if missing_knobs: + raise ValueError( + "Write node exposed knobs missing:\n\n{}\n\nPlease review" + " project settings.".format("\n".join(missing_knobs)) + ) diff --git a/client/ayon_core/hosts/nuke/api/pipeline.py b/client/ayon_core/hosts/nuke/api/pipeline.py new file mode 100644 index 0000000000..bdba0757b6 --- /dev/null +++ b/client/ayon_core/hosts/nuke/api/pipeline.py @@ -0,0 +1,628 @@ +import nuke + +import os +import importlib +from collections import OrderedDict, defaultdict + +import pyblish.api + +from ayon_core.host import ( + HostBase, + IWorkfileHost, + ILoadHost, + IPublishHost +) +from ayon_core.settings import get_current_project_settings +from ayon_core.lib import register_event_callback, Logger +from ayon_core.pipeline import ( + register_loader_plugin_path, + register_creator_plugin_path, + register_inventory_action_path, + AVALON_CONTAINER_ID, + get_current_asset_name, + get_current_task_name, +) +from ayon_core.pipeline.workfile import BuildWorkfile +from ayon_core.tools.utils import host_tools +from ayon_core.hosts.nuke import NUKE_ROOT_DIR + +from .command import viewer_update_and_undo_stop +from .lib import ( + Context, + ROOT_DATA_KNOB, + INSTANCE_DATA_KNOB, + get_main_window, + add_publish_knob, + WorkfileSettings, + # TODO: remove this once workfile builder will be removed + process_workfile_builder, + start_workfile_template_builder, + launch_workfiles_app, + check_inventory_versions, + set_avalon_knob_data, + read_avalon_data, + on_script_load, + dirmap_file_name_filter, + add_scripts_menu, + add_scripts_gizmo, + get_node_data, + set_node_data, + MENU_LABEL, +) +from .workfile_template_builder import ( + NukePlaceholderLoadPlugin, + NukePlaceholderCreatePlugin, + build_workfile_template, + create_placeholder, + update_placeholder, +) +from .workio import ( + open_file, + save_file, + file_extensions, + has_unsaved_changes, + work_root, + current_file +) +from .constants import ASSIST + +log = Logger.get_logger(__name__) + +PLUGINS_DIR = os.path.join(NUKE_ROOT_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = 
os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + +# registering pyblish gui regarding settings in presets +if os.getenv("PYBLISH_GUI", None): + pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None)) + + +class NukeHost( + HostBase, IWorkfileHost, ILoadHost, IPublishHost +): + name = "nuke" + + def open_workfile(self, filepath): + return open_file(filepath) + + def save_workfile(self, filepath=None): + return save_file(filepath) + + def work_root(self, session): + return work_root(session) + + def get_current_workfile(self): + return current_file() + + def workfile_has_unsaved_changes(self): + return has_unsaved_changes() + + def get_workfile_extensions(self): + return file_extensions() + + def get_workfile_build_placeholder_plugins(self): + return [ + NukePlaceholderLoadPlugin, + NukePlaceholderCreatePlugin + ] + + def get_containers(self): + return ls() + + def install(self): + ''' Installing all requarements for Nuke host + ''' + + pyblish.api.register_host("nuke") + + self.log.info("Registering Nuke plug-ins..") + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) + + # Register Avalon event for workfiles loading. + register_event_callback("workio.open_file", check_inventory_versions) + register_event_callback("taskChanged", change_context_label) + + _install_menu() + + # add script menu + add_scripts_menu() + add_scripts_gizmo() + + add_nuke_callbacks() + + launch_workfiles_app() + + def get_context_data(self): + root_node = nuke.root() + return get_node_data(root_node, ROOT_DATA_KNOB) + + def update_context_data(self, data, changes): + root_node = nuke.root() + set_node_data(root_node, ROOT_DATA_KNOB, data) + + +def add_nuke_callbacks(): + """ Adding all available nuke callbacks + """ + nuke_settings = get_current_project_settings()["nuke"] + workfile_settings = WorkfileSettings() + + # Set context settings. + nuke.addOnCreate( + workfile_settings.set_context_settings, nodeClass="Root") + + # adding favorites to file browser + nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") + + # template builder callbacks + nuke.addOnCreate(start_workfile_template_builder, nodeClass="Root") + + # TODO: remove this callback once workfile builder will be removed + nuke.addOnCreate(process_workfile_builder, nodeClass="Root") + + # fix ffmpeg settings on script + nuke.addOnScriptLoad(on_script_load) + + # set checker for last versions on loaded containers + nuke.addOnScriptLoad(check_inventory_versions) + nuke.addOnScriptSave(check_inventory_versions) + + # set apply all workfile settings on script load and save + nuke.addOnScriptLoad(WorkfileSettings().set_context_settings) + + + if nuke_settings["nuke-dirmap"]["enabled"]: + log.info("Added Nuke's dir-mapping callback ...") + # Add dirmap for file paths. + nuke.addFilenameFilter(dirmap_file_name_filter) + + log.info("Added Nuke callbacks ...") + + +def reload_config(): + """Attempt to reload pipeline at run-time. + + CAUTION: This is primarily for development and debugging purposes. 
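+
+    Example (illustrative; the function takes no arguments and reloads
+    the listed ayon_core.hosts.nuke.api modules in place):
+        >>> reload_config()  # doctest: +SKIP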
+ + """ + + for module in ( + "ayon_core.hosts.nuke.api.actions", + "ayon_core.hosts.nuke.api.menu", + "ayon_core.hosts.nuke.api.plugin", + "ayon_core.hosts.nuke.api.lib", + ): + log.info("Reloading module: {}...".format(module)) + + module = importlib.import_module(module) + + try: + importlib.reload(module) + except AttributeError as e: + from importlib import reload + log.warning("Cannot reload module: {}".format(e)) + reload(module) + + +def _show_workfiles(): + # Make sure parent is not set + # - this makes Workfiles tool as separated window which + # avoid issues with reopening + # - it is possible to explicitly change on top flag of the tool + host_tools.show_workfiles(parent=None, on_top=False) + + +def get_context_label(): + return "{0}, {1}".format( + get_current_asset_name(), + get_current_task_name() + ) + + +def _install_menu(): + """Install Avalon menu into Nuke's main menu bar.""" + + # uninstall original avalon menu + main_window = get_main_window() + menubar = nuke.menu("Nuke") + menu = menubar.addMenu(MENU_LABEL) + + if not ASSIST: + label = get_context_label() + context_action_item = menu.addCommand("Context") + context_action_item.setEnabled(False) + + Context.context_action_item = context_action_item + + context_action = context_action_item.action() + context_action.setText(label) + + # add separator after context label + menu.addSeparator() + + menu.addCommand( + "Work Files...", + _show_workfiles + ) + + menu.addSeparator() + if not ASSIST: + # only add parent if nuke version is 14 or higher + # known issue with no solution yet + menu.addCommand( + "Create...", + lambda: host_tools.show_publisher( + parent=main_window, + tab="create" + ) + ) + # only add parent if nuke version is 14 or higher + # known issue with no solution yet + menu.addCommand( + "Publish...", + lambda: host_tools.show_publisher( + parent=main_window, + tab="publish" + ) + ) + + menu.addCommand( + "Load...", + lambda: host_tools.show_loader( + parent=main_window, + use_context=True + ) + ) + menu.addCommand( + "Manage...", + lambda: host_tools.show_scene_inventory(parent=main_window) + ) + menu.addSeparator() + menu.addCommand( + "Library...", + lambda: host_tools.show_library_loader( + parent=main_window + ) + ) + menu.addSeparator() + menu.addCommand( + "Set Resolution", + lambda: WorkfileSettings().reset_resolution() + ) + menu.addCommand( + "Set Frame Range", + lambda: WorkfileSettings().reset_frame_range_handles() + ) + menu.addCommand( + "Set Colorspace", + lambda: WorkfileSettings().set_colorspace() + ) + menu.addCommand( + "Apply All Settings", + lambda: WorkfileSettings().set_context_settings() + ) + + menu.addSeparator() + menu.addCommand( + "Build Workfile", + lambda: BuildWorkfile().process() + ) + + menu_template = menu.addMenu("Template Builder") # creating template menu + menu_template.addCommand( + "Build Workfile from template", + lambda: build_workfile_template() + ) + + if not ASSIST: + menu_template.addSeparator() + menu_template.addCommand( + "Create Place Holder", + lambda: create_placeholder() + ) + menu_template.addCommand( + "Update Place Holder", + lambda: update_placeholder() + ) + + menu.addSeparator() + menu.addCommand( + "Experimental tools...", + lambda: host_tools.show_experimental_tools_dialog(parent=main_window) + ) + menu.addSeparator() + # add reload pipeline only in debug mode + if bool(os.getenv("NUKE_DEBUG")): + menu.addSeparator() + menu.addCommand("Reload Pipeline", reload_config) + + # adding shortcuts + add_shortcuts_from_presets() + + +def 
change_context_label(): + if ASSIST: + return + + context_action_item = Context.context_action_item + if context_action_item is None: + return + context_action = context_action_item.action() + + old_label = context_action.text() + new_label = get_context_label() + + context_action.setText(new_label) + + log.info("Task label changed from `{}` to `{}`".format( + old_label, new_label)) + + +def add_shortcuts_from_presets(): + menubar = nuke.menu("Nuke") + nuke_presets = get_current_project_settings()["nuke"]["general"] + + if nuke_presets.get("menu"): + menu_label_mapping = { + "create": "Create...", + "manage": "Manage...", + "load": "Load...", + "build_workfile": "Build Workfile", + "publish": "Publish..." + } + + for command_name, shortcut_str in nuke_presets.get("menu").items(): + log.info("menu_name `{}` | menu_label `{}`".format( + command_name, MENU_LABEL + )) + log.info("Adding Shortcut `{}` to `{}`".format( + shortcut_str, command_name + )) + try: + menu = menubar.findItem(MENU_LABEL) + item_label = menu_label_mapping[command_name] + menuitem = menu.findItem(item_label) + menuitem.setShortcut(shortcut_str) + except (AttributeError, KeyError) as e: + log.error(e) + + +def containerise(node, + name, + namespace, + context, + loader=None, + data=None): + """Bundle `node` into an assembly and imprint it with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + node (nuke.Node): Nuke's node object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + context (dict): Asset information + loader (str, optional): Name of node used to produce this container. + + Returns: + node (nuke.Node): containerised nuke's node object + + """ + data = OrderedDict( + [ + ("schema", "openpype:container-2.0"), + ("id", AVALON_CONTAINER_ID), + ("name", name), + ("namespace", namespace), + ("loader", str(loader)), + ("representation", context["representation"]["_id"]), + ], + + **data or dict() + ) + + set_avalon_knob_data(node, data) + + # set tab to first native + node.setTab(0) + + return node + + +def parse_container(node): + """Returns containerised data of a node + + Reads the imprinted data from `containerise`. + + Arguments: + node (nuke.Node): Nuke's node object to read imprinted data + + Returns: + dict: The container schema data for this container node. + + """ + data = read_avalon_data(node) + + # If not all required data return the empty container + required = ["schema", "id", "name", + "namespace", "loader", "representation"] + if not all(key in data for key in required): + return + + # Store the node's name + data.update({ + "objectName": node.fullName(), + "node": node, + }) + + return data + + +def update_container(node, keys=None): + """Returns node with updateted containder data + + Arguments: + node (nuke.Node): The node in Nuke to imprint as container, + keys (dict, optional): data which should be updated + + Returns: + node (nuke.Node): nuke node with updated container data + + Raises: + TypeError on given an invalid container node + + """ + keys = keys or dict() + + container = parse_container(node) + if not container: + raise TypeError("Not a valid container node.") + + container.update(keys) + node = set_avalon_knob_data(node, container) + + return node + + +def ls(): + """List available containers. + + This function is used by the Container Manager in Nuke. You'll + need to implement a for-loop that then *yields* one Container at + a time. 
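+
+    Example (illustrative sketch):
+        >>> for container in ls():              # doctest: +SKIP
+        ...     print(container["objectName"])  # doctest: +SKIP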
+ """ + all_nodes = nuke.allNodes(recurseGroups=False) + + nodes = [n for n in all_nodes] + + for n in nodes: + container = parse_container(n) + if container: + yield container + + +def list_instances(creator_id=None): + """List all created instances to publish from current workfile. + + For SubsetManager + + Args: + creator_id (Optional[str]): creator identifier + + Returns: + (list) of dictionaries matching instances format + """ + instances_by_order = defaultdict(list) + subset_instances = [] + instance_ids = set() + + for node in nuke.allNodes(recurseGroups=True): + + if node.Class() in ["Viewer", "Dot"]: + continue + + try: + if node["disable"].value(): + continue + except NameError: + # pass if disable knob doesn't exist + pass + + # get data from avalon knob + instance_data = get_node_data( + node, INSTANCE_DATA_KNOB) + + if not instance_data: + continue + + if instance_data["id"] != "pyblish.avalon.instance": + continue + + if creator_id and instance_data["creator_identifier"] != creator_id: + continue + + instance_id = instance_data.get("instance_id") + if not instance_id: + pass + elif instance_id in instance_ids: + instance_data.pop("instance_id") + else: + instance_ids.add(instance_id) + + # node name could change, so update subset name data + _update_subset_name_data(instance_data, node) + + if "render_order" not in node.knobs(): + subset_instances.append((node, instance_data)) + continue + + order = int(node["render_order"].value()) + instances_by_order[order].append((node, instance_data)) + + # Sort instances based on order attribute or subset name. + # TODO: remove in future Publisher enhanced with sorting + ordered_instances = [] + for key in sorted(instances_by_order.keys()): + instances_by_subset = defaultdict(list) + for node, data_ in instances_by_order[key]: + instances_by_subset[data_["subset"]].append((node, data_)) + for subkey in sorted(instances_by_subset.keys()): + ordered_instances.extend(instances_by_subset[subkey]) + + instances_by_subset = defaultdict(list) + for node, data_ in subset_instances: + instances_by_subset[data_["subset"]].append((node, data_)) + for key in sorted(instances_by_subset.keys()): + ordered_instances.extend(instances_by_subset[key]) + + return ordered_instances + + +def _update_subset_name_data(instance_data, node): + """Update subset name data in instance data. + + Args: + instance_data (dict): instance creator data + node (nuke.Node): nuke node + """ + # make sure node name is subset name + old_subset_name = instance_data["subset"] + old_variant = instance_data["variant"] + subset_name_root = old_subset_name.replace(old_variant, "") + + new_subset_name = node.name() + new_variant = new_subset_name.replace(subset_name_root, "") + + instance_data["subset"] = new_subset_name + instance_data["variant"] = new_variant + + +def remove_instance(instance): + """Remove instance from current workfile metadata. 
+ + For SubsetManager + + Args: + instance (dict): instance representation from subsetmanager model + """ + instance_node = instance.transient_data["node"] + instance_knob = instance_node.knobs()[INSTANCE_DATA_KNOB] + instance_node.removeKnob(instance_knob) + nuke.delete(instance_node) + + +def select_instance(instance): + """ + Select instance in Node View + + Args: + instance (dict): instance representation from subsetmanager model + """ + instance_node = instance.transient_data["node"] + instance_node["selected"].setValue(True) diff --git a/client/ayon_core/hosts/nuke/api/plugin.py b/client/ayon_core/hosts/nuke/api/plugin.py new file mode 100644 index 0000000000..4b8ddac167 --- /dev/null +++ b/client/ayon_core/hosts/nuke/api/plugin.py @@ -0,0 +1,1355 @@ +import nuke +import re +import os +import sys +import six +import random +import string +from collections import OrderedDict, defaultdict +from abc import abstractmethod + +from ayon_core.settings import get_current_project_settings +from ayon_core.lib import ( + BoolDef, + EnumDef +) +from ayon_core.pipeline import ( + LegacyCreator, + LoaderPlugin, + CreatorError, + Creator as NewCreator, + CreatedInstance, + get_current_task_name +) +from ayon_core.pipeline.colorspace import ( + get_display_view_colorspace_name, + get_colorspace_settings_from_publish_context, + set_colorspace_data_to_representation +) +from ayon_core.lib.transcoding import ( + VIDEO_EXTENSIONS +) +from .lib import ( + INSTANCE_DATA_KNOB, + Knobby, + check_subsetname_exists, + maintained_selection, + get_avalon_knob_data, + set_avalon_knob_data, + add_publish_knob, + get_nuke_imageio_settings, + set_node_knobs_from_settings, + set_node_data, + get_node_data, + get_view_process_node, + get_viewer_config_from_string, + deprecated, + get_filenames_without_hash, + link_knobs +) +from .pipeline import ( + list_instances, + remove_instance +) + + +def _collect_and_cache_nodes(creator): + key = "openpype.nuke.nodes" + if key not in creator.collection_shared_data: + instances_by_identifier = defaultdict(list) + for item in list_instances(): + _, instance_data = item + identifier = instance_data["creator_identifier"] + instances_by_identifier[identifier].append(item) + creator.collection_shared_data[key] = instances_by_identifier + return creator.collection_shared_data[key] + + +class NukeCreatorError(CreatorError): + pass + + +class NukeCreator(NewCreator): + selected_nodes = [] + + def pass_pre_attributes_to_instance( + self, + instance_data, + pre_create_data, + keys=None + ): + if not keys: + keys = pre_create_data.keys() + + creator_attrs = instance_data["creator_attributes"] = {} + for pass_key in keys: + creator_attrs[pass_key] = pre_create_data[pass_key] + + def check_existing_subset(self, subset_name): + """Make sure subset name is unique. + + It search within all nodes recursively + and checks if subset name is found in + any node having instance data knob. + + Arguments: + subset_name (str): Subset name + """ + + for node in nuke.allNodes(recurseGroups=True): + # make sure testing node is having instance knob + if INSTANCE_DATA_KNOB not in node.knobs().keys(): + continue + node_data = get_node_data(node, INSTANCE_DATA_KNOB) + + if not node_data: + # a node has no instance data + continue + + # test if subset name is matching + if node_data.get("subset") == subset_name: + raise NukeCreatorError( + ( + "A publish instance for '{}' already exists " + "in nodes! Please change the variant " + "name to ensure unique output." 
+                    ).format(subset_name)
+                )
+
+    def create_instance_node(
+        self,
+        node_name,
+        knobs=None,
+        parent=None,
+        node_type=None
+    ):
+        """Create node representing instance.
+
+        Arguments:
+            node_name (str): Name of the new node.
+            knobs (OrderedDict): node knob names and values
+            parent (str): Name of the parent node.
+            node_type (str, optional): Nuke node Class.
+
+        Returns:
+            nuke.Node: Newly created instance node.
+
+        """
+        node_type = node_type or "NoOp"
+
+        node_knobs = knobs or {}
+
+        # set parent node
+        parent_node = nuke.root()
+        if parent:
+            parent_node = nuke.toNode(parent)
+
+        try:
+            with parent_node:
+                created_node = nuke.createNode(node_type)
+                created_node["name"].setValue(node_name)
+
+                for key, values in node_knobs.items():
+                    if key in created_node.knobs():
+                        created_node[key].setValue(values)
+        except Exception as _err:
+            raise NukeCreatorError("Node creation failed: {}".format(_err))
+
+        return created_node
+
+    def set_selected_nodes(self, pre_create_data):
+        if pre_create_data.get("use_selection"):
+            self.selected_nodes = nuke.selectedNodes()
+            if self.selected_nodes == []:
+                raise NukeCreatorError("Creator error: No active selection")
+        else:
+            self.selected_nodes = []
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        # make sure selected nodes are added
+        self.set_selected_nodes(pre_create_data)
+
+        # make sure subset name is unique
+        self.check_existing_subset(subset_name)
+
+        try:
+            instance_node = self.create_instance_node(
+                subset_name,
+                node_type=instance_data.pop("node_type", None)
+            )
+            instance = CreatedInstance(
+                self.family,
+                subset_name,
+                instance_data,
+                self
+            )
+
+            instance.transient_data["node"] = instance_node
+
+            self._add_instance_to_context(instance)
+
+            set_node_data(
+                instance_node, INSTANCE_DATA_KNOB, instance.data_to_store())
+
+            return instance
+
+        except Exception as er:
+            six.reraise(
+                NukeCreatorError,
+                NukeCreatorError("Creator error: {}".format(er)),
+                sys.exc_info()[2])
+
+    def collect_instances(self):
+        cached_instances = _collect_and_cache_nodes(self)
+        attr_def_keys = {
+            attr_def.key
+            for attr_def in self.get_instance_attr_defs()
+        }
+        attr_def_keys.discard(None)
+
+        for (node, data) in cached_instances[self.identifier]:
+            created_instance = CreatedInstance.from_existing(
+                data, self
+            )
+            created_instance.transient_data["node"] = node
+            self._add_instance_to_context(created_instance)
+
+            for key in (
+                set(created_instance["creator_attributes"].keys())
+                - attr_def_keys
+            ):
+                created_instance["creator_attributes"].pop(key)
+
+    def update_instances(self, update_list):
+        for created_inst, changes in update_list:
+            instance_node = created_inst.transient_data["node"]
+
+            # update instance node name if subset name changed
+            if "subset" in changes.changed_keys:
+                instance_node["name"].setValue(
+                    changes["subset"].new_value
+                )
+
+            # in case node is not existing anymore (user erased it manually)
+            try:
+                instance_node.fullName()
+            except ValueError:
+                self.remove_instances([created_inst])
+                continue
+
+            set_node_data(
+                instance_node,
+                INSTANCE_DATA_KNOB,
+                created_inst.data_to_store()
+            )
+
+    def remove_instances(self, instances):
+        for instance in instances:
+            remove_instance(instance)
+            self._remove_instance_from_context(instance)
+
+    def get_pre_create_attr_defs(self):
+        return [
+            BoolDef(
+                "use_selection",
+                default=not self.create_context.headless,
+                label="Use selection"
+            )
+        ]
+
+    def get_creator_settings(self, project_settings, settings_key=None):
+        if not settings_key:
+            settings_key = self.__class__.__name__
+        return project_settings["nuke"]["create"][settings_key]
+
+
+class NukeWriteCreator(NukeCreator):
+    """Add Publishable Write node"""
+
+    identifier = "create_write"
+    label = "Create Write"
+    family = "write"
+    icon = "sign-out"
+
+    def get_linked_knobs(self):
+        linked_knobs = []
+        if "channels" in self.instance_attributes:
+            linked_knobs.append("channels")
+        if "ordered" in self.instance_attributes:
+            linked_knobs.append("render_order")
+        if "use_range_limit" in self.instance_attributes:
+            linked_knobs.extend(["___", "first", "last", "use_limit"])
+
+        return linked_knobs
+
+    def integrate_links(self, node, outputs=True):
+        # skip if no selection
+        if not self.selected_node:
+            return
+
+        # collect dependencies
+        input_nodes = [self.selected_node]
+        dependent_nodes = self.selected_node.dependent() if outputs else []
+
+        # relinking to collected connections
+        for i, input in enumerate(input_nodes):
+            node.setInput(i, input)
+
+        # make it nicer in graph
+        node.autoplace()
+
+        # relink also dependent nodes
+        for dep_nodes in dependent_nodes:
+            dep_nodes.setInput(0, node)
+
+    def set_selected_nodes(self, pre_create_data):
+        if pre_create_data.get("use_selection"):
+            selected_nodes = nuke.selectedNodes()
+            if selected_nodes == []:
+                raise NukeCreatorError("Creator error: No active selection")
+            elif len(selected_nodes) > 1:
+                raise NukeCreatorError(
+                    "Creator error: Select only one node")
+            self.selected_node = selected_nodes[0]
+        else:
+            self.selected_node = None
+
+    def get_pre_create_attr_defs(self):
+        attr_defs = [
+            BoolDef("use_selection", label="Use selection"),
+            self._get_render_target_enum()
+        ]
+        return attr_defs
+
+    def get_instance_attr_defs(self):
+        attr_defs = [
+            self._get_render_target_enum(),
+        ]
+        # add reviewable attribute
+        if "reviewable" in self.instance_attributes:
+            attr_defs.append(self._get_reviewable_bool())
+
+        return attr_defs
+
+    def _get_render_target_enum(self):
+        rendering_targets = {
+            "local": "Local machine rendering",
+            "frames": "Use existing frames"
+        }
+        if "farm_rendering" in self.instance_attributes:
+            rendering_targets["frames_farm"] = "Use existing frames - farm"
+            rendering_targets["farm"] = "Farm rendering"
+
+        return EnumDef(
+            "render_target",
+            items=rendering_targets,
+            label="Render target"
+        )
+
+    def _get_reviewable_bool(self):
+        return BoolDef(
+            "review",
+            default=True,
+            label="Review"
+        )
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # make sure selected nodes are added
+        self.set_selected_nodes(pre_create_data)
+
+        # make sure subset name is unique
+        self.check_existing_subset(subset_name)
+
+        instance_node = self.create_instance_node(
+            subset_name,
+            instance_data
+        )
+
+        try:
+            instance = CreatedInstance(
+                self.family,
+                subset_name,
+                instance_data,
+                self
+            )
+
+            instance.transient_data["node"] = instance_node
+
+            self._add_instance_to_context(instance)
+
+            set_node_data(
+                instance_node, INSTANCE_DATA_KNOB, instance.data_to_store())
+
+            return instance
+
+        except Exception as er:
+            six.reraise(
+                NukeCreatorError,
+                NukeCreatorError("Creator error: {}".format(er)),
+                sys.exc_info()[2]
+            )
+
+    def apply_settings(self, project_settings):
+        """Method called on initialization of plugin to apply settings."""
+
+        # plugin settings
+        plugin_settings = self.get_creator_settings(project_settings)
+
+        # individual attributes
+        self.instance_attributes = plugin_settings.get(
+            "instance_attributes") or self.instance_attributes
+        self.prenodes = plugin_settings["prenodes"]
+        self.default_variants = plugin_settings.get(
+            "default_variants") or self.default_variants
+        self.temp_rendering_path_template = (
+            plugin_settings.get("temp_rendering_path_template")
+            or self.temp_rendering_path_template
+        )
+
+
+class OpenPypeCreator(LegacyCreator):
+    """Pype Nuke Creator class wrapper"""
+    node_color = "0xdfea5dff"
+
+    def __init__(self, *args, **kwargs):
+        super(OpenPypeCreator, self).__init__(*args, **kwargs)
+        if check_subsetname_exists(
+                nuke.allNodes(),
+                self.data["subset"]):
+            msg = ("The subset name `{0}` is already used on a node in "
+                   "this workfile.".format(self.data["subset"]))
+            self.log.error(msg + "\n\nPlease use another subset name!")
+            raise NameError("{0}: {1}".format(__name__, msg))
+        return
+
+    def process(self):
+        from nukescripts import autoBackdrop
+
+        instance = None
+
+        if (self.options or {}).get("useSelection"):
+
+            nodes = nuke.selectedNodes()
+            if not nodes:
+                nuke.message("Please select nodes that you "
+                             "wish to add to a container")
+                return
+
+            elif len(nodes) == 1:
+                # only one node is selected
+                instance = nodes[0]
+
+        if not instance:
+            # Not using selection or multiple nodes selected
+            bckd_node = autoBackdrop()
+            bckd_node["tile_color"].setValue(int(self.node_color, 16))
+            bckd_node["note_font_size"].setValue(24)
+            bckd_node["label"].setValue("[{}]".format(self.name))
+
+            instance = bckd_node
+
+        # add avalon knobs
+        set_avalon_knob_data(instance, self.data)
+        add_publish_knob(instance)
+
+        return instance
+
+
+def get_instance_group_node_childs(instance):
+    """Return list of instance group node children
+
+    Args:
+        instance (pyblish.Instance): pyblish instance
+
+    Returns:
+        list: [nuke.Node]
+    """
+    node = instance.data["transientData"]["node"]
+
+    if node.Class() != "Group":
+        return
+
+    # collect child nodes
+    child_nodes = []
+    # iterate all nodes
+    for node in nuke.allNodes(group=node):
+        # add contained nodes to instance's node list
+        child_nodes.append(node)
+
+    return child_nodes
+
+
+def get_colorspace_from_node(node):
+    # Add version data to instance
+    colorspace = node["colorspace"].value()
+
+    # remove default part of the string
+    if "default (" in colorspace:
+        colorspace = re.sub(r"default.\(|\)", "", colorspace)
+
+    return colorspace
+
+
+def get_review_presets_config():
+    settings = get_current_project_settings()
+    review_profiles = (
+        settings["global"]
+        ["publish"]
+        ["ExtractReview"]
+        ["profiles"]
+    )
+
+    outputs = {}
+    for profile in review_profiles:
+        outputs.update(profile.get("outputs", {}))
+
+    return [str(name) for name, _prop in outputs.items()]
+
+
+class NukeLoader(LoaderPlugin):
+    container_id_knob = "containerId"
+    container_id = None
+
+    def reset_container_id(self):
+        self.container_id = "".join(random.choice(
+            string.ascii_uppercase + string.digits) for _ in range(10))
+
+    def get_container_id(self, node):
+        id_knob = node.knobs().get(self.container_id_knob)
+        return id_knob.value() if id_knob else None
+
+    def get_members(self, source):
+        """Return nodes that have the same "containerId" as `source`"""
+        source_id = self.get_container_id(source)
+        return [node for node in nuke.allNodes(recurseGroups=True)
+                if self.get_container_id(node) == source_id
+                and node is not source] if source_id else []
+
+    def set_as_member(self, node):
+        source_id = self.get_container_id(node)
+
+        if source_id:
+            node[self.container_id_knob].setValue(source_id)
+        else:
+            HIDDEN_FLAG = 0x00040000
+            _knob = Knobby(
+                "String_Knob",
+                self.container_id,
+                flags=[
+                    nuke.READ_ONLY,
+                    HIDDEN_FLAG
+                ])
+            knob =
_knob.create(self.container_id_knob) + node.addKnob(knob) + + def clear_members(self, parent_node): + parent_class = parent_node.Class() + members = self.get_members(parent_node) + + dependent_nodes = None + for node in members: + _depndc = [n for n in node.dependent() if n not in members] + if not _depndc: + continue + + dependent_nodes = _depndc + break + + for member in members: + if member.Class() == parent_class: + continue + self.log.info("removing node: `{}".format(member.name())) + nuke.delete(member) + + return dependent_nodes + + +class ExporterReview(object): + """ + Base class object for generating review data from Nuke + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + """ + data = None + publish_on_farm = False + + def __init__(self, + klass, + instance, + multiple_presets=True + ): + + self.log = klass.log + self.instance = instance + self.multiple_presets = multiple_presets + self.path_in = self.instance.data.get("path", None) + self.staging_dir = self.instance.data["stagingDir"] + self.collection = self.instance.data.get("collection", None) + self.data = {"representations": []} + + def get_file_info(self): + if self.collection: + # get path + self.fname = os.path.basename( + self.collection.format("{head}{padding}{tail}") + ) + self.fhead = self.collection.format("{head}") + + # get first and last frame + self.first_frame = min(self.collection.indexes) + self.last_frame = max(self.collection.indexes) + + # make sure slate frame is not included + frame_start_handle = self.instance.data["frameStartHandle"] + if frame_start_handle > self.first_frame: + self.first_frame = frame_start_handle + + else: + self.fname = os.path.basename(self.path_in) + self.fhead = os.path.splitext(self.fname)[0] + "." + self.first_frame = self.instance.data["frameStartHandle"] + self.last_frame = self.instance.data["frameEndHandle"] + + if "#" in self.fhead: + self.fhead = self.fhead.replace("#", "")[:-1] + + def get_representation_data( + self, tags=None, range=False, + custom_tags=None, colorspace=None + ): + """ Add representation data to self.data + + Args: + tags (list[str], optional): list of defined tags. + Defaults to None. + range (bool, optional): flag for adding ranges. + Defaults to False. + custom_tags (list[str], optional): user inputted custom tags. + Defaults to None. + """ + add_tags = tags or [] + repre = { + "name": self.name, + "ext": self.ext, + "files": self.file, + "stagingDir": self.staging_dir, + "tags": [self.name.replace("_", "-")] + add_tags + } + + if custom_tags: + repre["custom_tags"] = custom_tags + + if range: + repre.update({ + "frameStart": self.first_frame, + "frameEnd": self.last_frame, + }) + if ".{}".format(self.ext) not in VIDEO_EXTENSIONS: + filenames = get_filenames_without_hash( + self.file, self.first_frame, self.last_frame) + repre["files"] = filenames + + if self.multiple_presets: + repre["outputName"] = self.name + + if self.publish_on_farm: + repre["tags"].append("publish_on_farm") + + # add colorspace data to representation + if colorspace: + set_colorspace_data_to_representation( + repre, + self.instance.context.data, + colorspace=colorspace, + log=self.log + ) + self.data["representations"].append(repre) + + def get_imageio_baking_profile(self): + from . 
import lib as opnlib + nuke_imageio = opnlib.get_nuke_imageio_settings() + + # TODO: this is only securing backward compatibility lets remove + # this once all projects's anatomy are updated to newer config + if "baking" in nuke_imageio.keys(): + return nuke_imageio["baking"]["viewerProcess"] + else: + return nuke_imageio["viewer"]["viewerProcess"] + + +class ExporterReviewLut(ExporterReview): + """ + Generator object for review lut from Nuke + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + + """ + _temp_nodes = [] + + def __init__(self, + klass, + instance, + name=None, + ext=None, + cube_size=None, + lut_size=None, + lut_style=None, + multiple_presets=True): + # initialize parent class + super(ExporterReviewLut, self).__init__( + klass, instance, multiple_presets) + + # deal with now lut defined in viewer lut + if hasattr(klass, "viewer_lut_raw"): + self.viewer_lut_raw = klass.viewer_lut_raw + else: + self.viewer_lut_raw = False + + self.name = name or "baked_lut" + self.ext = ext or "cube" + self.cube_size = cube_size or 32 + self.lut_size = lut_size or 1024 + self.lut_style = lut_style or "linear" + + # set frame start / end and file name to self + self.get_file_info() + + self.log.info("File info was set...") + + self.file = self.fhead + self.name + ".{}".format(self.ext) + self.path = os.path.join( + self.staging_dir, self.file).replace("\\", "/") + + def clean_nodes(self): + for node in self._temp_nodes: + nuke.delete(node) + self._temp_nodes = [] + self.log.info("Deleted nodes...") + + def generate_lut(self, **kwargs): + bake_viewer_process = kwargs["bake_viewer_process"] + bake_viewer_input_process_node = kwargs[ + "bake_viewer_input_process"] + + # ---------- start nodes creation + + # CMSTestPattern + cms_node = nuke.createNode("CMSTestPattern") + cms_node["cube_size"].setValue(self.cube_size) + # connect + self._temp_nodes.append(cms_node) + self.previous_node = cms_node + + if bake_viewer_process: + # Node View Process + if bake_viewer_input_process_node: + ipn = get_view_process_node() + if ipn is not None: + # connect + ipn.setInput(0, self.previous_node) + self._temp_nodes.append(ipn) + self.previous_node = ipn + self.log.debug( + "ViewProcess... `{}`".format(self._temp_nodes)) + + if not self.viewer_lut_raw: + # OCIODisplay + dag_node = nuke.createNode("OCIODisplay") + # connect + dag_node.setInput(0, self.previous_node) + self._temp_nodes.append(dag_node) + self.previous_node = dag_node + self.log.debug( + "OCIODisplay... 
`{}`".format(self._temp_nodes)) + + # GenerateLUT + gen_lut_node = nuke.createNode("GenerateLUT") + gen_lut_node["file"].setValue(self.path) + gen_lut_node["file_type"].setValue(".{}".format(self.ext)) + gen_lut_node["lut1d"].setValue(self.lut_size) + gen_lut_node["style1d"].setValue(self.lut_style) + # connect + gen_lut_node.setInput(0, self.previous_node) + self._temp_nodes.append(gen_lut_node) + # ---------- end nodes creation + + # Export lut file + nuke.execute( + gen_lut_node.name(), + int(self.first_frame), + int(self.first_frame)) + + self.log.info("Exported...") + + # ---------- generate representation data + self.get_representation_data() + + # ---------- Clean up + self.clean_nodes() + + return self.data + + +class ExporterReviewMov(ExporterReview): + """ + Metaclass for generating review mov files + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + """ + _temp_nodes = {} + + def __init__(self, + klass, + instance, + name=None, + ext=None, + multiple_presets=True + ): + # initialize parent class + super(ExporterReviewMov, self).__init__( + klass, instance, multiple_presets) + # passing presets for nodes to self + self.nodes = klass.nodes if hasattr(klass, "nodes") else {} + + # deal with now lut defined in viewer lut + self.viewer_lut_raw = klass.viewer_lut_raw + self.write_colorspace = instance.data["colorspace"] + + self.name = name or "baked" + self.ext = ext or "mov" + + # set frame start / end and file name to self + self.get_file_info() + + self.log.info("File info was set...") + + if ".{}".format(self.ext) in VIDEO_EXTENSIONS: + self.file = "{}{}.{}".format( + self.fhead, self.name, self.ext) + else: + # Output is image (or image sequence) + # When the file is an image it's possible it + # has extra information after the `fhead` that + # we want to preserve, e.g. like frame numbers + # or frames hashes like `####` + filename_no_ext = os.path.splitext( + os.path.basename(self.path_in))[0] + after_head = filename_no_ext[len(self.fhead):] + self.file = "{}{}.{}.{}".format( + self.fhead, self.name, after_head, self.ext) + self.path = os.path.join( + self.staging_dir, self.file).replace("\\", "/") + + def clean_nodes(self, node_name): + for node in self._temp_nodes[node_name]: + nuke.delete(node) + self._temp_nodes[node_name] = [] + self.log.info("Deleted nodes...") + + def render(self, render_node_name): + self.log.info("Rendering... ") + # Render Write node + nuke.execute( + render_node_name, + int(self.first_frame), + int(self.last_frame)) + + self.log.info("Rendered...") + + def save_file(self): + import shutil + with maintained_selection(): + self.log.info("Saving nodes as file... 
") + # create nk path + path = os.path.splitext(self.path)[0] + ".nk" + # save file to the path + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + shutil.copyfile(self.instance.context.data["currentFile"], path) + + self.log.info("Nodes exported...") + return path + + def generate_mov(self, farm=False, **kwargs): + # colorspace data + colorspace = None + # get colorspace settings + # get colorspace data from context + config_data, _ = get_colorspace_settings_from_publish_context( + self.instance.context.data) + + add_tags = [] + self.publish_on_farm = farm + read_raw = kwargs["read_raw"] + bake_viewer_process = kwargs["bake_viewer_process"] + bake_viewer_input_process_node = kwargs[ + "bake_viewer_input_process"] + viewer_process_override = kwargs[ + "viewer_process_override"] + + baking_view_profile = ( + viewer_process_override or self.get_imageio_baking_profile()) + + fps = self.instance.context.data["fps"] + + self.log.debug(">> baking_view_profile `{}`".format( + baking_view_profile)) + + add_custom_tags = kwargs.get("add_custom_tags", []) + + self.log.info( + "__ add_custom_tags: `{0}`".format(add_custom_tags)) + + subset = self.instance.data["subset"] + self._temp_nodes[subset] = [] + + # Read node + r_node = nuke.createNode("Read") + r_node["file"].setValue(self.path_in) + r_node["first"].setValue(self.first_frame) + r_node["origfirst"].setValue(self.first_frame) + r_node["last"].setValue(self.last_frame) + r_node["origlast"].setValue(self.last_frame) + r_node["colorspace"].setValue(self.write_colorspace) + + # do not rely on defaults, set explicitly + # to be sure it is set correctly + r_node["frame_mode"].setValue("expression") + r_node["frame"].setValue("") + + if read_raw: + r_node["raw"].setValue(1) + + # connect to Read node + self._shift_to_previous_node_and_temp(subset, r_node, "Read... `{}`") + + # add reformat node + reformat_nodes_config = kwargs["reformat_nodes_config"] + if reformat_nodes_config["enabled"]: + reposition_nodes = reformat_nodes_config["reposition_nodes"] + for reposition_node in reposition_nodes: + node_class = reposition_node["node_class"] + knobs = reposition_node["knobs"] + node = nuke.createNode(node_class) + set_node_knobs_from_settings(node, knobs) + + # connect in order + self._connect_to_above_nodes( + node, subset, "Reposition node... `{}`" + ) + # append reformated tag + add_tags.append("reformated") + + # only create colorspace baking if toggled on + if bake_viewer_process: + if bake_viewer_input_process_node: + # View Process node + ipn = get_view_process_node() + if ipn is not None: + # connect to ViewProcess node + self._connect_to_above_nodes(ipn, subset, "ViewProcess... `{}`") + + if not self.viewer_lut_raw: + # OCIODisplay + dag_node = nuke.createNode("OCIODisplay") + + # assign display + display, viewer = get_viewer_config_from_string( + str(baking_view_profile) + ) + if display: + dag_node["display"].setValue(display) + + # assign viewer + dag_node["view"].setValue(viewer) + + if config_data: + # convert display and view to colorspace + colorspace = get_display_view_colorspace_name( + config_path=config_data["path"], + display=display, + view=viewer + ) + + self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`") + # Write node + write_node = nuke.createNode("Write") + self.log.debug("Path: {}".format(self.path)) + write_node["file"].setValue(str(self.path)) + write_node["file_type"].setValue(str(self.ext)) + # Knobs `meta_codec` and `mov64_codec` are not available on centos. 
+ # TODO shouldn't this come from settings on outputs? + try: + write_node["meta_codec"].setValue("ap4h") + except Exception: + self.log.info("`meta_codec` knob was not found") + + try: + write_node["mov64_codec"].setValue("ap4h") + write_node["mov64_fps"].setValue(float(fps)) + except Exception: + self.log.info("`mov64_codec` knob was not found") + + try: + write_node["mov64_write_timecode"].setValue(1) + except Exception: + self.log.info("`mov64_write_timecode` knob was not found") + + write_node["raw"].setValue(1) + # connect + write_node.setInput(0, self.previous_node) + self._temp_nodes[subset].append(write_node) + self.log.debug("Write... `{}`".format(self._temp_nodes[subset])) + # ---------- end nodes creation + + # ---------- render or save to nk + if self.publish_on_farm: + nuke.scriptSave() + path_nk = self.save_file() + self.data.update({ + "bakeScriptPath": path_nk, + "bakeWriteNodeName": write_node.name(), + "bakeRenderPath": self.path + }) + else: + self.render(write_node.name()) + + # ---------- generate representation data + self.get_representation_data( + tags=["review", "need_thumbnail", "delete"] + add_tags, + custom_tags=add_custom_tags, + range=True, + colorspace=colorspace + ) + + self.log.debug("Representation... `{}`".format(self.data)) + + self.clean_nodes(subset) + nuke.scriptSave() + + return self.data + + def _shift_to_previous_node_and_temp(self, subset, node, message): + self._temp_nodes[subset].append(node) + self.previous_node = node + self.log.debug(message.format(self._temp_nodes[subset])) + + def _connect_to_above_nodes(self, node, subset, message): + node.setInput(0, self.previous_node) + self._shift_to_previous_node_and_temp(subset, node, message) + + +@deprecated("ayon_core.hosts.nuke.api.plugin.NukeWriteCreator") +class AbstractWriteRender(OpenPypeCreator): + """Abstract creator to gather similar implementation for Write creators""" + name = "" + label = "" + hosts = ["nuke"] + n_class = "Write" + family = "render" + icon = "sign-out" + defaults = ["Main", "Mask"] + knobs = [] + prenodes = {} + + def __init__(self, *args, **kwargs): + super(AbstractWriteRender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family + data["families"] = self.n_class + + for k, v in self.data.items(): + if k not in data.keys(): + data.update({k: v}) + + self.data = data + self.nodes = nuke.selectedNodes() + + def process(self): + + inputs = [] + outputs = [] + instance = nuke.toNode(self.data["subset"]) + selected_node = None + + # use selection + if (self.options or {}).get("useSelection"): + nodes = self.nodes + + if not (len(nodes) < 2): + msg = ("Select only one node. " + "The node you want to connect to, " + "or tick off `Use selection`") + self.log.error(msg) + nuke.message(msg) + return + + if len(nodes) == 0: + msg = ( + "No nodes selected. 
Please select a single node to connect" + " to or tick off `Use selection`" + ) + self.log.error(msg) + nuke.message(msg) + return + + selected_node = nodes[0] + inputs = [selected_node] + outputs = selected_node.dependent() + + if instance: + if (instance.name() in selected_node.name()): + selected_node = instance.dependencies()[0] + + # if node already exist + if instance: + # collect input / outputs + inputs = instance.dependencies() + outputs = instance.dependent() + selected_node = inputs[0] + # remove old one + nuke.delete(instance) + + # recreate new + write_data = { + "nodeclass": self.n_class, + "families": [self.family], + "avalon": self.data, + "subset": self.data["subset"], + "knobs": self.knobs + } + + # add creator data + creator_data = {"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + + write_node = self._create_write_node( + selected_node, + inputs, + outputs, + write_data + ) + + # relinking to collected connections + for i, input in enumerate(inputs): + write_node.setInput(i, input) + + write_node.autoplace() + + for output in outputs: + output.setInput(0, write_node) + + write_node = self._modify_write_node(write_node) + + return write_node + + def is_legacy(self): + """Check if it needs to run legacy code + + In case where `type` key is missing in single + knob it is legacy project anatomy. + + Returns: + bool: True if legacy + """ + imageio_nodes = get_nuke_imageio_settings()["nodes"] + node = imageio_nodes["requiredNodes"][0] + if "type" not in node["knobs"][0]: + # if type is not yet in project anatomy + return True + elif next(iter( + _k for _k in node["knobs"] + if _k.get("type") == "__legacy__" + ), None): + # in case someone re-saved anatomy + # with old configuration + return True + + @abstractmethod + def _create_write_node(self, selected_node, inputs, outputs, write_data): + """Family dependent implementation of Write node creation + + Args: + selected_node (nuke.Node) + inputs (list of nuke.Node) - input dependencies (what is connected) + outputs (list of nuke.Node) - output dependencies + write_data (dict) - values used to fill Knobs + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass + + @abstractmethod + def _modify_write_node(self, write_node): + """Family dependent modification of created 'write_node' + + Returns: + node (nuke.Node): group node with data as Knobs + """ + pass + + +def convert_to_valid_instaces(): + """ Check and convert to latest publisher instances + + Also save as new minor version of workfile. 
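+
+    Example (illustrative; run once from Nuke's script editor on a
+    legacy workfile):
+        >>> convert_to_valid_instaces()  # doctest: +SKIP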
+ """ + def family_to_identifier(family): + mapping = { + "render": "create_write_render", + "prerender": "create_write_prerender", + "still": "create_write_image", + "model": "create_model", + "camera": "create_camera", + "nukenodes": "create_backdrop", + "gizmo": "create_gizmo", + "source": "create_source" + + } + return mapping[family] + + from ayon_core.hosts.nuke.api import workio + + task_name = get_current_task_name() + + # save into new workfile + current_file = workio.current_file() + + # add file suffex if not + if "_publisherConvert" not in current_file: + new_workfile = ( + current_file[:-3] + + "_publisherConvert" + + current_file[-3:] + ) + else: + new_workfile = current_file + + path = new_workfile.replace("\\", "/") + nuke.scriptSaveAs(new_workfile, overwrite=1) + nuke.Root()["name"].setValue(path) + nuke.Root()["project_directory"].setValue(os.path.dirname(path)) + nuke.Root().setModified(False) + + _remove_old_knobs(nuke.Root()) + + # loop all nodes and convert + for node in nuke.allNodes(recurseGroups=True): + transfer_data = { + "creator_attributes": {} + } + creator_attr = transfer_data["creator_attributes"] + + if node.Class() in ["Viewer", "Dot"]: + continue + + if get_node_data(node, INSTANCE_DATA_KNOB): + continue + + # get data from avalon knob + avalon_knob_data = get_avalon_knob_data( + node, ["avalon:", "ak:"]) + + if not avalon_knob_data: + continue + + if avalon_knob_data["id"] != "pyblish.avalon.instance": + continue + + transfer_data.update({ + k: v for k, v in avalon_knob_data.items() + if k not in ["families", "creator"] + }) + + transfer_data["task"] = task_name + + family = avalon_knob_data["family"] + # establish families + families_ak = avalon_knob_data.get("families", []) + + if "suspend_publish" in node.knobs(): + creator_attr["suspended_publish"] = ( + node["suspend_publish"].value()) + + # get review knob value + if "review" in node.knobs(): + creator_attr["review"] = ( + node["review"].value()) + + if "publish" in node.knobs(): + transfer_data["active"] = ( + node["publish"].value()) + + # add idetifier + transfer_data["creator_identifier"] = family_to_identifier(family) + + # Add all nodes in group instances. 
+ if node.Class() == "Group": + # only alter families for render family + if families_ak and "write" in families_ak.lower(): + target = node["render"].value() + if target == "Use existing frames": + creator_attr["render_target"] = "frames" + elif target == "Local": + # Local rendering + creator_attr["render_target"] = "local" + elif target == "On farm": + # Farm rendering + creator_attr["render_target"] = "farm" + + if "deadlinePriority" in node.knobs(): + transfer_data["farm_priority"] = ( + node["deadlinePriority"].value()) + if "deadlineChunkSize" in node.knobs(): + creator_attr["farm_chunk"] = ( + node["deadlineChunkSize"].value()) + if "deadlineConcurrentTasks" in node.knobs(): + creator_attr["farm_concurrency"] = ( + node["deadlineConcurrentTasks"].value()) + + _remove_old_knobs(node) + + # add new instance knob with transfer data + set_node_data( + node, INSTANCE_DATA_KNOB, transfer_data) + + nuke.scriptSave() + + +def _remove_old_knobs(node): + remove_knobs = [ + "review", "publish", "render", "suspend_publish", "warn", "divd", + "OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority", + "deadlineChunkSize", "deadlineConcurrentTasks", "Deadline" + ] + print(node.name()) + + # remove all old knobs + for knob in node.allKnobs(): + try: + if knob.name() in remove_knobs: + node.removeKnob(knob) + elif "avalon" in knob.name(): + node.removeKnob(knob) + except ValueError: + pass + + +def exposed_write_knobs(settings, plugin_name, instance_node): + exposed_knobs = settings["nuke"]["create"][plugin_name]["exposed_knobs"] + if exposed_knobs: + instance_node.addKnob(nuke.Text_Knob('', 'Write Knobs')) + write_node = nuke.allNodes(group=instance_node, filter="Write")[0] + link_knobs(exposed_knobs, write_node, instance_node) diff --git a/client/ayon_core/hosts/nuke/api/utils.py b/client/ayon_core/hosts/nuke/api/utils.py new file mode 100644 index 0000000000..d738ba5464 --- /dev/null +++ b/client/ayon_core/hosts/nuke/api/utils.py @@ -0,0 +1,140 @@ +import os +import nuke + +from ayon_core import resources +from qtpy import QtWidgets + + +def set_context_favorites(favorites=None): + """ Adding favorite folders to nuke's browser + + Arguments: + favorites (dict): couples of {name:path} + """ + favorites = favorites or {} + icon_path = resources.get_resource("icons", "folder-favorite.png") + for name, path in favorites.items(): + nuke.addFavoriteDir( + name, + path, + nuke.IMAGE | nuke.SCRIPT | nuke.GEO, + icon=icon_path) + + +def get_node_outputs(node): + ''' + Return a dictionary of the nodes and pipes that are connected to node + ''' + dep_dict = {} + dependencies = node.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS) + for d in dependencies: + dep_dict[d] = [] + for i in range(d.inputs()): + if d.input(i) == node: + dep_dict[d].append(i) + return dep_dict + + +def is_node_gizmo(node): + ''' + return True if node is gizmo + ''' + return 'gizmo_file' in node.knobs() + + +def gizmo_is_nuke_default(gizmo): + '''Check if gizmo is in default install path''' + plug_dir = os.path.join(os.path.dirname( + nuke.env['ExecutablePath']), 'plugins') + return gizmo.filename().startswith(plug_dir) + + +def bake_gizmos_recursively(in_group=None): + """Converting a gizmo to group + + Arguments: + is_group (nuke.Node)[optonal]: group node or all nodes + """ + from .lib import maintained_selection + if in_group is None: + in_group = nuke.Root() + # preserve selection after all is done + with maintained_selection(): + # jump to the group + with in_group: + for node in nuke.allNodes(): + if is_node_gizmo(node) 
and not gizmo_is_nuke_default(node): + with node: + outputs = get_node_outputs(node) + group = node.makeGroup() + # Reconnect inputs and outputs if any + if outputs: + for n, pipes in outputs.items(): + for i in pipes: + n.setInput(i, group) + for i in range(node.inputs()): + group.setInput(i, node.input(i)) + # set node position and name + group.setXYpos(node.xpos(), node.ypos()) + name = node.name() + nuke.delete(node) + group.setName(name) + node = group + + if node.Class() == "Group": + bake_gizmos_recursively(node) + + +def colorspace_exists_on_node(node, colorspace_name): + """ Check if colorspace exists on node + + Look through all options in the colorspace knob, and see if we have an + exact match to one of the items. + + Args: + node (nuke.Node): nuke node object + colorspace_name (str): color profile name + + Returns: + bool: True if exists + """ + try: + colorspace_knob = node['colorspace'] + except ValueError: + # knob is not available on input node + return False + all_clrs = get_colorspace_list(colorspace_knob) + + return colorspace_name in all_clrs + + +def get_colorspace_list(colorspace_knob): + """Get available colorspace profile names + + Args: + colorspace_knob (nuke.Knob): nuke knob object + + Returns: + list: list of strings names of profiles + """ + + all_clrs = list(colorspace_knob.values()) + reduced_clrs = [] + + if not colorspace_knob.getFlag(nuke.STRIP_CASCADE_PREFIX): + return all_clrs + + # strip colorspace with nested path + for clrs in all_clrs: + clrs = clrs.split('/')[-1] + reduced_clrs.append(clrs) + + return reduced_clrs + + +def is_headless(): + """ + Returns: + bool: headless + """ + return QtWidgets.QApplication.instance() is None diff --git a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py new file mode 100644 index 0000000000..4c15da983e --- /dev/null +++ b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py @@ -0,0 +1,1005 @@ +import collections +import nuke +from ayon_core.pipeline import registered_host +from ayon_core.pipeline.workfile.workfile_template_builder import ( + AbstractTemplateBuilder, + PlaceholderPlugin, + LoadPlaceholderItem, + CreatePlaceholderItem, + PlaceholderLoadMixin, + PlaceholderCreateMixin +) +from ayon_core.tools.workfile_template_build import ( + WorkfileBuildPlaceholderDialog, +) +from .lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, + get_main_window, + WorkfileSettings, +) + +PLACEHOLDER_SET = "PLACEHOLDERS_SET" + + +class NukeTemplateBuilder(AbstractTemplateBuilder): + """Concrete implementation of AbstractTemplateBuilder for nuke""" + + def import_template(self, path): + """Import template into current scene. + Block if a template is already loaded. 
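+        (The "already loaded" check is still a TODO in the body below;
+        currently the file at `path` is pasted unconditionally.)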
+ + Args: + path (str): A path to current template (usually given by + get_template_preset implementation) + + Returns: + bool: Whether the template was successfully imported or not + """ + + # TODO check if the template is already imported + + nuke.nodePaste(path) + reset_selection() + + return True + +class NukePlaceholderPlugin(PlaceholderPlugin): + node_color = 4278190335 + + def _collect_scene_placeholders(self): + # Cache placeholder data to shared data + placeholder_nodes = self.builder.get_shared_populate_data( + "placeholder_nodes" + ) + if placeholder_nodes is None: + placeholder_nodes = {} + all_groups = collections.deque() + all_groups.append(nuke.thisGroup()) + while all_groups: + group = all_groups.popleft() + for node in group.nodes(): + if isinstance(node, nuke.Group): + all_groups.append(node) + + node_knobs = node.knobs() + if ( + "is_placeholder" not in node_knobs + or not node.knob("is_placeholder").value() + ): + continue + + if "empty" in node_knobs and node.knob("empty").value(): + continue + + placeholder_nodes[node.fullName()] = node + + self.builder.set_shared_populate_data( + "placeholder_nodes", placeholder_nodes + ) + return placeholder_nodes + + def create_placeholder(self, placeholder_data): + placeholder_data["plugin_identifier"] = self.identifier + + placeholder = nuke.nodes.NoOp() + placeholder.setName("PLACEHOLDER") + placeholder.knob("tile_color").setValue(self.node_color) + + imprint(placeholder, placeholder_data) + imprint(placeholder, {"is_placeholder": True}) + placeholder.knob("is_placeholder").setVisible(False) + + def update_placeholder(self, placeholder_item, placeholder_data): + node = nuke.toNode(placeholder_item.scene_identifier) + imprint(node, placeholder_data) + + def _parse_placeholder_node_data(self, node): + placeholder_data = {} + for key in self.get_placeholder_keys(): + knob = node.knob(key) + value = None + if knob is not None: + value = knob.getValue() + placeholder_data[key] = value + return placeholder_data + + def delete_placeholder(self, placeholder): + """Remove placeholder if building was successful""" + placeholder_node = nuke.toNode(placeholder.scene_identifier) + nuke.delete(placeholder_node) + + +class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): + identifier = "nuke.load" + label = "Nuke load" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderLoadPlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _get_loaded_repre_ids(self): + loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + loaded_representation_ids = set() + for node in nuke.allNodes(): + if "repre_id" in node.knobs(): + loaded_representation_ids.add( + node.knob("repre_id").getValue() + ) + + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def _before_placeholder_load(self, 
+            placeholder):
+        placeholder.data["nodes_init"] = nuke.allNodes()
+
+    def _before_repre_load(self, placeholder, representation):
+        placeholder.data["last_repre_id"] = str(representation["_id"])
+
+    def collect_placeholders(self):
+        output = []
+        scene_placeholders = self._collect_scene_placeholders()
+        for node_name, node in scene_placeholders.items():
+            plugin_identifier_knob = node.knob("plugin_identifier")
+            if (
+                plugin_identifier_knob is None
+                or plugin_identifier_knob.getValue() != self.identifier
+            ):
+                continue
+
+            placeholder_data = self._parse_placeholder_node_data(node)
+            # TODO do data validations and maybe upgrades if invalid
+            output.append(
+                LoadPlaceholderItem(node_name, placeholder_data, self)
+            )
+
+        return output
+
+    def populate_placeholder(self, placeholder):
+        self.populate_load_placeholder(placeholder)
+
+    def repopulate_placeholder(self, placeholder):
+        repre_ids = self._get_loaded_repre_ids()
+        self.populate_load_placeholder(placeholder, repre_ids)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after load of its corresponding representations.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+        """
+        # deselect all selected nodes
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # getting the latest nodes added
+        # TODO get from shared populate data!
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Loaded nodes: {}".format(nodes_loaded))
+        if not nodes_loaded:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_loaded = self._move_to_placeholder_group(
+            placeholder, nodes_loaded
+        )
+        placeholder.data["last_loaded"] = nodes_loaded
+        refresh_nodes(nodes_loaded)
+
+        # positioning of the loaded nodes
+        min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
+        for node in nodes_loaded:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_loaded)
+
+        # fix the problem of z_order for backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
+
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of loaded nodes
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+                self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
+
+            self._set_loaded_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the new loaded nodes,
+            # set their inputs and outputs and update all nodes positions and
+            # dimensions and siblings names
+
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_loaded)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_loaded,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
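+            # keep the fresh copies registered as siblings for the next pass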
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # if the placeholder doesn't have siblings, the loaded
+            # nodes will be placed in a free space
+
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_loaded, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in nodes_loaded:
+                xpos = (node.xpos() - min_x) + xpointer
+                ypos = (node.ypos() - min_y) + ypointer
+                node.setXYpos(xpos, ypos)
+
+        placeholder.data["nb_children"] += 1
+        reset_selection()
+
+        # go back to root group
+        nuke.root().begin()
+
+    def _move_to_placeholder_group(self, placeholder, nodes_loaded):
+        """Open the placeholder's group and copy the loaded nodes into it.
+
+        Returns:
+            nodes_loaded (list): the new list of pasted nodes
+        """
+
+        groups_name = placeholder.data["group_name"]
+        reset_selection()
+        select_nodes(nodes_loaded)
+        if groups_name:
+            with node_tempfile() as filepath:
+                nuke.nodeCopy(filepath)
+                for node in nuke.selectedNodes():
+                    nuke.delete(node)
+                group = nuke.toNode(groups_name)
+                group.begin()
+                nuke.nodePaste(filepath)
+                nodes_loaded = nuke.selectedNodes()
+        return nodes_loaded
+
+    def _fix_z_order(self, placeholder):
+        """Fix the problem of z_order when a backdrop is loaded."""
+
+        nodes_loaded = placeholder.data["last_loaded"]
+        loaded_backdrops = []
+        bd_orders = set()
+        for node in nodes_loaded:
+            if isinstance(node, nuke.BackdropNode):
+                loaded_backdrops.append(node)
+                bd_orders.add(node.knob("z_order").getValue())
+
+        if not bd_orders:
+            return
+
+        sib_orders = set()
+        for node_name in placeholder.data["siblings"]:
+            node = nuke.toNode(node_name)
+            if isinstance(node, nuke.BackdropNode):
+                sib_orders.add(node.knob("z_order").getValue())
+
+        if not sib_orders:
+            return
+
+        min_order = min(bd_orders)
+        max_order = max(sib_orders)
+        for backdrop_node in loaded_backdrops:
+            z_order = backdrop_node.knob("z_order").getValue()
+            backdrop_node.knob("z_order").setValue(
+                z_order + max_order - min_order + 1)
+
+    def _imprint_siblings(self, placeholder):
+        """
+        - add siblings names to placeholder attributes (nodes loaded with it)
+        - add Id to the attributes of all the other nodes
+        """
+
+        loaded_nodes = placeholder.data["last_loaded"]
+        loaded_nodes_set = set(loaded_nodes)
+        data = {"repre_id": str(placeholder.data["last_repre_id"])}
+
+        for node in loaded_nodes:
+            node_knobs = node.knobs()
+            if "builder_type" not in node_knobs:
+                # save the id of representation for all imported nodes
+                imprint(node, data)
+                node.knob("repre_id").setVisible(False)
+                refresh_node(node)
+                continue
+
+            if (
+                "is_placeholder" not in node_knobs
+                or (
+                    "is_placeholder" in node_knobs
+                    and node.knob("is_placeholder").value()
+                )
+            ):
+                siblings = list(loaded_nodes_set - {node})
+                siblings_name = get_names_from_nodes(siblings)
+                siblings = {"siblings": siblings_name}
+                imprint(node, siblings)
+
+    def _imprint_inits(self):
+        """Add initial positions and dimensions to the attributes"""
+
+        for node in nuke.allNodes():
+            refresh_node(node)
+            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+            node.knob("x_init").setVisible(False)
+            node.knob("y_init").setVisible(False)
+            width = node.screenWidth()
+            height = node.screenHeight()
+            if "bdwidth" in node.knobs():
+                imprint(node, {"w_init": width, "h_init": height})
+                node.knob("w_init").setVisible(False)
+                node.knob("h_init").setVisible(False)
+            refresh_node(node)
+
+    def _update_nodes(
+        self, placeholder, nodes, considered_nodes, offset_y=None
+    ):
+        """Adjust backdrop nodes dimensions and positions.
+
+        Considering some nodes sizes.
+
+        Args:
+            nodes (list): list of nodes to update
+            considered_nodes (list): list of nodes to consider while updating
+                positions and dimensions
+            offset_y (int): distance between copies
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            minX, _, maxX, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - maxX + minX)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or (
+                    isinstance(node, nuke.BackdropNode)
+                    and not set(contained_nodes) <= set(node.getNodes())
+                )
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_loaded_connections(self, placeholder):
+        """Set inputs and outputs of loaded nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_loaded"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
+
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings (the nodes that were
+        loaded with it) for the new nodes added.
+
+        Returns:
+            copies (dict): with copied nodes names and their copies
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+                refresh_node(node)
+
+            if "repre_id" in node.knobs().keys():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
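+                The copies are rewired to mirror the original
+                siblings' input and output connections.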
+        """
+
+        last_input, last_output = get_group_io_nodes(
+            placeholder.data["last_loaded"]
+        )
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        siblings_input, siblings_output = get_group_io_nodes(siblings)
+        copy_input = copies[siblings_input.name()]
+        copy_output = copies[siblings_output.name()]
+
+        for node_init in siblings:
+            if node_init == siblings_output:
+                continue
+
+            node_copy = copies[node_init.name()]
+            for node in node_init.dependent():
+                for idx in range(node.inputs()):
+                    if node.input(idx) != node_init:
+                        continue
+
+                    if node in siblings:
+                        copies[node.name()].setInput(idx, node_copy)
+                    else:
+                        last_input.setInput(0, node_copy)
+
+            for node in node_init.dependencies():
+                for idx in range(node_init.inputs()):
+                    if node_init.input(idx) != node:
+                        continue
+
+                    if node_init == siblings_input:
+                        copy_input.setInput(idx, node)
+                    elif node in siblings:
+                        node_copy.setInput(idx, copies[node.name()])
+                    else:
+                        node_copy.setInput(idx, last_output)
+
+        siblings_input.setInput(0, copy_output)
+
+
+class NukePlaceholderCreatePlugin(
+    NukePlaceholderPlugin, PlaceholderCreateMixin
+):
+    identifier = "nuke.create"
+    label = "Nuke create"
+
+    def _parse_placeholder_node_data(self, node):
+        placeholder_data = super(
+            NukePlaceholderCreatePlugin, self
+        )._parse_placeholder_node_data(node)
+
+        node_knobs = node.knobs()
+        nb_children = 0
+        if "nb_children" in node_knobs:
+            nb_children = int(node_knobs["nb_children"].getValue())
+        placeholder_data["nb_children"] = nb_children
+
+        siblings = []
+        if "siblings" in node_knobs:
+            siblings = node_knobs["siblings"].values()
+        placeholder_data["siblings"] = siblings
+
+        node_full_name = node.fullName()
+        placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
+        placeholder_data["last_loaded"] = []
+        placeholder_data["delete"] = False
+        return placeholder_data
+
+    def _before_instance_create(self, placeholder):
+        placeholder.data["nodes_init"] = nuke.allNodes()
+
+    def collect_placeholders(self):
+        output = []
+        scene_placeholders = self._collect_scene_placeholders()
+        for node_name, node in scene_placeholders.items():
+            plugin_identifier_knob = node.knob("plugin_identifier")
+            if (
+                plugin_identifier_knob is None
+                or plugin_identifier_knob.getValue() != self.identifier
+            ):
+                continue
+
+            placeholder_data = self._parse_placeholder_node_data(node)
+
+            output.append(
+                CreatePlaceholderItem(node_name, placeholder_data, self)
+            )
+
+        return output
+
+    def populate_placeholder(self, placeholder):
+        self.populate_create_placeholder(placeholder)
+
+    def repopulate_placeholder(self, placeholder):
+        self.populate_create_placeholder(placeholder)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_create_plugin_options(options)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after its corresponding instances were created.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to create
+                instances.
+            failed (bool): Creation of instances failed.
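+
+        Note:
+            Created nodes are repositioned next to the placeholder and
+            connected into the surrounding node graph.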
+        """
+        # deselect all selected nodes
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # getting the latest nodes added
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Created nodes: {}".format(nodes_created))
+        if not nodes_created:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_created = self._move_to_placeholder_group(
+            placeholder, nodes_created
+        )
+        placeholder.data["last_created"] = nodes_created
+        refresh_nodes(nodes_created)
+
+        # positioning of the created nodes
+        min_x, min_y, _, _ = get_extreme_positions(nodes_created)
+        for node in nodes_created:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_created)
+
+        # fix the problem of z_order for backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
+
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of created nodes
+
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+                self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
+
+            self._set_created_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the new created nodes,
+            # set their inputs and outputs and update all nodes positions and
+            # dimensions and siblings names
+
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_created)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_created,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # if the placeholder doesn't have siblings, the created
+            # nodes will be placed in a free space
+
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_created, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in nodes_created:
+                xpos = (node.xpos() - min_x) + xpointer
+                ypos = (node.ypos() - min_y) + ypointer
+                node.setXYpos(xpos, ypos)
+
+        placeholder.data["nb_children"] += 1
+        reset_selection()
+
+        # go back to root group
+        nuke.root().begin()
+
+    def _move_to_placeholder_group(self, placeholder, nodes_created):
+        """Open the placeholder's group and copy the created nodes into it.
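+
+        Nodes are serialized to a temp file, removed from the root level
+        and pasted back inside the placeholder's group.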
+
+        Returns:
+            nodes_created (list): the new list of pasted nodes
+        """
+        groups_name = placeholder.data["group_name"]
+        reset_selection()
+        select_nodes(nodes_created)
+        if groups_name:
+            with node_tempfile() as filepath:
+                nuke.nodeCopy(filepath)
+                for node in nuke.selectedNodes():
+                    nuke.delete(node)
+                group = nuke.toNode(groups_name)
+                group.begin()
+                nuke.nodePaste(filepath)
+                nodes_created = nuke.selectedNodes()
+        return nodes_created
+
+    def _fix_z_order(self, placeholder):
+        """Fix the problem of z_order when a backdrop is created."""
+
+        nodes_created = placeholder.data["last_created"]
+        created_backdrops = []
+        bd_orders = set()
+        for node in nodes_created:
+            if isinstance(node, nuke.BackdropNode):
+                created_backdrops.append(node)
+                bd_orders.add(node.knob("z_order").getValue())
+
+        if not bd_orders:
+            return
+
+        sib_orders = set()
+        for node_name in placeholder.data["siblings"]:
+            node = nuke.toNode(node_name)
+            if isinstance(node, nuke.BackdropNode):
+                sib_orders.add(node.knob("z_order").getValue())
+
+        if not sib_orders:
+            return
+
+        min_order = min(bd_orders)
+        max_order = max(sib_orders)
+        for backdrop_node in created_backdrops:
+            z_order = backdrop_node.knob("z_order").getValue()
+            backdrop_node.knob("z_order").setValue(
+                z_order + max_order - min_order + 1)
+
+    def _imprint_siblings(self, placeholder):
+        """
+        - add siblings names to placeholder attributes (nodes created with it)
+        - add Id to the attributes of all the other nodes
+        """
+
+        created_nodes = placeholder.data["last_created"]
+        created_nodes_set = set(created_nodes)
+
+        for node in created_nodes:
+            node_knobs = node.knobs()
+
+            if (
+                "is_placeholder" not in node_knobs
+                or (
+                    "is_placeholder" in node_knobs
+                    and node.knob("is_placeholder").value()
+                )
+            ):
+                siblings = list(created_nodes_set - {node})
+                siblings_name = get_names_from_nodes(siblings)
+                siblings = {"siblings": siblings_name}
+                imprint(node, siblings)
+
+    def _imprint_inits(self):
+        """Add initial positions and dimensions to the attributes"""
+
+        for node in nuke.allNodes():
+            refresh_node(node)
+            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+            node.knob("x_init").setVisible(False)
+            node.knob("y_init").setVisible(False)
+            width = node.screenWidth()
+            height = node.screenHeight()
+            if "bdwidth" in node.knobs():
+                imprint(node, {"w_init": width, "h_init": height})
+                node.knob("w_init").setVisible(False)
+                node.knob("h_init").setVisible(False)
+            refresh_node(node)
+
+    def _update_nodes(
+        self, placeholder, nodes, considered_nodes, offset_y=None
+    ):
+        """Adjust backdrop nodes dimensions and positions.
+
+        Considering some nodes sizes.
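+
+        Nodes positioned after the considered area are shifted by the
+        computed offsets, while backdrops enclosing it are resized instead.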
+
+        Args:
+            nodes (list): list of nodes to update
+            considered_nodes (list): list of nodes to consider while updating
+                positions and dimensions
+            offset_y (int): distance between copies
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            minX, _, maxX, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - maxX + minX)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or (
+                    isinstance(node, nuke.BackdropNode)
+                    and not set(contained_nodes) <= set(node.getNodes())
+                )
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_created_connections(self, placeholder):
+        """Set inputs and outputs of created nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_created"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
+
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings (the nodes that were
+        created with it) for the new nodes added.
+
+        Returns:
+            copies (dict): with copied nodes names and their copies
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+                refresh_node(node)
+
+            if "repre_id" in node.knobs().keys():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
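+                The copies are connected so they mirror how the
+                original siblings were wired.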
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_created"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) + + +def build_workfile_template(*args, **kwargs): + builder = NukeTemplateBuilder(registered_host()) + builder.build_template(*args, **kwargs) + + # set all settings to shot context default + WorkfileSettings().set_context_settings() + + +def update_workfile_template(*args): + builder = NukeTemplateBuilder(registered_host()) + builder.rebuild_template() + + +def create_placeholder(*args): + host = registered_host() + builder = NukeTemplateBuilder(host) + window = WorkfileBuildPlaceholderDialog(host, builder, + parent=get_main_window()) + window.show() + + +def update_placeholder(*args): + host = registered_host() + builder = NukeTemplateBuilder(host) + placeholder_items_by_id = { + placeholder_item.scene_identifier: placeholder_item + for placeholder_item in builder.get_placeholders() + } + placeholder_items = [] + for node in nuke.selectedNodes(): + node_name = node.fullName() + if node_name in placeholder_items_by_id: + placeholder_items.append(placeholder_items_by_id[node_name]) + + # TODO show UI at least + if len(placeholder_items) == 0: + raise ValueError("No node selected") + + if len(placeholder_items) > 1: + raise ValueError("Too many selected nodes") + + placeholder_item = placeholder_items[0] + window = WorkfileBuildPlaceholderDialog(host, builder, + parent=get_main_window()) + window.set_update_mode(placeholder_item) + window.exec_() diff --git a/openpype/hosts/nuke/api/workio.py b/client/ayon_core/hosts/nuke/api/workio.py similarity index 100% rename from openpype/hosts/nuke/api/workio.py rename to client/ayon_core/hosts/nuke/api/workio.py diff --git a/openpype/hosts/nuke/hooks/pre_nukeassist_setup.py b/client/ayon_core/hosts/nuke/hooks/pre_nukeassist_setup.py similarity index 81% rename from openpype/hosts/nuke/hooks/pre_nukeassist_setup.py rename to client/ayon_core/hosts/nuke/hooks/pre_nukeassist_setup.py index 657291ec51..2f6d121af5 100644 --- a/openpype/hosts/nuke/hooks/pre_nukeassist_setup.py +++ b/client/ayon_core/hosts/nuke/hooks/pre_nukeassist_setup.py @@ -1,4 +1,4 @@ -from openpype.lib.applications import PreLaunchHook +from ayon_core.lib.applications import PreLaunchHook class PrelaunchNukeAssistHook(PreLaunchHook): diff --git a/openpype/hosts/maya/plugins/publish/__init__.py b/client/ayon_core/hosts/nuke/plugins/__init__.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/__init__.py rename to client/ayon_core/hosts/nuke/plugins/__init__.py diff --git a/openpype/hosts/nuke/plugins/__init__.py 
b/client/ayon_core/hosts/nuke/plugins/create/__init__.py
similarity index 100%
rename from openpype/hosts/nuke/plugins/__init__.py
rename to client/ayon_core/hosts/nuke/plugins/create/__init__.py
diff --git a/client/ayon_core/hosts/nuke/plugins/create/convert_legacy.py b/client/ayon_core/hosts/nuke/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..815170ac8b
--- /dev/null
+++ b/client/ayon_core/hosts/nuke/plugins/create/convert_legacy.py
@@ -0,0 +1,52 @@
+from ayon_core.pipeline.create.creator_plugins import SubsetConvertorPlugin
+from ayon_core.hosts.nuke.api.lib import (
+    INSTANCE_DATA_KNOB,
+    get_node_data,
+    get_avalon_knob_data,
+    NODE_TAB_NAME,
+)
+from ayon_core.hosts.nuke.api.plugin import convert_to_valid_instaces
+
+import nuke
+
+
+class LegacyConverted(SubsetConvertorPlugin):
+    identifier = "legacy.converter"
+
+    def find_instances(self):
+
+        legacy_found = False
+        # search for first available legacy item
+        for node in nuke.allNodes(recurseGroups=True):
+            if node.Class() in ["Viewer", "Dot"]:
+                continue
+
+            if get_node_data(node, INSTANCE_DATA_KNOB):
+                continue
+
+            if NODE_TAB_NAME not in node.knobs():
+                continue
+
+            # get data from avalon knob
+            avalon_knob_data = get_avalon_knob_data(
+                node, ["avalon:", "ak:"], create=False)
+
+            if not avalon_knob_data:
+                continue
+
+            if avalon_knob_data["id"] != "pyblish.avalon.instance":
+                continue
+
+            # catch and break
+            legacy_found = True
+            break
+
+        if legacy_found:
+            # add the convertor item only if a legacy instance was found
+            self.add_convertor_item("Convert legacy instances")
+
+    def convert(self):
+        # loop all instances and convert them
+        convert_to_valid_instaces()
+        # remove legacy item if all is fine
+        self.remove_convertor_item()
diff --git a/openpype/hosts/nuke/plugins/create/create_backdrop.py b/client/ayon_core/hosts/nuke/plugins/create/create_backdrop.py
similarity index 97%
rename from openpype/hosts/nuke/plugins/create/create_backdrop.py
rename to client/ayon_core/hosts/nuke/plugins/create/create_backdrop.py
index 52959bbef2..530392c635 100644
--- a/openpype/hosts/nuke/plugins/create/create_backdrop.py
+++ b/client/ayon_core/hosts/nuke/plugins/create/create_backdrop.py
@@ -1,6 +1,6 @@
 from nukescripts import autoBackdrop
 
-from openpype.hosts.nuke.api import (
+from ayon_core.hosts.nuke.api import (
     NukeCreator,
     maintained_selection,
     select_nodes
diff --git a/client/ayon_core/hosts/nuke/plugins/create/create_camera.py b/client/ayon_core/hosts/nuke/plugins/create/create_camera.py
new file mode 100644
index 0000000000..7ade19d846
--- /dev/null
+++ b/client/ayon_core/hosts/nuke/plugins/create/create_camera.py
@@ -0,0 +1,69 @@
+import nuke
+from ayon_core.hosts.nuke.api import (
+    NukeCreator,
+    NukeCreatorError,
+    maintained_selection
+)
+from ayon_core.hosts.nuke.api.lib import (
+    create_camera_node_by_version
+)
+
+
+class CreateCamera(NukeCreator):
+    """Add Publishable Camera"""
+
+    identifier = "create_camera"
+    label = "Camera (3d)"
+    family = "camera"
+    icon = "camera"
+
+    # plugin attributes
+    node_color = "0xff9100ff"
+
+    def create_instance_node(
+        self,
+        node_name,
+        knobs=None,
+        parent=None,
+        node_type=None
+    ):
+        with maintained_selection():
+            if self.selected_nodes:
+                node = self.selected_nodes[0]
+                if node.Class() != "Camera3":
+                    raise NukeCreatorError(
+                        "Creator error: Select only camera node type")
+                created_node = self.selected_nodes[0]
+            else:
+                created_node = create_camera_node_by_version()
+
+            created_node["tile_color"].setValue(
+                int(self.node_color, 16))
+
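+            # rename the node so it carries the instance name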
created_node["name"].setValue(node_name) + + return created_node + + def create(self, subset_name, instance_data, pre_create_data): + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance = super(CreateCamera, self).create( + subset_name, + instance_data, + pre_create_data + ) + + return instance + + def set_selected_nodes(self, pre_create_data): + if pre_create_data.get("use_selection"): + self.selected_nodes = nuke.selectedNodes() + if self.selected_nodes == []: + raise NukeCreatorError( + "Creator error: No active selection") + elif len(self.selected_nodes) > 1: + raise NukeCreatorError( + "Creator error: Select only one camera node") + else: + self.selected_nodes = [] diff --git a/openpype/hosts/nuke/plugins/create/create_gizmo.py b/client/ayon_core/hosts/nuke/plugins/create/create_gizmo.py similarity index 97% rename from openpype/hosts/nuke/plugins/create/create_gizmo.py rename to client/ayon_core/hosts/nuke/plugins/create/create_gizmo.py index cbe2f635c9..51c5b1931b 100644 --- a/openpype/hosts/nuke/plugins/create/create_gizmo.py +++ b/client/ayon_core/hosts/nuke/plugins/create/create_gizmo.py @@ -1,5 +1,5 @@ import nuke -from openpype.hosts.nuke.api import ( +from ayon_core.hosts.nuke.api import ( NukeCreator, NukeCreatorError, maintained_selection diff --git a/client/ayon_core/hosts/nuke/plugins/create/create_model.py b/client/ayon_core/hosts/nuke/plugins/create/create_model.py new file mode 100644 index 0000000000..db927171cd --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/create/create_model.py @@ -0,0 +1,65 @@ +import nuke +from ayon_core.hosts.nuke.api import ( + NukeCreator, + NukeCreatorError, + maintained_selection +) + + +class CreateModel(NukeCreator): + """Add Publishable Camera""" + + identifier = "create_model" + label = "Model (3d)" + family = "model" + icon = "cube" + default_variants = ["Main"] + + # plugin attributes + node_color = "0xff3200ff" + + def create_instance_node( + self, + node_name, + knobs=None, + parent=None, + node_type=None + ): + with maintained_selection(): + if self.selected_nodes: + node = self.selected_nodes[0] + if node.Class() != "Scene": + raise NukeCreatorError( + "Creator error: Select only 'Scene' node type") + created_node = node + else: + created_node = nuke.createNode("Scene") + + created_node["tile_color"].setValue( + int(self.node_color, 16)) + + created_node["name"].setValue(node_name) + + return created_node + + def create(self, subset_name, instance_data, pre_create_data): + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance = super(CreateModel, self).create( + subset_name, + instance_data, + pre_create_data + ) + + return instance + + def set_selected_nodes(self, pre_create_data): + if pre_create_data.get("use_selection"): + self.selected_nodes = nuke.selectedNodes() + if self.selected_nodes == []: + raise NukeCreatorError("Creator error: No active selection") + elif len(self.selected_nodes) > 1: + NukeCreatorError("Creator error: Select only one 'Scene' node") + else: + self.selected_nodes = [] diff --git a/openpype/hosts/nuke/plugins/create/create_source.py b/client/ayon_core/hosts/nuke/plugins/create/create_source.py similarity index 97% rename from openpype/hosts/nuke/plugins/create/create_source.py rename to client/ayon_core/hosts/nuke/plugins/create/create_source.py index 8419c3ef33..be9fa44929 100644 --- a/openpype/hosts/nuke/plugins/create/create_source.py +++ b/client/ayon_core/hosts/nuke/plugins/create/create_source.py @@ -1,13 +1,13 @@ 
import nuke import six import sys -from openpype.hosts.nuke.api import ( +from ayon_core.hosts.nuke.api import ( INSTANCE_DATA_KNOB, NukeCreator, NukeCreatorError, set_node_data ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( CreatedInstance ) diff --git a/openpype/hosts/nuke/plugins/create/create_write_image.py b/client/ayon_core/hosts/nuke/plugins/create/create_write_image.py similarity index 96% rename from openpype/hosts/nuke/plugins/create/create_write_image.py rename to client/ayon_core/hosts/nuke/plugins/create/create_write_image.py index f21d871c9f..125cf057f8 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_image.py +++ b/client/ayon_core/hosts/nuke/plugins/create/create_write_image.py @@ -2,17 +2,17 @@ import sys import six -from openpype.pipeline import ( +from ayon_core.pipeline import ( CreatedInstance ) -from openpype.lib import ( +from ayon_core.lib import ( BoolDef, NumberDef, UISeparatorDef, EnumDef ) -from openpype.hosts.nuke import api as napi -from openpype.hosts.nuke.api.plugin import exposed_write_knobs +from ayon_core.hosts.nuke import api as napi +from ayon_core.hosts.nuke.api.plugin import exposed_write_knobs class CreateWriteImage(napi.NukeWriteCreator): diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/client/ayon_core/hosts/nuke/plugins/create/create_write_prerender.py similarity index 95% rename from openpype/hosts/nuke/plugins/create/create_write_prerender.py rename to client/ayon_core/hosts/nuke/plugins/create/create_write_prerender.py index 742bfb20ad..371ef85a15 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/client/ayon_core/hosts/nuke/plugins/create/create_write_prerender.py @@ -2,14 +2,14 @@ import sys import six -from openpype.pipeline import ( +from ayon_core.pipeline import ( CreatedInstance ) -from openpype.lib import ( +from ayon_core.lib import ( BoolDef ) -from openpype.hosts.nuke import api as napi -from openpype.hosts.nuke.api.plugin import exposed_write_knobs +from ayon_core.hosts.nuke import api as napi +from ayon_core.hosts.nuke.api.plugin import exposed_write_knobs class CreateWritePrerender(napi.NukeWriteCreator): diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/client/ayon_core/hosts/nuke/plugins/create/create_write_render.py similarity index 95% rename from openpype/hosts/nuke/plugins/create/create_write_render.py rename to client/ayon_core/hosts/nuke/plugins/create/create_write_render.py index fc16876f75..c5f4d5003a 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/client/ayon_core/hosts/nuke/plugins/create/create_write_render.py @@ -2,14 +2,14 @@ import sys import six -from openpype.pipeline import ( +from ayon_core.pipeline import ( CreatedInstance ) -from openpype.lib import ( +from ayon_core.lib import ( BoolDef ) -from openpype.hosts.nuke import api as napi -from openpype.hosts.nuke.api.plugin import exposed_write_knobs +from ayon_core.hosts.nuke import api as napi +from ayon_core.hosts.nuke.api.plugin import exposed_write_knobs class CreateWriteRender(napi.NukeWriteCreator): diff --git a/client/ayon_core/hosts/nuke/plugins/create/workfile_creator.py b/client/ayon_core/hosts/nuke/plugins/create/workfile_creator.py new file mode 100644 index 0000000000..87f62b011e --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/create/workfile_creator.py @@ -0,0 +1,68 @@ +import ayon_core.hosts.nuke.api as api +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import ( + 
AutoCreator, + CreatedInstance, +) +from ayon_core.hosts.nuke.api import ( + INSTANCE_DATA_KNOB, + set_node_data +) +import nuke + + +class WorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + + default_variant = "Main" + + def get_instance_attr_defs(self): + return [] + + def collect_instances(self): + root_node = nuke.root() + instance_data = api.get_node_data( + root_node, api.INSTANCE_DATA_KNOB + ) + + project_name = self.create_context.get_current_project_name() + asset_name = self.create_context.get_current_asset_name() + task_name = self.create_context.get_current_task_name() + host_name = self.create_context.host_name + + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, task_name, asset_doc, + project_name, host_name + ) + instance_data.update({ + "asset": asset_name, + "task": task_name, + "variant": self.default_variant + }) + instance_data.update(self.get_dynamic_data( + self.default_variant, task_name, asset_doc, + project_name, host_name, instance_data + )) + + instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + instance.transient_data["node"] = root_node + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + instance_node = created_inst.transient_data["node"] + + set_node_data( + instance_node, + INSTANCE_DATA_KNOB, + created_inst.data_to_store() + ) + + def create(self, options=None): + # no need to create if it is created + # in `collect_instances` + pass diff --git a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py b/client/ayon_core/hosts/nuke/plugins/inventory/repair_old_loaders.py similarity index 87% rename from openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py rename to client/ayon_core/hosts/nuke/plugins/inventory/repair_old_loaders.py index 764499ff0c..7bb5c8ef20 100644 --- a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py +++ b/client/ayon_core/hosts/nuke/plugins/inventory/repair_old_loaders.py @@ -1,6 +1,6 @@ -from openpype.lib import Logger -from openpype.pipeline import InventoryAction -from openpype.hosts.nuke.api.lib import set_avalon_knob_data +from ayon_core.lib import Logger +from ayon_core.pipeline import InventoryAction +from ayon_core.hosts.nuke.api.lib import set_avalon_knob_data class RepairOldLoaders(InventoryAction): diff --git a/client/ayon_core/hosts/nuke/plugins/inventory/select_containers.py b/client/ayon_core/hosts/nuke/plugins/inventory/select_containers.py new file mode 100644 index 0000000000..2fa9c06984 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/inventory/select_containers.py @@ -0,0 +1,21 @@ +from ayon_core.pipeline import InventoryAction +from ayon_core.hosts.nuke.api.command import viewer_update_and_undo_stop + + +class SelectContainers(InventoryAction): + + label = "Select Containers" + icon = "mouse-pointer" + color = "#d8d8d8" + + def process(self, containers): + import nuke + + nodes = [nuke.toNode(i["objectName"]) for i in containers] + + with viewer_update_and_undo_stop(): + # clear previous_selection + [n['selected'].setValue(False) for n in nodes] + # Select tool + for node in nodes: + node["selected"].setValue(True) diff --git a/client/ayon_core/hosts/nuke/plugins/load/actions.py b/client/ayon_core/hosts/nuke/plugins/load/actions.py new file mode 100644 index 0000000000..de51321924 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/load/actions.py @@ -0,0 +1,80 @@ +"""A module 
containing generic loader actions that will display in the Loader. + +""" + +from ayon_core.lib import Logger +from ayon_core.pipeline import load + +log = Logger.get_logger(__name__) + + +class SetFrameRangeLoader(load.LoaderPlugin): + """Set frame range excluding pre- and post-handles""" + + families = ["animation", + "camera", + "write", + "yeticache", + "pointcache"] + representations = ["*"] + extensions = {"*"} + + label = "Set frame range" + order = 11 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + from ayon_core.hosts.nuke.api import lib + + version = context['version'] + version_data = version.get("data", {}) + + start = version_data.get("frameStart", None) + end = version_data.get("frameEnd", None) + + log.info("start: {}, end: {}".format(start, end)) + if start is None or end is None: + log.info("Skipping setting frame range because start or " + "end frame data is missing..") + return + + lib.update_frame_range(start, end) + + +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): + """Set frame range including pre- and post-handles""" + + families = ["animation", + "camera", + "write", + "yeticache", + "pointcache"] + representations = ["*"] + + label = "Set frame range (with handles)" + order = 12 + icon = "clock-o" + color = "white" + + def load(self, context, name, namespace, data): + + from ayon_core.hosts.nuke.api import lib + + version = context['version'] + version_data = version.get("data", {}) + + start = version_data.get("frameStart", None) + end = version_data.get("frameEnd", None) + + if start is None or end is None: + print("Skipping setting frame range because start or " + "end frame data is missing..") + return + + # Include handles + start -= version_data.get("handleStart", 0) + end += version_data.get("handleEnd", 0) + + lib.update_frame_range(start, end) diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py similarity index 97% rename from openpype/hosts/nuke/plugins/load/load_backdrop.py rename to client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py index 54d37da203..ed512c86ab 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py @@ -1,16 +1,16 @@ import nuke import nukescripts -from openpype.client import ( +from ayon_core.client import ( get_version_by_id, get_last_version_by_subset_id, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_current_project_name, get_representation_path, ) -from openpype.hosts.nuke.api.lib import ( +from ayon_core.hosts.nuke.api.lib import ( find_free_space_to_paste_nodes, maintained_selection, reset_selection, @@ -18,8 +18,8 @@ get_avalon_knob_data, set_avalon_knob_data ) -from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop -from openpype.hosts.nuke.api import containerise, update_container +from ayon_core.hosts.nuke.api.command import viewer_update_and_undo_stop +from ayon_core.hosts.nuke.api import containerise, update_container class LoadBackdropNodes(load.LoaderPlugin): diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py new file mode 100644 index 0000000000..2b2fb6f938 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py @@ -0,0 +1,201 @@ +import nuke + +from ayon_core.client import ( + get_version_by_id, + get_last_version_by_subset_id +) +from ayon_core.pipeline import ( + 
    load,
+    get_current_project_name,
+    get_representation_path,
+)
+from ayon_core.hosts.nuke.api import (
+    containerise,
+    update_container,
+    viewer_update_and_undo_stop
+)
+from ayon_core.hosts.nuke.api.lib import (
+    maintained_selection
+)
+
+
+class AlembicCameraLoader(load.LoaderPlugin):
+    """
+    This will load an Alembic camera into the script.
+    """
+
+    families = ["camera"]
+    representations = ["*"]
+    extensions = {"abc"}
+
+    label = "Load Alembic Camera"
+    icon = "camera"
+    color = "orange"
+    node_color = "0x3469ffff"
+
+    def load(self, context, name, namespace, data):
+        # get main variables
+        version = context['version']
+        version_data = version.get("data", {})
+        vname = version.get("name", None)
+        first = version_data.get("frameStart", None)
+        last = version_data.get("frameEnd", None)
+        fps = version_data.get("fps") or nuke.root()["fps"].getValue()
+        namespace = namespace or context['asset']['name']
+        object_name = "{}_{}".format(name, namespace)
+
+        # prepare data for imprinting
+        # add additional metadata from the version to imprint to Avalon knob
+        add_keys = ["source", "author", "fps"]
+
+        data_imprint = {
+            "frameStart": first,
+            "frameEnd": last,
+            "version": vname,
+        }
+
+        for k in add_keys:
+            data_imprint.update({k: version_data[k]})
+
+        # getting file path
+        file = self.filepath_from_context(context).replace("\\", "/")
+
+        with maintained_selection():
+            camera_node = nuke.createNode(
+                "Camera2",
+                "name {} file {} read_from_file True".format(
+                    object_name, file),
+                inpanel=False
+            )
+
+            camera_node.forceValidate()
+            camera_node["frame_rate"].setValue(float(fps))
+
+            # workaround because nuke's bug is not adding
+            # animation keys properly
+            xpos = camera_node.xpos()
+            ypos = camera_node.ypos()
+            nuke.nodeCopy("%clipboard%")
+            nuke.delete(camera_node)
+            nuke.nodePaste("%clipboard%")
+            camera_node = nuke.toNode(object_name)
+            camera_node.setXYpos(xpos, ypos)
+
+        # color node according to the loaded version
+        self.node_version_color(version, camera_node)
+
+        return containerise(
+            node=camera_node,
+            name=name,
+            namespace=namespace,
+            context=context,
+            loader=self.__class__.__name__,
+            data=data_imprint)
+
+    def update(self, container, representation):
+        """
+        Called by Scene Inventory when the camera should be updated to the
+        current version.
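+        The Camera2 node is recreated via a clipboard copy/paste (working
+        around a Nuke issue with animation keys) and its original
+        connections are restored.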
+
+        Args:
+            container: object that has the camera to be updated
+            representation: (dict): relationship data to get proper
+                representation from DB and persisted
+                data in .json
+        Returns:
+            None
+        """
+        # Get version from io
+        project_name = get_current_project_name()
+        version_doc = get_version_by_id(project_name, representation["parent"])
+
+        object_name = container["node"]
+
+        # get main variables
+        version_data = version_doc.get("data", {})
+        vname = version_doc.get("name", None)
+        first = version_data.get("frameStart", None)
+        last = version_data.get("frameEnd", None)
+        fps = version_data.get("fps") or nuke.root()["fps"].getValue()
+
+        # prepare data for imprinting
+        # add additional metadata from the version to imprint to Avalon knob
+        add_keys = ["source", "author", "fps"]
+
+        data_imprint = {
+            "representation": str(representation["_id"]),
+            "frameStart": first,
+            "frameEnd": last,
+            "version": vname
+        }
+
+        for k in add_keys:
+            data_imprint.update({k: version_data[k]})
+
+        # getting file path
+        file = get_representation_path(representation).replace("\\", "/")
+
+        with maintained_selection():
+            camera_node = nuke.toNode(object_name)
+            camera_node['selected'].setValue(True)
+
+            # collect input output dependencies
+            dependencies = camera_node.dependencies()
+            dependent = camera_node.dependent()
+
+            camera_node["frame_rate"].setValue(float(fps))
+            camera_node["file"].setValue(file)
+
+            # workaround because nuke's bug is
+            # not adding animation keys properly
+            xpos = camera_node.xpos()
+            ypos = camera_node.ypos()
+            nuke.nodeCopy("%clipboard%")
+            nuke.delete(camera_node)
+            nuke.nodePaste("%clipboard%")
+            camera_node = nuke.toNode(object_name)
+            camera_node.setXYpos(xpos, ypos)
+
+            # link to original input nodes
+            for i, input in enumerate(dependencies):
+                camera_node.setInput(i, input)
+            # link to original output nodes
+            for d in dependent:
+                index = next((i for i, dpcy in enumerate(
+                    d.dependencies())
+                    if camera_node is dpcy), 0)
+                d.setInput(index, camera_node)
+
+        # color node according to the loaded version
+        self.node_version_color(version_doc, camera_node)
+
+        self.log.info("updated to version: {}".format(version_doc.get("name")))
+
+        return update_container(camera_node, data_imprint)
+
+    def node_version_color(self, version_doc, node):
+        """Color the node according to whether it is the latest version."""
+        # get the last version for comparison
+        project_name = get_current_project_name()
+        last_version_doc = get_last_version_by_subset_id(
+            project_name, version_doc["parent"], fields=["_id"]
+        )
+
+        # change color of node
+        if version_doc["_id"] == last_version_doc["_id"]:
+            color_value = self.node_color
+        else:
+            color_value = "0xd88467ff"
+        node["tile_color"].setValue(int(color_value, 16))
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+        node = container["node"]
+        with viewer_update_and_undo_stop():
+            nuke.delete(node)
diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_clip.py b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py
new file mode 100644
index 0000000000..8bce2eac6e
--- /dev/null
+++ b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py
@@ -0,0 +1,474 @@
+import nuke
+import qargparse
+from pprint import pformat
+from copy import deepcopy
+from ayon_core.lib import Logger
+from ayon_core.client import (
+    get_version_by_id,
+    get_last_version_by_subset_id,
+)
+from ayon_core.pipeline import (
+    get_current_project_name,
+    get_representation_path,
+)
+from ayon_core.hosts.nuke.api.lib import (
+
get_imageio_input_colorspace, + maintained_selection +) +from ayon_core.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop, + colorspace_exists_on_node +) +from ayon_core.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) +from ayon_core.hosts.nuke.api import plugin + + +class LoadClip(plugin.NukeLoader): + """Load clip into Nuke + + Either it is image sequence or video file. + """ + log = Logger.get_logger(__name__) + + families = [ + "source", + "plate", + "render", + "prerender", + "review" + ] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) + + label = "Load Clip" + order = -20 + icon = "file-video-o" + color = "white" + + # Loaded from settings + _representations = [] + + script_start = int(nuke.root()["first_frame"].value()) + + # option gui + options_defaults = { + "start_at_workfile": True, + "add_retime": True + } + + node_name_template = "{class_name}_{ext}" + + @classmethod + def get_options(cls, *args): + return [ + qargparse.Boolean( + "start_at_workfile", + help="Load at workfile start frame", + default=cls.options_defaults["start_at_workfile"] + ), + qargparse.Boolean( + "add_retime", + help="Load with retime", + default=cls.options_defaults["add_retime"] + ) + ] + + @classmethod + def get_representations(cls): + return cls._representations or cls.representations + + def load(self, context, name, namespace, options): + """Load asset via database + """ + representation = context["representation"] + # reset container id so it is always unique for each instance + self.reset_container_id() + + is_sequence = len(representation["files"]) > 1 + + if is_sequence: + context["representation"] = \ + self._representation_with_hash_in_frame( + representation + ) + + filepath = self.filepath_from_context(context) + filepath = filepath.replace("\\", "/") + self.log.debug("_ filepath: {}".format(filepath)) + + start_at_workfile = options.get( + "start_at_workfile", self.options_defaults["start_at_workfile"]) + + add_retime = options.get( + "add_retime", self.options_defaults["add_retime"]) + + version = context['version'] + version_data = version.get("data", {}) + repre_id = representation["_id"] + + self.log.debug("_ version_data: {}\n".format( + pformat(version_data))) + self.log.debug( + "Representation id `{}` ".format(repre_id)) + + self.handle_start = version_data.get("handleStart", 0) + self.handle_end = version_data.get("handleEnd", 0) + + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + first -= self.handle_start + last += self.handle_end + + if not is_sequence: + duration = last - first + first = 1 + last = first + duration + + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context['asset']['name'] + + if not filepath: + self.log.warning( + "Representation id `{}` is failing to load".format(repre_id)) + return + + read_name = self._get_node_name(representation) + + # Create the Loader with the filename path set + read_node = nuke.createNode( + "Read", + "name {}".format(read_name), + inpanel=False + ) + + # to avoid multiple undo steps for rest of process + # we will switch off undo-ing + with viewer_update_and_undo_stop(): + read_node["file"].setValue(filepath) + + used_colorspace = self._set_colorspace( + read_node, version_data, representation["data"], filepath) + + self._set_range_to_node(read_node, first, last, start_at_workfile) + + # add additional metadata from the 
version to imprint Avalon knob + add_keys = ["frameStart", "frameEnd", + "source", "colorspace", "author", "fps", "version", + "handleStart", "handleEnd"] + + data_imprint = {} + for key in add_keys: + if key == 'version': + version_doc = context["version"] + if version_doc["type"] == "hero_version": + version = "hero" + else: + version = version_doc.get("name") + + if version: + data_imprint[key] = version + + elif key == 'colorspace': + colorspace = representation["data"].get(key) + colorspace = colorspace or version_data.get(key) + data_imprint["db_colorspace"] = colorspace + if used_colorspace: + data_imprint["used_colorspace"] = used_colorspace + else: + value_ = context["version"]['data'].get( + key, str(None)) + if isinstance(value_, (str)): + value_ = value_.replace("\\", "/") + data_imprint[key] = value_ + + if add_retime and version_data.get("retime", None): + data_imprint["addRetime"] = True + + read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) + + container = containerise( + read_node, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + if add_retime and version_data.get("retime", None): + self._make_retimes(read_node, version_data) + + self.set_as_member(read_node) + + return container + + def switch(self, container, representation): + self.update(container, representation) + + def _representation_with_hash_in_frame(self, representation): + """Convert frame key value to padded hash + + Args: + representation (dict): representation data + + Returns: + dict: altered representation data + """ + representation = deepcopy(representation) + context = representation["context"] + + # Get the frame from the context and hash it + frame = context["frame"] + hashed_frame = "#" * len(str(frame)) + + # Replace the frame with the hash in the originalBasename + if ( + "{originalBasename}" in representation["data"]["template"] + ): + origin_basename = context["originalBasename"] + context["originalBasename"] = origin_basename.replace( + frame, hashed_frame + ) + + # Replace the frame with the hash in the frame + representation["context"]["frame"] = hashed_frame + return representation + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. 
These automatic changes are to its + inputs: + + """ + + is_sequence = len(representation["files"]) > 1 + + read_node = container["node"] + + if is_sequence: + representation = self._representation_with_hash_in_frame( + representation + ) + + filepath = get_representation_path(representation).replace("\\", "/") + self.log.debug("_ filepath: {}".format(filepath)) + + start_at_workfile = "start at" in read_node['frame_mode'].value() + + add_retime = [ + key for key in read_node.knobs().keys() + if "addRetime" in key + ] + + project_name = get_current_project_name() + version_doc = get_version_by_id(project_name, representation["parent"]) + + version_data = version_doc.get("data", {}) + repre_id = representation["_id"] + + # colorspace profile + colorspace = representation["data"].get("colorspace") + colorspace = colorspace or version_data.get("colorspace") + + self.handle_start = version_data.get("handleStart", 0) + self.handle_end = version_data.get("handleEnd", 0) + + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + first -= self.handle_start + last += self.handle_end + + if not is_sequence: + duration = last - first + first = 1 + last = first + duration + + if not filepath: + self.log.warning( + "Representation id `{}` is failing to load".format(repre_id)) + return + + read_node["file"].setValue(filepath) + + # to avoid multiple undo steps for rest of process + # we will switch off undo-ing + with viewer_update_and_undo_stop(): + used_colorspace = self._set_colorspace( + read_node, version_data, representation["data"], filepath) + + self._set_range_to_node(read_node, first, last, start_at_workfile) + + updated_dict = { + "representation": str(representation["_id"]), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version_doc.get("name")), + "db_colorspace": colorspace, + "source": version_data.get("source"), + "handleStart": str(self.handle_start), + "handleEnd": str(self.handle_end), + "fps": str(version_data.get("fps")), + "author": version_data.get("author") + } + + # add used colorspace if found any + if used_colorspace: + updated_dict["used_colorspace"] = used_colorspace + + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + # change color of read_node + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0x4ecd25ff" + else: + color_value = "0xd84f20ff" + read_node["tile_color"].setValue(int(color_value, 16)) + + # Update the imprinted representation + update_container( + read_node, + updated_dict + ) + self.log.info( + "updated to version: {}".format(version_doc.get("name")) + ) + + if add_retime and version_data.get("retime", None): + self._make_retimes(read_node, version_data) + else: + self.clear_members(read_node) + + self.set_as_member(read_node) + + def remove(self, container): + read_node = container["node"] + assert read_node.Class() == "Read", "Must be Read" + + with viewer_update_and_undo_stop(): + members = self.get_members(read_node) + nuke.delete(read_node) + for member in members: + nuke.delete(member) + + def _set_range_to_node(self, read_node, first, last, start_at_workfile): + read_node['origfirst'].setValue(int(first)) + read_node['first'].setValue(int(first)) + read_node['origlast'].setValue(int(last)) + read_node['last'].setValue(int(last)) + + # set start frame depending on workfile or version + self._loader_shift(read_node, start_at_workfile) + + def _make_retimes(self, parent_node, version_data): + ''' Create all retime and 
timewarping nodes with copied animation ''' + speed = version_data.get('speed', 1) + time_warp_nodes = version_data.get('timewarps', []) + last_node = None + source_id = self.get_container_id(parent_node) + self.log.debug("__ source_id: {}".format(source_id)) + self.log.debug("__ members: {}".format( + self.get_members(parent_node))) + + dependent_nodes = self.clear_members(parent_node) + + with maintained_selection(): + parent_node['selected'].setValue(True) + + if speed != 1: + rtn = nuke.createNode( + "Retime", + "speed {}".format(speed)) + + rtn["before"].setValue("continue") + rtn["after"].setValue("continue") + rtn["input.first_lock"].setValue(True) + rtn["input.first"].setValue( + self.script_start + ) + self.set_as_member(rtn) + last_node = rtn + + if time_warp_nodes != []: + start_anim = self.script_start + (self.handle_start / speed) + for timewarp in time_warp_nodes: + twn = nuke.createNode( + timewarp["Class"], + "name {}".format(timewarp["name"]) + ) + if isinstance(timewarp["lookup"], list): + # if array for animation + twn["lookup"].setAnimated() + for i, value in enumerate(timewarp["lookup"]): + twn["lookup"].setValueAt( + (start_anim + i) + value, + (start_anim + i)) + else: + # if static value `int` + twn["lookup"].setValue(timewarp["lookup"]) + + self.set_as_member(twn) + last_node = twn + + if dependent_nodes: + # connect to original inputs + for i, n in enumerate(dependent_nodes): + last_node.setInput(i, n) + + def _loader_shift(self, read_node, workfile_start=False): + """ Set start frame of read node to a workfile start + + Args: + read_node (nuke.Node): The nuke's read node + workfile_start (bool): set workfile start frame if true + + """ + if workfile_start: + read_node['frame_mode'].setValue("start at") + read_node['frame'].setValue(str(self.script_start)) + + def _get_node_name(self, representation): + + repre_cont = representation["context"] + name_data = { + "asset": repre_cont["asset"], + "subset": repre_cont["subset"], + "representation": representation["name"], + "ext": repre_cont["representation"], + "id": representation["_id"], + "class_name": self.__class__.__name__ + } + + return self.node_name_template.format(**name_data) + + def _set_colorspace(self, node, version_data, repre_data, path): + output_color = None + path = path.replace("\\", "/") + # get colorspace + colorspace = repre_data.get("colorspace") + colorspace = colorspace or version_data.get("colorspace") + + # colorspace from `project_settings/nuke/imageio/regexInputs` + iio_colorspace = get_imageio_input_colorspace(path) + + # Set colorspace defined in version data + if ( + colorspace is not None + and colorspace_exists_on_node(node, str(colorspace)) + ): + node["colorspace"].setValue(str(colorspace)) + output_color = str(colorspace) + elif iio_colorspace is not None: + node["colorspace"].setValue(iio_colorspace) + output_color = iio_colorspace + + return output_color diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_effects.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py new file mode 100644 index 0000000000..0b5f31033e --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py @@ -0,0 +1,353 @@ +import json +from collections import OrderedDict +import nuke +import six + +from ayon_core.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) +from ayon_core.pipeline import ( + load, + get_current_project_name, + get_representation_path, +) +from ayon_core.hosts.nuke.api import ( + containerise, + update_container, + 
viewer_update_and_undo_stop +) + + +class LoadEffects(load.LoaderPlugin): + """Loading colorspace soft effect exported from nukestudio""" + + families = ["effect"] + representations = ["*"] + extensions = {"json"} + + label = "Load Effects - nodes" + order = 0 + icon = "cc" + color = "white" + ignore_attr = ["useLifetime"] + + + def load(self, context, name, namespace, data): + """ + Loading function to get the soft effects to particular read node + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: containerised nuke node object + """ + # get main variables + version = context['version'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = namespace or context['asset']['name'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = { + "frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + } + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = self.filepath_from_context(context).replace("\\", "/") + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).items()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + GN = nuke.createNode( + "Group", + "name {}_1".format(object_name), + inpanel=False + ) + + # adding content to the group node + with GN: + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + + try: + node[k].value() + except NameError as e: + self.log.warning(e) + continue + + if isinstance(v, list) and len(v) > 4: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to find parent read node + self.connect_read_node(GN, namespace, json_f["assignTo"]) + + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("Loaded lut setup: `{}`".format(GN["name"].value())) + + return containerise( + node=GN, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. 
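The knob-setting loop in `LoadEffects.load` above treats long value lists as baked animation curves and everything else as static values. A trimmed sketch of that behavior, assuming a live Nuke session (the per-channel nested-list branch of the original loop is omitted here; `apply_knob_value` is an illustrative helper, not part of the plugin):

```python
import nuke  # requires a running Nuke session

def apply_knob_value(node, knob_name, value, first_frame):
    # Lists longer than four samples are treated as per-frame values
    # and baked as animation starting at first_frame; shorter values
    # (and scalars) are set statically, as in LoadEffects.load above.
    knob = node[knob_name]
    if isinstance(value, list) and len(value) > 4:
        knob.setAnimated()
        for offset, sample in enumerate(value):
            knob.setValueAt(sample, first_frame + offset)
    else:
        knob.setValue(value)

grade = nuke.createNode("Grade")
apply_knob_value(grade, "white", [1.0, 1.05, 1.1, 1.15, 1.2], 1001)
```

The `len(v) > 4` threshold is how the exported JSON distinguishes a sampled curve from a short static vector such as an RGBA value.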
These automatic changes are to its + inputs: + + """ + # get main variables + # Get version from io + project_name = get_current_project_name() + version_doc = get_version_by_id(project_name, representation["parent"]) + + # get corresponding node + GN = container["node"] + + file = get_representation_path(representation).replace("\\", "/") + name = container['name'] + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = container['namespace'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = { + "representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace + } + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # Update the imprinted representation + update_container( + GN, + data_imprint + ) + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).items()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + # adding content to the group node + with GN: + # first remove all nodes + [nuke.delete(n) for n in nuke.allNodes()] + + # create input node + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for _, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + + try: + node[k].value() + except NameError as e: + self.log.warning(e) + continue + + if isinstance(v, list) and len(v) > 4: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + # create output node + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to find parent read node + self.connect_read_node(GN, namespace, json_f["assignTo"]) + + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + + # change color of node + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0x3469ffff" + else: + color_value = "0xd84f20ff" + + GN["tile_color"].setValue(int(color_value, 16)) + + self.log.info("updated to version: {}".format(version_doc.get("name"))) + + def connect_read_node(self, group_node, asset, subset): + """ + Finds read node and selects it + + Arguments: + asset (str): asset name + + Returns: + nuke node: node is selected + None: if nothing found + """ + search_name = "{0}_{1}".format(asset, subset) + + node = [ + n for n in nuke.allNodes(filter="Read") + if search_name in n["file"].value() + ] + if len(node) > 0: + rn = node[0] + else: + rn = None + + # Parent read node has been found + # solving connections + if rn: + dep_nodes = rn.dependent() + + if len(dep_nodes) > 0: + for dn in dep_nodes: + 
dn.setInput(0, group_node) + + group_node.setInput(0, rn) + group_node.autoplace() + + def reorder_nodes(self, data): + new_order = OrderedDict() + trackNums = [v["trackIndex"] for k, v in data.items() + if isinstance(v, dict)] + subTrackNums = [v["subTrackIndex"] for k, v in data.items() + if isinstance(v, dict)] + + for trackIndex in range( + min(trackNums), max(trackNums) + 1): + for subTrackIndex in range( + min(subTrackNums), max(subTrackNums) + 1): + item = self.get_item(data, trackIndex, subTrackIndex) + if item: + new_order.update(item) + return new_order + + def get_item(self, data, trackIndex, subTrackIndex): + return {key: val for key, val in data.items() + if isinstance(val, dict) + if subTrackIndex == val["subTrackIndex"] + if trackIndex == val["trackIndex"]} + + def byteify(self, input): + """ + Recursively converts unicode strings to str, + walking through nested dictionaries and lists + + Arguments: + input (dict/str): input + + Returns: + dict: with converted values and keys + + """ + + if isinstance(input, dict): + return {self.byteify(key): self.byteify(value) + for key, value in input.items()} + elif isinstance(input, list): + return [self.byteify(element) for element in input] + elif isinstance(input, six.text_type): + return str(input) + else: + return input + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + node = container["node"] + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py similarity index 98% rename from openpype/hosts/nuke/plugins/load/load_effects_ip.py rename to client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py index cdfdfef3b8..4d8a8518f2 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py @@ -3,17 +3,17 @@ import six import nuke -from openpype.client import ( +from ayon_core.client import ( get_version_by_id, get_last_version_by_subset_id, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_current_project_name, get_representation_path, ) -from openpype.hosts.nuke.api import lib -from openpype.hosts.nuke.api import ( +from ayon_core.hosts.nuke.api import lib +from ayon_core.hosts.nuke.api import ( containerise, update_container, viewer_update_and_undo_stop diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py similarity index 97% rename from openpype/hosts/nuke/plugins/load/load_gizmo.py rename to client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py index 19b5cca74e..54daa74405 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py @@ -1,21 +1,21 @@ import nuke -from openpype.client import ( +from ayon_core.client import ( get_version_by_id, get_last_version_by_subset_id, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_current_project_name, get_representation_path, ) -from openpype.hosts.nuke.api.lib import ( +from ayon_core.hosts.nuke.api.lib import ( maintained_selection, get_avalon_knob_data, set_avalon_knob_data, swap_node_with_dependency, ) -from openpype.hosts.nuke.api import ( +from ayon_core.hosts.nuke.api import ( containerise, update_container, viewer_update_and_undo_stop diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py 
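The track/sub-track ordering in `reorder_nodes` above is plain Python and can be sanity-checked outside Nuke. A standalone sketch with made-up effect entries (non-dict keys such as `assignTo` are skipped); note that a plain truthiness test is required here, since an `item is not {}` identity check would never be False:

```python
from collections import OrderedDict

def reorder_nodes(data):
    # Order effect entries by (trackIndex, subTrackIndex), as above.
    entries = [v for v in data.values() if isinstance(v, dict)]
    tracks = [v["trackIndex"] for v in entries]
    subtracks = [v["subTrackIndex"] for v in entries]

    new_order = OrderedDict()
    for track in range(min(tracks), max(tracks) + 1):
        for subtrack in range(min(subtracks), max(subtracks) + 1):
            item = {
                key: val for key, val in data.items()
                if isinstance(val, dict)
                and val["trackIndex"] == track
                and val["subTrackIndex"] == subtrack
            }
            if item:  # truthiness; `item is not {}` is always True
                new_order.update(item)
    return new_order

# Hypothetical exported-effect entries for illustration:
effects = {
    "grade_b": {"class": "Grade", "trackIndex": 1, "subTrackIndex": 0},
    "lut_a": {"class": "OCIOFileTransform",
              "trackIndex": 0, "subTrackIndex": 1},
    "assignTo": "sh010_plate",  # non-dict metadata is skipped
}
print(list(reorder_nodes(effects)))  # ['lut_a', 'grade_b']
```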
b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py similarity index 98% rename from openpype/hosts/nuke/plugins/load/load_gizmo_ip.py rename to client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py index 5b4877678a..677d9868f1 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,23 +1,23 @@ import nuke import six -from openpype.client import ( +from ayon_core.client import ( get_version_by_id, get_last_version_by_subset_id, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_current_project_name, get_representation_path, ) -from openpype.hosts.nuke.api.lib import ( +from ayon_core.hosts.nuke.api.lib import ( maintained_selection, create_backdrop, get_avalon_knob_data, set_avalon_knob_data, swap_node_with_dependency, ) -from openpype.hosts.nuke.api import ( +from ayon_core.hosts.nuke.api import ( containerise, update_container, viewer_update_and_undo_stop diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_image.py b/client/ayon_core/hosts/nuke/plugins/load/load_image.py new file mode 100644 index 0000000000..b9f47bddc9 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/load/load_image.py @@ -0,0 +1,256 @@ +import nuke + +import qargparse + +from ayon_core.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) +from ayon_core.pipeline import ( + load, + get_current_project_name, + get_representation_path, +) +from ayon_core.hosts.nuke.api.lib import ( + get_imageio_input_colorspace +) +from ayon_core.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) +from ayon_core.lib.transcoding import ( + IMAGE_EXTENSIONS +) + + +class LoadImage(load.LoaderPlugin): + """Load still image into Nuke""" + + families = [ + "render2d", + "source", + "plate", + "render", + "prerender", + "review", + "image" + ] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS + ) + + label = "Load Image" + order = -10 + icon = "image" + color = "white" + + # Loaded from settings + _representations = [] + + node_name_template = "{class_name}_{ext}" + + options = [ + qargparse.Integer( + "frame_number", + label="Frame Number", + default=int(nuke.root()["first_frame"].getValue()), + min=1, + max=999999, + help="Which frame should be read?" 
+ ) + ] + + @classmethod + def get_representations(cls): + return cls._representations or cls.representations + + def load(self, context, name, namespace, options): + self.log.info("__ options: `{}`".format(options)) + frame_number = options.get( + "frame_number", int(nuke.root()["first_frame"].getValue()) + ) + + version = context['version'] + version_data = version.get("data", {}) + repr_id = context["representation"]["_id"] + + self.log.info("version_data: {}\n".format(version_data)) + self.log.debug( + "Representation id `{}` ".format(repr_id)) + + last = first = int(frame_number) + + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context['asset']['name'] + + file = self.filepath_from_context(context) + + if not file: + repr_id = context["representation"]["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + representation = context["representation"] + repr_cont = representation["context"] + frame = repr_cont.get("frame") + if frame: + padding = len(frame) + file = file.replace( + frame, + format(frame_number, "0{}".format(padding))) + + read_name = self._get_node_name(representation) + + # Create the Loader with the filename path set + with viewer_update_and_undo_stop(): + r = nuke.createNode( + "Read", + "name {}".format(read_name), + inpanel=False + ) + + r["file"].setValue(file) + + # Set colorspace defined in version data + colorspace = context["version"]["data"].get("colorspace") + if colorspace: + r["colorspace"].setValue(str(colorspace)) + + preset_clrsp = get_imageio_input_colorspace(file) + + if preset_clrsp is not None: + r["colorspace"].setValue(preset_clrsp) + + r["origfirst"].setValue(first) + r["first"].setValue(first) + r["origlast"].setValue(last) + r["last"].setValue(last) + + # add additional metadata from the version to imprint Avalon knob + add_keys = ["source", "colorspace", "author", "fps", "version"] + + data_imprint = { + "frameStart": first, + "frameEnd": last + } + for k in add_keys: + if k == 'version': + data_imprint.update({k: context["version"]['name']}) + else: + data_imprint.update( + {k: context["version"]['data'].get(k, str(None))}) + + r["tile_color"].setValue(int("0x4ecd25ff", 16)) + + return containerise(r, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. 
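`LoadImage.load` above pins a sequence to a single frame by rewriting the frame token with an equally padded frame number. A minimal sketch of that substitution (the path and helper name are hypothetical):

```python
def filepath_for_frame(filepath, frame_token, frame_number):
    # Keep the original padding width when swapping in the new frame,
    # as LoadImage does when a specific frame_number is requested.
    padding = len(frame_token)
    return filepath.replace(
        frame_token, format(frame_number, "0{}".format(padding)))

path = "/shots/sh010/plate_v002.1001.exr"  # hypothetical path
print(filepath_for_frame(path, "1001", 1013))
# /shots/sh010/plate_v002.1013.exr
```

Note that `str.replace` substitutes every occurrence, so a path component that happens to contain the frame token would be rewritten as well; the plugin relies on the token being unambiguous in practice.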
These automatic changes are to its + inputs: + + """ + node = container["node"] + frame_number = node["first"].value() + + assert node.Class() == "Read", "Must be Read" + + repr_cont = representation["context"] + + file = get_representation_path(representation) + + if not file: + repr_id = representation["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + frame = repr_cont.get("frame") + if frame: + padding = len(frame) + file = file.replace( + frame, + format(frame_number, "0{}".format(padding))) + + # Get start frame from version data + project_name = get_current_project_name() + version_doc = get_version_by_id(project_name, representation["parent"]) + last_version_doc = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + + version_data = version_doc.get("data", {}) + + last = first = int(frame_number) + + # Set the global in to the start frame of the sequence + node["file"].setValue(file) + node["origfirst"].setValue(first) + node["first"].setValue(first) + node["origlast"].setValue(last) + node["last"].setValue(last) + + updated_dict = {} + updated_dict.update({ + "representation": str(representation["_id"]), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version_doc.get("name")), + "colorspace": version_data.get("colorspace"), + "source": version_data.get("source"), + "fps": str(version_data.get("fps")), + "author": version_data.get("author") + }) + + # change color of node + if version_doc["_id"] == last_version_doc["_id"]: + color_value = "0x4ecd25ff" + else: + color_value = "0xd84f20ff" + node["tile_color"].setValue(int(color_value, 16)) + + # Update the imprinted representation + update_container( + node, + updated_dict + ) + self.log.info("updated to version: {}".format(version_doc.get("name"))) + + def remove(self, container): + node = container["node"] + assert node.Class() == "Read", "Must be Read" + + with viewer_update_and_undo_stop(): + nuke.delete(node) + + def _get_node_name(self, representation): + + repre_cont = representation["context"] + name_data = { + "asset": repre_cont["asset"], + "subset": repre_cont["subset"], + "representation": representation["name"], + "ext": repre_cont["representation"], + "id": representation["_id"], + "class_name": self.__class__.__name__ + } + + return self.node_name_template.format(**name_data) diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_matchmove.py b/client/ayon_core/hosts/nuke/plugins/load/load_matchmove.py new file mode 100644 index 0000000000..412181f3d9 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/load/load_matchmove.py @@ -0,0 +1,30 @@ +import nuke +from ayon_core.pipeline import load + + +class MatchmoveLoader(load.LoaderPlugin): + """ + This will run the matchmove script to create a track in the script. 
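The loader's `load` method below executes the published `.py` file directly with `exec(open(path).read())`. A slightly more defensive sketch of the same idea, under the same assumptions; `run_matchmove_script` is an illustrative helper, not part of the plugin:

```python
def run_matchmove_script(path):
    # Read, compile, and execute the published matchmove script in an
    # explicit namespace, closing the file handle and making tracebacks
    # point at the real file on disk.
    if not path.lower().endswith(".py"):
        raise ValueError("Unsupported script type: {}".format(path))
    with open(path) as stream:
        code = compile(stream.read(), path, "exec")
    exec(code, {"__name__": "__main__", "__file__": path})
```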
+ """ + + families = ["matchmove"] + representations = ["*"] + extensions = {"py"} + + defaults = ["Camera", "Object"] + + label = "Run matchmove script" + icon = "empire" + color = "orange" + + def load(self, context, name, namespace, data): + path = self.filepath_from_context(context) + if path.lower().endswith(".py"): + exec(open(path).read()) + + else: + msg = "Unsupported script type" + self.log.error(msg) + nuke.message(msg) + + return True diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_model.py b/client/ayon_core/hosts/nuke/plugins/load/load_model.py new file mode 100644 index 0000000000..125cb28e27 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/load/load_model.py @@ -0,0 +1,211 @@ +import nuke + +from ayon_core.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) +from ayon_core.pipeline import ( + load, + get_current_project_name, + get_representation_path, +) +from ayon_core.hosts.nuke.api.lib import maintained_selection +from ayon_core.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) + + +class AlembicModelLoader(load.LoaderPlugin): + """ + This will load an alembic model or animation into the script. + """ + + families = ["model", "pointcache", "animation"] + representations = ["*"] + extensions = {"abc"} + + label = "Load Alembic" + icon = "cube" + color = "orange" + node_color = "0x4ecd91ff" + + def load(self, context, name, namespace, data): + # get main variables + version = context['version'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + fps = version_data.get("fps") or nuke.root()["fps"].getValue() + namespace = namespace or context['asset']['name'] + object_name = "{}_{}".format(name, namespace) + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["source", "author", "fps"] + + data_imprint = { + "frameStart": first, + "frameEnd": last, + "version": vname + } + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = self.filepath_from_context(context).replace("\\", "/") + + with maintained_selection(): + model_node = nuke.createNode( + "ReadGeo2", + "name {} file {} ".format( + object_name, file), + inpanel=False + ) + + model_node.forceValidate() + + # Ensure all items are imported and selected. + scene_view = model_node.knob('scene_view') + scene_view.setImportedItems(scene_view.getAllItems()) + scene_view.setSelectedItems(scene_view.getAllItems()) + + model_node["frame_rate"].setValue(float(fps)) + + # workaround for nuke's bug of not adding + # animation keys properly + xpos = model_node.xpos() + ypos = model_node.ypos() + nuke.nodeCopy("%clipboard%") + nuke.delete(model_node) + nuke.nodePaste("%clipboard%") + model_node = nuke.toNode(object_name) + model_node.setXYpos(xpos, ypos) + + # color the node according to the actual version + self.node_version_color(version, model_node) + + return containerise( + node=model_node, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """ + Called by Scene Inventory when the loaded alembic should be + updated to another version. 
+ + Args: + container: object that holds the node to be updated + representation: (dict): relationship data to get proper + representation from DB and persisted + data in .json + Returns: + None + """ + # Get version from io + project_name = get_current_project_name() + version_doc = get_version_by_id(project_name, representation["parent"]) + + # get corresponding node + model_node = container["node"] + + # get main variables + version_data = version_doc.get("data", {}) + vname = version_doc.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + fps = version_data.get("fps") or nuke.root()["fps"].getValue() + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["source", "author", "fps"] + + data_imprint = { + "representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname + } + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = get_representation_path(representation).replace("\\", "/") + + with maintained_selection(): + model_node['selected'].setValue(True) + + # collect input output dependencies + dependencies = model_node.dependencies() + dependent = model_node.dependent() + + model_node["frame_rate"].setValue(float(fps)) + model_node["file"].setValue(file) + + # Ensure all items are imported and selected. + scene_view = model_node.knob('scene_view') + scene_view.setImportedItems(scene_view.getAllItems()) + scene_view.setSelectedItems(scene_view.getAllItems()) + + # workaround for nuke's bug of not adding + # animation keys properly + xpos = model_node.xpos() + ypos = model_node.ypos() + nuke.nodeCopy("%clipboard%") + nuke.delete(model_node) + + # paste the node back and set the position + nuke.nodePaste("%clipboard%") + model_node = nuke.selectedNode() + model_node.setXYpos(xpos, ypos) + + # link to original input nodes + for i, input in enumerate(dependencies): + model_node.setInput(i, input) + # link to original output nodes + for d in dependent: + index = next((i for i, dpcy in enumerate( + d.dependencies()) + if model_node is dpcy), 0) + d.setInput(index, model_node) + + # color the node according to the actual version + self.node_version_color(version_doc, model_node) + + self.log.info("updated to version: {}".format(version_doc.get("name"))) + + return update_container(model_node, data_imprint) + + def node_version_color(self, version, node): + """ Color the node according to whether its version is the latest""" + + project_name = get_current_project_name() + last_version_doc = get_last_version_by_subset_id( + project_name, version["parent"], fields=["_id"] + ) + + # change color of node + if version["_id"] == last_version_doc["_id"]: + color_value = self.node_color + else: + color_value = "0xd88467ff" + node["tile_color"].setValue(int(color_value, 16)) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + node = nuke.toNode(container['objectName']) + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_ociolook.py b/client/ayon_core/hosts/nuke/plugins/load/load_ociolook.py similarity index 99% rename from openpype/hosts/nuke/plugins/load/load_ociolook.py rename to client/ayon_core/hosts/nuke/plugins/load/load_ociolook.py index c0f8235253..e168c2bac1 100644 --- a/openpype/hosts/nuke/plugins/load/load_ociolook.py +++ 
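`AlembicModelLoader.update` above works around a Nuke keying bug by copy/pasting the `ReadGeo2` node and then re-linking its connections. A condensed, Nuke-only sketch of that relink dance (the helper name is illustrative, not part of the plugin):

```python
import nuke  # requires a running Nuke session

def replace_node_keep_connections(node):
    # Remember wiring and position, recreate the node via copy/paste,
    # then restore the original inputs and downstream connections.
    inputs = node.dependencies()
    outputs = node.dependent()
    xpos, ypos = node.xpos(), node.ypos()

    node["selected"].setValue(True)
    nuke.nodeCopy("%clipboard%")
    nuke.delete(node)
    nuke.nodePaste("%clipboard%")

    new_node = nuke.selectedNode()
    new_node.setXYpos(xpos, ypos)

    # reconnect upstream nodes in their original input order
    for i, upstream in enumerate(inputs):
        new_node.setInput(i, upstream)
    # reconnect each downstream node at the slot it used before
    for downstream in outputs:
        index = next(
            (i for i, dep in enumerate(downstream.dependencies())
             if dep is new_node), 0)
        downstream.setInput(index, new_node)
    return new_node
```

Collecting `dependencies()`/`dependent()` before the delete is the key step; the clipboard round-trip discards all wiring, so nothing can be recovered afterwards.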
b/client/ayon_core/hosts/nuke/plugins/load/load_ociolook.py @@ -4,16 +4,16 @@ import nuke import six -from openpype.client import ( +from ayon_core.client import ( get_version_by_id, get_last_version_by_subset_id ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_current_project_name, get_representation_path, ) -from openpype.hosts.nuke.api import ( +from ayon_core.hosts.nuke.api import ( containerise, viewer_update_and_undo_stop, update_container, diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py similarity index 96% rename from openpype/hosts/nuke/plugins/load/load_script_precomp.py rename to client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py index cbe19d217b..1c91e51a09 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py @@ -1,16 +1,16 @@ import nuke -from openpype.client import ( +from ayon_core.client import ( get_version_by_id, get_last_version_by_subset_id, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_current_project_name, load, get_representation_path, ) -from openpype.hosts.nuke.api.lib import get_avalon_knob_data -from openpype.hosts.nuke.api import ( +from ayon_core.hosts.nuke.api.lib import get_avalon_knob_data +from ayon_core.hosts.nuke.api import ( containerise, update_container, viewer_update_and_undo_stop diff --git a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_backdrop.py similarity index 97% rename from openpype/hosts/nuke/plugins/publish/collect_backdrop.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_backdrop.py index d04c1204e3..fc17de95b4 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/collect_backdrop.py @@ -1,6 +1,6 @@ from pprint import pformat import pyblish.api -from openpype.hosts.nuke.api import lib as pnlib +from ayon_core.hosts.nuke.api import lib as pnlib import nuke diff --git a/openpype/hosts/nuke/plugins/publish/collect_context_data.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_context_data.py similarity index 93% rename from openpype/hosts/nuke/plugins/publish/collect_context_data.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_context_data.py index b85e924f55..0a032e5a2d 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_context_data.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/collect_context_data.py @@ -1,9 +1,9 @@ import os import nuke import pyblish.api -from openpype.lib import get_version_from_path -import openpype.hosts.nuke.api as napi -from openpype.pipeline import KnownPublishError +from ayon_core.lib import get_version_from_path +import ayon_core.hosts.nuke.api as napi +from ayon_core.pipeline import KnownPublishError class CollectContextData(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/nuke/plugins/publish/collect_framerate.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_framerate.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/collect_framerate.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_framerate.py diff --git a/openpype/hosts/nuke/plugins/publish/collect_gizmo.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_gizmo.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/collect_gizmo.py rename to 
client/ayon_core/hosts/nuke/plugins/publish/collect_gizmo.py diff --git a/openpype/hosts/nuke/plugins/publish/collect_model.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_model.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/collect_model.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_model.py diff --git a/openpype/hosts/nuke/plugins/publish/collect_nuke_instance_data.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_nuke_instance_data.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/collect_nuke_instance_data.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_nuke_instance_data.py diff --git a/openpype/hosts/nuke/plugins/publish/collect_reads.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_reads.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/collect_reads.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_reads.py diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_slate_node.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/collect_slate_node.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_slate_node.py diff --git a/openpype/hosts/nuke/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_workfile.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/collect_workfile.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_workfile.py diff --git a/openpype/hosts/nuke/plugins/publish/collect_writes.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py similarity index 99% rename from openpype/hosts/nuke/plugins/publish/collect_writes.py rename to client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py index 6f9245f5b9..84dc7992a5 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_writes.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py @@ -1,8 +1,8 @@ import os import nuke import pyblish.api -from openpype.hosts.nuke import api as napi -from openpype.pipeline import publish +from ayon_core.hosts.nuke import api as napi +from ayon_core.pipeline import publish class CollectNukeWrites(pyblish.api.InstancePlugin, diff --git a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_backdrop.py similarity index 97% rename from openpype/hosts/nuke/plugins/publish/extract_backdrop.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_backdrop.py index 2a6a5dee2a..e53ce9015a 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_backdrop.py @@ -4,8 +4,8 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.nuke.api.lib import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.nuke.api.lib import ( maintained_selection, reset_selection, select_nodes diff --git a/client/ayon_core/hosts/nuke/plugins/publish/extract_camera.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_camera.py new file mode 100644 index 0000000000..810a2e0a76 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_camera.py @@ -0,0 +1,184 @@ +import os +import math +from pprint import pformat + +import nuke + +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.nuke.api.lib import maintained_selection + + +class 
ExtractCamera(publish.Extractor): + """ 3D camera extractor + """ + label = 'Extract Camera' + order = pyblish.api.ExtractorOrder + families = ["camera"] + hosts = ["nuke"] + + # presets + write_geo_knobs = [ + ("file_type", "abc"), + ("storageFormat", "Ogawa"), + ("writeGeometries", False), + ("writePointClouds", False), + ("writeAxes", False) + ] + + def process(self, instance): + camera_node = instance.data["transientData"]["node"] + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + first_frame = int(nuke.root()["first_frame"].getValue()) + last_frame = int(nuke.root()["last_frame"].getValue()) + step = 1 + output_range = str(nuke.FrameRange(first_frame, last_frame, step)) + + rm_nodes = [] + self.log.debug("Creating additional nodes for 3D Camera Extractor") + subset = instance.data["subset"] + staging_dir = self.staging_dir(instance) + + # get extension from preset + extension = next((k[1] for k in self.write_geo_knobs + if k[0] == "file_type"), None) + if not extension: + raise RuntimeError( + "Bad config for extension in presets. " + "Talk to your supervisor or pipeline admin") + + # create file name and path + filename = subset + ".{}".format(extension) + file_path = os.path.join(staging_dir, filename).replace("\\", "/") + + with maintained_selection(): + # bake camera with its axes onto world XYZ coordinates + rm_n = bakeCameraWithAxeses( + camera_node, output_range) + rm_nodes.append(rm_n) + + # create scene node + rm_n = nuke.createNode("Scene") + rm_nodes.append(rm_n) + + # create write geo node + wg_n = nuke.createNode("WriteGeo") + wg_n["file"].setValue(file_path) + # add path to write to + for k, v in self.write_geo_knobs: + wg_n[k].setValue(v) + rm_nodes.append(wg_n) + + # write out camera + nuke.execute( + wg_n, + int(first_frame), + int(last_frame) + ) + # erase additional nodes + for n in rm_nodes: + nuke.delete(n) + + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': extension, + 'ext': extension, + 'files': filename, + "stagingDir": staging_dir, + "frameStart": first_frame, + "frameEnd": last_frame + } + instance.data["representations"].append(representation) + + instance.data.update({ + "path": file_path, + "outputDir": staging_dir, + "ext": extension, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, + }) + + self.log.debug("Extracted instance '{0}' to: {1}".format( + instance.name, file_path)) + + +def bakeCameraWithAxeses(camera_node, output_range): + """ Bake the whole parent hierarchy of axes into the camera + with transposition onto world XYZ coordinates + """ + bakeFocal = False + bakeHaperture = False + bakeVaperture = False + + camera_matrix = camera_node['world_matrix'] + + new_cam_n = nuke.createNode("Camera2") + new_cam_n.setInput(0, None) + new_cam_n['rotate'].setAnimated() + new_cam_n['translate'].setAnimated() + + old_focal = camera_node['focal'] + if old_focal.isAnimated() and not (old_focal.animation(0).constant()): + new_cam_n['focal'].setAnimated() + bakeFocal = True + else: + new_cam_n['focal'].setValue(old_focal.value()) + + old_haperture = camera_node['haperture'] + if old_haperture.isAnimated() and not ( + old_haperture.animation(0).constant()): + new_cam_n['haperture'].setAnimated() + bakeHaperture = True + else: + 
new_cam_n['haperture'].setValue(old_haperture.value()) + + old_vaperture = camera_node['vaperture'] + if old_vaperture.isAnimated() and not ( + old_vaperture.animation(0).constant()): + new_cam_n['vaperture'].setAnimated() + bakeVaperture = True + else: + new_cam_n['vaperture'].setValue(old_vaperture.value()) + + new_cam_n['win_translate'].setValue(camera_node['win_translate'].value()) + new_cam_n['win_scale'].setValue(camera_node['win_scale'].value()) + + for x in nuke.FrameRange(output_range): + math_matrix = nuke.math.Matrix4() + for y in range(camera_matrix.height()): + for z in range(camera_matrix.width()): + matrix_pointer = z + (y * camera_matrix.width()) + math_matrix[matrix_pointer] = camera_matrix.getValueAt( + x, (y + (z * camera_matrix.width()))) + + rot_matrix = nuke.math.Matrix4(math_matrix) + rot_matrix.rotationOnly() + rot = rot_matrix.rotationsZXY() + + new_cam_n['rotate'].setValueAt(math.degrees(rot[0]), x, 0) + new_cam_n['rotate'].setValueAt(math.degrees(rot[1]), x, 1) + new_cam_n['rotate'].setValueAt(math.degrees(rot[2]), x, 2) + new_cam_n['translate'].setValueAt( + camera_matrix.getValueAt(x, 3), x, 0) + new_cam_n['translate'].setValueAt( + camera_matrix.getValueAt(x, 7), x, 1) + new_cam_n['translate'].setValueAt( + camera_matrix.getValueAt(x, 11), x, 2) + + if bakeFocal: + new_cam_n['focal'].setValueAt(old_focal.getValueAt(x), x) + if bakeHaperture: + new_cam_n['haperture'].setValueAt(old_haperture.getValueAt(x), x) + if bakeVaperture: + new_cam_n['vaperture'].setValueAt(old_vaperture.getValueAt(x), x) + + return new_cam_n diff --git a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_gizmo.py similarity index 94% rename from openpype/hosts/nuke/plugins/publish/extract_gizmo.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_gizmo.py index ecec0d6f80..2a2e2255fd 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_gizmo.py @@ -3,9 +3,9 @@ import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.nuke.api import utils as pnutils -from openpype.hosts.nuke.api.lib import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.nuke.api import utils as pnutils +from ayon_core.hosts.nuke.api.lib import ( maintained_selection, reset_selection, select_nodes diff --git a/client/ayon_core/hosts/nuke/plugins/publish/extract_model.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_model.py new file mode 100644 index 0000000000..6f35e95630 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_model.py @@ -0,0 +1,108 @@ +import os +from pprint import pformat +import nuke +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.nuke.api.lib import ( + maintained_selection, + select_nodes +) + + +class ExtractModel(publish.Extractor): + """ 3D model extractor + """ + label = 'Extract Model' + order = pyblish.api.ExtractorOrder + families = ["model"] + hosts = ["nuke"] + + # presets + write_geo_knobs = [ + ("file_type", "abc"), + ("storageFormat", "Ogawa"), + ("writeGeometries", True), + ("writePointClouds", False), + ("writeAxes", False) + ] + + def process(self, instance): + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + first_frame = int(nuke.root()["first_frame"].getValue()) + last_frame = int(nuke.root()["last_frame"].getValue()) + + self.log.debug("instance.data: `{}`".format( + pformat(instance.data))) + 
+ rm_nodes = [] + model_node = instance.data["transientData"]["node"] + + self.log.debug("Creating additional nodes for Extract Model") + subset = instance.data["subset"] + staging_dir = self.staging_dir(instance) + + extension = next((k[1] for k in self.write_geo_knobs + if k[0] == "file_type"), None) + if not extension: + raise RuntimeError( + "Bad config for extension in presets. " + "Talk to your supervisor or pipeline admin") + + # create file name and path + filename = subset + ".{}".format(extension) + file_path = os.path.join(staging_dir, filename).replace("\\", "/") + + with maintained_selection(): + # select model node + select_nodes([model_node]) + + # create write geo node + wg_n = nuke.createNode("WriteGeo") + wg_n["file"].setValue(file_path) + # add path to write to + for k, v in self.write_geo_knobs: + wg_n[k].setValue(v) + rm_nodes.append(wg_n) + + # write out model + nuke.execute( + wg_n, + int(first_frame), + int(last_frame) + ) + # erase additional nodes + for n in rm_nodes: + nuke.delete(n) + + self.log.debug("Filepath: {}".format(file_path)) + + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': extension, + 'ext': extension, + 'files': filename, + "stagingDir": staging_dir, + "frameStart": first_frame, + "frameEnd": last_frame + } + instance.data["representations"].append(representation) + + instance.data.update({ + "path": file_path, + "outputDir": staging_dir, + "ext": extension, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, + }) + + self.log.debug("Extracted instance '{0}' to: {1}".format( + instance.name, file_path)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_ouput_node.py similarity index 95% rename from openpype/hosts/nuke/plugins/publish/extract_ouput_node.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_ouput_node.py index 3fe1443bb3..b8e038a4f5 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_ouput_node.py @@ -1,6 +1,6 @@ import nuke import pyblish.api -from openpype.hosts.nuke.api.lib import maintained_selection +from ayon_core.hosts.nuke.api.lib import maintained_selection class CreateOutputNode(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/nuke/plugins/publish/extract_output_directory.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_output_directory.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/extract_output_directory.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_output_directory.py diff --git a/client/ayon_core/hosts/nuke/plugins/publish/extract_render_local.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_render_local.py new file mode 100644 index 0000000000..45514ede5e --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_render_local.py @@ -0,0 +1,209 @@ +import os +import shutil + +import pyblish.api +import clique +import nuke +from ayon_core.hosts.nuke import api as napi +from ayon_core.pipeline import publish +from ayon_core.lib import collect_frames + + +class NukeRenderLocal(publish.Extractor, + publish.ColormanagedPyblishPluginMixin): + """Render the current Nuke composition locally. 
+ + Extract the result of Write nodes by starting a local comp render. + This will run the local render in Nuke. + + Allows reusing last published frames and overwriting only specific + ones (set in instance.data.get("frames_to_fix")) + """ + + order = pyblish.api.ExtractorOrder + label = "Render Local" + hosts = ["nuke"] + families = ["render.local", "prerender.local", "image.local"] + + def process(self, instance): + child_nodes = ( + instance.data.get("transientData", {}).get("childNodes") + or instance + ) + + node = None + for x in child_nodes: + if x.Class() == "Write": + node = x + + self.log.debug("instance collected: {}".format(instance.data)) + + node_subset_name = instance.data.get("name", None) + + first_frame = instance.data.get("frameStartHandle", None) + last_frame = instance.data.get("frameEndHandle", None) + + filenames = [] + node_file = node["file"] + # Collect expected filepaths for each frame + # - when the output is a still image, a set of paths is created + # first, then sorted and converted to a list + expected_paths = list(sorted({ + node_file.evaluate(frame) + for frame in range(first_frame, last_frame + 1) + })) + # Extract only filenames for representation + filenames.extend([ + os.path.basename(filepath) + for filepath in expected_paths + ]) + + # Ensure output directory exists. + out_dir = os.path.dirname(expected_paths[0]) + if not os.path.exists(out_dir): + os.makedirs(out_dir) + + frames_to_render = [(first_frame, last_frame)] + + frames_to_fix = instance.data.get("frames_to_fix") + if instance.data.get("last_version_published_files") and frames_to_fix: + frames_to_render = self._get_frames_to_render(frames_to_fix) + anatomy = instance.context.data["anatomy"] + self._copy_last_published(anatomy, instance, out_dir, + filenames) + + for render_first_frame, render_last_frame in frames_to_render: + + self.log.info("Starting render") + self.log.info("Start frame: {}".format(render_first_frame)) + self.log.info("End frame: {}".format(render_last_frame)) + + # Render frames + nuke.execute( + str(node_subset_name), + int(render_first_frame), + int(render_last_frame) + ) + + ext = node["file_type"].value() + colorspace = napi.get_colorspace_from_node(node) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + if len(filenames) == 1: + repre = { + 'name': ext, + 'ext': ext, + 'files': filenames[0], + "stagingDir": out_dir + } + else: + repre = { + 'name': ext, + 'ext': ext, + 'frameStart': ( + "{{:0>{}}}" + .format(len(str(last_frame))) + .format(first_frame) + ), + 'files': filenames, + "stagingDir": out_dir + } + + # inject colorspace data + self.set_representation_colorspace( + repre, instance.context, + colorspace=colorspace + ) + + instance.data["representations"].append(repre) + + self.log.debug("Extracted instance '{0}' to: {1}".format( + instance.name, + out_dir + )) + + families = instance.data["families"] + # redefinition of families + if "render.local" in families: + instance.data['family'] = 'render' + families.remove('render.local') + families.insert(0, "render2d") + instance.data["anatomyData"]["family"] = "render" + elif "prerender.local" in families: + instance.data['family'] = 'prerender' + families.remove('prerender.local') + families.insert(0, "prerender") + instance.data["anatomyData"]["family"] = "prerender" + elif "image.local" in families: + instance.data['family'] = 'image' + families.remove('image.local') + instance.data["anatomyData"]["family"] = "image" + instance.data["families"] = families + + collections, 
remainder = clique.assemble(filenames) + self.log.debug('collections: {}'.format(str(collections))) + + if collections: + collection = collections[0] + instance.data['collection'] = collection + + self.log.info('Finished render') + + self.log.debug("_ instance.data: {}".format(instance.data)) + + def _copy_last_published(self, anatomy, instance, out_dir, + expected_filenames): + """Copies last published files to temporary out_dir. + + These are base of files which will be extended/fixed for specific + frames. + Renames published file to expected file name based on frame, eg. + test_project_test_asset_subset_v005.1001.exr > new_render.1001.exr + """ + last_published = instance.data["last_version_published_files"] + last_published_and_frames = collect_frames(last_published) + + expected_and_frames = collect_frames(expected_filenames) + frames_and_expected = {v: k for k, v in expected_and_frames.items()} + for file_path, frame in last_published_and_frames.items(): + file_path = anatomy.fill_root(file_path) + if not os.path.exists(file_path): + continue + target_file_name = frames_and_expected.get(frame) + if not target_file_name: + continue + + out_path = os.path.join(out_dir, target_file_name) + self.log.debug("Copying '{}' -> '{}'".format(file_path, out_path)) + shutil.copy(file_path, out_path) + + # TODO shouldn't this be uncommented + # instance.context.data["cleanupFullPaths"].append(out_path) + + def _get_frames_to_render(self, frames_to_fix): + """Return list of frame range tuples to render + + Args: + frames_to_fix (str): specific or range of frames to be rerendered + (1005, 1009-1010) + Returns: + (list): [(1005, 1005), (1009-1010)] + """ + frames_to_render = [] + + for frame_range in frames_to_fix.split(","): + if frame_range.isdigit(): + render_first_frame = frame_range + render_last_frame = frame_range + elif '-' in frame_range: + frames = frame_range.split('-') + render_first_frame = int(frames[0]) + render_last_frame = int(frames[1]) + else: + raise ValueError("Wrong format of frames to fix {}" + .format(frames_to_fix)) + frames_to_render.append((render_first_frame, + render_last_frame)) + return frames_to_render diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_data.py similarity index 97% rename from openpype/hosts/nuke/plugins/publish/extract_review_data.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_review_data.py index c221af40fb..258a019319 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_data.py @@ -2,7 +2,7 @@ from pprint import pformat import pyblish.api -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractReviewData(publish.Extractor): diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_data_lut.py similarity index 93% rename from openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_review_data_lut.py index b007f90f6c..0674a2dd55 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_data_lut.py @@ -1,9 +1,9 @@ import os import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import maintained_selection +from ayon_core.pipeline 
import publish +from ayon_core.hosts.nuke.api import plugin +from ayon_core.hosts.nuke.api.lib import maintained_selection class ExtractReviewDataLut(publish.Extractor): diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_intermediates.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py similarity index 97% rename from openpype/hosts/nuke/plugins/publish/extract_review_intermediates.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py index a02a807206..a00c1c593f 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_intermediates.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py @@ -3,9 +3,9 @@ from pprint import pformat import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import maintained_selection +from ayon_core.pipeline import publish +from ayon_core.hosts.nuke.api import plugin +from ayon_core.hosts.nuke.api.lib import maintained_selection class ExtractReviewIntermediates(publish.Extractor): diff --git a/openpype/hosts/nuke/plugins/publish/extract_script_save.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_script_save.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/extract_script_save.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_script_save.py diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_slate_frame.py similarity index 99% rename from openpype/hosts/nuke/plugins/publish/extract_slate_frame.py rename to client/ayon_core/hosts/nuke/plugins/publish/extract_slate_frame.py index 5816434f2b..0c4823b1aa 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -6,8 +6,8 @@ import pyblish.api import six -from openpype.pipeline import publish -from openpype.hosts.nuke.api import ( +from ayon_core.pipeline import publish +from ayon_core.hosts.nuke.api import ( maintained_selection, duplicate_node, get_view_process_node diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_asset_context.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_asset_context.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_asset_context.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_asset_context.xml diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_backdrop.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_backdrop.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_backdrop.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_backdrop.xml diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_gizmo.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_gizmo.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_gizmo.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_gizmo.xml diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_knobs.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_knobs.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_knobs.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_knobs.xml diff --git 
a/openpype/hosts/nuke/plugins/publish/help/validate_output_resolution.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_output_resolution.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_output_resolution.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_output_resolution.xml diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_proxy_mode.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_proxy_mode.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_proxy_mode.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_proxy_mode.xml diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_rendered_frames.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_rendered_frames.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_rendered_frames.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_rendered_frames.xml diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_script_attributes.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_script_attributes.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_script_attributes.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_script_attributes.xml diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml b/client/ayon_core/hosts/nuke/plugins/publish/help/validate_write_nodes.xml similarity index 100% rename from openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml rename to client/ayon_core/hosts/nuke/plugins/publish/help/validate_write_nodes.xml diff --git a/openpype/hosts/nuke/plugins/publish/increment_script_version.py b/client/ayon_core/hosts/nuke/plugins/publish/increment_script_version.py similarity index 93% rename from openpype/hosts/nuke/plugins/publish/increment_script_version.py rename to client/ayon_core/hosts/nuke/plugins/publish/increment_script_version.py index b854dc0aa1..6b0be42ba1 100644 --- a/openpype/hosts/nuke/plugins/publish/increment_script_version.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/increment_script_version.py @@ -17,7 +17,7 @@ def process(self, context): assert all(result["success"] for result in context.data["results"]), ( "Publishing not successful so version is not increased.") - from openpype.lib import version_up + from ayon_core.lib import version_up path = context.data["currentFile"] nuke.scriptSaveAs(version_up(path)) self.log.info('Incrementing script version') diff --git a/openpype/hosts/nuke/plugins/publish/remove_ouput_node.py b/client/ayon_core/hosts/nuke/plugins/publish/remove_ouput_node.py similarity index 100% rename from openpype/hosts/nuke/plugins/publish/remove_ouput_node.py rename to client/ayon_core/hosts/nuke/plugins/publish/remove_ouput_node.py diff --git a/openpype/hosts/nuke/plugins/publish/validate_asset_context.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_asset_context.py similarity index 96% rename from openpype/hosts/nuke/plugins/publish/validate_asset_context.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_asset_context.py index 731645a11c..b4814c6a00 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_asset_context.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_asset_context.py @@ -4,13 +4,13 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( 
RepairAction, ValidateContentsOrder, PublishXmlValidationError, OptionalPyblishPluginMixin ) -from openpype.hosts.nuke.api import SelectInstanceNodeAction +from ayon_core.hosts.nuke.api import SelectInstanceNodeAction class ValidateCorrectAssetContext( diff --git a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_backdrop.py similarity index 97% rename from openpype/hosts/nuke/plugins/publish/validate_backdrop.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_backdrop.py index 761b080caa..22344c661e 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_backdrop.py @@ -1,8 +1,8 @@ import nuke import pyblish -from openpype.hosts.nuke import api as napi +from ayon_core.hosts.nuke import api as napi -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishXmlValidationError, OptionalPyblishPluginMixin diff --git a/openpype/hosts/nuke/plugins/publish/validate_exposed_knobs.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_exposed_knobs.py similarity index 93% rename from openpype/hosts/nuke/plugins/publish/validate_exposed_knobs.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_exposed_knobs.py index fe5644f0c9..9111bcdc2c 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_exposed_knobs.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_exposed_knobs.py @@ -1,8 +1,8 @@ import pyblish.api -from openpype.pipeline.publish import get_errored_instances_from_context -from openpype.hosts.nuke.api.lib import link_knobs -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import get_errored_instances_from_context +from ayon_core.hosts.nuke.api.lib import link_knobs +from ayon_core.pipeline.publish import ( OptionalPyblishPluginMixin, PublishValidationError ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_gizmo.py similarity index 95% rename from openpype/hosts/nuke/plugins/publish/validate_gizmo.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_gizmo.py index 878d938bea..2cdcb90d70 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_gizmo.py @@ -1,6 +1,6 @@ import pyblish -from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.nuke import api as napi +from ayon_core.pipeline import PublishXmlValidationError +from ayon_core.hosts.nuke import api as napi import nuke diff --git a/openpype/hosts/nuke/plugins/publish/validate_knobs.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_knobs.py similarity index 98% rename from openpype/hosts/nuke/plugins/publish/validate_knobs.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_knobs.py index db21cdc7c5..84efebab53 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_knobs.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_knobs.py @@ -2,7 +2,7 @@ import six import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( RepairContextAction, PublishXmlValidationError, ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_output_resolution.py similarity index 96% rename from openpype/hosts/nuke/plugins/publish/validate_output_resolution.py rename to 
client/ayon_core/hosts/nuke/plugins/publish/validate_output_resolution.py index ff6d73c6ec..e8a00d2294 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_output_resolution.py @@ -1,8 +1,8 @@ import pyblish.api -from openpype.hosts.nuke import api as napi -from openpype.pipeline.publish import RepairAction -from openpype.pipeline import ( +from ayon_core.hosts.nuke import api as napi +from ayon_core.pipeline.publish import RepairAction +from ayon_core.pipeline import ( PublishXmlValidationError, OptionalPyblishPluginMixin ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_proxy_mode.py similarity index 92% rename from openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_proxy_mode.py index c26a03f31a..26e54295c9 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_proxy_mode.py @@ -1,6 +1,6 @@ import pyblish import nuke -from openpype.pipeline import PublishXmlValidationError +from ayon_core.pipeline import PublishXmlValidationError class FixProxyMode(pyblish.api.Action): diff --git a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_rendered_frames.py similarity index 97% rename from openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_rendered_frames.py index 64bf69b69b..852267f68c 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_rendered_frames.py @@ -1,8 +1,8 @@ import os import pyblish.api import clique -from openpype.pipeline import PublishXmlValidationError -from openpype.pipeline.publish import get_errored_instances_from_context +from ayon_core.pipeline import PublishXmlValidationError +from ayon_core.pipeline.publish import get_errored_instances_from_context class RepairActionBase(pyblish.api.Action): diff --git a/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_script_attributes.py similarity index 95% rename from openpype/hosts/nuke/plugins/publish/validate_script_attributes.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_script_attributes.py index 57bfce7993..c4974817bd 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/validate_script_attributes.py @@ -1,11 +1,11 @@ from copy import deepcopy import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishXmlValidationError, OptionalPyblishPluginMixin ) -from openpype.pipeline.publish import RepairAction -from openpype.hosts.nuke.api.lib import ( +from ayon_core.pipeline.publish import RepairAction +from ayon_core.hosts.nuke.api.lib import ( WorkfileSettings ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py b/client/ayon_core/hosts/nuke/plugins/publish/validate_write_nodes.py similarity index 96% rename from openpype/hosts/nuke/plugins/publish/validate_write_nodes.py rename to client/ayon_core/hosts/nuke/plugins/publish/validate_write_nodes.py index f490b580d6..4274d68826 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py +++ 
b/client/ayon_core/hosts/nuke/plugins/publish/validate_write_nodes.py @@ -1,14 +1,14 @@ from collections import defaultdict import pyblish.api -from openpype.pipeline.publish import get_errored_instances_from_context -from openpype.hosts.nuke.api.lib import ( +from ayon_core.pipeline.publish import get_errored_instances_from_context +from ayon_core.hosts.nuke.api.lib import ( get_write_node_template_attr, set_node_knobs_from_settings, color_gui_to_int ) -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( PublishXmlValidationError, OptionalPyblishPluginMixin ) diff --git a/openpype/hosts/nuke/plugins/create/__init__.py b/client/ayon_core/hosts/nuke/startup/__init__.py similarity index 100% rename from openpype/hosts/nuke/plugins/create/__init__.py rename to client/ayon_core/hosts/nuke/startup/__init__.py diff --git a/openpype/hosts/nuke/startup/clear_rendered.py b/client/ayon_core/hosts/nuke/startup/clear_rendered.py similarity index 87% rename from openpype/hosts/nuke/startup/clear_rendered.py rename to client/ayon_core/hosts/nuke/startup/clear_rendered.py index 744af71034..8072aae14f 100644 --- a/openpype/hosts/nuke/startup/clear_rendered.py +++ b/client/ayon_core/hosts/nuke/startup/clear_rendered.py @@ -1,6 +1,6 @@ import os -from openpype.lib import Logger +from ayon_core.lib import Logger def clear_rendered(dir_path): diff --git a/openpype/hosts/nuke/startup/custom_write_node.py b/client/ayon_core/hosts/nuke/startup/custom_write_node.py similarity index 98% rename from openpype/hosts/nuke/startup/custom_write_node.py rename to client/ayon_core/hosts/nuke/startup/custom_write_node.py index ea53725834..01e255d0c0 100644 --- a/openpype/hosts/nuke/startup/custom_write_node.py +++ b/client/ayon_core/hosts/nuke/startup/custom_write_node.py @@ -2,8 +2,8 @@ import os import nuke import nukescripts -from openpype.pipeline import Anatomy -from openpype.hosts.nuke.api.lib import ( +from ayon_core.pipeline import Anatomy +from ayon_core.hosts.nuke.api.lib import ( set_node_knobs_from_settings, get_nuke_imageio_settings ) diff --git a/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py b/client/ayon_core/hosts/nuke/startup/frame_setting_for_read_nodes.py similarity index 100% rename from openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py rename to client/ayon_core/hosts/nuke/startup/frame_setting_for_read_nodes.py diff --git a/client/ayon_core/hosts/nuke/startup/menu.py b/client/ayon_core/hosts/nuke/startup/menu.py new file mode 100644 index 0000000000..2559e2142a --- /dev/null +++ b/client/ayon_core/hosts/nuke/startup/menu.py @@ -0,0 +1,5 @@ +from ayon_core.pipeline import install_host +from ayon_core.hosts.nuke.api import NukeHost + +host = NukeHost() +install_host(host) diff --git a/openpype/hosts/nuke/startup/write_to_read.py b/client/ayon_core/hosts/nuke/startup/write_to_read.py similarity index 99% rename from openpype/hosts/nuke/startup/write_to_read.py rename to client/ayon_core/hosts/nuke/startup/write_to_read.py index b7add40f47..8a8ffb8d3d 100644 --- a/openpype/hosts/nuke/startup/write_to_read.py +++ b/client/ayon_core/hosts/nuke/startup/write_to_read.py @@ -2,7 +2,7 @@ import os import glob import nuke -from openpype.lib import Logger +from ayon_core.lib import Logger log = Logger.get_logger(__name__) SINGLE_FILE_FORMATS = ['avi', 'mp4', 'mxf', 'mov', 'mpg', 'mpeg', 'wmv', 'm4v', diff --git a/openpype/hosts/nuke/vendor/google/protobuf/__init__.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/__init__.py similarity index 100% rename 
from openpype/hosts/nuke/vendor/google/protobuf/__init__.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/__init__.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/any_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/any_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/any_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/any_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/api_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/api_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/api_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/api_pb2.py diff --git a/openpype/hosts/nuke/startup/__init__.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/compiler/__init__.py similarity index 100% rename from openpype/hosts/nuke/startup/__init__.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/compiler/__init__.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/compiler/plugin_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/compiler/plugin_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/compiler/plugin_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/compiler/plugin_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/descriptor.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor_database.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor_database.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/descriptor_database.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor_database.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/descriptor_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/descriptor_pool.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor_pool.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/descriptor_pool.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/descriptor_pool.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/duration_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/duration_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/duration_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/duration_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/empty_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/empty_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/empty_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/empty_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/field_mask_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/field_mask_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/field_mask_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/field_mask_pb2.py diff 
--git a/openpype/hosts/nuke/vendor/google/protobuf/compiler/__init__.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/__init__.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/compiler/__init__.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/__init__.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/_parameterized.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/_parameterized.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/_parameterized.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/_parameterized.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/api_implementation.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/api_implementation.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/api_implementation.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/api_implementation.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/builder.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/builder.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/builder.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/builder.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/containers.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/containers.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/containers.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/containers.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/decoder.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/decoder.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/decoder.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/decoder.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/encoder.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/encoder.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/encoder.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/encoder.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/enum_type_wrapper.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/enum_type_wrapper.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/enum_type_wrapper.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/enum_type_wrapper.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/extension_dict.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/extension_dict.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/extension_dict.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/extension_dict.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/message_listener.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/message_listener.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/message_listener.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/message_listener.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/message_set_extensions_pb2.py 
b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/message_set_extensions_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/message_set_extensions_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/message_set_extensions_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/missing_enum_values_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/missing_enum_values_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/missing_enum_values_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/missing_enum_values_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/more_extensions_dynamic_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/more_extensions_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/more_extensions_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/more_extensions_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/more_messages_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/more_messages_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/more_messages_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/more_messages_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/no_package_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/no_package_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/no_package_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/no_package_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/python_message.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/python_message.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/python_message.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/python_message.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/type_checkers.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/type_checkers.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/type_checkers.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/type_checkers.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/well_known_types.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/well_known_types.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/well_known_types.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/well_known_types.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/wire_format.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/wire_format.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/wire_format.py rename to 
client/ayon_core/hosts/nuke/vendor/google/protobuf/internal/wire_format.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/json_format.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/json_format.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/json_format.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/json_format.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/message.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/message.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/message.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/message.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/message_factory.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/message_factory.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/message_factory.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/message_factory.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/proto_builder.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/proto_builder.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/proto_builder.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/proto_builder.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/internal/__init__.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/pyext/__init__.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/internal/__init__.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/pyext/__init__.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/pyext/cpp_message.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/pyext/cpp_message.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/pyext/cpp_message.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/pyext/cpp_message.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/pyext/python_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/pyext/python_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/pyext/python_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/pyext/python_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/reflection.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/reflection.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/reflection.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/reflection.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/service.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/service.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/service.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/service.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/service_reflection.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/service_reflection.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/service_reflection.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/service_reflection.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/source_context_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/source_context_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/source_context_pb2.py rename to 
client/ayon_core/hosts/nuke/vendor/google/protobuf/source_context_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/struct_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/struct_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/struct_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/struct_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/symbol_database.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/symbol_database.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/symbol_database.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/symbol_database.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/text_encoding.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/text_encoding.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/text_encoding.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/text_encoding.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/text_format.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/text_format.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/text_format.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/text_format.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/timestamp_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/timestamp_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/timestamp_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/timestamp_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/type_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/type_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/type_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/type_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/pyext/__init__.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/util/__init__.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/pyext/__init__.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/util/__init__.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/util/json_format_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/util/json_format_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/util/json_format_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/util/json_format_proto3_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/util/json_format_proto3_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/util/json_format_proto3_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/util/json_format_proto3_pb2.py diff --git a/openpype/hosts/nuke/vendor/google/protobuf/wrappers_pb2.py b/client/ayon_core/hosts/nuke/vendor/google/protobuf/wrappers_pb2.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/wrappers_pb2.py rename to client/ayon_core/hosts/nuke/vendor/google/protobuf/wrappers_pb2.py diff --git a/openpype/hosts/photoshop/__init__.py b/client/ayon_core/hosts/photoshop/__init__.py similarity index 100% rename from openpype/hosts/photoshop/__init__.py rename to client/ayon_core/hosts/photoshop/__init__.py diff --git a/client/ayon_core/hosts/photoshop/addon.py 
b/client/ayon_core/hosts/photoshop/addon.py
new file mode 100644
index 0000000000..0c7efdb317
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/addon.py
@@ -0,0 +1,25 @@
+import os
+from ayon_core.modules import OpenPypeModule, IHostAddon
+
+PHOTOSHOP_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class PhotoshopAddon(OpenPypeModule, IHostAddon):
+    name = "photoshop"
+    host_name = "photoshop"
+
+    def initialize(self, module_settings):
+        self.enabled = True
+
+    def add_implementation_envs(self, env, _app):
+        """Modify environments to contain all required for implementation."""
+        defaults = {
+            "AYON_LOG_NO_COLORS": "1",
+            "WEBSOCKET_URL": "ws://localhost:8099/ws/"
+        }
+        for key, value in defaults.items():
+            if not env.get(key):
+                env[key] = value
+
+    def get_workfile_extensions(self):
+        return [".psd", ".psb"]
diff --git a/client/ayon_core/hosts/photoshop/api/README.md b/client/ayon_core/hosts/photoshop/api/README.md
new file mode 100644
index 0000000000..72e2217829
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/api/README.md
@@ -0,0 +1,257 @@
+# Photoshop Integration
+
+## Setup
+
+The Photoshop integration requires two components to work: the `extension` and the `server`.
+
+### Extension
+
+To install the extension, download the [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
+
+```
+ExManCmd /install {path to addon}/api/extension.zxp
+```
+
+### Server
+
+The easiest way to launch both the server and Photoshop is:
+
+```
+python -c ^"import ayon_core.hosts.photoshop;ayon_core.hosts.photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
+```
+
+`ayon_core.hosts.photoshop.launch` launches the application and the server, and also closes the server when Photoshop exits.
+
+## Usage
+
+The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched, you should be presented with a panel like this:
+
+![Ayon Panel](panel.png "AYON Panel")
+
+
+## Developing
+
+### Extension
+When developing the extension, you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).
+
+When signing the extension, you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).
+
+```
+ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12
+ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon
+```
+
+### Plugin Examples
+
+These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py). A quick way to verify the connection to Photoshop is shown below, before the plugin examples.
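+
+This is a minimal, hypothetical sketch rather than part of the official API docs; it assumes the extension panel has already been opened, since `stub()` raises `ConnectionNotEstablishedYet` until the panel connects:
+
+```python
+from ayon_core.hosts.photoshop import api as photoshop
+
+# stub() returns a PhotoshopServerStub bound to the websocket
+# connection created when the extension panel opens.
+ps = photoshop.stub()
+print(ps.get_active_document_name())
+```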
+
+#### Creator Plugin
+```python
+from avalon import photoshop
+
+
+class CreateImage(photoshop.Creator):
+    """Image folder for publish."""
+
+    name = "imageDefault"
+    label = "Image"
+    family = "image"
+
+    def __init__(self, *args, **kwargs):
+        super(CreateImage, self).__init__(*args, **kwargs)
+```
+
+#### Collector Plugin
+```python
+import pythoncom
+
+import pyblish.api
+
+from ayon_core.hosts.photoshop import api as photoshop
+
+
+class CollectInstances(pyblish.api.ContextPlugin):
+    """Gather instances by LayerSet and file metadata
+
+    This collector takes into account assets that are associated with
+    a LayerSet and marked with a unique identifier;
+
+    Identifier:
+        id (str): "pyblish.avalon.instance"
+    """
+
+    label = "Instances"
+    order = pyblish.api.CollectorOrder
+    hosts = ["photoshop"]
+    families_mapping = {
+        "image": []
+    }
+
+    def process(self, context):
+        # Necessary call when running in a different thread, which
+        # pyblish-qml may be.
+        pythoncom.CoInitialize()
+
+        photoshop_client = photoshop.stub()
+        layers = photoshop_client.get_layers()
+        layers_meta = photoshop_client.get_layers_metadata()
+        for layer in layers:
+            layer_data = photoshop_client.read(layer, layers_meta)
+
+            # Skip layers without metadata.
+            if layer_data is None:
+                continue
+
+            # Skip containers.
+            if "container" in layer_data["id"]:
+                continue
+
+            # child_layers = [*layer.Layers]
+            # self.log.debug("child_layers {}".format(child_layers))
+            # if not child_layers:
+            #     self.log.info("%s skipped, it was empty." % layer.Name)
+            #     continue
+
+            instance = context.create_instance(layer.name)
+            instance.append(layer)
+            instance.data.update(layer_data)
+            instance.data["families"] = self.families_mapping[
+                layer_data["family"]
+            ]
+            instance.data["publish"] = layer.visible
+
+            # Produce diagnostic message for any graphical
+            # user interface interested in visualising it.
+            self.log.info("Found: \"%s\" " % instance.data["name"])
+```
+
+#### Extractor Plugin
+```python
+import os
+
+from ayon_core.pipeline import publish
+from ayon_core.hosts.photoshop import api as photoshop
+
+
+class ExtractImage(publish.Extractor):
+    """Produce a flattened image file from an instance
+
+    This plug-in takes into account only the layers in the group.
+    """
+
+    label = "Extract Image"
+    hosts = ["photoshop"]
+    families = ["image"]
+    formats = ["png", "jpg"]
+
+    def process(self, instance):
+
+        staging_dir = self.staging_dir(instance)
+        self.log.info("Outputting image to {}".format(staging_dir))
+
+        # Perform extraction
+        stub = photoshop.stub()
+        files = {}
+        with photoshop.maintained_selection():
+            self.log.info("Extracting %s" % str(list(instance)))
+            with photoshop.maintained_visibility():
+                # Hide all other layers.
+                extract_ids = set([ll.id for ll in stub.
+                    get_layers_in_layers([instance[0]])])
+
+                for layer in stub.get_layers():
+                    # limit unnecessary calls to client
+                    if layer.visible and layer.id not in extract_ids:
+                        stub.set_visible(layer.id, False)
+
+            save_options = []
+            if "png" in self.formats:
+                save_options.append('png')
+            if "jpg" in self.formats:
+                save_options.append('jpg')
+
+            file_basename = os.path.splitext(
+                stub.get_active_document_name()
+            )[0]
+            for extension in save_options:
+                _filename = "{}.{}".format(file_basename, extension)
+                files[extension] = _filename
+
+                full_filename = os.path.join(staging_dir, _filename)
+                stub.saveAs(full_filename, extension, True)
+
+        representations = []
+        for extension, filename in files.items():
+            representations.append({
+                "name": extension,
+                "ext": extension,
+                "files": filename,
+                "stagingDir": staging_dir
+            })
+        instance.data["representations"] = representations
+        instance.data["stagingDir"] = staging_dir
+
+        self.log.info(f"Extracted {instance} to {staging_dir}")
+```
+
+#### Loader Plugin
+```python
+from avalon import photoshop
+from ayon_core.pipeline import load, get_representation_path
+
+stub = photoshop.stub()
+
+
+class ImageLoader(load.LoaderPlugin):
+    """Load images
+
+    Stores the imported asset in a container named after the asset.
+    """
+
+    families = ["image"]
+    representations = ["*"]
+
+    def load(self, context, name=None, namespace=None, data=None):
+        path = self.filepath_from_context(context)
+        with photoshop.maintained_selection():
+            layer = stub.import_smart_object(path)
+
+        self[:] = [layer]
+
+        return photoshop.containerise(
+            name,
+            namespace,
+            layer,
+            context,
+            self.__class__.__name__
+        )
+
+    def update(self, container, representation):
+        layer = container.pop("layer")
+
+        with photoshop.maintained_selection():
+            stub.replace_smart_object(
+                layer, get_representation_path(representation)
+            )
+
+        stub.imprint(
+            layer, {"representation": str(representation["_id"])}
+        )
+
+    def remove(self, container):
+        container["layer"].Delete()
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+```
+
+For easier debugging of JavaScript:
+https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1
+Add `--enable-blink-features=ShadowDOMV0,CustomElementsV0` when starting Chrome, then open localhost:8078 (the port is set in `photoshop\extension\.debug`).
+
+Or use Visual Studio Code: https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
+
+Or install the CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x
+
+## Resources
+ - https://github.com/lohriialo/photoshop-scripting-python
+ - https://www.adobe.com/devnet/photoshop/scripting.html
+ - https://github.com/Adobe-CEP/Getting-Started-guides
+ - https://github.com/Adobe-CEP/CEP-Resources
diff --git a/openpype/hosts/photoshop/api/__init__.py b/client/ayon_core/hosts/photoshop/api/__init__.py
similarity index 100%
rename from openpype/hosts/photoshop/api/__init__.py
rename to client/ayon_core/hosts/photoshop/api/__init__.py
diff --git a/openpype/hosts/photoshop/api/extension.zxp b/client/ayon_core/hosts/photoshop/api/extension.zxp
similarity index 100%
rename from openpype/hosts/photoshop/api/extension.zxp
rename to client/ayon_core/hosts/photoshop/api/extension.zxp
diff --git a/openpype/hosts/photoshop/api/extension/.debug b/client/ayon_core/hosts/photoshop/api/extension/.debug
similarity index 100%
rename from openpype/hosts/photoshop/api/extension/.debug
rename to
client/ayon_core/hosts/photoshop/api/extension/.debug diff --git a/openpype/hosts/photoshop/api/extension/CSXS/manifest.xml b/client/ayon_core/hosts/photoshop/api/extension/CSXS/manifest.xml similarity index 100% rename from openpype/hosts/photoshop/api/extension/CSXS/manifest.xml rename to client/ayon_core/hosts/photoshop/api/extension/CSXS/manifest.xml diff --git a/openpype/hosts/photoshop/api/extension/client/CSInterface.js b/client/ayon_core/hosts/photoshop/api/extension/client/CSInterface.js similarity index 100% rename from openpype/hosts/photoshop/api/extension/client/CSInterface.js rename to client/ayon_core/hosts/photoshop/api/extension/client/CSInterface.js diff --git a/openpype/hosts/photoshop/api/extension/client/client.js b/client/ayon_core/hosts/photoshop/api/extension/client/client.js similarity index 100% rename from openpype/hosts/photoshop/api/extension/client/client.js rename to client/ayon_core/hosts/photoshop/api/extension/client/client.js diff --git a/openpype/hosts/photoshop/api/extension/client/loglevel.min.js b/client/ayon_core/hosts/photoshop/api/extension/client/loglevel.min.js similarity index 100% rename from openpype/hosts/photoshop/api/extension/client/loglevel.min.js rename to client/ayon_core/hosts/photoshop/api/extension/client/loglevel.min.js diff --git a/openpype/hosts/photoshop/api/extension/client/wsrpc.js b/client/ayon_core/hosts/photoshop/api/extension/client/wsrpc.js similarity index 100% rename from openpype/hosts/photoshop/api/extension/client/wsrpc.js rename to client/ayon_core/hosts/photoshop/api/extension/client/wsrpc.js diff --git a/openpype/hosts/photoshop/api/extension/client/wsrpc.min.js b/client/ayon_core/hosts/photoshop/api/extension/client/wsrpc.min.js similarity index 100% rename from openpype/hosts/photoshop/api/extension/client/wsrpc.min.js rename to client/ayon_core/hosts/photoshop/api/extension/client/wsrpc.min.js diff --git a/openpype/hosts/photoshop/api/extension/host/JSX.js b/client/ayon_core/hosts/photoshop/api/extension/host/JSX.js similarity index 100% rename from openpype/hosts/photoshop/api/extension/host/JSX.js rename to client/ayon_core/hosts/photoshop/api/extension/host/JSX.js diff --git a/openpype/hosts/photoshop/api/extension/host/index.jsx b/client/ayon_core/hosts/photoshop/api/extension/host/index.jsx similarity index 100% rename from openpype/hosts/photoshop/api/extension/host/index.jsx rename to client/ayon_core/hosts/photoshop/api/extension/host/index.jsx diff --git a/openpype/hosts/photoshop/api/extension/host/json.js b/client/ayon_core/hosts/photoshop/api/extension/host/json.js similarity index 100% rename from openpype/hosts/photoshop/api/extension/host/json.js rename to client/ayon_core/hosts/photoshop/api/extension/host/json.js diff --git a/openpype/hosts/photoshop/api/extension/icons/ayon_logo.png b/client/ayon_core/hosts/photoshop/api/extension/icons/ayon_logo.png similarity index 100% rename from openpype/hosts/photoshop/api/extension/icons/ayon_logo.png rename to client/ayon_core/hosts/photoshop/api/extension/icons/ayon_logo.png diff --git a/openpype/hosts/photoshop/api/extension/index.html b/client/ayon_core/hosts/photoshop/api/extension/index.html similarity index 100% rename from openpype/hosts/photoshop/api/extension/index.html rename to client/ayon_core/hosts/photoshop/api/extension/index.html diff --git a/client/ayon_core/hosts/photoshop/api/launch_logic.py b/client/ayon_core/hosts/photoshop/api/launch_logic.py new file mode 100644 index 0000000000..adf90be311 --- /dev/null +++ 
b/client/ayon_core/hosts/photoshop/api/launch_logic.py
@@ -0,0 +1,405 @@
+import os
+import subprocess
+import collections
+import asyncio
+
+from wsrpc_aiohttp import (
+    WebSocketRoute,
+    WebSocketAsync
+)
+
+from qtpy import QtCore
+
+from ayon_core.lib import Logger, StringTemplate
+from ayon_core.pipeline import (
+    registered_host,
+    Anatomy,
+)
+from ayon_core.pipeline.workfile import (
+    get_workfile_template_key_from_context,
+    get_last_workfile,
+)
+from ayon_core.pipeline.template_data import get_template_data_with_names
+from ayon_core.tools.utils import host_tools
+from ayon_core.tools.adobe_webserver.app import WebServerTool
+from ayon_core.pipeline.context_tools import change_current_context
+from ayon_core.client import get_asset_by_name
+
+from .ws_stub import PhotoshopServerStub
+
+log = Logger.get_logger(__name__)
+
+
+class ConnectionNotEstablishedYet(Exception):
+    pass
+
+
+class MainThreadItem:
+    """Structure to store information about a callback in the main thread.
+
+    The item should be used to execute a callback in the main thread,
+    which may be needed for execution of Qt objects.
+
+    The item stores the callback (a callable), its arguments and keyword
+    arguments. It also holds information about its processing state.
+    """
+    not_set = object()
+
+    def __init__(self, callback, *args, **kwargs):
+        self._done = False
+        self._exception = self.not_set
+        self._result = self.not_set
+        self._callback = callback
+        self._args = args
+        self._kwargs = kwargs
+
+    @property
+    def done(self):
+        return self._done
+
+    @property
+    def exception(self):
+        return self._exception
+
+    @property
+    def result(self):
+        return self._result
+
+    def execute(self):
+        """Execute callback and store its result.
+
+        Method must be called from the main thread. The item is marked as
+        `done` when the callback execution has finished. Stores the output
+        of the callback, or exception information when the callback
+        raises one.
+        """
+        log.debug("Executing process in main thread")
+        if self.done:
+            log.warning("- item is already processed")
+            return
+
+        log.info("Running callback: {}".format(str(self._callback)))
+        try:
+            result = self._callback(*self._args, **self._kwargs)
+            self._result = result
+
+        except Exception as exc:
+            self._exception = exc
+
+        finally:
+            self._done = True
+
+
+def stub():
+    """
+    Convenience function to get the server RPC stub for calling methods
+    directed at the host (Photoshop).
+    It expects an already created connection, started from the client.
+    The connection is currently created when the panel is opened
+    (PS: Window > Extensions > Ayon).
+    :return: PhotoshopServerStub on which functions can be called
+    """
+    ps_stub = PhotoshopServerStub()
+    if not ps_stub.client:
+        raise ConnectionNotEstablishedYet("Connection is not created yet")
+
+    return ps_stub
+
+
+def show_tool_by_name(tool_name):
+    kwargs = {}
+    if tool_name == "loader":
+        kwargs["use_context"] = True
+
+    host_tools.show_tool_by_name(tool_name, **kwargs)
+
+
+class ProcessLauncher(QtCore.QObject):
+    route_name = "Photoshop"
+    _main_thread_callbacks = collections.deque()
+
+    def __init__(self, subprocess_args):
+        self._subprocess_args = subprocess_args
+        self._log = None
+
+        super(ProcessLauncher, self).__init__()
+
+        # Keep track if launcher was already started
+        self._started = False
+
+        self._process = None
+        self._websocket_server = None
+
+        start_process_timer = QtCore.QTimer()
+        start_process_timer.setInterval(100)
+
+        loop_timer = QtCore.QTimer()
+        loop_timer.setInterval(200)
+
+        start_process_timer.timeout.connect(self._on_start_process_timer)
+        loop_timer.timeout.connect(self._on_loop_timer)
+
+        self._start_process_timer = start_process_timer
+        self._loop_timer = loop_timer
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(
+                "{}-launcher".format(self.route_name)
+            )
+        return self._log
+
+    @property
+    def websocket_server_is_running(self):
+        if self._websocket_server is not None:
+            return self._websocket_server.is_running
+        return False
+
+    @property
+    def is_process_running(self):
+        if self._process is not None:
+            return self._process.poll() is None
+        return False
+
+    @property
+    def is_host_connected(self):
+        """Returns True if connected, False if the app is not running at
+        all, None if the process runs but no connection is established yet.
+        """
+        if not self.is_process_running:
+            return False
+
+        try:
+            _stub = stub()
+            if _stub:
+                return True
+        except Exception:
+            pass
+
+        return None
+
+    @classmethod
+    def execute_in_main_thread(cls, callback, *args, **kwargs):
+        item = MainThreadItem(callback, *args, **kwargs)
+        cls._main_thread_callbacks.append(item)
+        return item
+
+    def start(self):
+        if self._started:
+            return
+        self.log.info("Started launch logic of Photoshop")
+        self._started = True
+        self._start_process_timer.start()
+
+    def exit(self):
+        """ Exit whole application. """
+        if self._start_process_timer.isActive():
+            self._start_process_timer.stop()
+        if self._loop_timer.isActive():
+            self._loop_timer.stop()
+
+        if self._websocket_server is not None:
+            self._websocket_server.stop()
+
+        if self._process:
+            self._process.kill()
+            self._process.wait()
+
+        QtCore.QCoreApplication.exit()
+
+    def _on_loop_timer(self):
+        # TODO find better way and catch errors
+        # Run only callbacks that are in queue at the moment
+        cls = self.__class__
+        for _ in range(len(cls._main_thread_callbacks)):
+            if cls._main_thread_callbacks:
+                item = cls._main_thread_callbacks.popleft()
+                item.execute()
+
+        if not self.is_process_running:
+            self.log.info("Host process is not running. Closing")
+            self.exit()
+
+        elif not self.websocket_server_is_running:
+            self.log.info("Websocket server is not running. Closing")
+            self.exit()
+
+    def _on_start_process_timer(self):
+        # TODO add try except validations for each part in this method
+        # Start server as first thing
+        if self._websocket_server is None:
+            self._init_server()
+            return
+
+        # TODO add waiting time
+        # Wait for webserver
+        if not self.websocket_server_is_running:
+            return
+
+        # Start application process
+        if self._process is None:
+            self._start_process()
+            self.log.info("Waiting for host to connect")
+            return
+
+        # TODO add waiting time
+        # Wait until host is connected
+        if self.is_host_connected:
+            self._start_process_timer.stop()
+            self._loop_timer.start()
+        elif (
+            not self.is_process_running
+            or not self.websocket_server_is_running
+        ):
+            self.exit()
+
+    def _init_server(self):
+        if self._websocket_server is not None:
+            return
+
+        self.log.debug(
+            "Initialization of websocket server for host communication"
+        )
+
+        self._websocket_server = websocket_server = WebServerTool()
+        if websocket_server.port_occupied(
+            websocket_server.host_name,
+            websocket_server.port
+        ):
+            self.log.info(
+                "Server already running, sending actual context and exit."
+            )
+            asyncio.run(websocket_server.send_context_change(self.route_name))
+            self.exit()
+            return
+
+        # Add Websocket route
+        websocket_server.add_route("*", "/ws/", WebSocketAsync)
+        # Add photoshop route to websocket handler
+
+        print("Adding {} route".format(self.route_name))
+        WebSocketAsync.add_route(
+            self.route_name, PhotoshopRoute
+        )
+        self.log.info("Starting websocket server for host communication")
+        websocket_server.start_server()
+
+    def _start_process(self):
+        if self._process is not None:
+            return
+        self.log.info("Starting host process")
+        try:
+            self._process = subprocess.Popen(
+                self._subprocess_args,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL
+            )
+        except Exception:
+            self.log.info("Failed to start host process", exc_info=True)
+            self.exit()
+
+
+class PhotoshopRoute(WebSocketRoute):
+    """
+    One route, mimicking an external application (like Harmony, etc.).
+    All functions can be called from the client.
+    The 'do_notify' function calls a function on the client - mimicking
+    a notification after a long running job on the server or similar.
+    """
+    instance = None
+
+    def init(self, **kwargs):
+        # Python __init__ must return "self".
+        # This method might return anything.
+        log.debug("someone called Photoshop route")
+        self.instance = self
+        return kwargs
+
+    # server functions
+    async def ping(self):
+        log.debug("someone called Photoshop route ping")
+
+    # This method calls function on the client side
+    # client functions
+    async def set_context(self, project, asset, task):
+        """
+        Sets 'project' and 'asset' to envs, i.e. sets the context.
+
+        Opens the last workfile from that context if it exists.
+
+        Args:
+            project (str)
+            asset (str)
+            task (str)
+        """
+        log.info("Setting context change")
+        log.info(f"project {project} asset {asset} task {task}")
+
+        asset_doc = get_asset_by_name(project, asset)
+        change_current_context(asset_doc, task)
+
+        last_workfile_path = self._get_last_workfile_path(project,
+                                                          asset,
+                                                          task)
+        if last_workfile_path and os.path.exists(last_workfile_path):
+            ProcessLauncher.execute_in_main_thread(
+                lambda: stub().open(last_workfile_path))
+
+
+    async def read(self):
+        log.debug("photoshop.read - client calls server, server calls "
+                  "photoshop client")
+        return await self.socket.call('photoshop.read')
+
+    # panel routes for tools
+    async def workfiles_route(self):
+        self._tool_route("workfiles")
+
+    async def loader_route(self):
+        self._tool_route("loader")
+
+    async def publish_route(self):
+        self._tool_route("publisher")
+
+    async def sceneinventory_route(self):
+        self._tool_route("sceneinventory")
+
+    async def experimental_tools_route(self):
+        self._tool_route("experimental_tools")
+
+    def _tool_route(self, _tool_name):
+        """The address accessed when clicking on the buttons."""
+
+        ProcessLauncher.execute_in_main_thread(show_tool_by_name, _tool_name)
+
+        # Required return statement.
+        return "nothing"
+
+    def _get_last_workfile_path(self, project_name, asset_name, task_name):
+        """Returns the last workfile path if it exists."""
+        host = registered_host()
+        host_name = "photoshop"
+        template_key = get_workfile_template_key_from_context(
+            asset_name,
+            task_name,
+            host_name,
+            project_name=project_name
+        )
+        anatomy = Anatomy(project_name)
+
+        data = get_template_data_with_names(
+            project_name, asset_name, task_name, host_name
+        )
+        data["root"] = anatomy.roots
+
+        file_template = anatomy.templates[template_key]["file"]
+
+        # Define saving file extension
+        extensions = host.get_workfile_extensions()
+
+        folder_template = anatomy.templates[template_key]["folder"]
+        work_root = StringTemplate.format_strict_template(
+            folder_template, data
+        )
+        last_workfile_path = get_last_workfile(
+            work_root, file_template, data, extensions, True
+        )
+
+        return last_workfile_path
diff --git a/client/ayon_core/hosts/photoshop/api/lib.py b/client/ayon_core/hosts/photoshop/api/lib.py
new file mode 100644
index 0000000000..3111503e40
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/api/lib.py
@@ -0,0 +1,85 @@
+import os
+import sys
+import contextlib
+import traceback
+
+from ayon_core.lib import env_value_to_bool, Logger
+from ayon_core.addon import AddonsManager
+from ayon_core.pipeline import install_host
+from ayon_core.tools.utils import host_tools
+from ayon_core.tools.utils import get_ayon_qt_app
+from ayon_core.tests.lib import is_in_tests
+
+from .launch_logic import ProcessLauncher, stub
+
+log = Logger.get_logger(__name__)
+
+
+def safe_excepthook(*args):
+    traceback.print_exception(*args)
+
+
+def main(*subprocess_args):
+    from ayon_core.hosts.photoshop.api import PhotoshopHost
+
+    host = PhotoshopHost()
+    install_host(host)
+
+    sys.excepthook = safe_excepthook
+
+    # coloring in StdOutBroker
+    os.environ["AYON_LOG_NO_COLORS"] = "0"
+    app = get_ayon_qt_app()
+    app.setQuitOnLastWindowClosed(False)
+
+    launcher = ProcessLauncher(subprocess_args)
+    launcher.start()
+
+    if env_value_to_bool("HEADLESS_PUBLISH"):
+        manager = AddonsManager()
+        webpublisher_addon = manager["webpublisher"]
+        launcher.execute_in_main_thread(
+            webpublisher_addon.headless_publish,
+            log,
+            "ClosePS",
+            is_in_tests()
+        )
+    elif env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH",
+
+        launcher.execute_in_main_thread(
+            host_tools.show_workfiles,
+            save=env_value_to_bool("WORKFILES_SAVE_AS")
+        )
+
+    sys.exit(app.exec_())
+
+
+@contextlib.contextmanager
+def maintained_selection():
+    """Maintain selection during context."""
+    selection = stub().get_selected_layers()
+    try:
+        yield selection
+    finally:
+        stub().select_layers(selection)
+
+
+@contextlib.contextmanager
+def maintained_visibility(layers=None):
+    """Maintain visibility during context.
+
+    Args:
+        layers (list of PSItem): used for caching
+    """
+    visibility = {}
+    if not layers:
+        layers = stub().get_layers()
+    for layer in layers:
+        visibility[layer.id] = layer.visible
+    try:
+        yield
+    finally:
+        for layer in layers:
+            stub().set_visible(layer.id, visibility[layer.id])
diff --git a/openpype/hosts/photoshop/api/panel.png b/client/ayon_core/hosts/photoshop/api/panel.png
similarity index 100%
rename from openpype/hosts/photoshop/api/panel.png
rename to client/ayon_core/hosts/photoshop/api/panel.png
diff --git a/openpype/hosts/photoshop/api/panel_failure.png b/client/ayon_core/hosts/photoshop/api/panel_failure.png
similarity index 100%
rename from openpype/hosts/photoshop/api/panel_failure.png
rename to client/ayon_core/hosts/photoshop/api/panel_failure.png
diff --git a/client/ayon_core/hosts/photoshop/api/pipeline.py b/client/ayon_core/hosts/photoshop/api/pipeline.py
new file mode 100644
index 0000000000..046ec8e6ee
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/api/pipeline.py
@@ -0,0 +1,281 @@
+import os
+
+from qtpy import QtWidgets
+
+import pyblish.api
+
+from ayon_core.lib import register_event_callback, Logger
+from ayon_core.pipeline import (
+    register_loader_plugin_path,
+    register_creator_plugin_path,
+    AVALON_CONTAINER_ID,
+)
+
+from ayon_core.host import (
+    HostBase,
+    IWorkfileHost,
+    ILoadHost,
+    IPublishHost
+)
+
+from ayon_core.pipeline.load import any_outdated_containers
+from ayon_core.hosts.photoshop import PHOTOSHOP_HOST_DIR
+from ayon_core.tools.utils import get_ayon_qt_app
+
+from . import lib
+
+log = Logger.get_logger(__name__)
+
+PLUGINS_DIR = os.path.join(PHOTOSHOP_HOST_DIR, "plugins")
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
+CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
+INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+
+
+class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
+    name = "photoshop"
+
+    def install(self):
+        """Install Photoshop-specific functionality needed for integration.
+
+        This function is called automatically on calling
+        `api.install(photoshop)`.
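+
+        A minimal sketch of manual installation (assuming the standard
+        `install_host` entry point, which `lib.main` uses as well):
+            from ayon_core.pipeline import install_host
+            install_host(PhotoshopHost())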
+        """
+        log.info("Installing AYON Photoshop...")
+        pyblish.api.register_host("photoshop")
+
+        pyblish.api.register_plugin_path(PUBLISH_PATH)
+        register_loader_plugin_path(LOAD_PATH)
+        register_creator_plugin_path(CREATE_PATH)
+
+        register_event_callback("application.launched", on_application_launch)
+
+    def current_file(self):
+        try:
+            full_name = lib.stub().get_active_document_full_name()
+            if full_name and full_name != "null":
+                return os.path.normpath(full_name).replace("\\", "/")
+        except Exception:
+            pass
+
+        return None
+
+    def work_root(self, session):
+        return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
+
+    def open_workfile(self, filepath):
+        lib.stub().open(filepath)
+
+        return True
+
+    def save_workfile(self, filepath=None):
+        _, ext = os.path.splitext(filepath)
+        lib.stub().saveAs(filepath, ext[1:], True)
+
+    def get_current_workfile(self):
+        return self.current_file()
+
+    def workfile_has_unsaved_changes(self):
+        if self.current_file():
+            return not lib.stub().is_saved()
+
+        return False
+
+    def get_workfile_extensions(self):
+        return [".psd", ".psb"]
+
+    def get_containers(self):
+        return ls()
+
+    def get_context_data(self):
+        """Get stored values for context (validation enable/disable etc.)."""
+        meta = _get_stub().get_layers_metadata()
+        for item in meta:
+            if item.get("id") == "publish_context":
+                item.pop("id")
+                return item
+
+        return {}
+
+    def update_context_data(self, data, changes):
+        """Store values needed for context."""
+        item = data
+        item["id"] = "publish_context"
+        _get_stub().imprint(item["id"], item)
+
+    def list_instances(self):
+        """List all created instances to publish from current workfile.
+
+        Pulls from File > File Info.
+
+        Returns:
+            (list) of dictionaries matching instances format
+        """
+        stub = _get_stub()
+
+        if not stub:
+            return []
+
+        instances = []
+        layers_meta = stub.get_layers_metadata()
+        if layers_meta:
+            for instance in layers_meta:
+                if instance.get("id") == "pyblish.avalon.instance":
+                    instances.append(instance)
+
+        return instances
+
+    def remove_instance(self, instance):
+        """Remove instance from current workfile metadata.
+
+        Updates metadata of current file in File > File Info and removes
+        icon highlight on group layer.
+
+        Args:
+            instance (dict): instance representation from subsetmanager model
+        """
+        stub = _get_stub()
+
+        if not stub:
+            return
+
+        inst_id = instance.get("instance_id") or instance.get("uuid")  # legacy
+        if not inst_id:
+            log.warning("No instance identifier for {}".format(instance))
+            return
+
+        stub.remove_instance(inst_id)
+
+        if instance.get("members"):
+            item = stub.get_layer(instance["members"][0])
+            if item:
+                stub.rename_layer(item.id,
+                                  item.name.replace(stub.PUBLISH_ICON, ''))
+
+
+def check_inventory():
+    if not any_outdated_containers():
+        return
+
+    # Warn about outdated containers.
+    _app = get_ayon_qt_app()
+
+    message_box = QtWidgets.QMessageBox()
+    message_box.setIcon(QtWidgets.QMessageBox.Warning)
+    msg = "There are outdated containers in the scene."
+
+    message_box.setText(msg)
+    message_box.exec_()
+
+
+def on_application_launch():
+    check_inventory()
+
+
+def ls():
+    """Yields containers from the active Photoshop document.
+
+    This is the host-equivalent of api.ls(), but instead of listing
+    assets on disk, it lists assets already loaded in Photoshop; once loaded
+    they are called 'containers'.
+
+    Yields:
+        dict: container
+
+    """
+    try:
+        stub = lib.stub()  # only after Photoshop is up
+    except lib.ConnectionNotEstablishedYet:
+        print("Not connected yet, ignoring")
+        return
+
+    if not stub.get_active_document_name():
+        return
+
+    layers_meta = stub.get_layers_metadata()  # minimize calls to PS
+    for layer in stub.get_layers():
+        data = stub.read(layer, layers_meta)
+
+        # Skip non-tagged layers.
+        if not data:
+            continue
+
+        # Filter to only containers.
+        if "container" not in data["id"]:
+            continue
+
+        # Append transient data
+        data["objectName"] = layer.name.replace(stub.LOADED_ICON, '')
+        data["layer"] = layer
+
+        yield data
+
+
+def _get_stub():
+    """Handle pulling stub from PS to run operations on host.
+
+    Returns:
+        (PhotoshopServerStub) or None
+    """
+    try:
+        stub = lib.stub()  # only after Photoshop is up
+    except lib.ConnectionNotEstablishedYet:
+        print("Not connected yet, ignoring")
+        return
+
+    if not stub.get_active_document_name():
+        return
+
+    return stub
+
+
+def containerise(
+    name, namespace, layer, context, loader=None, suffix="_CON"
+):
+    """Imprint layer with metadata.
+
+    Containerisation enables tracking of version, author and origin
+    for loaded assets.
+
+    Arguments:
+        name (str): Name of resulting assembly
+        namespace (str): Namespace under which to host container
+        layer (PSItem): Layer to containerise
+        context (dict): Asset information
+        loader (str, optional): Name of loader used to produce this container.
+        suffix (str, optional): Suffix of container, defaults to `_CON`.
+
+    Returns:
+        container (PSItem): the containerised layer
+    """
+    layer.name = name + suffix
+
+    data = {
+        "schema": "openpype:container-2.0",
+        "id": AVALON_CONTAINER_ID,
+        "name": name,
+        "namespace": namespace,
+        "loader": str(loader),
+        "representation": str(context["representation"]["_id"]),
+        "members": [str(layer.id)]
+    }
+    stub = lib.stub()
+    stub.imprint(layer.id, data)
+
+    return layer
+
+
+def cache_and_get_instances(creator):
+    """Cache instances in shared data.
+
+    Stores all instances as a list, as legacy instances might still be
+    present.
+    Args:
+        creator (Creator): Plugin which would like to get instances from host.
+    Returns:
+        (list): all instances stored in metadata
+    """
+    shared_key = "openpype.photoshop.instances"
+    if shared_key not in creator.collection_shared_data:
+        creator.collection_shared_data[shared_key] = \
+            creator.host.list_instances()
+    return creator.collection_shared_data[shared_key]
diff --git a/client/ayon_core/hosts/photoshop/api/plugin.py b/client/ayon_core/hosts/photoshop/api/plugin.py
new file mode 100644
index 0000000000..22645a1f9b
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/api/plugin.py
@@ -0,0 +1,35 @@
+import re
+
+from ayon_core.pipeline import LoaderPlugin
+from .launch_logic import stub
+
+
+def get_unique_layer_name(layers, asset_name, subset_name):
+    """
+    Gets all layer names and if 'asset_name_subset_name' is already present,
+    increases its suffix by 1 (eg. creates a unique layer name - for Loader).
+    Args:
+        layers (list): of dict with layers info (name, id etc.)
+        asset_name (string):
+        subset_name (string):
+
+    Returns:
+        (string): name_00X (without version)
+    """
+    name = "{}_{}".format(asset_name, subset_name)
+    names = {}
+    for layer in layers:
+        layer_name = re.sub(r'_\d{3}$', '', layer.name)
+        if layer_name in names:
+            names[layer_name] = names[layer_name] + 1
+        else:
+            names[layer_name] = 1
+    occurrences = names.get(name, 0)
+
+    return "{}_{:0>3d}".format(name, occurrences + 1)
+
+
+class PhotoshopLoader(LoaderPlugin):
+    @staticmethod
+    def get_stub():
+        return stub()
diff --git a/client/ayon_core/hosts/photoshop/api/ws_stub.py b/client/ayon_core/hosts/photoshop/api/ws_stub.py
new file mode 100644
index 0000000000..42bad05f26
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/api/ws_stub.py
@@ -0,0 +1,571 @@
+"""
+    Stub handling connection from server to client.
+    Used anywhere the solution calls client methods.
+"""
+import json
+import attr
+from wsrpc_aiohttp import WebSocketAsync
+
+from ayon_core.tools.adobe_webserver.app import WebServerTool
+
+
+@attr.s
+class PSItem(object):
+    """
+        Object denoting layer or group item in PS. Each item is created in
+        PS by any Loader, but contains the same fields, which are used
+        in later processing.
+    """
+    # metadata
+    id = attr.ib()  # id created by PS, could be used for querying
+    name = attr.ib()  # name of item
+    group = attr.ib(default=None)  # True if item is a group (LayerSet)
+    parents = attr.ib(factory=list)
+    visible = attr.ib(default=True)
+    type = attr.ib(default=None)
+    # all imported elements
+    members = attr.ib(factory=list)
+    long_name = attr.ib(default=None)
+    color_code = attr.ib(default=None)  # color code of layer
+    instance_id = attr.ib(default=None)
+
+    @property
+    def clean_name(self):
+        """Returns layer name without publish icon highlight
+
+        Returns:
+            (str)
+        """
+        return (self.name.replace(PhotoshopServerStub.PUBLISH_ICON, '')
+                    .replace(PhotoshopServerStub.LOADED_ICON, ''))
+
+
+class PhotoshopServerStub:
+    """
+        Stub for calling functions on the client (Photoshop js) side.
+        Expects that the client is already connected (started when the
+        avalon menu is opened).
+        'self.websocketserver.call' is used as an async wrapper.
+    """
+    PUBLISH_ICON = '\u2117 '
+    LOADED_ICON = '\u25bc'
+
+    def __init__(self):
+        self.websocketserver = WebServerTool.get_instance()
+        self.client = self.get_client()
+
+    @staticmethod
+    def get_client():
+        """Return the first connected client to the WebSocket.
+
+        TODO implement selection by Route
+
+        Returns: client
+        """
+        clients = WebSocketAsync.get_clients()
+        client = None
+        if len(clients) > 0:
+            key = list(clients.keys())[0]
+            client = clients.get(key)
+
+        return client
+
+    def open(self, path):
+        """Open file located at 'path' (local).
+
+        Args:
+            path (string): local file path
+        Returns: None
+        """
+        self.websocketserver.call(
+            self.client.call('Photoshop.open', path=path)
+        )
+
+    def read(self, layer, layers_meta=None):
+        """Parses layer metadata from Headline field of active document.
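+
+        Metadata is stored as a JSON list in the document's Headline
+        field (File > File Info), so it can be read in one call and
+        reused across layers.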
+
+        Args:
+            layer: (PSItem)
+            layers_meta: full list from Headline (for performance in loops)
+        Returns:
+            (dict) of layer metadata stored in PS file
+
+        Example:
+            {
+                'id': 'pyblish.avalon.container',
+                'loader': 'ImageLoader',
+                'members': ['64'],
+                'name': 'imageMainMiddle',
+                'namespace': 'Hero_imageMainMiddle_001',
+                'representation': '6203dc91e80934d9f6ee7d96',
+                'schema': 'openpype:container-2.0'
+            }
+        """
+        if layers_meta is None:
+            layers_meta = self.get_layers_metadata()
+
+        for layer_meta in layers_meta:
+            layer_id = layer_meta.get("uuid")  # legacy
+            if layer_meta.get("members"):
+                layer_id = layer_meta["members"][0]
+            if str(layer.id) == str(layer_id):
+                return layer_meta
+        print("Unable to find layer metadata for {}".format(layer.id))
+
+    def imprint(self, item_id, data, all_layers=None, items_meta=None):
+        """Save layer metadata to Headline field of active document.
+
+        Stores metadata in format:
+        [{
+            "active":true,
+            "subset":"imageBG",
+            "family":"image",
+            "id":"pyblish.avalon.instance",
+            "asset":"Town",
+            "uuid": "8"
+        }] - for created instances
+        OR
+        [{
+            "schema": "openpype:container-2.0",
+            "id": "pyblish.avalon.instance",
+            "name": "imageMG",
+            "namespace": "Jungle_imageMG_001",
+            "loader": "ImageLoader",
+            "representation": "5fbfc0ee30a946093c6ff18a",
+            "members": [
+                "40"
+            ]
+        }] - for loaded instances
+
+        Args:
+            item_id (str):
+            data (dict): metadata for a single layer
+            all_layers (list of PSItem): for performance, could be
+                injected for usage in loop, if not, single call will be
+                triggered
+            items_meta (list): metadata from Headline
+                (for performance - provide only if imprint is in
+                loop - value should be same)
+        Returns: None
+        """
+        if not items_meta:
+            items_meta = self.get_layers_metadata()
+
+        # Ids are stored as strings in the JSON metadata, so compare
+        # them as strings.
+        item_id = str(item_id)
+        is_new = True
+        result_meta = []
+        for item_meta in items_meta:
+            if ((item_meta.get('members') and
+                 item_id == str(item_meta.get('members')[0])) or
+                    item_meta.get("instance_id") == item_id):
+                is_new = False
+                if data:
+                    item_meta.update(data)
+                    result_meta.append(item_meta)
+            else:
+                result_meta.append(item_meta)
+
+        if is_new:
+            result_meta.append(data)
+
+        # Ensure only valid ids are stored.
+        if not all_layers:
+            all_layers = self.get_layers()
+        layer_ids = [layer.id for layer in all_layers]
+        cleaned_data = []
+
+        for item in result_meta:
+            if item.get("members"):
+                if int(item["members"][0]) not in layer_ids:
+                    continue
+
+            cleaned_data.append(item)
+
+        payload = json.dumps(cleaned_data, indent=4)
+        self.websocketserver.call(
+            self.client.call('Photoshop.imprint', payload=payload)
+        )
+
+    def get_layers(self):
+        """Returns all layers in the active document as PSItem records.
+
+        Returns:
+            (list) of PSItem, each with fields such as:
+                { 'id': '123',
+                  'name': 'My Layer 1',
+                  'type': 'GUIDE'|'FG'|'BG'|'OBJ',
+                  'visible': 'true'|'false' }
+        """
+        res = self.websocketserver.call(
+            self.client.call('Photoshop.get_layers')
+        )
+
+        return self._to_records(res)
+
+    def get_layer(self, layer_id):
+        """Returns PSItem for specific 'layer_id' or None if not found.
+
+        Args:
+            layer_id (string): unique layer id, stored in 'uuid' field
+
+        Returns:
+            (PSItem) or None
+        """
+        layers = self.get_layers()
+        for layer in layers:
+            if str(layer.id) == str(layer_id):
+                return layer
+
+    def get_layers_in_layers(self, layers):
+        """Return all layers that belong to the given layers (which might
+        be groups).
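+
+        Example (hypothetical ids, for illustration only): for a group
+        with id 5 containing layers 6 and 7, passing [group] returns the
+        PSItems with ids 5, 6 and 7.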
+
+        Args:
+            layers (list): of PSItem, may contain groups
+
+        Returns:
+            (list) of PSItem
+        """
+        parent_ids = set([lay.id for lay in layers])
+
+        return self._get_layers_in_layers(parent_ids)
+
+    def get_layers_in_layers_ids(self, layers_ids, layers=None):
+        """Return all layers that belong to the given layer ids (which
+        might be groups).
+
+        Args:
+            layers_ids (list): layer ids
+            layers (list): of PSItem (used for caching)
+
+        Returns:
+            (list) of PSItem
+        """
+        parent_ids = set(layers_ids)
+
+        return self._get_layers_in_layers(parent_ids, layers)
+
+    def _get_layers_in_layers(self, parent_ids, layers=None):
+        if not layers:
+            layers = self.get_layers()
+
+        all_layers = layers
+        ret = []
+
+        for layer in all_layers:
+            parents = set(layer.parents)
+            if len(parent_ids & parents) > 0:
+                ret.append(layer)
+            if layer.id in parent_ids:
+                ret.append(layer)
+
+        return ret
+
+    def create_group(self, name):
+        """Create a new group (eg. LayerSet).
+
+        Returns:
+            (PSItem): the newly created group
+        """
+        enhanced_name = self.PUBLISH_ICON + name
+        ret = self.websocketserver.call(
+            self.client.call('Photoshop.create_group', name=enhanced_name)
+        )
+        # create group on PS is asynchronous, returns only id
+        return PSItem(id=ret, name=name, group=True)
+
+    def group_selected_layers(self, name):
+        """Group selected layers into a new LayerSet (eg. group).
+
+        Returns:
+            (PSItem)
+        """
+        enhanced_name = self.PUBLISH_ICON + name
+        res = self.websocketserver.call(
+            self.client.call(
+                'Photoshop.group_selected_layers', name=enhanced_name
+            )
+        )
+        res = self._to_records(res)
+        if res:
+            rec = res.pop()
+            rec.name = rec.name.replace(self.PUBLISH_ICON, '')
+            return rec
+        raise ValueError("No group record returned")
+
+    def get_selected_layers(self):
+        """Get a list of currently selected layers.
+
+        Returns:
+            (list) of PSItem
+        """
+        res = self.websocketserver.call(
+            self.client.call('Photoshop.get_selected_layers')
+        )
+        return self._to_records(res)
+
+    def select_layers(self, layers):
+        """Selects specified layers in Photoshop by their ids.
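+
+        Example (a sketch; assumes a connected stub):
+            stub.select_layers(stub.get_layers())  # select all layers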
+
+        Args:
+            layers (list): of PSItem
+        """
+        layers_id = [str(lay.id) for lay in layers]
+        self.websocketserver.call(
+            self.client.call(
+                'Photoshop.select_layers',
+                layers=json.dumps(layers_id)
+            )
+        )
+
+    def get_active_document_full_name(self):
+        """Returns full name with path of active document via ws call.
+
+        Returns:
+            (string): full path with name
+        """
+        res = self.websocketserver.call(
+            self.client.call('Photoshop.get_active_document_full_name')
+        )
+
+        return res
+
+    def get_active_document_name(self):
+        """Returns just the name of the active document via ws call.
+
+        Returns:
+            (string): file name
+        """
+        return self.websocketserver.call(
+            self.client.call('Photoshop.get_active_document_name')
+        )
+
+    def is_saved(self):
+        """Returns True if there are no unsaved changes in active document.
+
+        Returns:
+            (bool)
+        """
+        return self.websocketserver.call(
+            self.client.call('Photoshop.is_saved')
+        )
+
+    def save(self):
+        """Saves active document"""
+        self.websocketserver.call(
+            self.client.call('Photoshop.save')
+        )
+
+    def saveAs(self, image_path, ext, as_copy):
+        """Saves active document to psd (copy) or png or jpg.
+
+        Args:
+            image_path (string): full local path
+            ext (string): extension without dot (eg. 'psd', 'jpg')
+            as_copy (bool): save as a copy
+        Returns: None
+        """
+        self.websocketserver.call(
+            self.client.call(
+                'Photoshop.saveAs',
+                image_path=image_path,
+                ext=ext,
+                as_copy=as_copy
+            )
+        )
+
+    def set_visible(self, layer_id, visibility):
+        """Set layer with 'layer_id' to 'visibility'.
+
+        Args:
+            layer_id (int):
+            visibility (bool):
+        Returns: None
+        """
+        self.websocketserver.call(
+            self.client.call(
+                'Photoshop.set_visible',
+                layer_id=layer_id,
+                visibility=visibility
+            )
+        )
+
+    def hide_all_others_layers(self, layers):
+        """Hides all layers that are not part of the list or that are not
+        children of this list.
+
+        Args:
+            layers (list): list of PSItem - highest hierarchy
+        """
+        extract_ids = set([ll.id for ll in self.get_layers_in_layers(layers)])
+
+        self.hide_all_others_layers_ids(extract_ids)
+
+    def hide_all_others_layers_ids(self, extract_ids, layers=None):
+        """Hides all layers that are not part of the list or that are not
+        children of this list.
+
+        Args:
+            extract_ids (list): list of integer ids that should stay visible
+            layers (list): of PSItem (used for caching)
+        """
+        if not layers:
+            layers = self.get_layers()
+        for layer in layers:
+            if layer.visible and layer.id not in extract_ids:
+                self.set_visible(layer.id, False)
+
+    def get_layers_metadata(self):
+        """Reads layers metadata from Headline of active document in PS.
+        (Headline is accessible via File > File Info)
+
+        Returns:
+            (list)
+        example:
+            {"8":{"active":true,"subset":"imageBG",
+            "family":"image","id":"pyblish.avalon.instance",
+            "asset":"Town"}}
+            8 is layer(group) id - used for deletion, update etc.
+        """
+        res = self.websocketserver.call(self.client.call('Photoshop.read'))
+        layers_data = []
+        try:
+            if res:
+                layers_data = json.loads(res)
+        except json.decoder.JSONDecodeError:
+            raise ValueError("{} cannot be parsed, recreate meta".format(res))
+        # format of metadata changed from {} to [] because of standardization
+        # keep current implementation logic as it's working
+        if isinstance(layers_data, dict):
+            for layer_id, layer_meta in layers_data.items():
+                if layer_meta.get("schema") != "openpype:container-2.0":
+                    layer_meta["members"] = [str(layer_id)]
+            layers_data = list(layers_data.values())
+        return layers_data
+
+    def import_smart_object(self, path, layer_name, as_reference=False):
+        """Import the file at `path` as a smart object to active document.
+
+        Args:
+            path (str): File path to import.
+            layer_name (str): Unique layer name to differentiate how many
+                times same smart object was loaded
+            as_reference (bool): pull in content or reference
+        """
+        enhanced_name = self.LOADED_ICON + layer_name
+        res = self.websocketserver.call(
+            self.client.call(
+                'Photoshop.import_smart_object',
+                path=path,
+                name=enhanced_name,
+                as_reference=as_reference
+            )
+        )
+        rec = self._to_records(res).pop()
+        if rec:
+            rec.name = rec.name.replace(self.LOADED_ICON, '')
+        return rec
+
+    def replace_smart_object(self, layer, path, layer_name):
+        """Replace the smart object `layer` with file at `path`.
+
+        Args:
+            layer (PSItem):
+            path (str): File to import.
+            layer_name (str): Unique layer name to differentiate how many
+                times same smart object was loaded
+        """
+        enhanced_name = self.LOADED_ICON + layer_name
+        self.websocketserver.call(
+            self.client.call(
+                'Photoshop.replace_smart_object',
+                layer_id=layer.id,
+                path=path,
+                name=enhanced_name
+            )
+        )
+
+    def delete_layer(self, layer_id):
+        """Deletes specific layer by its id.
+
+        Args:
+            layer_id (int): id of layer to delete
+        """
+        self.websocketserver.call(
+            self.client.call('Photoshop.delete_layer', layer_id=layer_id)
+        )
+
+    def rename_layer(self, layer_id, name):
+        """Renames specific layer by its id.
+
+        Args:
+            layer_id (int): id of layer to rename
+            name (str): new name
+        """
+        self.websocketserver.call(
+            self.client.call(
+                'Photoshop.rename_layer',
+                layer_id=layer_id,
+                name=name
+            )
+        )
+
+    def remove_instance(self, instance_id):
+        cleaned_data = []
+
+        for item in self.get_layers_metadata():
+            inst_id = item.get("instance_id") or item.get("uuid")
+            if inst_id != instance_id:
+                cleaned_data.append(item)
+
+        payload = json.dumps(cleaned_data, indent=4)
+
+        self.websocketserver.call(
+            self.client.call('Photoshop.imprint', payload=payload)
+        )
+
+    def get_extension_version(self):
+        """Returns version number of installed extension."""
+        return self.websocketserver.call(
+            self.client.call('Photoshop.get_extension_version')
+        )
+
+    def close(self):
+        """Shuts down PS and its process too.
+
+        For webpublishing only.
+        """
+        # TODO change client.call to method with checks for client
+        self.websocketserver.call(self.client.call('Photoshop.close'))
+
+    def _to_records(self, res):
+        """Converts string json representation into list of PSItem for
+        dot notation access to work.
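+
+        Example (hypothetical payload, for illustration only):
+            self._to_records('{"id": 1, "name": "BG"}')
+            # -> [PSItem(id=1, name="BG", ...)]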
+
+        Args:
+            res (string): valid json
+
+        Returns:
+            (list) of PSItem
+        """
+        try:
+            layers_data = json.loads(res)
+        except json.decoder.JSONDecodeError:
+            raise ValueError("Received broken JSON {}".format(res))
+        ret = []
+
+        # convert to PSItem to use dot notation
+        if isinstance(layers_data, dict):
+            layers_data = [layers_data]
+        for d in layers_data:
+            # currently implemented and expected fields
+            ret.append(PSItem(
+                d.get('id'),
+                d.get('name'),
+                d.get('group'),
+                d.get('parents'),
+                d.get('visible'),
+                d.get('type'),
+                d.get('members'),
+                d.get('long_name'),
+                d.get("color_code"),
+                d.get("instance_id")
+            ))
+        return ret
diff --git a/client/ayon_core/hosts/photoshop/lib.py b/client/ayon_core/hosts/photoshop/lib.py
new file mode 100644
index 0000000000..b905caf1bd
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/lib.py
@@ -0,0 +1,103 @@
+import re
+
+import ayon_core.hosts.photoshop.api as api
+from ayon_core.client import get_asset_by_name
+from ayon_core.lib import prepare_template_data
+from ayon_core.pipeline import (
+    AutoCreator,
+    CreatedInstance
+)
+from ayon_core.hosts.photoshop.api.pipeline import cache_and_get_instances
+
+
+class PSAutoCreator(AutoCreator):
+    """Generic autocreator to extend."""
+    def get_instance_attr_defs(self):
+        return []
+
+    def collect_instances(self):
+        for instance_data in cache_and_get_instances(self):
+            creator_id = instance_data.get("creator_identifier")
+
+            if creator_id == self.identifier:
+                instance = CreatedInstance.from_existing(
+                    instance_data, self
+                )
+                self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        self.log.debug("update_list:: {}".format(update_list))
+        for created_inst, _changes in update_list:
+            api.stub().imprint(created_inst.get("instance_id"),
+                               created_inst.data_to_store())
+
+    def create(self, options=None):
+        existing_instance = None
+        for instance in self.create_context.instances:
+            if instance.family == self.family:
+                existing_instance = instance
+                break
+
+        context = self.create_context
+        project_name = context.get_current_project_name()
+        asset_name = context.get_current_asset_name()
+        task_name = context.get_current_task_name()
+        host_name = context.host_name
+
+        if existing_instance is None:
+            existing_instance_asset = None
+        else:
+            existing_instance_asset = existing_instance["folderPath"]
+
+        if existing_instance is None:
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            subset_name = self.get_subset_name(
+                self.default_variant, task_name, asset_doc,
+                project_name, host_name
+            )
+            data = {
+                "folderPath": asset_name,
+                "task": task_name,
+                "variant": self.default_variant
+            }
+            data.update(self.get_dynamic_data(
+                self.default_variant, task_name, asset_doc,
+                project_name, host_name, None
+            ))
+
+            if not self.active_on_create:
+                data["active"] = False
+
+            new_instance = CreatedInstance(
+                self.family, subset_name, data, self
+            )
+            self._add_instance_to_context(new_instance)
+            api.stub().imprint(new_instance.get("instance_id"),
+                               new_instance.data_to_store())
+
+        elif (
+            existing_instance_asset != asset_name
+            or existing_instance["task"] != task_name
+        ):
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            subset_name = self.get_subset_name(
+                self.default_variant, task_name, asset_doc,
+                project_name, host_name
+            )
+            existing_instance["folderPath"] = asset_name
+            existing_instance["task"] = task_name
+            existing_instance["subset"] = subset_name
+
+
+def clean_subset_name(subset_name):
+    """Clean leftover '{layer}' placeholders from subset name."""
+    dynamic_data = 
prepare_template_data({"layer": "{layer}"}) + for value in dynamic_data.values(): + if value in subset_name: + subset_name = (subset_name.replace(value, "") + .replace("__", "_") + .replace("..", ".")) + # clean trailing separator as Main_ + pattern = r'[\W_]+$' + replacement = '' + return re.sub(pattern, replacement, subset_name) diff --git a/openpype/hosts/photoshop/plugins/create/create_flatten_image.py b/client/ayon_core/hosts/photoshop/plugins/create/create_flatten_image.py similarity index 83% rename from openpype/hosts/photoshop/plugins/create/create_flatten_image.py rename to client/ayon_core/hosts/photoshop/plugins/create/create_flatten_image.py index 24be9df0e0..666fd52f78 100644 --- a/openpype/hosts/photoshop/plugins/create/create_flatten_image.py +++ b/client/ayon_core/hosts/photoshop/plugins/create/create_flatten_image.py @@ -1,12 +1,11 @@ -from openpype.pipeline import CreatedInstance +from ayon_core.pipeline import CreatedInstance -from openpype import AYON_SERVER_ENABLED -from openpype.lib import BoolDef -import openpype.hosts.photoshop.api as api -from openpype.hosts.photoshop.lib import PSAutoCreator, clean_subset_name -from openpype.pipeline.create import get_subset_name -from openpype.lib import prepare_template_data -from openpype.client import get_asset_by_name +from ayon_core.lib import BoolDef +import ayon_core.hosts.photoshop.api as api +from ayon_core.hosts.photoshop.lib import PSAutoCreator, clean_subset_name +from ayon_core.pipeline.create import get_subset_name +from ayon_core.lib import prepare_template_data +from ayon_core.client import get_asset_by_name class AutoImageCreator(PSAutoCreator): @@ -40,10 +39,8 @@ def create(self, options=None): if existing_instance is None: existing_instance_asset = None - elif AYON_SERVER_ENABLED: - existing_instance_asset = existing_instance["folderPath"] else: - existing_instance_asset = existing_instance["asset"] + existing_instance_asset = existing_instance["folderPath"] if existing_instance is None: subset_name = self.get_subset_name( @@ -52,12 +49,9 @@ def create(self, options=None): ) data = { + "folderPath": asset_name, "task": task_name, } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name if not self.active_on_create: data["active"] = False @@ -80,10 +74,7 @@ def create(self, options=None): self.default_variant, task_name, asset_doc, project_name, host_name ) - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - existing_instance["asset"] = asset_name + existing_instance["folderPath"] = asset_name existing_instance["task"] = task_name existing_instance["subset"] = subset_name diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py similarity index 96% rename from openpype/hosts/photoshop/plugins/create/create_image.py rename to client/ayon_core/hosts/photoshop/plugins/create/create_image.py index 4f2e90886a..a28872bba1 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py @@ -1,16 +1,16 @@ import re -from openpype.hosts.photoshop import api -from openpype.lib import BoolDef -from openpype.pipeline import ( +from ayon_core.hosts.photoshop import api +from ayon_core.lib import BoolDef +from ayon_core.pipeline import ( Creator, CreatedInstance, CreatorError ) -from openpype.lib import prepare_template_data -from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS -from 
openpype.hosts.photoshop.api.pipeline import cache_and_get_instances -from openpype.hosts.photoshop.lib import clean_subset_name +from ayon_core.lib import prepare_template_data +from ayon_core.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from ayon_core.hosts.photoshop.api.pipeline import cache_and_get_instances +from ayon_core.hosts.photoshop.lib import clean_subset_name class ImageCreator(Creator): diff --git a/client/ayon_core/hosts/photoshop/plugins/create/create_review.py b/client/ayon_core/hosts/photoshop/plugins/create/create_review.py new file mode 100644 index 0000000000..888b294248 --- /dev/null +++ b/client/ayon_core/hosts/photoshop/plugins/create/create_review.py @@ -0,0 +1,28 @@ +from ayon_core.hosts.photoshop.lib import PSAutoCreator + + +class ReviewCreator(PSAutoCreator): + """Creates review instance which might be disabled from publishing.""" + identifier = "review" + family = "review" + + default_variant = "Main" + + def get_detail_description(self): + return """Auto creator for review. + + Photoshop review is created from all published images or from all + visible layers if no `image` instances got created. + + Review might be disabled by an artist (instance shouldn't be deleted as + it will get recreated in next publish either way). + """ + + def apply_settings(self, project_settings): + plugin_settings = ( + project_settings["photoshop"]["create"]["ReviewCreator"] + ) + + self.default_variant = plugin_settings["default_variant"] + self.active_on_create = plugin_settings["active_on_create"] + self.enabled = plugin_settings["enabled"] diff --git a/client/ayon_core/hosts/photoshop/plugins/create/create_workfile.py b/client/ayon_core/hosts/photoshop/plugins/create/create_workfile.py new file mode 100644 index 0000000000..3485027215 --- /dev/null +++ b/client/ayon_core/hosts/photoshop/plugins/create/create_workfile.py @@ -0,0 +1,28 @@ +from ayon_core.hosts.photoshop.lib import PSAutoCreator + + +class WorkfileCreator(PSAutoCreator): + identifier = "workfile" + family = "workfile" + + default_variant = "Main" + + def get_detail_description(self): + return """Auto creator for workfile. + + It is expected that each publish will also publish its source workfile + for safekeeping. This creator triggers automatically without need for + an artist to remember and trigger it explicitly. + + Workfile instance could be disabled if it is not required to publish + workfile. (Instance shouldn't be deleted though as it will be recreated + in next publish automatically). + """ + + def apply_settings(self, project_settings): + plugin_settings = ( + project_settings["photoshop"]["create"]["WorkfileCreator"] + ) + + self.active_on_create = plugin_settings["active_on_create"] + self.enabled = plugin_settings["enabled"] diff --git a/client/ayon_core/hosts/photoshop/plugins/load/load_image.py b/client/ayon_core/hosts/photoshop/plugins/load/load_image.py new file mode 100644 index 0000000000..0fa6bca901 --- /dev/null +++ b/client/ayon_core/hosts/photoshop/plugins/load/load_image.py @@ -0,0 +1,84 @@ +import re + +from ayon_core.pipeline import get_representation_path +from ayon_core.hosts.photoshop import api as photoshop +from ayon_core.hosts.photoshop.api import get_unique_layer_name + + +class ImageLoader(photoshop.PhotoshopLoader): + """Load images + + Stores the imported asset in a container named after the asset. 
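+
+    Layers are imported as smart objects, so `update` can later swap
+    them in place via `replace_smart_object` when the version changes.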
+ """ + + families = ["image", "render"] + representations = ["*"] + + def load(self, context, name=None, namespace=None, data=None): + stub = self.get_stub() + layer_name = get_unique_layer_name( + stub.get_layers(), + context["asset"]["name"], + name + ) + with photoshop.maintained_selection(): + path = self.filepath_from_context(context) + layer = self.import_layer(path, layer_name, stub) + + self[:] = [layer] + namespace = namespace or layer_name + + return photoshop.containerise( + name, + namespace, + layer, + context, + self.__class__.__name__ + ) + + def update(self, container, representation): + """ Switch asset or change version """ + stub = self.get_stub() + + layer = container.pop("layer") + + context = representation.get("context", {}) + + namespace_from_container = re.sub(r'_\d{3}$', '', + container["namespace"]) + layer_name = "{}_{}".format(context["asset"], context["subset"]) + # switching assets + if namespace_from_container != layer_name: + layer_name = get_unique_layer_name( + stub.get_layers(), context["asset"], context["subset"] + ) + else: # switching version - keep same name + layer_name = container["namespace"] + + path = get_representation_path(representation) + with photoshop.maintained_selection(): + stub.replace_smart_object( + layer, path, layer_name + ) + + stub.imprint( + layer.id, {"representation": str(representation["_id"])} + ) + + def remove(self, container): + """ + Removes element from scene: deletes layer + removes from Headline + Args: + container (dict): container to be removed - used to get layer_id + """ + stub = self.get_stub() + + layer = container.pop("layer") + stub.imprint(layer.id, {}) + stub.delete_layer(layer.id) + + def switch(self, container, representation): + self.update(container, representation) + + def import_layer(self, file_name, layer_name, stub): + return stub.import_smart_object(file_name, layer_name) diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py b/client/ayon_core/hosts/photoshop/plugins/load/load_image_from_sequence.py similarity index 95% rename from openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py rename to client/ayon_core/hosts/photoshop/plugins/load/load_image_from_sequence.py index f9fceb80bb..06ac70041e 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py +++ b/client/ayon_core/hosts/photoshop/plugins/load/load_image_from_sequence.py @@ -2,8 +2,8 @@ import qargparse -from openpype.hosts.photoshop import api as photoshop -from openpype.hosts.photoshop.api import get_unique_layer_name +from ayon_core.hosts.photoshop import api as photoshop +from ayon_core.hosts.photoshop.api import get_unique_layer_name class ImageFromSequenceLoader(photoshop.PhotoshopLoader): diff --git a/client/ayon_core/hosts/photoshop/plugins/load/load_reference.py b/client/ayon_core/hosts/photoshop/plugins/load/load_reference.py new file mode 100644 index 0000000000..e2fec039d0 --- /dev/null +++ b/client/ayon_core/hosts/photoshop/plugins/load/load_reference.py @@ -0,0 +1,85 @@ +import re + +from ayon_core.pipeline import get_representation_path +from ayon_core.hosts.photoshop import api as photoshop +from ayon_core.hosts.photoshop.api import get_unique_layer_name + + +class ReferenceLoader(photoshop.PhotoshopLoader): + """Load reference images + + Stores the imported asset in a container named after the asset. + + Inheriting from 'load_image' didn't work because of + "Cannot write to closing transport", possible refactor. 
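+
+    Unlike `ImageLoader`, `import_layer` here passes `as_reference=True`,
+    so Photoshop links the source file instead of embedding its content.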
+ """ + + families = ["image", "render"] + representations = ["*"] + + def load(self, context, name=None, namespace=None, data=None): + stub = self.get_stub() + layer_name = get_unique_layer_name( + stub.get_layers(), context["asset"]["name"], name + ) + with photoshop.maintained_selection(): + path = self.filepath_from_context(context) + layer = self.import_layer(path, layer_name, stub) + + self[:] = [layer] + namespace = namespace or layer_name + + return photoshop.containerise( + name, + namespace, + layer, + context, + self.__class__.__name__ + ) + + def update(self, container, representation): + """ Switch asset or change version """ + stub = self.get_stub() + layer = container.pop("layer") + + context = representation.get("context", {}) + + namespace_from_container = re.sub(r'_\d{3}$', '', + container["namespace"]) + layer_name = "{}_{}".format(context["asset"], context["subset"]) + # switching assets + if namespace_from_container != layer_name: + layer_name = get_unique_layer_name( + stub.get_layers(), context["asset"], context["subset"] + ) + else: # switching version - keep same name + layer_name = container["namespace"] + + path = get_representation_path(representation) + with photoshop.maintained_selection(): + stub.replace_smart_object( + layer, path, layer_name + ) + + stub.imprint( + layer.id, {"representation": str(representation["_id"])} + ) + + def remove(self, container): + """Removes element from scene: deletes layer + removes from Headline + + Args: + container (dict): container to be removed - used to get layer_id + """ + stub = self.get_stub() + layer = container.pop("layer") + stub.imprint(layer.id, {}) + stub.delete_layer(layer.id) + + def switch(self, container, representation): + self.update(container, representation) + + def import_layer(self, file_name, layer_name, stub): + return stub.import_smart_object( + file_name, layer_name, as_reference=True + ) diff --git a/openpype/hosts/photoshop/plugins/publish/closePS.py b/client/ayon_core/hosts/photoshop/plugins/publish/closePS.py similarity index 91% rename from openpype/hosts/photoshop/plugins/publish/closePS.py rename to client/ayon_core/hosts/photoshop/plugins/publish/closePS.py index b4c3a4c966..6f86d98580 100644 --- a/openpype/hosts/photoshop/plugins/publish/closePS.py +++ b/client/ayon_core/hosts/photoshop/plugins/publish/closePS.py @@ -4,7 +4,7 @@ import pyblish.api -from openpype.hosts.photoshop import api as photoshop +from ayon_core.hosts.photoshop import api as photoshop class ClosePS(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/photoshop/plugins/publish/collect_auto_image.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_image.py similarity index 95% rename from openpype/hosts/photoshop/plugins/publish/collect_auto_image.py rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_image.py index 4d7838c510..051a3da0a1 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_auto_image.py +++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_image.py @@ -1,8 +1,8 @@ import pyblish.api -from openpype.client import get_asset_name_identifier -from openpype.hosts.photoshop import api as photoshop -from openpype.pipeline.create import get_subset_name +from ayon_core.client import get_asset_name_identifier +from ayon_core.hosts.photoshop import api as photoshop +from ayon_core.pipeline.create import get_subset_name class CollectAutoImage(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/photoshop/plugins/publish/collect_auto_image_refresh.py 
b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_image_refresh.py
similarity index 94%
rename from openpype/hosts/photoshop/plugins/publish/collect_auto_image_refresh.py
rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_image_refresh.py
index 741fb0e9cd..0585f4f226 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_auto_image_refresh.py
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_image_refresh.py
@@ -1,6 +1,6 @@
 import pyblish.api
 
-from openpype.hosts.photoshop import api as photoshop
+from ayon_core.hosts.photoshop import api as photoshop
 
 
 class CollectAutoImageRefresh(pyblish.api.ContextPlugin):
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_auto_review.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_review.py
similarity index 94%
rename from openpype/hosts/photoshop/plugins/publish/collect_auto_review.py
rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_review.py
index e5a2f326d7..c8d4ddf111 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_auto_review.py
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_review.py
@@ -7,9 +7,9 @@
 """
 import pyblish.api
 
-from openpype.client import get_asset_name_identifier
-from openpype.hosts.photoshop import api as photoshop
-from openpype.pipeline.create import get_subset_name
+from ayon_core.client import get_asset_name_identifier
+from ayon_core.hosts.photoshop import api as photoshop
+from ayon_core.pipeline.create import get_subset_name
 
 
 class CollectAutoReview(pyblish.api.ContextPlugin):
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_auto_workfile.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_workfile.py
similarity index 94%
rename from openpype/hosts/photoshop/plugins/publish/collect_auto_workfile.py
rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_workfile.py
index 9ccb8f4f85..365fd0a684 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_auto_workfile.py
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_auto_workfile.py
@@ -1,9 +1,9 @@
 import os
 import pyblish.api
 
-from openpype.client import get_asset_name_identifier
-from openpype.hosts.photoshop import api as photoshop
-from openpype.pipeline.create import get_subset_name
+from ayon_core.client import get_asset_name_identifier
+from ayon_core.hosts.photoshop import api as photoshop
+from ayon_core.pipeline.create import get_subset_name
 
 
 class CollectAutoWorkfile(pyblish.api.ContextPlugin):
diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/collect_batch_data.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_batch_data.py
new file mode 100644
index 0000000000..5e43a021c3
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_batch_data.py
@@ -0,0 +1,81 @@
+"""Parses batch context from json and continues in publish process.
+
+Provides:
+    context -> Loaded batch file.
+        - asset
+        - task (task name)
+        - taskType
+        - project_name
+        - variant
+
+Code is practically a copy of `openpype/hosts/webpublish/collect_batch_data`
+as webpublisher should eventually be ejected as an addon, i.e. the mentioned
+plugin shouldn't be pushed into the general publish plugins.
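+
+Only the `context` and `variant` keys are read from the manifest in this
+plugin; a hypothetical minimal sketch of its content (the exact `context`
+shape is defined by webpublisher's `get_batch_asset_task_info`):
+    {"context": {...}, "variant": "Main"}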
+""" + +import os + +import pyblish.api + +from ayon_core.pipeline import legacy_io +from openpype_modules.webpublisher.lib import ( + get_batch_asset_task_info, + parse_json +) +from ayon_core.tests.lib import is_in_tests + + +class CollectBatchData(pyblish.api.ContextPlugin): + """Collect batch data from json stored in 'AYON_PUBLISH_DATA' env dir. + + The directory must contain 'manifest.json' file where batch data should be + stored. + """ + # must be really early, context values are only in json file + order = pyblish.api.CollectorOrder - 0.495 + label = "Collect batch data" + hosts = ["photoshop"] + targets = ["webpublish"] + + def process(self, context): + self.log.info("CollectBatchData") + batch_dir = ( + os.environ.get("AYON_PUBLISH_DATA") + or os.environ.get("OPENPYPE_PUBLISH_DATA") + ) + if is_in_tests(): + self.log.debug("Automatic testing, no batch data, skipping") + return + + assert batch_dir, ( + "Missing `AYON_PUBLISH_DATA`") + + assert os.path.exists(batch_dir), \ + "Folder {} doesn't exist".format(batch_dir) + + project_name = os.environ.get("AVALON_PROJECT") + if project_name is None: + raise AssertionError( + "Environment `AVALON_PROJECT` was not found." + "Could not set project `root` which may cause issues." + ) + + batch_data = parse_json(os.path.join(batch_dir, "manifest.json")) + + context.data["batchDir"] = batch_dir + context.data["batchData"] = batch_data + + asset_name, task_name, task_type = get_batch_asset_task_info( + batch_data["context"] + ) + + os.environ["AVALON_ASSET"] = asset_name + os.environ["AVALON_TASK"] = task_name + legacy_io.Session["AVALON_ASSET"] = asset_name + legacy_io.Session["AVALON_TASK"] = task_name + + context.data["asset"] = asset_name + context.data["task"] = task_name + context.data["taskType"] = task_type + context.data["project_name"] = project_name + context.data["variant"] = batch_data["variant"] diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_color_coded_instances.py similarity index 96% rename from openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_color_coded_instances.py index c16616bcb2..e309da62ba 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py +++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_color_coded_instances.py @@ -3,10 +3,10 @@ import pyblish.api -from openpype.lib import prepare_template_data -from openpype.hosts.photoshop import api as photoshop -from openpype.settings import get_project_settings -from openpype.tests.lib import is_in_tests +from ayon_core.lib import prepare_template_data +from ayon_core.hosts.photoshop import api as photoshop +from ayon_core.settings import get_project_settings +from ayon_core.tests.lib import is_in_tests class CollectColorCodedInstances(pyblish.api.ContextPlugin): @@ -46,7 +46,10 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): def process(self, context): self.log.info("CollectColorCodedInstances") - batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") + batch_dir = ( + os.environ.get("AYON_PUBLISH_DATA") + or os.environ.get("OPENPYPE_PUBLISH_DATA") + ) if (is_in_tests() and (not batch_dir or not os.path.exists(batch_dir))): self.log.debug("Automatic testing, no batch data, skipping") diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/collect_current_file.py 
b/client/ayon_core/hosts/photoshop/plugins/publish/collect_current_file.py
new file mode 100644
index 0000000000..74353d452f
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_current_file.py
@@ -0,0 +1,18 @@
+import os
+
+import pyblish.api
+
+from ayon_core.hosts.photoshop import api as photoshop
+
+
+class CollectCurrentFile(pyblish.api.ContextPlugin):
+    """Inject the current working file into context"""
+
+    order = pyblish.api.CollectorOrder - 0.49
+    label = "Current File"
+    hosts = ["photoshop"]
+
+    def process(self, context):
+        context.data["currentFile"] = os.path.normpath(
+            photoshop.stub().get_active_document_full_name()
+        ).replace("\\", "/")
diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/collect_extension_version.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_extension_version.py
new file mode 100644
index 0000000000..2d24a8de15
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_extension_version.py
@@ -0,0 +1,57 @@
+import os
+import re
+import pyblish.api
+
+from ayon_core.hosts.photoshop import api as photoshop
+
+
+class CollectExtensionVersion(pyblish.api.ContextPlugin):
+    """Pulls and compares the version of the installed extension.
+
+    It is recommended to use the same extension version as the one
+    provided with the codebase.
+
+    Please use Anastasiy's Extension Manager or ZXPInstaller to update
+    the extension in case of an error.
+
+    You can locate extension.zxp in the installed code under
+    `repos/avalon-core/avalon/photoshop`
+    """
+    # This technically should be a validator, but other collectors might be
+    # impacted with usage of obsolete extension, so collector that runs first
+    # was chosen
+    order = pyblish.api.CollectorOrder - 0.5
+    label = "Collect extension version"
+    hosts = ["photoshop"]
+
+    optional = True
+    active = True
+
+    def process(self, context):
+        installed_version = photoshop.stub().get_extension_version()
+
+        if not installed_version:
+            raise ValueError("Unknown version, probably old extension")
+
+        manifest_url = os.path.join(os.path.dirname(photoshop.__file__),
+                                    "extension", "CSXS", "manifest.xml")
+
+        if not os.path.exists(manifest_url):
+            self.log.debug("Unable to locate extension manifest, not checking")
+            return
+
+        expected_version = None
+        with open(manifest_url) as fp:
+            content = fp.read()
+
+            found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
+                               content)
+            if found:
+                expected_version = found[0][1]
+
+        if expected_version != installed_version:
+            msg = "Expected version '{}' found '{}'\n".format(
+                expected_version, installed_version)
+            msg += "Please update your installed extension, it might not work "
+            msg += "properly."
+ + raise ValueError(msg) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_image.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_image.py similarity index 91% rename from openpype/hosts/photoshop/plugins/publish/collect_image.py rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_image.py index 64727cef33..bfd73bfc5f 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_image.py +++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_image.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.hosts.photoshop import api +from ayon_core.hosts.photoshop import api class CollectImage(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/photoshop/plugins/publish/collect_published_version.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_published_version.py similarity index 94% rename from openpype/hosts/photoshop/plugins/publish/collect_published_version.py rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_published_version.py index eec6f1fae4..e330b04a1f 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_published_version.py +++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_published_version.py @@ -17,8 +17,8 @@ import pyblish.api -from openpype.client import get_last_version_by_subset_name -from openpype.pipeline.version_start import get_versioning_start +from ayon_core.client import get_last_version_by_subset_name +from ayon_core.pipeline.version_start import get_versioning_start class CollectPublishedVersion(pyblish.api.ContextPlugin): diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/collect_review.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_review.py new file mode 100644 index 0000000000..e487760736 --- /dev/null +++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_review.py @@ -0,0 +1,32 @@ +""" +Requires: + None + +Provides: + instance -> family ("review") +""" + +import os + +import pyblish.api + +from ayon_core.pipeline.create import get_subset_name + + +class CollectReview(pyblish.api.ContextPlugin): + """Adds review to families for instances marked to be reviewable. 
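+
+    Reads the `mark_for_review` creator attribute and appends "review"
+    to `instance.data["families"]` when it is set.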
+    """
+
+    label = "Collect Review"
+    hosts = ["photoshop"]
+    order = pyblish.api.CollectorOrder + 0.1
+
+    publish = True
+
+    def process(self, context):
+        for instance in context:
+            creator_attributes = instance.data["creator_attributes"]
+            if (creator_attributes.get("mark_for_review") and
+                    "review" not in instance.data["families"]):
+                instance.data["families"].append("review")
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_version.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_version.py
similarity index 100%
rename from openpype/hosts/photoshop/plugins/publish/collect_version.py
rename to client/ayon_core/hosts/photoshop/plugins/publish/collect_version.py
diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/photoshop/plugins/publish/collect_workfile.py
new file mode 100644
index 0000000000..6740a6c82a
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/collect_workfile.py
@@ -0,0 +1,32 @@
+import os
+import pyblish.api
+
+from ayon_core.pipeline.create import get_subset_name
+
+
+class CollectWorkfile(pyblish.api.ContextPlugin):
+    """Collect current workfile for publish."""
+
+    order = pyblish.api.CollectorOrder + 0.1
+    label = "Collect Workfile"
+    hosts = ["photoshop"]
+
+    default_variant = "Main"
+
+    def process(self, context):
+        for instance in context:
+            if instance.data["family"] == "workfile":
+                file_path = context.data["currentFile"]
+                _, ext = os.path.splitext(file_path)
+                staging_dir = os.path.dirname(file_path)
+                base_name = os.path.basename(file_path)
+
+                # creating representation
+                instance.data["representations"].append({
+                    "name": ext[1:],
+                    "ext": ext[1:],
+                    "files": base_name,
+                    "stagingDir": staging_dir,
+                })
+                return
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/client/ayon_core/hosts/photoshop/plugins/publish/extract_image.py
similarity index 95%
rename from openpype/hosts/photoshop/plugins/publish/extract_image.py
rename to client/ayon_core/hosts/photoshop/plugins/publish/extract_image.py
index 680f580cc0..71605b53d9 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_image.py
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/extract_image.py
@@ -1,8 +1,8 @@
 import os
 import pyblish.api
 
-from openpype.pipeline import publish
-from openpype.hosts.photoshop import api as photoshop
+from ayon_core.pipeline import publish
+from ayon_core.hosts.photoshop import api as photoshop
 
 
 class ExtractImage(pyblish.api.ContextPlugin):
@@ -96,6 +96,6 @@ def staging_dir(self, instance):
         the instance.data['stagingDir']
         """
 
-        from openpype.pipeline.publish import get_instance_staging_dir
+        from ayon_core.pipeline.publish import get_instance_staging_dir
 
         return get_instance_staging_dir(instance)
diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/extract_review.py b/client/ayon_core/hosts/photoshop/plugins/publish/extract_review.py
new file mode 100644
index 0000000000..732a53f194
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/extract_review.py
@@ -0,0 +1,319 @@
+import os
+import shutil
+from PIL import Image
+
+from ayon_core.lib import (
+    run_subprocess,
+    get_ffmpeg_tool_args,
+)
+from ayon_core.pipeline import publish
+from ayon_core.hosts.photoshop import api as photoshop
+
+
+class ExtractReview(publish.Extractor):
+    """
+    Produce flattened or sequence image files from all 'image' instances.
+
+    If no 'image' instance is created, it produces a flattened image from
+    all visible layers.
+
+    It creates review, thumbnail and mov representations.
+
+    The 'review' family could be used in other steps as a reference, as it
+    contains a flattened image by default. (Eg. an artist could load this
+    review as a single item and see the full image. In most cases the
+    'image' family is separated by layers for better usage in animation
+    or comp.)
+    """
+
+    label = "Extract Review"
+    hosts = ["photoshop"]
+    families = ["review"]
+
+    # Extract Options
+    jpg_options = None
+    mov_options = None
+    make_image_sequence = None
+    max_downscale_size = 8192
+
+    def process(self, instance):
+        staging_dir = self.staging_dir(instance)
+        self.log.info("Outputting image to {}".format(staging_dir))
+
+        fps = instance.data.get("fps", 25)
+        stub = photoshop.stub()
+        self.output_seq_filename = os.path.splitext(
+            stub.get_active_document_name())[0] + ".%04d.jpg"
+
+        layers = self._get_layers_from_image_instances(instance)
+        self.log.info("Layer image instances found: {}".format(layers))
+
+        repre_name = "jpg"
+        repre_skeleton = {
+            "name": repre_name,
+            "ext": "jpg",
+            "stagingDir": staging_dir,
+            "tags": self.jpg_options['tags'],
+        }
+
+        if instance.data["family"] != "review":
+            self.log.debug("Existing extracted file from image family used.")
+            # enable creation of review, without this jpg review would clash
+            # with jpg of the image family
+            output_name = repre_name
+            repre_name = "{}_{}".format(repre_name, output_name)
+            repre_skeleton.update({"name": repre_name,
+                                   "outputName": output_name})
+
+            img_file = self.output_seq_filename % 0
+            self._prepare_file_for_image_family(img_file, instance,
+                                                staging_dir)
+            repre_skeleton.update({
+                "files": img_file,
+            })
+            processed_img_names = [img_file]
+        elif self.make_image_sequence and len(layers) > 1:
+            self.log.debug("Extract layers to image sequence.")
+            img_list = self._save_sequence_images(staging_dir, layers)
+
+            repre_skeleton.update({
+                "frameStart": 0,
+                "frameEnd": len(img_list),
+                "fps": fps,
+                "files": img_list,
+            })
+            processed_img_names = img_list
+        else:
+            self.log.debug("Extract layers to flatten image.")
+            img_file = self._save_flatten_image(staging_dir, layers)
+
+            repre_skeleton.update({
+                "files": img_file,
+            })
+            processed_img_names = [img_file]
+
+        instance.data["representations"].append(repre_skeleton)
+
+        ffmpeg_args = get_ffmpeg_tool_args("ffmpeg")
+
+        instance.data["stagingDir"] = staging_dir
+
+        source_files_pattern = os.path.join(staging_dir,
+                                            self.output_seq_filename)
+        source_files_pattern = self._check_and_resize(processed_img_names,
+                                                      source_files_pattern,
+                                                      staging_dir)
+        self._generate_thumbnail(
+            list(ffmpeg_args),
+            instance,
+            source_files_pattern,
+            staging_dir)
+
+        no_of_frames = len(processed_img_names)
+        if no_of_frames > 1:
+            self._generate_mov(
+                list(ffmpeg_args),
+                instance,
+                fps,
+                no_of_frames,
+                source_files_pattern,
+                staging_dir)
+
+        self.log.info(f"Extracted {instance} to {staging_dir}")
+
+    def _prepare_file_for_image_family(self, img_file, instance, staging_dir):
+        """Converts an existing file from the image family to .jpg.
+
+        An image instance could have its own separate review (an instance
+        per layer, for example). This uses the extracted file instead of
+        extracting again.
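+
+        Transparent sources (RGBA/LA or palette with transparency) are
+        composited over a white background first; converting them to JPEG
+        directly produces low quality output.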
+ Args: + img_file (str): name of output file (with 0000 value for ffmpeg + later) + instance: + staging_dir (str): temporary folder where extracted file is located + """ + repre_file = instance.data["representations"][0] + source_file_path = os.path.join(repre_file["stagingDir"], + repre_file["files"]) + if not os.path.exists(source_file_path): + raise RuntimeError(f"{source_file_path} doesn't exist for " + "review to create from") + _, ext = os.path.splitext(repre_file["files"]) + if ext != ".jpg": + im = Image.open(source_file_path) + if (im.mode in ('RGBA', 'LA') or ( + im.mode == 'P' and 'transparency' in im.info)): + # without this it produces messy low quality jpg + rgb_im = Image.new("RGBA", (im.width, im.height), "#ffffff") + rgb_im.alpha_composite(im) + rgb_im.convert("RGB").save(os.path.join(staging_dir, img_file)) + else: + im.save(os.path.join(staging_dir, img_file)) + else: + # handles already .jpg + shutil.copy(source_file_path, + os.path.join(staging_dir, img_file)) + + def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames, + source_files_pattern, staging_dir): + """Generates .mov to upload to Ftrack. + + Args: + ffmpeg_path (str): path to ffmpeg + instance (Pyblish Instance) + fps (str) + no_of_frames (int): + source_files_pattern (str): name of source file + staging_dir (str): temporary location to store thumbnail + Updates: + instance - adds representation portion + """ + # Generate mov. + mov_path = os.path.join(staging_dir, "review.mov") + self.log.info(f"Generate mov review: {mov_path}") + args = ffmpeg_path + [ + "-y", + "-i", source_files_pattern, + "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", + "-vframes", str(no_of_frames), + mov_path + ] + self.log.debug("mov args:: {}".format(args)) + _output = run_subprocess(args) + instance.data["representations"].append({ + "name": "mov", + "ext": "mov", + "files": os.path.basename(mov_path), + "stagingDir": staging_dir, + "frameStart": 1, + "frameEnd": no_of_frames, + "fps": fps, + "tags": self.mov_options['tags'] + }) + + def _generate_thumbnail( + self, ffmpeg_args, instance, source_files_pattern, staging_dir + ): + """Generates scaled down thumbnail and adds it as representation. + + Args: + ffmpeg_path (str): path to ffmpeg + instance (Pyblish Instance) + source_files_pattern (str): name of source file + staging_dir (str): temporary location to store thumbnail + Updates: + instance - adds representation portion + """ + # Generate thumbnail + thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") + self.log.info(f"Generate thumbnail {thumbnail_path}") + args = ffmpeg_args + [ + "-y", + "-i", source_files_pattern, + "-vf", "scale=300:-1", + "-vframes", "1", + thumbnail_path + ] + self.log.debug("thumbnail args:: {}".format(args)) + _output = run_subprocess(args) + instance.data["representations"].append({ + "name": "thumbnail", + "ext": "jpg", + "outputName": "thumb", + "files": os.path.basename(thumbnail_path), + "stagingDir": staging_dir, + "tags": ["thumbnail", "delete"] + }) + instance.data["thumbnailPath"] = thumbnail_path + + def _check_and_resize(self, processed_img_names, source_files_pattern, + staging_dir): + """Check if saved image could be used in ffmpeg. + + Ffmpeg has max size 16384x16384. Saved image(s) must be resized to be + used as a source for thumbnail or review mov. 
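+
+        Illustrative example (sizes are made up): with the default
+        ``max_downscale_size`` of 8192, a 16384x8192 canvas exceeds the
+        limit in width, so it is scaled with
+        ``Image.thumbnail((8192, 8192))``, which keeps the aspect ratio
+        and yields an 8192x4096 image written into a ``resized``
+        subfolder; smaller images are passed through untouched.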
+        """
+        Image.MAX_IMAGE_PIXELS = None
+        first_url = os.path.join(staging_dir, processed_img_names[0])
+        with Image.open(first_url) as im:
+            width, height = im.size
+
+        if width > self.max_downscale_size or height > self.max_downscale_size:
+            resized_dir = os.path.join(staging_dir, "resized")
+            os.mkdir(resized_dir)
+            source_files_pattern = os.path.join(resized_dir,
+                                                self.output_seq_filename)
+            for file_name in processed_img_names:
+                source_url = os.path.join(staging_dir, file_name)
+                with Image.open(source_url) as res_img:
+                    # 'thumbnail' automatically keeps aspect ratio
+                    # Pillow 10 removed Image.ANTIALIAS; LANCZOS is the
+                    # equivalent resampling filter
+                    res_img.thumbnail((self.max_downscale_size,
+                                       self.max_downscale_size),
+                                      Image.LANCZOS)
+                    res_img.save(os.path.join(resized_dir, file_name))
+
+        return source_files_pattern
+
+    def _get_layers_from_image_instances(self, instance):
+        """Collect all layers from 'instance'.
+
+        Returns:
+            (list) of PSItem
+        """
+        layers = []
+        # creating review for existing 'image' instance
+        if instance.data["family"] == "image" and instance.data.get("layer"):
+            layers.append(instance.data["layer"])
+            return layers
+
+        for image_instance in instance.context:
+            if image_instance.data["family"] != "image":
+                continue
+            if not image_instance.data.get("layer"):
+                # dummy instance for flatten image
+                continue
+            layers.append(image_instance.data.get("layer"))
+
+        return sorted(layers)
+
+    def _save_flatten_image(self, staging_dir, layers):
+        """Creates flat image from 'layers' into 'staging_dir'.
+
+        Returns:
+            (str): path to new image
+        """
+        img_filename = self.output_seq_filename % 0
+        output_image_path = os.path.join(staging_dir, img_filename)
+        stub = photoshop.stub()
+
+        with photoshop.maintained_visibility():
+            self.log.info("Extracting {}".format(layers))
+            if layers:
+                stub.hide_all_others_layers(layers)
+
+            stub.saveAs(output_image_path, 'jpg', True)
+
+        return img_filename
+
+    def _save_sequence_images(self, staging_dir, layers):
+        """Creates separate flat images from 'layers' into 'staging_dir'.
+
+        Used as source for multi frame .mov to review at once.
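+
+        Illustrative example (document name is made up): with an active
+        document ``shot010.psd`` and three publishable layers, the
+        ``self.output_seq_filename`` pattern produces
+        ``shot010.0000.jpg``, ``shot010.0001.jpg`` and
+        ``shot010.0002.jpg``, with one layer visible per frame.
+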
+ Returns: + (list): paths to new images + """ + stub = photoshop.stub() + + list_img_filename = [] + with photoshop.maintained_visibility(): + for i, layer in enumerate(layers): + self.log.info("Extracting {}".format(layer)) + + img_filename = self.output_seq_filename % i + output_image_path = os.path.join(staging_dir, img_filename) + list_img_filename.append(img_filename) + + with photoshop.maintained_visibility(): + stub.hide_all_others_layers([layer]) + stub.saveAs(output_image_path, 'jpg', True) + + return list_img_filename diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/extract_save_scene.py b/client/ayon_core/hosts/photoshop/plugins/publish/extract_save_scene.py new file mode 100644 index 0000000000..962c0722db --- /dev/null +++ b/client/ayon_core/hosts/photoshop/plugins/publish/extract_save_scene.py @@ -0,0 +1,14 @@ +from ayon_core.pipeline import publish +from ayon_core.hosts.photoshop import api as photoshop + + +class ExtractSaveScene(publish.Extractor): + """Save scene before extraction.""" + + order = publish.Extractor.order - 0.49 + label = "Extract Save Scene" + hosts = ["photoshop"] + families = ["workfile"] + + def process(self, instance): + photoshop.stub().save() diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml b/client/ayon_core/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml similarity index 100% rename from openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml rename to client/ayon_core/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml b/client/ayon_core/hosts/photoshop/plugins/publish/help/validate_naming.xml similarity index 100% rename from openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml rename to client/ayon_core/hosts/photoshop/plugins/publish/help/validate_naming.xml diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/increment_workfile.py b/client/ayon_core/hosts/photoshop/plugins/publish/increment_workfile.py new file mode 100644 index 0000000000..9b25a35ef5 --- /dev/null +++ b/client/ayon_core/hosts/photoshop/plugins/publish/increment_workfile.py @@ -0,0 +1,32 @@ +import os +import pyblish.api +from ayon_core.pipeline.publish import get_errored_plugins_from_context +from ayon_core.lib import version_up + +from ayon_core.hosts.photoshop import api as photoshop + + +class IncrementWorkfile(pyblish.api.InstancePlugin): + """Increment the current workfile. + + Saves the current scene with an increased version number. + """ + + label = "Increment Workfile" + order = pyblish.api.IntegratorOrder + 9.0 + hosts = ["photoshop"] + families = ["workfile"] + optional = True + + def process(self, instance): + errored_plugins = get_errored_plugins_from_context(instance.context) + if errored_plugins: + raise RuntimeError( + "Skipping incrementing current file because publishing failed." 
+            )
+
+        scene_path = version_up(instance.context.data["currentFile"])
+        _, ext = os.path.splitext(scene_path)
+        photoshop.stub().saveAs(scene_path, ext[1:], True)
+
+        self.log.info("Incremented workfile to: {}".format(scene_path))
diff --git a/client/ayon_core/hosts/photoshop/plugins/publish/validate_instance_asset.py b/client/ayon_core/hosts/photoshop/plugins/publish/validate_instance_asset.py
new file mode 100644
index 0000000000..dc0f2efd52
--- /dev/null
+++ b/client/ayon_core/hosts/photoshop/plugins/publish/validate_instance_asset.py
@@ -0,0 +1,72 @@
+import pyblish.api
+
+from ayon_core.pipeline import get_current_asset_name
+from ayon_core.pipeline.publish import (
+    ValidateContentsOrder,
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin
+)
+from ayon_core.hosts.photoshop import api as photoshop
+
+
+class ValidateInstanceAssetRepair(pyblish.api.Action):
+    """Repair the instance asset."""
+
+    label = "Repair"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (result["error"] is not None and result["instance"] is not None
+                    and result["instance"] not in failed):
+                failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
+        stub = photoshop.stub()
+        current_asset_name = get_current_asset_name()
+        for instance in instances:
+            data = stub.read(instance[0])
+            data["asset"] = current_asset_name
+            stub.imprint(instance[0], data)
+
+
+class ValidateInstanceAsset(OptionalPyblishPluginMixin,
+                            pyblish.api.InstancePlugin):
+    """Validate the instance asset is the current selected context asset.
+
+    As it might happen that multiple workfiles are opened, switching
+    between them can desynchronize the selected context.
+    In that case outputs might be published under the wrong asset!
+
+    Repair action will use the Context asset value (from Workfiles or
+    Launcher). Closing and reopening via Workfiles will refresh the
+    Context value.
+    """
+
+    label = "Validate Instance Asset"
+    hosts = ["photoshop"]
+    optional = True
+    actions = [ValidateInstanceAssetRepair]
+    order = ValidateContentsOrder
+
+    def process(self, instance):
+        instance_asset = instance.data["asset"]
+        current_asset = get_current_asset_name()
+
+        if instance_asset != current_asset:
+            msg = (
+                f"Instance asset {instance_asset} is not the same "
+                f"as current context {current_asset}."
+ + ) + repair_msg = ( + f"Repair with 'Repair' button to use '{current_asset}'.\n" + ) + formatting_data = {"msg": msg, + "repair_msg": repair_msg} + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/client/ayon_core/hosts/photoshop/plugins/publish/validate_naming.py similarity index 95% rename from openpype/hosts/photoshop/plugins/publish/validate_naming.py rename to client/ayon_core/hosts/photoshop/plugins/publish/validate_naming.py index 07810f505e..89018a1cff 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/client/ayon_core/hosts/photoshop/plugins/publish/validate_naming.py @@ -2,9 +2,9 @@ import pyblish.api -from openpype.hosts.photoshop import api as photoshop -from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS -from openpype.pipeline.publish import ( +from ayon_core.hosts.photoshop import api as photoshop +from ayon_core.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishXmlValidationError, ) diff --git a/openpype/hosts/photoshop/resources/template.psd b/client/ayon_core/hosts/photoshop/resources/template.psd similarity index 100% rename from openpype/hosts/photoshop/resources/template.psd rename to client/ayon_core/hosts/photoshop/resources/template.psd diff --git a/openpype/hosts/resolve/README.markdown b/client/ayon_core/hosts/resolve/README.markdown similarity index 100% rename from openpype/hosts/resolve/README.markdown rename to client/ayon_core/hosts/resolve/README.markdown diff --git a/openpype/hosts/resolve/RESOLVE_API_v18.5.1-build6.txt b/client/ayon_core/hosts/resolve/RESOLVE_API_v18.5.1-build6.txt similarity index 100% rename from openpype/hosts/resolve/RESOLVE_API_v18.5.1-build6.txt rename to client/ayon_core/hosts/resolve/RESOLVE_API_v18.5.1-build6.txt diff --git a/openpype/hosts/resolve/__init__.py b/client/ayon_core/hosts/resolve/__init__.py similarity index 100% rename from openpype/hosts/resolve/__init__.py rename to client/ayon_core/hosts/resolve/__init__.py diff --git a/client/ayon_core/hosts/resolve/addon.py b/client/ayon_core/hosts/resolve/addon.py new file mode 100644 index 0000000000..9c9932826b --- /dev/null +++ b/client/ayon_core/hosts/resolve/addon.py @@ -0,0 +1,23 @@ +import os + +from ayon_core.modules import OpenPypeModule, IHostAddon + +from .utils import RESOLVE_ROOT_DIR + + +class ResolveAddon(OpenPypeModule, IHostAddon): + name = "resolve" + host_name = "resolve" + + def initialize(self, module_settings): + self.enabled = True + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(RESOLVE_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".drp"] diff --git a/openpype/hosts/resolve/api/__init__.py b/client/ayon_core/hosts/resolve/api/__init__.py similarity index 100% rename from openpype/hosts/resolve/api/__init__.py rename to client/ayon_core/hosts/resolve/api/__init__.py diff --git a/client/ayon_core/hosts/resolve/api/action.py b/client/ayon_core/hosts/resolve/api/action.py new file mode 100644 index 0000000000..620d51b2b3 --- /dev/null +++ b/client/ayon_core/hosts/resolve/api/action.py @@ -0,0 +1,52 @@ +# absolute_import is needed to counter the `module has no cmds error` in Maya +from __future__ import absolute_import + +import pyblish.api + + +from ayon_core.pipeline.publish import get_errored_instances_from_context + + +class 
SelectInvalidAction(pyblish.api.Action): + """Select invalid clips in Resolve timeline when plug-in failed. + + To retrieve the invalid nodes this assumes a static `get_invalid()` + method is available on the plugin. + + """ + label = "Select invalid" + on = "failed" # This action is only available on a failed plug-in + icon = "search" # Icon from Awesome Icon + + def process(self, context, plugin): + + try: + from .lib import get_project_manager + pm = get_project_manager() + self.log.debug(pm) + except ImportError: + raise ImportError("Current host is not Resolve") + + errored_instances = get_errored_instances_from_context(context, + plugin=plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid clips..") + invalid = list() + for instance in errored_instances: + invalid_nodes = plugin.get_invalid(instance) + if invalid_nodes: + if isinstance(invalid_nodes, (list, tuple)): + invalid.extend(invalid_nodes) + else: + self.log.warning("Plug-in returned to be invalid, " + "but has no selectable nodes.") + + # Ensure unique (process each node only once) + invalid = list(set(invalid)) + + if invalid: + self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid)) + # TODO: select resolve timeline track items in current timeline + else: + self.log.info("No invalid nodes found.") diff --git a/client/ayon_core/hosts/resolve/api/lib.py b/client/ayon_core/hosts/resolve/api/lib.py new file mode 100644 index 0000000000..2c648bb4cc --- /dev/null +++ b/client/ayon_core/hosts/resolve/api/lib.py @@ -0,0 +1,938 @@ +import sys +import json +import re +import os +import contextlib +from opentimelineio import opentime + +from ayon_core.lib import Logger +from ayon_core.pipeline.editorial import ( + is_overlapping_otio_ranges, + frames_to_timecode +) + +from ..otio import davinci_export as otio_export + +log = Logger.get_logger(__name__) + +self = sys.modules[__name__] +self.project_manager = None +self.media_storage = None + +# OpenPype sequential rename variables +self.rename_index = 0 +self.rename_add = 0 + +self.publish_clip_color = "Pink" +self.pype_marker_workflow = True + +# OpenPype compound clip workflow variable +self.pype_tag_name = "VFX Notes" + +# OpenPype marker workflow variables +self.pype_marker_name = "OpenPypeData" +self.pype_marker_duration = 1 +self.pype_marker_color = "Mint" +self.temp_marker_frame = None + +# OpenPype default timeline +self.pype_timeline_name = "OpenPypeTimeline" + + +@contextlib.contextmanager +def maintain_current_timeline(to_timeline: object, + from_timeline: object = None): + """Maintain current timeline selection during context + + Attributes: + from_timeline (resolve.Timeline)[optional]: + Example: + >>> print(from_timeline.GetName()) + timeline1 + >>> print(to_timeline.GetName()) + timeline2 + + >>> with maintain_current_timeline(to_timeline): + ... print(get_current_timeline().GetName()) + timeline2 + + >>> print(get_current_timeline().GetName()) + timeline1 + """ + project = get_current_project() + working_timeline = from_timeline or project.GetCurrentTimeline() + + # switch to the input timeline + project.SetCurrentTimeline(to_timeline) + + try: + # do a work + yield + finally: + # put the original working timeline to context + project.SetCurrentTimeline(working_timeline) + + +def get_project_manager(): + from . import bmdvr + if not self.project_manager: + self.project_manager = bmdvr.GetProjectManager() + return self.project_manager + + +def get_media_storage(): + from . 
import bmdvr
+    if not self.media_storage:
+        self.media_storage = bmdvr.GetMediaStorage()
+    return self.media_storage
+
+
+def get_current_project():
+    """Get current project object.
+    """
+    return get_project_manager().GetCurrentProject()
+
+
+def get_current_timeline(new=False):
+    """Get current timeline object.
+
+    Args:
+        new (bool)[optional]: [DEPRECATED] if True it will create
+            new timeline if none exists
+
+    Returns:
+        TODO: will need to reflect future `None`
+        object: resolve.Timeline
+    """
+    project = get_current_project()
+    timeline = project.GetCurrentTimeline()
+
+    # return current timeline if any
+    if timeline:
+        return timeline
+
+    # TODO: [deprecated] and will be removed in future
+    if new:
+        return get_new_timeline()
+
+
+def get_any_timeline():
+    """Get any timeline object.
+
+    Returns:
+        object | None: resolve.Timeline
+    """
+    project = get_current_project()
+    timeline_count = project.GetTimelineCount()
+    if timeline_count > 0:
+        return project.GetTimelineByIndex(1)
+
+
+def get_new_timeline(timeline_name: str = None):
+    """Get new timeline object.
+
+    Arguments:
+        timeline_name (str): New timeline name.
+
+    Returns:
+        object: resolve.Timeline
+    """
+    project = get_current_project()
+    media_pool = project.GetMediaPool()
+    new_timeline = media_pool.CreateEmptyTimeline(
+        timeline_name or self.pype_timeline_name)
+    project.SetCurrentTimeline(new_timeline)
+    return new_timeline
+
+
+def create_bin(name: str, root: object = None) -> object:
+    """
+    Create media pool folder.
+
+    Returns the folder object; if a folder of the given name does not
+    exist, a new one is created. If the input name contains forward or
+    backward slashes, all parents are created and the last child bin
+    object is returned.
+
+    Args:
+        name (str): name of folder / bin, or hierarchical name "parent/name"
+        root (resolve.Folder)[optional]: root folder / bin object
+
+    Returns:
+        object: resolve.Folder
+    """
+    # get all variables
+    media_pool = get_current_project().GetMediaPool()
+    root_bin = root or media_pool.GetRootFolder()
+
+    # create hierarchy of bins in case there is slash in name
+    if "/" in name.replace("\\", "/"):
+        child_bin = None
+        for bname in name.split("/"):
+            child_bin = create_bin(bname, child_bin or root_bin)
+        if child_bin:
+            return child_bin
+    else:
+        created_bin = None
+        for subfolder in root_bin.GetSubFolderList():
+            if subfolder.GetName() in name:
+                created_bin = subfolder
+
+        if not created_bin:
+            new_folder = media_pool.AddSubFolder(root_bin, name)
+            media_pool.SetCurrentFolder(new_folder)
+        else:
+            media_pool.SetCurrentFolder(created_bin)
+
+        return media_pool.GetCurrentFolder()
+
+
+def remove_media_pool_item(media_pool_item: object) -> bool:
+    media_pool = get_current_project().GetMediaPool()
+    return media_pool.DeleteClips([media_pool_item])
+
+
+def create_media_pool_item(
+    files: list,
+    root: object = None,
+) -> object:
+    """
+    Create media pool item.
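+
+    Example (illustrative paths, a sketch of the expected behavior):
+    calling ``create_media_pool_item(["/mnt/plates/sh010.0001.exr"])``
+    returns the already imported MediaPoolItem when the clip exists in
+    the bin, otherwise imports the files and returns the new item, or
+    ``False`` when the import fails.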
+
+    Args:
+        files (list[str]): list of absolute paths to files
+        root (resolve.Folder)[optional]: root folder / bin object
+
+    Returns:
+        object: resolve.MediaPoolItem
+    """
+    # get all variables
+    media_pool = get_current_project().GetMediaPool()
+    root_bin = root or media_pool.GetRootFolder()
+
+    # make sure files list is not empty and first available file exists
+    filepath = next((f for f in files if os.path.isfile(f)), None)
+    if not filepath:
+        raise FileNotFoundError("No file found in input files list")
+
+    # try to search in bin if the clip does not exist
+    existing_mpi = get_media_pool_item(filepath, root_bin)
+
+    if existing_mpi:
+        return existing_mpi
+
+    # add all data in folder to media pool
+    media_pool_items = media_pool.ImportMedia(files)
+
+    return media_pool_items.pop() if media_pool_items else False
+
+
+def get_media_pool_item(filepath, root: object = None) -> object:
+    """
+    Return clip if found in folder with use of input file path.
+
+    Args:
+        filepath (str): absolute path to a file
+        root (resolve.Folder)[optional]: root folder / bin object
+
+    Returns:
+        object: resolve.MediaPoolItem
+    """
+    media_pool = get_current_project().GetMediaPool()
+    root = root or media_pool.GetRootFolder()
+    fname = os.path.basename(filepath)
+
+    for _mpi in root.GetClipList():
+        _mpi_name = _mpi.GetClipProperty("File Name")
+        _mpi_name = get_reformated_path(_mpi_name, first=True)
+        if fname in _mpi_name:
+            return _mpi
+    return None
+
+
+def create_timeline_item(
+    media_pool_item: object,
+    timeline: object = None,
+    timeline_in: int = None,
+    source_start: int = None,
+    source_end: int = None,
+) -> object:
+    """
+    Add media pool item to current or defined timeline.
+
+    Args:
+        media_pool_item (resolve.MediaPoolItem): resolve's object
+        timeline (Optional[resolve.Timeline]): resolve's object
+        timeline_in (Optional[int]): timeline input frame (sequence frame)
+        source_start (Optional[int]): media source input frame (sequence frame)
+        source_end (Optional[int]): media source output frame (sequence frame)
+
+    Returns:
+        object: resolve.TimelineItem
+    """
+    # get all variables
+    project = get_current_project()
+    media_pool = project.GetMediaPool()
+    _clip_property = media_pool_item.GetClipProperty
+    clip_name = _clip_property("File Name")
+    timeline = timeline or get_current_timeline()
+
+    # timing variables
+    if all([timeline_in, source_start, source_end]):
+        fps = timeline.GetSetting("timelineFrameRate")
+        duration = source_end - source_start
+        timecode_in = frames_to_timecode(timeline_in, fps)
+        timecode_out = frames_to_timecode(timeline_in + duration, fps)
+    else:
+        timecode_in = None
+        timecode_out = None
+
+    # if timeline was used then switch it to current timeline
+    with maintain_current_timeline(timeline):
+        # Add input mediaPoolItem to clip data
+        clip_data = {
+            "mediaPoolItem": media_pool_item,
+        }
+
+        if source_start:
+            clip_data["startFrame"] = source_start
+        if source_end:
+            clip_data["endFrame"] = source_end
+        if timecode_in:
+            clip_data["recordFrame"] = timeline_in
+
+        # add to timeline
+        media_pool.AppendToTimeline([clip_data])
+
+        output_timeline_item = get_timeline_item(
+            media_pool_item, timeline)
+
+    assert output_timeline_item, (
+        "Clip name '{}' wasn't created on the timeline: '{}' \n\n"
+        "Please check if correct track position is activated, \n"
+        "or if a clip is not already at the timeline in \n"
+        "position: '{}' out: '{}'. \n\n"
+        "Clip data: {}"
+    ).format(
+        clip_name, timeline.GetName(), timecode_in, timecode_out, clip_data
+    )
+    return output_timeline_item
+
+
+def get_timeline_item(media_pool_item: object,
+                      timeline: object = None) -> object:
+    """
+    Returns clips related to input mediaPoolItem.
+
+    Args:
+        media_pool_item (resolve.MediaPoolItem): resolve's object
+        timeline (resolve.Timeline)[optional]: resolve's object
+
+    Returns:
+        object: resolve.TimelineItem
+    """
+    _clip_property = media_pool_item.GetClipProperty
+    clip_name = _clip_property("File Name")
+    output_timeline_item = None
+    timeline = timeline or get_current_timeline()
+
+    with maintain_current_timeline(timeline):
+        # search the timeline for the added clip
+
+        for _ti_data in get_current_timeline_items():
+            _ti_clip = _ti_data["clip"]["item"]
+            _ti_clip_property = _ti_clip.GetMediaPoolItem().GetClipProperty
+            if clip_name in _ti_clip_property("File Name"):
+                output_timeline_item = _ti_clip
+
+    return output_timeline_item
+
+
+def get_video_track_names() -> list:
+    tracks = list()
+    track_type = "video"
+    timeline = get_current_timeline()
+
+    # get all tracks count filtered by track type
+    selected_track_count = timeline.GetTrackCount(track_type)
+
+    # loop all tracks and get items
+    track_index: int
+    for track_index in range(1, (int(selected_track_count) + 1)):
+        track_name = timeline.GetTrackName("video", track_index)
+        tracks.append(track_name)
+
+    return tracks
+
+
+def get_current_timeline_items(
+        filter: bool = False,
+        track_type: str = None,
+        track_name: str = None,
+        selecting_color: str = None) -> list:
+    """ Gets all available current timeline track items
+    """
+    track_type = track_type or "video"
+    selecting_color = selecting_color or "Chocolate"
+    project = get_current_project()
+
+    # get timeline anyhow
+    timeline = (
+        get_current_timeline() or
+        get_any_timeline() or
+        get_new_timeline()
+    )
+    selected_clips = []
+
+    # get all tracks count filtered by track type
+    selected_track_count = timeline.GetTrackCount(track_type)
+
+    # loop all tracks and get items
+    _clips = {}
+    for track_index in range(1, (int(selected_track_count) + 1)):
+        _track_name = timeline.GetTrackName(track_type, track_index)
+
+        # filter out all unmatched track names
+        if track_name and _track_name not in track_name:
+            continue
+
+        timeline_items = timeline.GetItemListInTrack(
+            track_type, track_index)
+        _clips[track_index] = timeline_items
+
+        _data = {
+            "project": project,
+            "timeline": timeline,
+            "track": {
+                "name": _track_name,
+                "index": track_index,
+                "type": track_type}
+        }
+        # get track item object and its color
+        for clip_index, ti in enumerate(_clips[track_index]):
+            data = _data.copy()
+            data["clip"] = {
+                "item": ti,
+                "index": clip_index
+            }
+            ti_color = ti.GetClipColor()
+            if (filter and selecting_color in ti_color) or not filter:
+                selected_clips.append(data)
+    return selected_clips
+
+
+def get_pype_timeline_item_by_name(name: str) -> object:
+    """Get timeline item by name.
+
+    Args:
+        name (str): name of timeline item
+
+    Returns:
+        object: resolve.TimelineItem
+    """
+    for _ti_data in get_current_timeline_items():
+        _ti_clip = _ti_data["clip"]["item"]
+        tag_data = get_timeline_item_pype_tag(_ti_clip)
+        tag_name = tag_data.get("namespace")
+        if not tag_name:
+            continue
+        if tag_name in name:
+            return _ti_clip
+    return None
+
+
+def get_timeline_item_pype_tag(timeline_item):
+    """
+    Get openpype track item tag created by creator or loader plugin.
+ + Attributes: + trackItem (resolve.TimelineItem): resolve object + + Returns: + dict: openpype tag data + """ + return_tag = None + + if self.pype_marker_workflow: + return_tag = get_pype_marker(timeline_item) + else: + media_pool_item = timeline_item.GetMediaPoolItem() + + # get all tags from track item + _tags = media_pool_item.GetMetadata() + if not _tags: + return None + for key, data in _tags.items(): + # return only correct tag defined by global name + if key in self.pype_tag_name: + return_tag = json.loads(data) + + return return_tag + + +def set_timeline_item_pype_tag(timeline_item, data=None): + """ + Set openpype track item tag to input timeline_item. + + Attributes: + trackItem (resolve.TimelineItem): resolve api object + + Returns: + dict: json loaded data + """ + data = data or dict() + + # get available openpype tag if any + tag_data = get_timeline_item_pype_tag(timeline_item) + + if self.pype_marker_workflow: + # delete tag as it is not updatable + if tag_data: + delete_pype_marker(timeline_item) + + tag_data.update(data) + set_pype_marker(timeline_item, tag_data) + else: + if tag_data: + media_pool_item = timeline_item.GetMediaPoolItem() + # it not tag then create one + tag_data.update(data) + media_pool_item.SetMetadata( + self.pype_tag_name, json.dumps(tag_data)) + else: + tag_data = data + # if openpype tag available then update with input data + # add it to the input track item + timeline_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data)) + + return tag_data + + +def imprint(timeline_item, data=None): + """ + Adding `Avalon data` into a hiero track item tag. + + Also including publish attribute into tag. + + Arguments: + timeline_item (hiero.core.TrackItem): hiero track item object + data (dict): Any data which needs to be imprinted + + Examples: + data = { + 'asset': 'sq020sh0280', + 'family': 'render', + 'subset': 'subsetMain' + } + """ + data = data or {} + + set_timeline_item_pype_tag(timeline_item, data) + + # add publish attribute + set_publish_attribute(timeline_item, True) + + +def set_publish_attribute(timeline_item, value): + """ Set Publish attribute in input Tag object + + Attribute: + tag (hiero.core.Tag): a tag object + value (bool): True or False + """ + tag_data = get_timeline_item_pype_tag(timeline_item) + tag_data["publish"] = value + # set data to the publish attribute + set_timeline_item_pype_tag(timeline_item, tag_data) + + +def get_publish_attribute(timeline_item): + """ Get Publish attribute from input Tag object + + Attribute: + tag (hiero.core.Tag): a tag object + value (bool): True or False + """ + tag_data = get_timeline_item_pype_tag(timeline_item) + return tag_data["publish"] + + +def set_pype_marker(timeline_item, tag_data): + source_start = timeline_item.GetLeftOffset() + item_duration = timeline_item.GetDuration() + frame = int(source_start + (item_duration / 2)) + + # marker attributes + frameId = (frame / 10) * 10 + color = self.pype_marker_color + name = self.pype_marker_name + note = json.dumps(tag_data) + duration = (self.pype_marker_duration / 10) * 10 + + timeline_item.AddMarker( + frameId, + color, + name, + note, + duration + ) + + +def get_pype_marker(timeline_item): + timeline_item_markers = timeline_item.GetMarkers() + for marker_frame, marker in timeline_item_markers.items(): + color = marker["color"] + name = marker["name"] + if name == self.pype_marker_name and color == self.pype_marker_color: + note = marker["note"] + self.temp_marker_frame = marker_frame + return json.loads(note) + + return dict() + + +def 
delete_pype_marker(timeline_item): + timeline_item.DeleteMarkerAtFrame(self.temp_marker_frame) + self.temp_marker_frame = None + + +def create_compound_clip(clip_data, name, folder): + """ + Convert timeline object into nested timeline object + + Args: + clip_data (dict): timeline item object packed into dict + with project, timeline (sequence) + folder (resolve.MediaPool.Folder): media pool folder object, + name (str): name for compound clip + + Returns: + resolve.MediaPoolItem: media pool item with compound clip timeline(cct) + """ + # get basic objects form data + project = clip_data["project"] + timeline = clip_data["timeline"] + clip = clip_data["clip"] + + # get details of objects + clip_item = clip["item"] + + mp = project.GetMediaPool() + + # get clip attributes + clip_attributes = get_clip_attributes(clip_item) + + mp_item = clip_item.GetMediaPoolItem() + _mp_props = mp_item.GetClipProperty + + mp_first_frame = int(_mp_props("Start")) + mp_last_frame = int(_mp_props("End")) + + # initialize basic source timing for otio + ci_l_offset = clip_item.GetLeftOffset() + ci_duration = clip_item.GetDuration() + rate = float(_mp_props("FPS")) + + # source rational times + mp_in_rc = opentime.RationalTime((ci_l_offset), rate) + mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate) + + # get frame in and out for clip swapping + in_frame = opentime.to_frames(mp_in_rc) + out_frame = opentime.to_frames(mp_out_rc) + + # keep original sequence + tl_origin = timeline + + # Set current folder to input media_pool_folder: + mp.SetCurrentFolder(folder) + + # check if clip doesn't exist already: + clips = folder.GetClipList() + cct = next((c for c in clips + if c.GetName() in name), None) + + if cct: + print(f"Compound clip exists: {cct}") + else: + # Create empty timeline in current folder and give name: + cct = mp.CreateEmptyTimeline(name) + + # check if clip doesn't exist already: + clips = folder.GetClipList() + cct = next((c for c in clips + if c.GetName() in name), None) + print(f"Compound clip created: {cct}") + + with maintain_current_timeline(cct, tl_origin): + # Add input clip to the current timeline: + mp.AppendToTimeline([{ + "mediaPoolItem": mp_item, + "startFrame": mp_first_frame, + "endFrame": mp_last_frame + }]) + + # Add collected metadata and attributes to the comound clip: + if mp_item.GetMetadata(self.pype_tag_name): + clip_attributes[self.pype_tag_name] = mp_item.GetMetadata( + self.pype_tag_name)[self.pype_tag_name] + + # stringify + clip_attributes = json.dumps(clip_attributes) + + # add attributes to metadata + for k, v in mp_item.GetMetadata().items(): + cct.SetMetadata(k, v) + + # add metadata to cct + cct.SetMetadata(self.pype_tag_name, clip_attributes) + + # reset start timecode of the compound clip + cct.SetClipProperty("Start TC", _mp_props("Start TC")) + + # swap clips on timeline + swap_clips(clip_item, cct, in_frame, out_frame) + + cct.SetClipColor("Pink") + return cct + + +def swap_clips(from_clip, to_clip, to_in_frame, to_out_frame): + """ + Swapping clips on timeline in timelineItem + + It will add take and activate it to the frame range which is inputted + + Args: + from_clip (resolve.TimelineItem) + to_clip (resolve.mediaPoolItem) + to_clip_name (str): name of to_clip + to_in_frame (float): cut in frame, usually `GetLeftOffset()` + to_out_frame (float): cut out frame, usually left offset plus duration + + Returns: + bool: True if successfully replaced + + """ + _clip_prop = to_clip.GetClipProperty + to_clip_name = _clip_prop("File Name") + # add 
clip item as take to timeline
+    take = from_clip.AddTake(
+        to_clip,
+        float(to_in_frame),
+        float(to_out_frame)
+    )
+
+    if not take:
+        return False
+
+    for take_index in range(1, (int(from_clip.GetTakesCount()) + 1)):
+        take_item = from_clip.GetTakeByIndex(take_index)
+        take_mp_item = take_item["mediaPoolItem"]
+        if to_clip_name in take_mp_item.GetName():
+            from_clip.SelectTakeByIndex(take_index)
+            from_clip.FinalizeTake()
+            return True
+    return False
+
+
+def _validate_tc(x):
+    # Validate and reformat timecode string
+
+    if len(x) != 11:
+        print('Invalid timecode. Try again.')
+        # bail out early, slicing a malformed string would produce garbage
+        return None
+
+    c = ':'
+    colonized = x[:2] + c + x[3:5] + c + x[6:8] + c + x[9:]
+
+    if colonized.replace(':', '').isdigit():
+        print(f"_ colonized: {colonized}")
+        return colonized
+    else:
+        print('Invalid timecode. Try again.')
+        return None
+
+
+def get_pype_clip_metadata(clip):
+    """
+    Get openpype metadata created by creator plugin
+
+    Attributes:
+        clip (resolve.TimelineItem): resolve's object
+
+    Returns:
+        dict: hierarchy, orig clip attributes
+    """
+    mp_item = clip.GetMediaPoolItem()
+    metadata = mp_item.GetMetadata()
+
+    return metadata.get(self.pype_tag_name)
+
+
+def get_clip_attributes(clip):
+    """
+    Collect basic attributes from resolve timeline item
+
+    Args:
+        clip (resolve.TimelineItem): timeline item object
+
+    Returns:
+        dict: all collected attributes as key: values
+    """
+    mp_item = clip.GetMediaPoolItem()
+
+    return {
+        "clipIn": clip.GetStart(),
+        "clipOut": clip.GetEnd(),
+        "clipLeftOffset": clip.GetLeftOffset(),
+        "clipRightOffset": clip.GetRightOffset(),
+        "clipMarkers": clip.GetMarkers(),
+        "clipFlags": clip.GetFlagList(),
+        "sourceId": mp_item.GetMediaId(),
+        "sourceProperties": mp_item.GetClipProperty()
+    }
+
+
+def set_project_manager_to_folder_name(folder_name):
+    """
+    Sets context of Project manager to given folder by name.
+
+    Searches for a folder of the given name from the root folder down.
+    If no folder of that name exists, one is created in the root folder.
+
+    Args:
+        folder_name (str): name of searched folder
+
+    Returns:
+        bool: True if success
+
+    Raises:
+        Exception: Cannot create folder in root
+
+    """
+    # initialize project manager
+    get_project_manager()
+
+    set_folder = False
+
+    # go back to root folder
+    if self.project_manager.GotoRootFolder():
+        log.info(f"Testing existing folder: {folder_name}")
+        folders = _convert_resolve_list_type(
+            self.project_manager.GetFoldersInCurrentFolder())
+        log.info(f"Testing existing folders: {folders}")
+        # get me first available folder object
+        # with the same name as in `folder_name` else return False
+        if next((f for f in folders if f in folder_name), False):
+            log.info(f"Found existing folder: {folder_name}")
+            set_folder = self.project_manager.OpenFolder(folder_name)
+
+    if set_folder:
+        return True
+
+    # if folder by name is not existent then create one
+    # go back to root folder
+    log.info(f"Folder `{folder_name}` not found and will be created")
+    if self.project_manager.GotoRootFolder():
+        try:
+            # create folder by given name
+            self.project_manager.CreateFolder(folder_name)
+            self.project_manager.OpenFolder(folder_name)
+            return True
+        except NameError as e:
+            log.error((f"Folder with name `{folder_name}` cannot be created!"
+                       f"Error: {e}"))
+            return False
+
+
+def _convert_resolve_list_type(resolve_list):
+    """ Resolve is using indexed dictionary as list type.
+ `{1.0: 'vaule'}` + This will convert it to normal list class + """ + assert isinstance(resolve_list, dict), ( + "Input argument should be dict() type") + + return [resolve_list[i] for i in sorted(resolve_list.keys())] + + +def create_otio_time_range_from_timeline_item_data(timeline_item_data): + timeline_item = timeline_item_data["clip"]["item"] + project = timeline_item_data["project"] + timeline = timeline_item_data["timeline"] + timeline_start = timeline.GetStartFrame() + + frame_start = int(timeline_item.GetStart() - timeline_start) + frame_duration = int(timeline_item.GetDuration()) + fps = project.GetSetting("timelineFrameRate") + + return otio_export.create_otio_time_range( + frame_start, frame_duration, fps) + + +def get_otio_clip_instance_data(otio_timeline, timeline_item_data): + """ + Return otio objects for timeline, track and clip + + Args: + timeline_item_data (dict): timeline_item_data from list returned by + resolve.get_current_timeline_items() + otio_timeline (otio.schema.Timeline): otio object + + Returns: + dict: otio clip object + + """ + + timeline_item = timeline_item_data["clip"]["item"] + track_name = timeline_item_data["track"]["name"] + timeline_range = create_otio_time_range_from_timeline_item_data( + timeline_item_data) + + for otio_clip in otio_timeline.each_clip(): + track_name = otio_clip.parent().name + parent_range = otio_clip.range_in_parent() + if track_name not in track_name: + continue + if otio_clip.name not in timeline_item.GetName(): + continue + if is_overlapping_otio_ranges( + parent_range, timeline_range, strict=True): + + # add pypedata marker to otio_clip metadata + for marker in otio_clip.markers: + if self.pype_marker_name in marker.name: + otio_clip.metadata.update(marker.metadata) + return {"otioClip": otio_clip} + + return None + + +def get_reformated_path(path, padded=False, first=False): + """ + Return fixed python expression path + + Args: + path (str): path url or simple file name + + Returns: + type: string with reformated path + + Example: + get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr + + """ + first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]") + + if "[" in path: + padding_pattern = r"(\d+)(?=-)" + padding = len(re.findall(padding_pattern, path).pop()) + num_pattern = r"(\[\d+\-\d+\])" + if padded: + path = re.sub(num_pattern, f"%0{padding}d", path) + elif first: + first_frame = re.findall(first_frame_pattern, path, flags=0) + if len(first_frame) >= 1: + first_frame = first_frame[0] + path = re.sub(num_pattern, first_frame, path) + else: + path = re.sub(num_pattern, "%d", path) + return path diff --git a/client/ayon_core/hosts/resolve/api/menu.py b/client/ayon_core/hosts/resolve/api/menu.py new file mode 100644 index 0000000000..59eba14d83 --- /dev/null +++ b/client/ayon_core/hosts/resolve/api/menu.py @@ -0,0 +1,183 @@ +import os +import sys + +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core.tools.utils import host_tools +from ayon_core.pipeline import registered_host + + +MENU_LABEL = os.environ["AYON_MENU_LABEL"] + + +def load_stylesheet(): + path = os.path.join(os.path.dirname(__file__), "menu_style.qss") + if not os.path.exists(path): + print("Unable to load stylesheet, file not found in resources") + return "" + + with open(path, "r") as file_stream: + stylesheet = file_stream.read() + return stylesheet + + +class Spacer(QtWidgets.QWidget): + def __init__(self, height, *args, **kwargs): + super(Spacer, self).__init__(*args, **kwargs) + + self.setFixedHeight(height) + + real_spacer = 
QtWidgets.QWidget(self) + real_spacer.setObjectName("Spacer") + real_spacer.setFixedHeight(height) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(real_spacer) + + self.setLayout(layout) + + +class OpenPypeMenu(QtWidgets.QWidget): + def __init__(self, *args, **kwargs): + super(OpenPypeMenu, self).__init__(*args, **kwargs) + + self.setObjectName(f"{MENU_LABEL}Menu") + + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.CustomizeWindowHint + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowStaysOnTopHint + ) + + self.setWindowTitle(f"{MENU_LABEL}") + save_current_btn = QtWidgets.QPushButton("Save current file", self) + workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self) + create_btn = QtWidgets.QPushButton("Create ...", self) + publish_btn = QtWidgets.QPushButton("Publish ...", self) + load_btn = QtWidgets.QPushButton("Load ...", self) + inventory_btn = QtWidgets.QPushButton("Manager ...", self) + subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self) + libload_btn = QtWidgets.QPushButton("Library ...", self) + experimental_btn = QtWidgets.QPushButton( + "Experimental tools ...", self + ) + # rename_btn = QtWidgets.QPushButton("Rename", self) + # set_colorspace_btn = QtWidgets.QPushButton( + # "Set colorspace from presets", self + # ) + # reset_resolution_btn = QtWidgets.QPushButton( + # "Set Resolution from presets", self + # ) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(10, 20, 10, 20) + + layout.addWidget(save_current_btn) + + layout.addWidget(Spacer(15, self)) + + layout.addWidget(workfiles_btn) + layout.addWidget(create_btn) + layout.addWidget(publish_btn) + layout.addWidget(load_btn) + layout.addWidget(inventory_btn) + layout.addWidget(subsetm_btn) + + layout.addWidget(Spacer(15, self)) + + layout.addWidget(libload_btn) + + # layout.addWidget(Spacer(15, self)) + + # layout.addWidget(rename_btn) + + # layout.addWidget(Spacer(15, self)) + + # layout.addWidget(set_colorspace_btn) + # layout.addWidget(reset_resolution_btn) + layout.addWidget(Spacer(15, self)) + layout.addWidget(experimental_btn) + + self.setLayout(layout) + + save_current_btn.clicked.connect(self.on_save_current_clicked) + save_current_btn.setShortcut(QtGui.QKeySequence.Save) + workfiles_btn.clicked.connect(self.on_workfile_clicked) + create_btn.clicked.connect(self.on_create_clicked) + publish_btn.clicked.connect(self.on_publish_clicked) + load_btn.clicked.connect(self.on_load_clicked) + inventory_btn.clicked.connect(self.on_inventory_clicked) + subsetm_btn.clicked.connect(self.on_subsetm_clicked) + libload_btn.clicked.connect(self.on_libload_clicked) + # rename_btn.clicked.connect(self.on_rename_clicked) + # set_colorspace_btn.clicked.connect(self.on_set_colorspace_clicked) + # reset_resolution_btn.clicked.connect(self.on_set_resolution_clicked) + experimental_btn.clicked.connect(self.on_experimental_clicked) + + def on_save_current_clicked(self): + host = registered_host() + current_file = host.get_current_workfile() + if not current_file: + print("Current project is not saved. 
" + "Please save once first via workfiles tool.") + host_tools.show_workfiles() + return + + print(f"Saving current file to: {current_file}") + host.save_workfile(current_file) + + def on_workfile_clicked(self): + print("Clicked Workfile") + host_tools.show_workfiles() + + def on_create_clicked(self): + print("Clicked Create") + host_tools.show_creator() + + def on_publish_clicked(self): + print("Clicked Publish") + host_tools.show_publish(parent=None) + + def on_load_clicked(self): + print("Clicked Load") + host_tools.show_loader(use_context=True) + + def on_inventory_clicked(self): + print("Clicked Inventory") + host_tools.show_scene_inventory() + + def on_subsetm_clicked(self): + print("Clicked Subset Manager") + host_tools.show_subset_manager() + + def on_libload_clicked(self): + print("Clicked Library") + host_tools.show_library_loader() + + def on_rename_clicked(self): + print("Clicked Rename") + + def on_set_colorspace_clicked(self): + print("Clicked Set Colorspace") + + def on_set_resolution_clicked(self): + print("Clicked Set Resolution") + + def on_experimental_clicked(self): + host_tools.show_experimental_tools_dialog() + + +def launch_pype_menu(): + app = QtWidgets.QApplication(sys.argv) + + pype_menu = OpenPypeMenu() + + stylesheet = load_stylesheet() + pype_menu.setStyleSheet(stylesheet) + + pype_menu.show() + + sys.exit(app.exec_()) diff --git a/openpype/hosts/resolve/api/menu_style.qss b/client/ayon_core/hosts/resolve/api/menu_style.qss similarity index 100% rename from openpype/hosts/resolve/api/menu_style.qss rename to client/ayon_core/hosts/resolve/api/menu_style.qss diff --git a/client/ayon_core/hosts/resolve/api/pipeline.py b/client/ayon_core/hosts/resolve/api/pipeline.py new file mode 100644 index 0000000000..2c5e0daf4b --- /dev/null +++ b/client/ayon_core/hosts/resolve/api/pipeline.py @@ -0,0 +1,303 @@ +""" +Basic avalon integration +""" +import os +import contextlib +from collections import OrderedDict + +from pyblish import api as pyblish + +from ayon_core.lib import Logger +from ayon_core.pipeline import ( + schema, + register_loader_plugin_path, + register_creator_plugin_path, + AVALON_CONTAINER_ID, +) +from ayon_core.host import ( + HostBase, + IWorkfileHost, + ILoadHost +) + +from . import lib +from .utils import get_resolve_module +from .workio import ( + open_file, + save_file, + file_extensions, + has_unsaved_changes, + work_root, + current_file +) + +log = Logger.get_logger(__name__) + +HOST_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") + +AVALON_CONTAINERS = ":AVALON_CONTAINERS" + + +class ResolveHost(HostBase, IWorkfileHost, ILoadHost): + name = "resolve" + + def install(self): + """Install resolve-specific functionality of avalon-core. + + This is where you install menus and register families, data + and loaders into resolve. + + It is called automatically when installing via `api.install(resolve)`. + + See the Maya equivalent for inspiration on how to implement this. 
+ + """ + + log.info("ayon_core.hosts.resolve installed") + + pyblish.register_host(self.name) + pyblish.register_plugin_path(PUBLISH_PATH) + print("Registering DaVinci Resolve plug-ins..") + + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + + # register callback for switching publishable + pyblish.register_callback("instanceToggled", + on_pyblish_instance_toggled) + + get_resolve_module() + + def open_workfile(self, filepath): + return open_file(filepath) + + def save_workfile(self, filepath=None): + return save_file(filepath) + + def work_root(self, session): + return work_root(session) + + def get_current_workfile(self): + return current_file() + + def workfile_has_unsaved_changes(self): + return has_unsaved_changes() + + def get_workfile_extensions(self): + return file_extensions() + + def get_containers(self): + return ls() + + +def containerise(timeline_item, + name, + namespace, + context, + loader=None, + data=None): + """Bundle Hiero's object into an assembly and imprint it with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + timeline_item (hiero.core.TrackItem): object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + context (dict): Asset information + loader (str, optional): Name of node used to produce this container. + + Returns: + timeline_item (hiero.core.TrackItem): containerised object + + """ + + data_imprint = OrderedDict({ + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + }) + + if data: + data_imprint.update(data) + + lib.set_timeline_item_pype_tag(timeline_item, data_imprint) + + return timeline_item + + +def ls(): + """List available containers. + + This function is used by the Container Manager in Nuke. You'll + need to implement a for-loop that then *yields* one Container at + a time. + + See the `container.json` schema for details on how it should look, + and the Maya equivalent, which is in `avalon.maya.pipeline` + """ + + # get all track items from current timeline + all_timeline_items = lib.get_current_timeline_items(filter=False) + + for timeline_item_data in all_timeline_items: + timeline_item = timeline_item_data["clip"]["item"] + container = parse_container(timeline_item) + if container: + yield container + + +def parse_container(timeline_item, validate=True): + """Return container data from timeline_item's openpype tag. + + Args: + timeline_item (hiero.core.TrackItem): A containerised track item. + validate (bool)[optional]: validating with avalon scheme + + Returns: + dict: The container schema data for input containerized track item. 
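+
+    Example (illustrative values, a sketch of the expected shape):
+
+        {
+            "schema": "openpype:container-2.0",
+            "id": AVALON_CONTAINER_ID,
+            "name": "plateMain",
+            "namespace": "sh010_01",
+            "loader": "LoadClip",
+            "representation": "5f6e...",
+            "objectName": "sh010_plate.mov",
+            "_timeline_item": <resolve.TimelineItem>,
+        }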
+ + """ + # convert tag metadata to normal keys names + data = lib.get_timeline_item_pype_tag(timeline_item) + + if validate and data and data.get("schema"): + schema.validate(data) + + if not isinstance(data, dict): + return + + # If not all required data return the empty container + required = ['schema', 'id', 'name', + 'namespace', 'loader', 'representation'] + + if not all(key in data for key in required): + return + + container = {key: data[key] for key in required} + + container["objectName"] = timeline_item.GetName() + + # Store reference to the node object + container["_timeline_item"] = timeline_item + + return container + + +def update_container(timeline_item, data=None): + """Update container data to input timeline_item's openpype tag. + + Args: + timeline_item (hiero.core.TrackItem): A containerised track item. + data (dict)[optional]: dictionery with data to be updated + + Returns: + bool: True if container was updated correctly + + """ + data = data or dict() + + container = lib.get_timeline_item_pype_tag(timeline_item) + + for _key, _value in container.items(): + try: + container[_key] = data[_key] + except KeyError: + pass + + log.info("Updating container: `{}`".format(timeline_item)) + return bool(lib.set_timeline_item_pype_tag(timeline_item, container)) + + +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context + + Example: + >>> with maintained_selection(): + ... node['selected'].setValue(True) + >>> print(node['selected'].value()) + False + """ + try: + # do the operation + yield + finally: + pass + + +def reset_selection(): + """Deselect all selected nodes + """ + pass + + +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node passthrough states on instance toggles.""" + + log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( + instance, old_value, new_value)) + + from ayon_core.hosts.resolve.api import ( + set_publish_attribute + ) + + # Whether instances should be passthrough based on new value + timeline_item = instance.data["item"] + set_publish_attribute(timeline_item, new_value) + + +def remove_instance(instance): + """Remove instance marker from track item.""" + instance_id = instance.get("uuid") + + selected_timeline_items = lib.get_current_timeline_items( + filter=True, selecting_color=lib.publish_clip_color) + + found_ti = None + for timeline_item_data in selected_timeline_items: + timeline_item = timeline_item_data["clip"]["item"] + + # get openpype tag data + tag_data = lib.get_timeline_item_pype_tag(timeline_item) + _ti_id = tag_data.get("uuid") + if _ti_id == instance_id: + found_ti = timeline_item + break + + if found_ti is None: + return + + # removing instance by marker color + print(f"Removing instance: {found_ti.GetName()}") + found_ti.DeleteMarkersByColor(lib.pype_marker_color) + + +def list_instances(): + """List all created instances from current workfile.""" + listed_instances = [] + selected_timeline_items = lib.get_current_timeline_items( + filter=True, selecting_color=lib.publish_clip_color) + + for timeline_item_data in selected_timeline_items: + timeline_item = timeline_item_data["clip"]["item"] + ti_name = timeline_item.GetName().split(".")[0] + + # get openpype tag data + tag_data = lib.get_timeline_item_pype_tag(timeline_item) + + if tag_data: + asset = tag_data.get("asset") + subset = tag_data.get("subset") + tag_data["label"] = f"{ti_name} [{asset}-{subset}]" + listed_instances.append(tag_data) + + return listed_instances diff --git 
a/client/ayon_core/hosts/resolve/api/plugin.py b/client/ayon_core/hosts/resolve/api/plugin.py new file mode 100644 index 0000000000..ccb20f712f --- /dev/null +++ b/client/ayon_core/hosts/resolve/api/plugin.py @@ -0,0 +1,901 @@ +import re +import uuid +import copy + +import qargparse +from qtpy import QtWidgets, QtCore + +from ayon_core.settings import get_current_project_settings +from ayon_core.pipeline import ( + LegacyCreator, + LoaderPlugin, + Anatomy +) + +from . import lib +from .menu import load_stylesheet + + +class CreatorWidget(QtWidgets.QDialog): + + # output items + items = {} + + def __init__(self, name, info, ui_inputs, parent=None): + super(CreatorWidget, self).__init__(parent) + + self.setObjectName(name) + + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.CustomizeWindowHint + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowStaysOnTopHint + ) + self.setWindowTitle(name or "OpenPype Creator Input") + self.resize(500, 700) + + # Where inputs and labels are set + self.content_widget = [QtWidgets.QWidget(self)] + top_layout = QtWidgets.QFormLayout(self.content_widget[0]) + top_layout.setObjectName("ContentLayout") + top_layout.addWidget(Spacer(5, self)) + + # first add widget tag line + top_layout.addWidget(QtWidgets.QLabel(info)) + + # main dynamic layout + self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAsNeeded) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOn) + self.scroll_area.setHorizontalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOff) + self.scroll_area.setWidgetResizable(True) + + self.content_widget.append(self.scroll_area) + + scroll_widget = QtWidgets.QWidget(self) + in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) + self.content_layout = [in_scroll_area] + + # add preset data into input widget layout + self.items = self.populate_widgets(ui_inputs) + self.scroll_area.setWidget(scroll_widget) + + # Confirmation buttons + btns_widget = QtWidgets.QWidget(self) + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + + cancel_btn = QtWidgets.QPushButton("Cancel") + btns_layout.addWidget(cancel_btn) + + ok_btn = QtWidgets.QPushButton("Ok") + btns_layout.addWidget(ok_btn) + + # Main layout of the dialog + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(10, 10, 10, 10) + main_layout.setSpacing(0) + + # adding content widget + for w in self.content_widget: + main_layout.addWidget(w) + + main_layout.addWidget(btns_widget) + + ok_btn.clicked.connect(self._on_ok_clicked) + cancel_btn.clicked.connect(self._on_cancel_clicked) + + stylesheet = load_stylesheet() + self.setStyleSheet(stylesheet) + + def _on_ok_clicked(self): + self.result = self.value(self.items) + self.close() + + def _on_cancel_clicked(self): + self.result = None + self.close() + + def value(self, data, new_data=None): + new_data = new_data or {} + for k, v in data.items(): + new_data[k] = { + "target": None, + "value": None + } + if v["type"] == "dict": + new_data[k]["target"] = v["target"] + new_data[k]["value"] = self.value(v["value"]) + if v["type"] == "section": + new_data.pop(k) + new_data = self.value(v["value"], new_data) + elif getattr(v["value"], "currentText", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].currentText() + elif getattr(v["value"], "isChecked", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].isChecked() + elif getattr(v["value"], 
"value", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].value() + elif getattr(v["value"], "text", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].text() + + return new_data + + def camel_case_split(self, text): + matches = re.finditer( + '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) + return " ".join([str(m.group(0)).capitalize() for m in matches]) + + def create_row(self, layout, type, text, **kwargs): + # get type attribute from qwidgets + attr = getattr(QtWidgets, type) + + # convert label text to normal capitalized text with spaces + label_text = self.camel_case_split(text) + + # assign the new text to label widget + label = QtWidgets.QLabel(label_text) + label.setObjectName("LineLabel") + + # create attribute name text strip of spaces + attr_name = text.replace(" ", "") + + # create attribute and assign default values + setattr( + self, + attr_name, + attr(parent=self)) + + # assign the created attribute to variable + item = getattr(self, attr_name) + for func, val in kwargs.items(): + if getattr(item, func): + func_attr = getattr(item, func) + if isinstance(val, tuple): + func_attr(*val) + else: + func_attr(val) + + # add to layout + layout.addRow(label, item) + + return item + + def populate_widgets(self, data, content_layout=None): + """ + Populate widget from input dict. + + Each plugin has its own set of widget rows defined in dictionary + each row values should have following keys: `type`, `target`, + `label`, `order`, `value` and optionally also `toolTip`. + + Args: + data (dict): widget rows or organized groups defined + by types `dict` or `section` + content_layout (QtWidgets.QFormLayout)[optional]: used when nesting + + Returns: + dict: redefined data dict updated with created widgets + + """ + + content_layout = content_layout or self.content_layout[-1] + # fix order of process by defined order value + ordered_keys = list(data.keys()) + for k, v in data.items(): + try: + # try removing a key from index which should + # be filled with new + ordered_keys.pop(v["order"]) + except IndexError: + pass + # add key into correct order + ordered_keys.insert(v["order"], k) + + # process ordered + for k in ordered_keys: + v = data[k] + tool_tip = v.get("toolTip", "") + if v["type"] == "dict": + # adding spacer between sections + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) + + # adding nested layout with label + self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + + nested_content_layout = QtWidgets.QFormLayout( + self.content_layout[-1]) + nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) + + # add nested key as label + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + if v["type"] == "section": + # adding spacer between sections + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) + + # adding nested layout with label + 
self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + + nested_content_layout = QtWidgets.QFormLayout( + self.content_layout[-1]) + nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) + + # add nested key as label + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + elif v["type"] == "QLineEdit": + data[k]["value"] = self.create_row( + content_layout, "QLineEdit", v["label"], + setText=v["value"], setToolTip=tool_tip) + elif v["type"] == "QComboBox": + data[k]["value"] = self.create_row( + content_layout, "QComboBox", v["label"], + addItems=v["value"], setToolTip=tool_tip) + elif v["type"] == "QCheckBox": + data[k]["value"] = self.create_row( + content_layout, "QCheckBox", v["label"], + setChecked=v["value"], setToolTip=tool_tip) + elif v["type"] == "QSpinBox": + data[k]["value"] = self.create_row( + content_layout, "QSpinBox", v["label"], + setRange=(0, 99999), + setValue=v["value"], + setToolTip=tool_tip) + return data + + +class Spacer(QtWidgets.QWidget): + def __init__(self, height, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + + self.setFixedHeight(height) + + real_spacer = QtWidgets.QWidget(self) + real_spacer.setObjectName("Spacer") + real_spacer.setFixedHeight(height) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(real_spacer) + + self.setLayout(layout) + + +class ClipLoader: + + active_bin = None + data = {} + + def __init__(self, loader_obj, context, **options): + """ Initialize object + + Arguments: + loader_obj (ayon_core.pipeline.load.LoaderPlugin): plugin object + context (dict): loader plugin context + options (dict)[optional]: possible keys: + projectBinPath: "path/to/binItem" + + """ + self.__dict__.update(loader_obj.__dict__) + self.context = context + self.active_project = lib.get_current_project() + + # try to get value from options or evaluate key value for `handles` + self.with_handles = options.get("handles") is True + + # try to get value from options or evaluate key value for `load_to` + self.new_timeline = ( + options.get("newTimeline") or + options.get("load_to") == "New timeline" + ) + # try to get value from options or evaluate key value for `load_how` + self.sequential_load = ( + options.get("sequentially") or + options.get("load_how") == "Sequentially in order" + ) + + assert self._populate_data(), str( + "Cannot load selected data, look into database " + "or call your supervisor") + + # inject asset data to representation dict + self._get_asset_data() + + # add active components to class + if self.new_timeline: + loader_cls = loader_obj.__class__ + if loader_cls.timeline: + # if multiselection is set then use options sequence + self.active_timeline = loader_cls.timeline + else: + # create new sequence + self.active_timeline = lib.get_new_timeline( + "{}_{}".format( + self.data["timeline_basename"], + str(uuid.uuid4())[:8] + ) + ) + loader_cls.timeline = self.active_timeline + + else: + self.active_timeline = lib.get_current_timeline() + + def _populate_data(self): + """ Gets context and converts it to self.data + data structure: + { + "name": "assetName_subsetName_representationName", + "binPath": "projectBinPath", + } + """ + # create name + representation = self.context["representation"] + representation_context = representation["context"] + asset = str(representation_context["asset"]) + subset =
str(representation_context["subset"]) + representation_name = str(representation_context["representation"]) + self.data["clip_name"] = "_".join([ + asset, + subset, + representation_name + ]) + self.data["versionData"] = self.context["version"]["data"] + + self.data["timeline_basename"] = "timeline_{}_{}".format( + subset, representation_name) + + # solve project bin structure path + hierarchy = str("/".join(( + "Loader", + representation_context["hierarchy"].replace("\\", "/"), + asset + ))) + + self.data["binPath"] = hierarchy + + return True + + def _get_asset_data(self): + """ Get all available asset data + + joint `data` key with asset.data dict into the representation + + """ + + self.data["assetData"] = copy.deepcopy(self.context["asset"]["data"]) + + def load(self, files): + """Load clip into timeline + + Arguments: + files (list[str]): list of files to load into timeline + """ + # create project bin for the media to be imported into + self.active_bin = lib.create_bin(self.data["binPath"]) + + # create mediaItem in active project bin + # create clip media + media_pool_item = lib.create_media_pool_item( + files, + self.active_bin + ) + _clip_property = media_pool_item.GetClipProperty + source_in = int(_clip_property("Start")) + source_out = int(_clip_property("End")) + source_duration = int(_clip_property("Frames")) + + if not self.with_handles: + # Load file without the handles of the source media + # We remove the handles from the source in and source out + # so that the handles are excluded in the timeline + handle_start = 0 + handle_end = 0 + + # get version data frame data from db + version_data = self.data["versionData"] + frame_start = version_data.get("frameStart") + frame_end = version_data.get("frameEnd") + + # The version data usually stored the frame range + handles of the + # media however certain representations may be shorter because they + # exclude those handles intentionally. Unfortunately the + # representation does not store that in the database currently; + # so we should compensate for those cases. 
If the media is shorter + # than the frame range specified in the database we assume it is + # without handles and thus we do not need to remove the handles + # from the source in and out points + if frame_start is not None and frame_end is not None: + # Version has frame range data, so we can compare media length + handle_start = version_data.get("handleStart", 0) + handle_end = version_data.get("handleEnd", 0) + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + database_frame_duration = int( + frame_end_handle - frame_start_handle + 1 + ) + if source_duration >= database_frame_duration: + source_in += handle_start + source_out -= handle_end + + # get timeline in + timeline_start = self.active_timeline.GetStartFrame() + if self.sequential_load: + # set timeline start frame + timeline_in = int(timeline_start) + else: + # set timeline start frame + original clip in frame + timeline_in = int( + timeline_start + self.data["assetData"]["clipIn"]) + + # make track item from source in bin as item + timeline_item = lib.create_timeline_item( + media_pool_item, + self.active_timeline, + timeline_in, + source_in, + source_out, + ) + + print("Loading clips: `{}`".format(self.data["clip_name"])) + return timeline_item + + def update(self, timeline_item, files): + # create project bin for the media to be imported into + self.active_bin = lib.create_bin(self.data["binPath"]) + + # create mediaItem in active project bin + # create clip media + media_pool_item = lib.create_media_pool_item( + files, + self.active_bin + ) + _clip_property = media_pool_item.GetClipProperty + + source_in = int(_clip_property("Start")) + source_out = int(_clip_property("End")) + + lib.swap_clips( + timeline_item, + media_pool_item, + source_in, + source_out + ) + + print("Updating clip: `{}`".format(self.data["clip_name"])) + return timeline_item + + +class TimelineItemLoader(LoaderPlugin): + """A basic SequenceLoader for Resolve + + This implements the basic behavior for a loader to inherit from: + it containerizes the reference and implements the `remove` and + `update` logic. + + """ + + options = [ + qargparse.Boolean( + "handles", + label="Include handles", + default=0, + help="Load with handles or without?" + ), + qargparse.Choice( + "load_to", + label="Where to load clips", + items=[ + "Current timeline", + "New timeline" + ], + default=0, + help="Where do you want clips to be loaded?" + ), + qargparse.Choice( + "load_how", + label="How to load clips", + items=[ + "Original timing", + "Sequentially in order" + ], + default="Original timing", + help="Would you like to place it at original timing?"
+ ) + ] + + def load( + self, + context, + name=None, + namespace=None, + options=None + ): + pass + + def update(self, container, representation): + """Update an existing `container` + """ + pass + + def remove(self, container): + """Remove an existing `container` + """ + pass + + +class Creator(LegacyCreator): + """Creator class wrapper + """ + marker_color = "Purple" + + def __init__(self, *args, **kwargs): + super(Creator, self).__init__(*args, **kwargs) + + resolve_p_settings = get_current_project_settings().get("resolve") + self.presets = {} + if resolve_p_settings: + self.presets = resolve_p_settings["create"].get( + self.__class__.__name__, {}) + + # adding basic current context resolve objects + self.project = lib.get_current_project() + self.timeline = lib.get_current_timeline() + + if (self.options or {}).get("useSelection"): + self.selected = lib.get_current_timeline_items(filter=True) + else: + self.selected = lib.get_current_timeline_items(filter=False) + + self.widget = CreatorWidget + + +class PublishClip: + """ + Convert a track item to a publishable instance + + Args: + timeline_item (resolve.TimelineItem): resolve timeline item object + kwargs (optional): additional data needed for rename=True (presets) + + Returns: + resolve.TimelineItem: timeline item object with openpype tag + """ + vertical_clip_match = {} + tag_data = {} + types = { + "shot": "shot", + "folder": "folder", + "episode": "episode", + "sequence": "sequence", + "track": "sequence", + } + + # parents search pattern + parents_search_pattern = r"\{([a-z]*?)\}" + + # default templates for non-ui use + rename_default = False + hierarchy_default = "{_folder_}/{_sequence_}/{_track_}" + clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" + subset_name_default = "" + review_track_default = "< none >" + subset_family_default = "plate" + count_from_default = 10 + count_steps_default = 10 + vertical_sync_default = False + driving_layer_default = "" + + def __init__(self, cls, timeline_item_data, **kwargs): + # populate input cls attribute onto self.[attr] + self.__dict__.update(cls.__dict__) + + # get main parent objects + self.timeline_item_data = timeline_item_data + self.timeline_item = timeline_item_data["clip"]["item"] + timeline_name = timeline_item_data["timeline"].GetName() + self.timeline_name = str(timeline_name).replace(" ", "_") + + # track item (clip) main attributes + self.ti_name = self.timeline_item.GetName() + self.ti_index = int(timeline_item_data["clip"]["index"]) + + # get track name and index + track_name = timeline_item_data["track"]["name"] + self.track_name = str(track_name).replace(" ", "_") + self.track_index = int(timeline_item_data["track"]["index"]) + + # add avalon data into tag data + if kwargs.get("avalon"): + self.tag_data.update(kwargs["avalon"]) + + # adding ui inputs if any + self.ui_inputs = kwargs.get("ui_inputs", {}) + + # adding media pool folder if any + self.mp_folder = kwargs.get("mp_folder") + + # populate default data before we get other attributes + self._populate_timeline_item_default_data() + + # use all populated default data to create all important attributes + self._populate_attributes() + + # create parents with correct types + self._create_parents() + + def convert(self): + # solve track item data and add them to tag data + self._convert_to_tag_data() + + # if track name is in review track name and also if driving track name + # is not in review track name: skip tag creation + if (self.track_name in self.review_layer) and ( + self.driving_layer not in
self.review_layer): + return + + # deal with clip name + new_name = self.tag_data.pop("newClipName") + + if self.rename: + self.tag_data["asset_name"] = new_name + else: + self.tag_data["asset_name"] = self.ti_name + + # AYON unique identifier + folder_path = "/{}/{}".format( + self.tag_data["hierarchy"], + self.tag_data["asset_name"] + ) + self.tag_data["folder_path"] = folder_path + + # convert to compound clip if marker workflow is disabled + if not lib.pype_marker_workflow: + # create compound clip workflow + lib.create_compound_clip( + self.timeline_item_data, + self.tag_data["asset_name"], + self.mp_folder + ) + + # add timeline_item_data selection to tag + self.tag_data.update({ + "track_data": self.timeline_item_data["track"] + }) + + # create openpype tag on timeline_item and add data + lib.imprint(self.timeline_item, self.tag_data) + + return self.timeline_item + + def _populate_timeline_item_default_data(self): + """ Populate default formatting data from track item. """ + + self.timeline_item_default_data = { + "_folder_": "shots", + "_sequence_": self.timeline_name, + "_track_": self.track_name, + "_clip_": self.ti_name, + "_trackIndex_": self.track_index, + "_clipIndex_": self.ti_index + } + + def _populate_attributes(self): + """ Populate main object attributes. """ + # track item frame range and parent track name for vertical sync check + self.clip_in = int(self.timeline_item.GetStart()) + self.clip_out = int(self.timeline_item.GetEnd()) + + # define ui inputs if non-gui mode was used + self.shot_num = self.ti_index + + # ui_inputs data or default values if gui was not used + self.rename = self.ui_inputs.get( + "clipRename", {}).get("value") or self.rename_default + self.clip_name = self.ui_inputs.get( + "clipName", {}).get("value") or self.clip_name_default + self.hierarchy = self.ui_inputs.get( + "hierarchy", {}).get("value") or self.hierarchy_default + self.hierarchy_data = self.ui_inputs.get( + "hierarchyData", {}).get("value") or \ + self.timeline_item_default_data.copy() + self.count_from = self.ui_inputs.get( + "countFrom", {}).get("value") or self.count_from_default + self.count_steps = self.ui_inputs.get( + "countSteps", {}).get("value") or self.count_steps_default + self.subset_name = self.ui_inputs.get( + "subsetName", {}).get("value") or self.subset_name_default + self.subset_family = self.ui_inputs.get( + "subsetFamily", {}).get("value") or self.subset_family_default + self.vertical_sync = self.ui_inputs.get( + "vSyncOn", {}).get("value") or self.vertical_sync_default + self.driving_layer = self.ui_inputs.get( + "vSyncTrack", {}).get("value") or self.driving_layer_default + self.review_track = self.ui_inputs.get( + "reviewTrack", {}).get("value") or self.review_track_default + + # build subset name from layer name + if self.subset_name == "": + self.subset_name = self.track_name + + # create subset for publishing + self.subset = self.subset_family + self.subset_name.capitalize() + + def _replace_hash_to_expression(self, name, text): + """ Replace hashes with a zero-padding format expression. """ + _spl = text.split("#") + _len = (len(_spl) - 1) + _repl = "{{{0}:0>{1}}}".format(name, _len) + new_text = text.replace(("#" * _len), _repl) + return new_text + + def _convert_to_tag_data(self): + """ Convert internal data to tag data.
+ + Populates the tag data into the internal variable self.tag_data + """ + # define vertical sync attributes + hero_track = True + self.review_layer = "" + if self.vertical_sync: + # check if track name is not in driving layer + if self.track_name not in self.driving_layer: + # if it is not, this clip is not on the hero track + hero_track = False + + # increasing steps by index of rename iteration + self.count_steps *= self.rename_index + + hierarchy_formatting_data = {} + _data = self.timeline_item_default_data.copy() + if self.ui_inputs: + # adding tag metadata from ui + for _k, _v in self.ui_inputs.items(): + if _v["target"] == "tag": + self.tag_data[_k] = _v["value"] + + # driving layer is set as positive match + if hero_track or self.vertical_sync: + # mark review layer + if self.review_track and ( + self.review_track not in self.review_track_default): + # if review layer is defined and not the same as default + self.review_layer = self.review_track + # calculate shot number + if self.rename_index == 0: + self.shot_num = self.count_from + else: + self.shot_num = self.count_from + self.count_steps + + # clip name sequence number + _data.update({"shot": self.shot_num}) + + # solve # in text to pythonic expression + for _k, _v in self.hierarchy_data.items(): + if "#" not in _v["value"]: + continue + self.hierarchy_data[ + _k]["value"] = self._replace_hash_to_expression( + _k, _v["value"]) + + # fill up pythonic expressions in hierarchy data + for k, _v in self.hierarchy_data.items(): + hierarchy_formatting_data[k] = _v["value"].format(**_data) + else: + # if no gui mode then just pass default data + hierarchy_formatting_data = self.hierarchy_data + + tag_hierarchy_data = self._solve_tag_hierarchy_data( + hierarchy_formatting_data + ) + + tag_hierarchy_data.update({"heroTrack": True}) + if hero_track and self.vertical_sync: + self.vertical_clip_match.update({ + (self.clip_in, self.clip_out): tag_hierarchy_data + }) + + if not hero_track and self.vertical_sync: + # driving layer is set as negative match + for (_in, _out), hero_data in self.vertical_clip_match.items(): + hero_data.update({"heroTrack": False}) + if _in == self.clip_in and _out == self.clip_out: + data_subset = hero_data["subset"] + # add track index in case of duplicate names in hero data + if self.subset in data_subset: + hero_data["subset"] = self.subset + str( + self.track_index) + # in case track name and subset name are the same + if self.subset_name == self.track_name: + hero_data["subset"] = self.subset + # return the hero data as tag hierarchy data + tag_hierarchy_data = hero_data + + # add data to return data dict + self.tag_data.update(tag_hierarchy_data) + + # add uuid to tag data + self.tag_data["uuid"] = str(uuid.uuid4()) + + # add review track only to hero track + if hero_track and self.review_layer: + self.tag_data.update({"reviewTrack": self.review_layer}) + else: + self.tag_data.update({"reviewTrack": None}) + + def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): + """ Solve tag data from hierarchy data and templates.
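Returns a dict with keys: newClipName, hierarchy, parents, hierarchyData, subset and family.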
""" + # fill up clip name and hierarchy keys + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) + + return { + "newClipName": clip_name_filled, + "hierarchy": hierarchy_filled, + "parents": self.parents, + "hierarchyData": hierarchy_formatting_data, + "subset": self.subset, + "family": self.subset_family + } + + def _convert_to_entity(self, key): + """ Converting input key to key with type. """ + # convert to entity type + entity_type = self.types.get(key) + + assert entity_type, "Missing entity type for `{}`".format( + key + ) + + return { + "entity_type": entity_type, + "entity_name": self.hierarchy_data[key]["value"].format( + **self.timeline_item_default_data + ) + } + + def _create_parents(self): + """ Create parents and return it in list. """ + self.parents = [] + + pattern = re.compile(self.parents_search_pattern) + par_split = [pattern.findall(t).pop() + for t in self.hierarchy.split("/")] + + for key in par_split: + parent = self._convert_to_entity(key) + self.parents.append(parent) + + +def get_representation_files(representation): + anatomy = Anatomy() + files = [] + for file_data in representation["files"]: + path = anatomy.fill_root(file_data["path"]) + files.append(path) + return files diff --git a/openpype/hosts/resolve/api/testing_utils.py b/client/ayon_core/hosts/resolve/api/testing_utils.py similarity index 100% rename from openpype/hosts/resolve/api/testing_utils.py rename to client/ayon_core/hosts/resolve/api/testing_utils.py diff --git a/openpype/hosts/resolve/api/todo-rendering.py b/client/ayon_core/hosts/resolve/api/todo-rendering.py similarity index 100% rename from openpype/hosts/resolve/api/todo-rendering.py rename to client/ayon_core/hosts/resolve/api/todo-rendering.py diff --git a/client/ayon_core/hosts/resolve/api/utils.py b/client/ayon_core/hosts/resolve/api/utils.py new file mode 100644 index 0000000000..030534370b --- /dev/null +++ b/client/ayon_core/hosts/resolve/api/utils.py @@ -0,0 +1,83 @@ +#! python3 + +""" +Resolve's tools for setting environment +""" + +import os +import sys + +from ayon_core.lib import Logger + +log = Logger.get_logger(__name__) + + +def get_resolve_module(): + from ayon_core.hosts.resolve import api + # dont run if already loaded + if api.bmdvr: + log.info(("resolve module is assigned to " + f"`ayon_core.hosts.resolve.api.bmdvr`: {api.bmdvr}")) + return api.bmdvr + try: + """ + The PYTHONPATH needs to be set correctly for this import + statement to work. An alternative is to import the + DaVinciResolveScript by specifying absolute path + (see ExceptionHandler logic) + """ + import DaVinciResolveScript as bmd + except ImportError: + if sys.platform.startswith("darwin"): + expected_path = ("/Library/Application Support/Blackmagic Design" + "/DaVinci Resolve/Developer/Scripting/Modules") + elif sys.platform.startswith("win") \ + or sys.platform.startswith("cygwin"): + expected_path = os.path.normpath( + os.getenv('PROGRAMDATA') + ( + "/Blackmagic Design/DaVinci Resolve/Support/Developer" + "/Scripting/Modules" + ) + ) + elif sys.platform.startswith("linux"): + expected_path = "/opt/resolve/libs/Fusion/Modules" + else: + raise NotImplementedError( + "Unsupported platform: {}".format(sys.platform) + ) + + # check if the default path has it... 
+ print(("Unable to find module DaVinciResolveScript from " + "$PYTHONPATH - trying default locations")) + + module_path = os.path.normpath( + os.path.join( + expected_path, + "DaVinciResolveScript.py" + ) + ) + + try: + import imp + bmd = imp.load_source('DaVinciResolveScript', module_path) + except ImportError: + # No fallbacks ... report error: + log.error( + ("Unable to find module DaVinciResolveScript - please " + "ensure that the module DaVinciResolveScript is " + "discoverable by python") + ) + log.error( + ("For a default DaVinci Resolve installation, the " + f"module is expected to be located in: {expected_path}") + ) + sys.exit() + # assign global var and return + bmdvr = bmd.scriptapp("Resolve") + bmdvf = bmd.scriptapp("Fusion") + api.bmdvr = bmdvr + api.bmdvf = bmdvf + log.info(("Assigning resolve module to " + f"`ayon_core.hosts.resolve.api.bmdvr`: {api.bmdvr}")) + log.info(("Assigning resolve module to " + f"`ayon_core.hosts.resolve.api.bmdvf`: {api.bmdvf}")) diff --git a/client/ayon_core/hosts/resolve/api/workio.py b/client/ayon_core/hosts/resolve/api/workio.py new file mode 100644 index 0000000000..5e4865ddc5 --- /dev/null +++ b/client/ayon_core/hosts/resolve/api/workio.py @@ -0,0 +1,96 @@ +"""Host API required Work Files tool""" + +import os +from ayon_core.lib import Logger +from .lib import ( + get_project_manager, + get_current_project +) + + +log = Logger.get_logger(__name__) + + +def file_extensions(): + return [".drp"] + + +def has_unsaved_changes(): + get_project_manager().SaveProject() + return False + + +def save_file(filepath): + pm = get_project_manager() + file = os.path.basename(filepath) + fname, _ = os.path.splitext(file) + project = get_current_project() + name = project.GetName() + + response = False + if name == "Untitled Project": + response = pm.CreateProject(fname) + log.info("New project created: {}".format(response)) + pm.SaveProject() + elif name != fname: + response = project.SetName(fname) + log.info("Project renamed: {}".format(response)) + + exported = pm.ExportProject(fname, filepath) + log.info("Project exported: {}".format(exported)) + + +def open_file(filepath): + """ + Loading project + """ + + from . import bmdvr + + pm = get_project_manager() + page = bmdvr.GetCurrentPage() + if page is not None: + # Save current project only if Resolve has an active page, otherwise + # we consider Resolve being in a pre-launch state (no open UI yet) + project = pm.GetCurrentProject() + print(f"Saving current project: {project}") + pm.SaveProject() + + file = os.path.basename(filepath) + fname, _ = os.path.splitext(file) + + try: + # load project from input path + project = pm.LoadProject(fname) + log.info(f"Project {project.GetName()} opened...") + + except AttributeError: + log.warning((f"Project with name `{fname}` does not exist! 
It will " + f"be imported from {filepath} and then loaded...")) + if pm.ImportProject(filepath): + # load project from input path + project = pm.LoadProject(fname) + log.info(f"Project imported/loaded {project.GetName()}...") + return True + return False + return True + + +def current_file(): + pm = get_project_manager() + file_ext = file_extensions()[0] + workdir_path = os.getenv("AVALON_WORKDIR") + project = pm.GetCurrentProject() + project_name = project.GetName() + file_name = project_name + file_ext + + # create current file path + current_file_path = os.path.join(workdir_path, file_name) + + # return current file path if it exists + if os.path.exists(current_file_path): + return os.path.normpath(current_file_path) + + +def work_root(session): + return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") diff --git a/openpype/hosts/resolve/hooks/pre_resolve_last_workfile.py b/client/ayon_core/hosts/resolve/hooks/pre_resolve_last_workfile.py similarity index 85% rename from openpype/hosts/resolve/hooks/pre_resolve_last_workfile.py rename to client/ayon_core/hosts/resolve/hooks/pre_resolve_last_workfile.py index 73f5ac75b1..d82651289c 100644 --- a/openpype/hosts/resolve/hooks/pre_resolve_last_workfile.py +++ b/client/ayon_core/hosts/resolve/hooks/pre_resolve_last_workfile.py @@ -1,5 +1,5 @@ import os -from openpype.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes class PreLaunchResolveLastWorkfile(PreLaunchHook): @@ -28,8 +28,8 @@ def execute(self): # Add path to launch environment for the startup script to pick up self.log.info( - "Setting OPENPYPE_RESOLVE_OPEN_ON_LAUNCH to launch " + "Setting AYON_RESOLVE_OPEN_ON_LAUNCH to launch " f"last workfile: {last_workfile}" ) - key = "OPENPYPE_RESOLVE_OPEN_ON_LAUNCH" + key = "AYON_RESOLVE_OPEN_ON_LAUNCH" self.launch_context.env[key] = last_workfile diff --git a/openpype/hosts/resolve/hooks/pre_resolve_setup.py b/client/ayon_core/hosts/resolve/hooks/pre_resolve_setup.py similarity index 95% rename from openpype/hosts/resolve/hooks/pre_resolve_setup.py rename to client/ayon_core/hosts/resolve/hooks/pre_resolve_setup.py index 326f37dffc..c14fd75b2f 100644 --- a/openpype/hosts/resolve/hooks/pre_resolve_setup.py +++ b/client/ayon_core/hosts/resolve/hooks/pre_resolve_setup.py @@ -1,8 +1,8 @@ import os from pathlib import Path import platform -from openpype.lib.applications import PreLaunchHook, LaunchTypes -from openpype.hosts.resolve.utils import setup +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes +from ayon_core.hosts.resolve.utils import setup class PreLaunchResolveSetup(PreLaunchHook): @@ -25,7 +25,7 @@ class PreLaunchResolveSetup(PreLaunchHook): It also defines: - `RESOLVE_UTILITY_SCRIPTS_DIR`: Destination directory for OpenPype Fusion scripts to be copied to for Resolve to pick them up. - - `OPENPYPE_LOG_NO_COLORS` to True to ensure OP doesn't try to + - `AYON_LOG_NO_COLORS` to True to ensure OP doesn't try to use logging with terminal colors as it fails in Resolve. 
""" @@ -132,7 +132,7 @@ def execute(self): ] = resolve_utility_scripts_dir.as_posix() # remove terminal coloring tags - self.launch_context.env["OPENPYPE_LOG_NO_COLORS"] = "True" + self.launch_context.env["AYON_LOG_NO_COLORS"] = "1" # Resolve Setup integration setup(self.launch_context.env) diff --git a/client/ayon_core/hosts/resolve/hooks/pre_resolve_startup.py b/client/ayon_core/hosts/resolve/hooks/pre_resolve_startup.py new file mode 100644 index 0000000000..ab16053450 --- /dev/null +++ b/client/ayon_core/hosts/resolve/hooks/pre_resolve_startup.py @@ -0,0 +1,25 @@ +import os + +from ayon_core.lib.applications import PreLaunchHook, LaunchTypes +import ayon_core.hosts.resolve + + +class PreLaunchResolveStartup(PreLaunchHook): + """Special hook to configure startup script. + + """ + order = 11 + app_groups = {"resolve"} + launch_types = {LaunchTypes.local} + + def execute(self): + # Set the openpype prelaunch startup script path for easy access + # in the LUA .scriptlib code + op_resolve_root = os.path.dirname(ayon_core.hosts.resolve.__file__) + script_path = os.path.join(op_resolve_root, "startup.py") + key = "AYON_RESOLVE_STARTUP_SCRIPT" + self.launch_context.env[key] = script_path + + self.log.info( + f"Setting AYON_RESOLVE_STARTUP_SCRIPT to: {script_path}" + ) diff --git a/openpype/hosts/nuke/vendor/google/protobuf/util/__init__.py b/client/ayon_core/hosts/resolve/otio/__init__.py similarity index 100% rename from openpype/hosts/nuke/vendor/google/protobuf/util/__init__.py rename to client/ayon_core/hosts/resolve/otio/__init__.py diff --git a/openpype/hosts/resolve/otio/davinci_export.py b/client/ayon_core/hosts/resolve/otio/davinci_export.py similarity index 100% rename from openpype/hosts/resolve/otio/davinci_export.py rename to client/ayon_core/hosts/resolve/otio/davinci_export.py diff --git a/openpype/hosts/resolve/otio/davinci_import.py b/client/ayon_core/hosts/resolve/otio/davinci_import.py similarity index 100% rename from openpype/hosts/resolve/otio/davinci_import.py rename to client/ayon_core/hosts/resolve/otio/davinci_import.py diff --git a/openpype/hosts/resolve/otio/utils.py b/client/ayon_core/hosts/resolve/otio/utils.py similarity index 100% rename from openpype/hosts/resolve/otio/utils.py rename to client/ayon_core/hosts/resolve/otio/utils.py diff --git a/client/ayon_core/hosts/resolve/plugins/create/create_shot_clip.py b/client/ayon_core/hosts/resolve/plugins/create/create_shot_clip.py new file mode 100644 index 0000000000..65bc9fed9d --- /dev/null +++ b/client/ayon_core/hosts/resolve/plugins/create/create_shot_clip.py @@ -0,0 +1,272 @@ +# from pprint import pformat +from ayon_core.hosts.resolve.api import plugin, lib +from ayon_core.hosts.resolve.api.lib import ( + get_video_track_names, + create_bin, +) + + +class CreateShotClip(plugin.Creator): + """Publishable clip""" + + label = "Create Publishable Clip" + family = "clip" + icon = "film" + defaults = ["Main"] + + gui_tracks = get_video_track_names() + gui_name = "OpenPype publish attributes creator" + gui_info = "Define sequential rename and fill hierarchy data." 
+ gui_inputs = { + "renameHierarchy": { + "type": "section", + "label": "Shot Hierarchy And Rename Settings", + "target": "ui", + "order": 0, + "value": { + "hierarchy": { + "value": "{folder}/{sequence}", + "type": "QLineEdit", + "label": "Shot Parent Hierarchy", + "target": "tag", + "toolTip": "Parent folder for shot root folder; template is filled from the `Hierarchy Data` section", # noqa + "order": 0}, + "clipRename": { + "value": False, + "type": "QCheckBox", + "label": "Rename clips", + "target": "ui", + "toolTip": "Rename selected clips on the fly", # noqa + "order": 1}, + "clipName": { + "value": "{sequence}{shot}", + "type": "QLineEdit", + "label": "Clip Name Template", + "target": "ui", + "toolTip": "Template for creating shot names used for renaming (use rename: on)", # noqa + "order": 2}, + "countFrom": { + "value": 10, + "type": "QSpinBox", + "label": "Count sequence from", + "target": "ui", + "toolTip": "Set the number the sequence count starts from", # noqa + "order": 3}, + "countSteps": { + "value": 10, + "type": "QSpinBox", + "label": "Stepping number", + "target": "ui", + "toolTip": "Number added with every new step", # noqa + "order": 4}, + } + }, + "hierarchyData": { + "type": "dict", + "label": "Shot Template Keywords", + "target": "tag", + "order": 1, + "value": { + "folder": { + "value": "shots", + "type": "QLineEdit", + "label": "{folder}", + "target": "tag", + "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 0}, + "episode": { + "value": "ep01", + "type": "QLineEdit", + "label": "{episode}", + "target": "tag", + "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 1}, + "sequence": { + "value": "sq01", + "type": "QLineEdit", + "label": "{sequence}", + "target": "tag", + "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 2}, + "track": { + "value": "{_track_}", + "type": "QLineEdit", + "label": "{track}", + "target": "tag", + "toolTip": "Name of track.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 3}, + "shot": { + "value": "sh###", + "type": "QLineEdit", + "label": "{shot}", + "target": "tag", + "toolTip": "Name of shot. `#` is converted to padded number.
\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 4} + } + }, + "verticalSync": { + "type": "section", + "label": "Vertical Synchronization Of Attributes", + "target": "ui", + "order": 2, + "value": { + "vSyncOn": { + "value": True, + "type": "QCheckBox", + "label": "Enable Vertical Sync", + "target": "ui", + "toolTip": "Switch on if you want clips above each other to share their attributes", # noqa + "order": 0}, + "vSyncTrack": { + "value": gui_tracks, # noqa + "type": "QComboBox", + "label": "Hero track", + "target": "ui", + "toolTip": "Select the driving track which should master all others", # noqa + "order": 1 + } + } + }, + "publishSettings": { + "type": "section", + "label": "Publish Settings", + "target": "ui", + "order": 3, + "value": { + "subsetName": { + "value": ["", "main", "bg", "fg", + "animatic"], + "type": "QComboBox", + "label": "Subset Name", + "target": "ui", + "toolTip": "Choose a subset name pattern; if none is selected, the track layer name will be used", # noqa + "order": 0}, + "subsetFamily": { + "value": ["plate", "take"], + "type": "QComboBox", + "label": "Subset Family", + "target": "ui", "toolTip": "What this subset will be used for", # noqa + "order": 1}, + "reviewTrack": { + "value": ["< none >"] + gui_tracks, + "type": "QComboBox", + "label": "Use Review Track", + "target": "ui", + "toolTip": "Generate preview videos on the fly; if `< none >` is selected, nothing will be generated.", # noqa + "order": 2}, + "audio": { + "value": False, + "type": "QCheckBox", + "label": "Include audio", + "target": "tag", + "toolTip": "Process subsets with corresponding audio", # noqa + "order": 3}, + "sourceResolution": { + "value": False, + "type": "QCheckBox", + "label": "Source resolution", + "target": "tag", + "toolTip": "Is resolution taken from timeline or source?", # noqa + "order": 4}, + } + }, + "shotAttr": { + "type": "section", + "label": "Shot Attributes", + "target": "ui", + "order": 4, + "value": { + "workfileFrameStart": { + "value": 1001, + "type": "QSpinBox", + "label": "Workfiles Start Frame", + "target": "tag", + "toolTip": "Set workfile starting frame number", # noqa + "order": 0 + }, + "handleStart": { + "value": 0, + "type": "QSpinBox", + "label": "Handle start (head)", + "target": "tag", + "toolTip": "Handle at start of clip", # noqa + "order": 1 + }, + "handleEnd": { + "value": 0, + "type": "QSpinBox", + "label": "Handle end (tail)", + "target": "tag", + "toolTip": "Handle at end of clip", # noqa + "order": 2 + } + } + } + } + + presets = None + + def process(self): + # get key pairs from presets and match them to ui inputs + for k, v in self.gui_inputs.items(): + if v["type"] in ("dict", "section"): + # nested dictionary (only one level allowed + # for sections and dict) + for _k, _v in v["value"].items(): + if self.presets.get(_k) is not None: + self.gui_inputs[k][ + "value"][_k]["value"] = self.presets[_k] + if self.presets.get(k): + self.gui_inputs[k]["value"] = self.presets[k] + + # open widget for plugin inputs + widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs) + widget.exec_() + + if len(self.selected) < 1: + return + + if not widget.result: + print("Operation aborted") + return + + self.rename_add = 0 + + # get ui output for track name for vertical sync + v_sync_track = widget.result["vSyncTrack"]["value"] + + # sort selected track items by vertical sync track + sorted_selected_track_items = [] +
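# NOTE: clips on the vSync (hero) track are converted first so that + # PublishClip.vertical_clip_match is populated before clips on the + # remaining tracks look up their hero data. +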
unsorted_selected_track_items = [] + print("_____ selected ______") + print(self.selected) + for track_item_data in self.selected: + if track_item_data["track"]["name"] in v_sync_track: + sorted_selected_track_items.append(track_item_data) + else: + unsorted_selected_track_items.append(track_item_data) + + sorted_selected_track_items.extend(unsorted_selected_track_items) + + # sequence attrs + sq_frame_start = self.timeline.GetStartFrame() + sq_markers = self.timeline.GetMarkers() + + # create media bin for compound clips (trackItems) + mp_folder = create_bin(self.timeline.GetName()) + + kwargs = { + "ui_inputs": widget.result, + "avalon": self.data, + "mp_folder": mp_folder, + "sq_frame_start": sq_frame_start, + "sq_markers": sq_markers + } + print(kwargs) + for i, track_item_data in enumerate(sorted_selected_track_items): + self.rename_index = i + self.log.info(track_item_data) + # convert track item to timeline media pool item + track_item = plugin.PublishClip( + self, track_item_data, **kwargs).convert() + track_item.SetClipColor(lib.publish_clip_color) diff --git a/client/ayon_core/hosts/resolve/plugins/load/load_clip.py b/client/ayon_core/hosts/resolve/plugins/load/load_clip.py new file mode 100644 index 0000000000..47aeac213b --- /dev/null +++ b/client/ayon_core/hosts/resolve/plugins/load/load_clip.py @@ -0,0 +1,167 @@ +from ayon_core.client import get_last_version_by_subset_id +from ayon_core.pipeline import ( + get_representation_context, + get_current_project_name +) +from ayon_core.hosts.resolve.api import lib, plugin +from ayon_core.hosts.resolve.api.pipeline import ( + containerise, + update_container, +) +from ayon_core.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) + + +class LoadClip(plugin.TimelineItemLoader): + """Load a subset to timeline as clip + + Place clip to timeline on its asset origin timings, as collected + during conforming to the project + """ + + families = ["render2d", "source", "plate", "render", "review"] + + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) + + label = "Load as clip" + order = -10 + icon = "code-fork" + color = "orange" + + # for loader multiselection + timeline = None + + # presets + clip_color_last = "Olive" + clip_color = "Orange" + + def load(self, context, name, namespace, options): + + # load clip to timeline and get main variables + files = plugin.get_representation_files(context["representation"]) + + timeline_item = plugin.ClipLoader( + self, context, **options).load(files) + namespace = namespace or timeline_item.GetName() + + # update color of clip regarding the version order + self.set_item_color(timeline_item, version=context["version"]) + + data_imprint = self.get_tag_data(context, name, namespace) + return containerise( + timeline_item, + name, namespace, context, + self.__class__.__name__, + data_imprint) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """ Updating previously loaded clips + """ + + context = get_representation_context(representation) + name = container['name'] + namespace = container['namespace'] + timeline_item = container["_timeline_item"] + + media_pool_item = timeline_item.GetMediaPoolItem() + + files = plugin.get_representation_files(representation) + + loader = plugin.ClipLoader(self, context) + timeline_item = loader.update(timeline_item, files) + + # update color of clip regarding the version order +
self.set_item_color(timeline_item, version=context["version"]) + + # if original media pool item has no remaining usages left + # remove it from the media pool + if int(media_pool_item.GetClipProperty("Usage")) == 0: + lib.remove_media_pool_item(media_pool_item) + + data_imprint = self.get_tag_data(context, name, namespace) + return update_container(timeline_item, data_imprint) + + def get_tag_data(self, context, name, namespace): + """Return data to be imprinted on the timeline item marker""" + + representation = context["representation"] + version = context['version'] + version_data = version.get("data", {}) + version_name = version.get("name", None) + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + # add additional metadata from the version to imprint Avalon knob + # move all version data keys to tag data + add_version_data_keys = [ + "frameStart", "frameEnd", "source", "author", + "fps", "handleStart", "handleEnd" + ] + data = { + key: version_data.get(key, "None") for key in add_version_data_keys + } + + # add variables related to version context + data.update({ + "representation": str(representation["_id"]), + "version": version_name, + "colorspace": colorspace, + "objectName": object_name + }) + return data + + @classmethod + def set_item_color(cls, timeline_item, version): + """Color timeline item based on whether it is outdated or latest""" + # define version name + version_name = version.get("name", None) + # get all versions in list + project_name = get_current_project_name() + last_version_doc = get_last_version_by_subset_id( + project_name, + version["parent"], + fields=["name"] + ) + if last_version_doc: + last_version = last_version_doc["name"] + else: + last_version = None + + # set clip colour + if version_name == last_version: + timeline_item.SetClipColor(cls.clip_color_last) + else: + timeline_item.SetClipColor(cls.clip_color) + + def remove(self, container): + timeline_item = container["_timeline_item"] + media_pool_item = timeline_item.GetMediaPoolItem() + timeline = lib.get_current_timeline() + + # DeleteClips function was added in Resolve 18.5+ + # by checking None we can detect whether the + # function exists in Resolve + if timeline.DeleteClips is not None: + timeline.DeleteClips([timeline_item]) + else: + # Resolve versions older than 18.5 can't delete clips via API + # so all we can do is just remove the pype marker to 'untag' it + if lib.get_pype_marker(timeline_item): + # Note: We must call `get_pype_marker` because + # `delete_pype_marker` uses a global variable set by + # `get_pype_marker` to delete the right marker + # TODO: Improve code to avoid the global `temp_marker_frame` + lib.delete_pype_marker(timeline_item) + + # if media pool item has no remaining usages left + # remove it from the media pool + if int(media_pool_item.GetClipProperty("Usage")) == 0: + lib.remove_media_pool_item(media_pool_item) diff --git a/client/ayon_core/hosts/resolve/plugins/publish/extract_workfile.py b/client/ayon_core/hosts/resolve/plugins/publish/extract_workfile.py new file mode 100644 index 0000000000..48ebdee7e3 --- /dev/null +++ b/client/ayon_core/hosts/resolve/plugins/publish/extract_workfile.py @@ -0,0 +1,52 @@ +import os +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.resolve.api.lib import get_project_manager + + +class ExtractWorkfile(publish.Extractor): + """ + Extractor export DRP workfile file representation + """ + + label = "Extract Workfile" + order = 
pyblish.api.ExtractorOrder + families = ["workfile"] + hosts = ["resolve"] + + def process(self, instance): + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + name = instance.data["name"] + project = instance.context.data["activeProject"] + staging_dir = self.staging_dir(instance) + + resolve_workfile_ext = ".drp" + drp_file_name = name + resolve_workfile_ext + + drp_file_path = os.path.normpath( + os.path.join(staging_dir, drp_file_name)) + + # write out the drp workfile + get_project_manager().ExportProject( + project.GetName(), drp_file_path) + + # create drp workfile representation + representation_drp = { + 'name': resolve_workfile_ext[1:], + 'ext': resolve_workfile_ext[1:], + 'files': drp_file_name, + "stagingDir": staging_dir, + } + + instance.data["representations"].append(representation_drp) + + # add sourcePath attribute to instance + if not instance.data.get("sourcePath"): + instance.data["sourcePath"] = drp_file_path + + self.log.info("Added Resolve file representation: {}".format( + representation_drp)) diff --git a/client/ayon_core/hosts/resolve/plugins/publish/precollect_instances.py b/client/ayon_core/hosts/resolve/plugins/publish/precollect_instances.py new file mode 100644 index 0000000000..0ae6206496 --- /dev/null +++ b/client/ayon_core/hosts/resolve/plugins/publish/precollect_instances.py @@ -0,0 +1,143 @@ +from pprint import pformat + +import pyblish + +from ayon_core.hosts.resolve.api.lib import ( + get_current_timeline_items, + get_timeline_item_pype_tag, + publish_clip_color, + get_publish_attribute, + get_otio_clip_instance_data, +) + + +class PrecollectInstances(pyblish.api.ContextPlugin): + """Collect all selected track items.""" + + order = pyblish.api.CollectorOrder - 0.49 + label = "Precollect Instances" + hosts = ["resolve"] + + def process(self, context): + otio_timeline = context.data["otioTimeline"] + selected_timeline_items = get_current_timeline_items( + filter=True, selecting_color=publish_clip_color) + + self.log.info( + "Processing enabled track items: {}".format( + len(selected_timeline_items))) + + for timeline_item_data in selected_timeline_items: + + data = {} + timeline_item = timeline_item_data["clip"]["item"] + + # get pype tag data + tag_data = get_timeline_item_pype_tag(timeline_item) + self.log.debug(f"__ tag_data: {pformat(tag_data)}") + + if not tag_data: + continue + + if tag_data.get("id") != "pyblish.avalon.instance": + continue + + media_pool_item = timeline_item.GetMediaPoolItem() + source_duration = int(media_pool_item.GetClipProperty("Frames")) + + # solve handles length + handle_start = min( + tag_data["handleStart"], int(timeline_item.GetLeftOffset())) + handle_end = min( + tag_data["handleEnd"], int( + source_duration - timeline_item.GetRightOffset())) + + self.log.debug("Handles: <{}, {}>".format(handle_start, handle_end)) + + # add tag data to instance data + data.update({ + k: v for k, v in tag_data.items() + if k not in ("id", "applieswhole", "label") + }) + + asset = tag_data["folder_path"] + subset = tag_data["subset"] + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": "{} {}".format(asset, subset), + "asset": asset, + "item": timeline_item, + "publish": get_publish_attribute(timeline_item), + "fps": context.data["fps"], + "handleStart": handle_start, + "handleEnd": handle_end, + "newAssetPublishing": True, + "families": ["clip"], + }) + + # otio clip data + otio_data = get_otio_clip_instance_data( + otio_timeline, timeline_item_data)
or {} + data.update(otio_data) + + # add resolution + self.get_resolution_to_data(data, context) + + # create instance + instance = context.create_instance(**data) + + # create shot instance for shot attribute creation/update + self.create_shot_instance(context, timeline_item, **data) + + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + + def get_resolution_to_data(self, data, context): + assert data.get("otioClip"), "Missing `otioClip` data" + + # solve source resolution option + if data.get("sourceResolution", None): + otio_clip_metadata = data[ + "otioClip"].media_reference.metadata + data.update({ + "resolutionWidth": otio_clip_metadata["width"], + "resolutionHeight": otio_clip_metadata["height"], + "pixelAspect": otio_clip_metadata["pixelAspect"] + }) + else: + otio_tl_metadata = context.data["otioTimeline"].metadata + data.update({ + "resolutionWidth": otio_tl_metadata["width"], + "resolutionHeight": otio_tl_metadata["height"], + "pixelAspect": otio_tl_metadata["pixelAspect"] + }) + + def create_shot_instance(self, context, timeline_item, **data): + hero_track = data.get("heroTrack") + hierarchy_data = data.get("hierarchyData") + + if not hero_track: + return + + if not hierarchy_data: + return + + asset = data["asset"] + subset = "shotMain" + + # family for the shot instance + family = "shot" + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": "{} {}".format(asset, subset), + "subset": subset, + "asset": asset, + "family": family, + "families": [], + "publish": get_publish_attribute(timeline_item) + }) + + context.create_instance(**data) diff --git a/client/ayon_core/hosts/resolve/plugins/publish/precollect_workfile.py b/client/ayon_core/hosts/resolve/plugins/publish/precollect_workfile.py new file mode 100644 index 0000000000..5f8cf6b5d9 --- /dev/null +++ b/client/ayon_core/hosts/resolve/plugins/publish/precollect_workfile.py @@ -0,0 +1,53 @@ +import pyblish.api +from pprint import pformat + +from ayon_core.pipeline import get_current_asset_name + +from ayon_core.hosts.resolve import api as rapi +from ayon_core.hosts.resolve.otio import davinci_export + + +class PrecollectWorkfile(pyblish.api.ContextPlugin): + """Precollect the current working file into context""" + + label = "Precollect Workfile" + order = pyblish.api.CollectorOrder - 0.5 + + def process(self, context): + current_asset_name = get_current_asset_name() + asset_name = current_asset_name.split("/")[-1] + + subset = "workfileMain" + project = rapi.get_current_project() + fps = project.GetSetting("timelineFrameRate") + video_tracks = rapi.get_video_track_names() + + # adding otio timeline to context + otio_timeline = davinci_export.create_otio_timeline(project) + + instance_data = { + "name": "{}_{}".format(asset_name, subset), + "label": "{} {}".format(current_asset_name, subset), + "asset": current_asset_name, + "subset": subset, + "item": project, + "family": "workfile", + "families": [] + } + + # create instance with workfile + instance = context.create_instance(**instance_data) + + # update context with main project attributes + context_data = { + "activeProject": project, + "otioTimeline": otio_timeline, + "videoTracks": video_tracks, + "currentFile": project.GetName(), + "fps": fps, + } + context.data.update(context_data) + + self.log.info("Creating instance: {}".format(instance)) + self.log.debug("__ instance.data: {}".format(pformat(instance.data))) + self.log.debug("__ context_data:
{}".format(pformat(context_data))) diff --git a/client/ayon_core/hosts/resolve/startup.py b/client/ayon_core/hosts/resolve/startup.py new file mode 100644 index 0000000000..174a2878c5 --- /dev/null +++ b/client/ayon_core/hosts/resolve/startup.py @@ -0,0 +1,70 @@ +"""This script is used as a startup script in Resolve through a .scriptlib file + +It triggers directly after the launch of Resolve and it's recommended to keep +it optimized for fast performance since the Resolve UI is actually interactive +while this is running. As such, there's nothing ensuring the user isn't +continuing manually before any of the logic here runs. As such we also try +to delay any imports as much as possible. + +This code runs in a separate process to the main Resolve process. + +""" +import os +from ayon_core.lib import Logger +import ayon_core.hosts.resolve.api + +log = Logger.get_logger(__name__) + + +def ensure_installed_host(): + """Install resolve host with openpype and return the registered host. + + This function can be called multiple times without triggering an + additional install. + """ + from ayon_core.pipeline import install_host, registered_host + host = registered_host() + if host: + return host + + host = ayon_core.hosts.resolve.api.ResolveHost() + install_host(host) + return registered_host() + + +def launch_menu(): + print("Launching Resolve OpenPype menu..") + ensure_installed_host() + ayon_core.hosts.resolve.api.launch_pype_menu() + + +def open_workfile(path): + # Avoid the need to "install" the host + host = ensure_installed_host() + host.open_workfile(path) + + +def main(): + # Open last workfile + workfile_path = os.environ.get("AYON_RESOLVE_OPEN_ON_LAUNCH") + + if workfile_path and os.path.exists(workfile_path): + log.info(f"Opening last workfile: {workfile_path}") + open_workfile(workfile_path) + else: + log.info("No last workfile set to open. 
Skipping..") + + # Launch OpenPype menu + from ayon_core.settings import get_project_settings + from ayon_core.pipeline.context_tools import get_current_project_name + project_name = get_current_project_name() + log.info(f"Current project name in context: {project_name}") + + settings = get_project_settings(project_name) + if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True): + log.info("Launching OpenPype menu..") + launch_menu() + + +if __name__ == "__main__": + main() diff --git a/client/ayon_core/hosts/resolve/utility_scripts/AYON__Menu.py b/client/ayon_core/hosts/resolve/utility_scripts/AYON__Menu.py new file mode 100644 index 0000000000..08cefb9d61 --- /dev/null +++ b/client/ayon_core/hosts/resolve/utility_scripts/AYON__Menu.py @@ -0,0 +1,22 @@ +import os +import sys + +from ayon_core.pipeline import install_host +from ayon_core.lib import Logger + +log = Logger.get_logger(__name__) + + +def main(env): + from ayon_core.hosts.resolve.api import ResolveHost, launch_pype_menu + + # activate resolve from openpype + host = ResolveHost() + install_host(host) + + launch_pype_menu() + + +if __name__ == "__main__": + result = main(os.environ) + sys.exit(not bool(result)) diff --git a/client/ayon_core/hosts/resolve/utility_scripts/ayon_startup.scriptlib b/client/ayon_core/hosts/resolve/utility_scripts/ayon_startup.scriptlib new file mode 100644 index 0000000000..22253390a3 --- /dev/null +++ b/client/ayon_core/hosts/resolve/utility_scripts/ayon_startup.scriptlib @@ -0,0 +1,21 @@ +-- Run OpenPype's Python launch script for resolve +function file_exists(name) + local f = io.open(name, "r") + return f ~= nil and io.close(f) +end + + +ayon_startup_script = os.getenv("AYON_RESOLVE_STARTUP_SCRIPT") +if ayon_startup_script ~= nil then + script = fusion:MapPath(ayon_startup_script) + + if file_exists(script) then + -- We must use RunScript to ensure it runs in a separate + -- process to Resolve itself to avoid a deadlock for + -- certain imports of OpenPype libraries or Qt + print("Running launch script: " .. script) + fusion:RunScript(script) + else + print("Launch script not found at: " .. 
script) + end +end \ No newline at end of file diff --git a/openpype/hosts/resolve/utility_scripts/develop/OTIO_export.py b/client/ayon_core/hosts/resolve/utility_scripts/develop/OTIO_export.py similarity index 96% rename from openpype/hosts/resolve/utility_scripts/develop/OTIO_export.py rename to client/ayon_core/hosts/resolve/utility_scripts/develop/OTIO_export.py index 0431eb7daa..c1c83eb060 100644 --- a/openpype/hosts/resolve/utility_scripts/develop/OTIO_export.py +++ b/client/ayon_core/hosts/resolve/utility_scripts/develop/OTIO_export.py @@ -1,6 +1,6 @@ #!/usr/bin/env python import os -from openpype.hosts.resolve.otio import davinci_export as otio_export +from ayon_core.hosts.resolve.otio import davinci_export as otio_export resolve = bmd.scriptapp("Resolve") # noqa fu = resolve.Fusion() diff --git a/openpype/hosts/resolve/utility_scripts/develop/OTIO_import.py b/client/ayon_core/hosts/resolve/utility_scripts/develop/OTIO_import.py similarity index 96% rename from openpype/hosts/resolve/utility_scripts/develop/OTIO_import.py rename to client/ayon_core/hosts/resolve/utility_scripts/develop/OTIO_import.py index d8b630eb2a..5bbdd73402 100644 --- a/openpype/hosts/resolve/utility_scripts/develop/OTIO_import.py +++ b/client/ayon_core/hosts/resolve/utility_scripts/develop/OTIO_import.py @@ -1,6 +1,6 @@ #!/usr/bin/env python import os -from openpype.hosts.resolve.otio import davinci_import as otio_import +from ayon_core.hosts.resolve.otio import davinci_import as otio_import resolve = bmd.scriptapp("Resolve") # noqa fu = resolve.Fusion() diff --git a/client/ayon_core/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py b/client/ayon_core/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py new file mode 100644 index 0000000000..c394238860 --- /dev/null +++ b/client/ayon_core/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +import os +import sys + +from ayon_core.pipeline import install_host + + +def main(env): + from ayon_core.hosts.resolve.utils import setup + import ayon_core.hosts.resolve.api as bmdvr + # Registers openpype's Global pyblish plugins + install_host(bmdvr) + setup(env) + + +if __name__ == "__main__": + result = main(os.environ) + sys.exit(not bool(result)) diff --git a/client/ayon_core/hosts/resolve/utils.py b/client/ayon_core/hosts/resolve/utils.py new file mode 100644 index 0000000000..4ef6ea4f40 --- /dev/null +++ b/client/ayon_core/hosts/resolve/utils.py @@ -0,0 +1,71 @@ +import os +import shutil +from ayon_core.lib import Logger, is_running_from_build + +RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def setup(env): + log = Logger.get_logger("ResolveSetup") + scripts = {} + util_scripts_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") + util_scripts_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"] + + util_scripts_paths = [os.path.join( + RESOLVE_ROOT_DIR, + "utility_scripts" + )] + + # collect script dirs + if util_scripts_env: + log.info("Utility Scripts Env: `{}`".format(util_scripts_env)) + util_scripts_paths = util_scripts_env.split( + os.pathsep) + util_scripts_paths + + # collect scripts from dirs + for path in util_scripts_paths: + scripts.update({path: os.listdir(path)}) + + log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths)) + log.info("Utility Scripts: `{}`".format(scripts)) + + # Make sure scripts dir exists + os.makedirs(util_scripts_dir, exist_ok=True) + + # make sure no script file is in folder + for script in os.listdir(util_scripts_dir): + 
path = os.path.join(util_scripts_dir, script)
+        log.info("Removing `{}`...".format(path))
+        if os.path.isdir(path):
+            shutil.rmtree(path, onerror=None)
+        else:
+            os.remove(path)
+
+    # copy scripts into Resolve's utility scripts dir
+    for directory, dir_scripts in scripts.items():
+        for script in dir_scripts:
+            if (
+                is_running_from_build()
+                and script in ["tests", "develop"]
+            ):
+                # do not copy these when running from a build
+                continue
+
+            src = os.path.join(directory, script)
+            dst = os.path.join(util_scripts_dir, script)
+
+            # TODO: Make this a less hacky workaround
+            if script == "ayon_startup.scriptlib":
+                # Handle special case for scriptlib that needs to be a folder
+                # up from the Comp folder in the Fusion scripts
+                dst = os.path.join(os.path.dirname(util_scripts_dir),
+                                   script)
+
+            log.info("Copying `{}` to `{}`...".format(src, dst))
+            if os.path.isdir(src):
+                shutil.copytree(
+                    src, dst, symlinks=False,
+                    ignore=None, ignore_dangling_symlinks=False
+                )
+            else:
+                shutil.copy2(src, dst)
diff --git a/openpype/hosts/substancepainter/__init__.py b/client/ayon_core/hosts/substancepainter/__init__.py
similarity index 100%
rename from openpype/hosts/substancepainter/__init__.py
rename to client/ayon_core/hosts/substancepainter/__init__.py
diff --git a/client/ayon_core/hosts/substancepainter/addon.py b/client/ayon_core/hosts/substancepainter/addon.py
new file mode 100644
index 0000000000..a7f21c2288
--- /dev/null
+++ b/client/ayon_core/hosts/substancepainter/addon.py
@@ -0,0 +1,34 @@
+import os
+from ayon_core.modules import OpenPypeModule, IHostAddon
+
+SUBSTANCE_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class SubstanceAddon(OpenPypeModule, IHostAddon):
+    name = "substancepainter"
+    host_name = "substancepainter"
+
+    def initialize(self, module_settings):
+        self.enabled = True
+
+    def add_implementation_envs(self, env, _app):
+        # Add requirements to SUBSTANCE_PAINTER_PLUGINS_PATH
+        plugin_path = os.path.join(SUBSTANCE_HOST_DIR, "deploy")
+        plugin_path = plugin_path.replace("\\", "/")
+        if env.get("SUBSTANCE_PAINTER_PLUGINS_PATH"):
+            plugin_path += os.pathsep + env["SUBSTANCE_PAINTER_PLUGINS_PATH"]
+
+        env["SUBSTANCE_PAINTER_PLUGINS_PATH"] = plugin_path
+
+        # Logging in Substance Painter doesn't support custom terminal colors
+        env["AYON_LOG_NO_COLORS"] = "1"
+
+    def get_launch_hook_paths(self, app):
+        if app.host_name != self.host_name:
+            return []
+        return [
+            os.path.join(SUBSTANCE_HOST_DIR, "hooks")
+        ]
+
+    def get_workfile_extensions(self):
+        return [".spp", ".toc"]
diff --git a/openpype/hosts/substancepainter/api/__init__.py b/client/ayon_core/hosts/substancepainter/api/__init__.py
similarity index 100%
rename from openpype/hosts/substancepainter/api/__init__.py
rename to client/ayon_core/hosts/substancepainter/api/__init__.py
diff --git a/openpype/hosts/substancepainter/api/colorspace.py b/client/ayon_core/hosts/substancepainter/api/colorspace.py
similarity index 100%
rename from openpype/hosts/substancepainter/api/colorspace.py
rename to client/ayon_core/hosts/substancepainter/api/colorspace.py
diff --git a/openpype/hosts/substancepainter/api/lib.py b/client/ayon_core/hosts/substancepainter/api/lib.py
similarity index 100%
rename from openpype/hosts/substancepainter/api/lib.py
rename to client/ayon_core/hosts/substancepainter/api/lib.py
diff --git a/client/ayon_core/hosts/substancepainter/api/pipeline.py b/client/ayon_core/hosts/substancepainter/api/pipeline.py
new file mode 100644
index 0000000000..2bbcf2aded
--- /dev/null
+++ 
b/client/ayon_core/hosts/substancepainter/api/pipeline.py @@ -0,0 +1,428 @@ +# -*- coding: utf-8 -*- +"""Pipeline tools for OpenPype Substance Painter integration.""" +import os +import logging +from functools import partial + +# Substance 3D Painter modules +import substance_painter.ui +import substance_painter.event +import substance_painter.project + +import pyblish.api + +from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost +from ayon_core.settings import ( + get_current_project_settings, + get_system_settings +) + +from ayon_core.pipeline.template_data import get_template_data_with_names +from ayon_core.pipeline import ( + register_creator_plugin_path, + register_loader_plugin_path, + AVALON_CONTAINER_ID, + Anatomy +) +from ayon_core.lib import ( + StringTemplate, + register_event_callback, + emit_event, +) +from ayon_core.pipeline.load import any_outdated_containers +from ayon_core.hosts.substancepainter import SUBSTANCE_HOST_DIR + +from . import lib + +log = logging.getLogger("ayon_core.hosts.substance") + +PLUGINS_DIR = os.path.join(SUBSTANCE_HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + +OPENPYPE_METADATA_KEY = "OpenPype" +OPENPYPE_METADATA_CONTAINERS_KEY = "containers" # child key +OPENPYPE_METADATA_CONTEXT_KEY = "context" # child key +OPENPYPE_METADATA_INSTANCES_KEY = "instances" # child key + + +class SubstanceHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "substancepainter" + + def __init__(self): + super(SubstanceHost, self).__init__() + self._has_been_setup = False + self.menu = None + self.callbacks = [] + self.shelves = [] + + def install(self): + pyblish.api.register_host("substancepainter") + + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + + log.info("Installing callbacks ... ") + # register_event_callback("init", on_init) + self._register_callbacks() + # register_event_callback("before.save", before_save) + # register_event_callback("save", on_save) + register_event_callback("open", on_open) + # register_event_callback("new", on_new) + + log.info("Installing menu ... 
") + self._install_menu() + + project_settings = get_current_project_settings() + self._install_shelves(project_settings) + + self._has_been_setup = True + + def uninstall(self): + self._uninstall_shelves() + self._uninstall_menu() + self._deregister_callbacks() + + def workfile_has_unsaved_changes(self): + + if not substance_painter.project.is_open(): + return False + + return substance_painter.project.needs_saving() + + def get_workfile_extensions(self): + return [".spp", ".toc"] + + def save_workfile(self, dst_path=None): + + if not substance_painter.project.is_open(): + return False + + if not dst_path: + dst_path = self.get_current_workfile() + + full_save_mode = substance_painter.project.ProjectSaveMode.Full + substance_painter.project.save_as(dst_path, full_save_mode) + + return dst_path + + def open_workfile(self, filepath): + + if not os.path.exists(filepath): + raise RuntimeError("File does not exist: {}".format(filepath)) + + # We must first explicitly close current project before opening another + if substance_painter.project.is_open(): + substance_painter.project.close() + + substance_painter.project.open(filepath) + return filepath + + def get_current_workfile(self): + if not substance_painter.project.is_open(): + return None + + filepath = substance_painter.project.file_path() + if filepath and filepath.endswith(".spt"): + # When currently in a Substance Painter template assume our + # scene isn't saved. This can be the case directly after doing + # "New project", the path will then be the template used. This + # avoids Workfiles tool trying to save as .spt extension if the + # file hasn't been saved before. + return + + return filepath + + def get_containers(self): + + if not substance_painter.project.is_open(): + return + + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) + if containers: + for key, container in containers.items(): + container["objectName"] = key + yield container + + def update_context_data(self, data, changes): + + if not substance_painter.project.is_open(): + return + + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + metadata.set(OPENPYPE_METADATA_CONTEXT_KEY, data) + + def get_context_data(self): + + if not substance_painter.project.is_open(): + return + + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + return metadata.get(OPENPYPE_METADATA_CONTEXT_KEY) or {} + + def _install_menu(self): + from PySide2 import QtWidgets + from ayon_core.tools.utils import host_tools + + parent = substance_painter.ui.get_main_window() + + tab_menu_label = os.environ.get("AYON_MENU_LABEL") or "AYON" + menu = QtWidgets.QMenu(tab_menu_label) + + action = menu.addAction("Create...") + action.triggered.connect( + lambda: host_tools.show_publisher(parent=parent, + tab="create") + ) + + action = menu.addAction("Load...") + action.triggered.connect( + lambda: host_tools.show_loader(parent=parent, use_context=True) + ) + + action = menu.addAction("Publish...") + action.triggered.connect( + lambda: host_tools.show_publisher(parent=parent, + tab="publish") + ) + + action = menu.addAction("Manage...") + action.triggered.connect( + lambda: host_tools.show_scene_inventory(parent=parent) + ) + + action = menu.addAction("Library...") + action.triggered.connect( + lambda: host_tools.show_library_loader(parent=parent) + ) + + menu.addSeparator() + action = menu.addAction("Work Files...") + action.triggered.connect( + lambda: 
host_tools.show_workfiles(parent=parent)
+        )
+
+        substance_painter.ui.add_menu(menu)
+
+        def on_menu_destroyed():
+            self.menu = None
+
+        menu.destroyed.connect(on_menu_destroyed)
+
+        self.menu = menu
+
+    def _uninstall_menu(self):
+        if self.menu:
+            self.menu.destroy()
+            self.menu = None
+
+    def _register_callbacks(self):
+        # Prepare emit event callbacks
+        open_callback = partial(emit_event, "open")
+
+        # Connect to the Substance Painter events
+        dispatcher = substance_painter.event.DISPATCHER
+        for event, callback in [
+            (substance_painter.event.ProjectOpened, open_callback)
+        ]:
+            dispatcher.connect(event, callback)
+            # Keep a reference so we can deregister if needed
+            self.callbacks.append((event, callback))
+
+    def _deregister_callbacks(self):
+        for event, callback in self.callbacks:
+            substance_painter.event.DISPATCHER.disconnect(event, callback)
+        self.callbacks.clear()
+
+    def _install_shelves(self, project_settings):
+
+        shelves = project_settings["substancepainter"].get("shelves", {})
+        if not shelves:
+            return
+
+        # Prepare formatting data if we detect any path which might have
+        # template tokens like {asset} in there.
+        formatting_data = {}
+        has_formatting_entries = any("{" in path for path in shelves.values())
+        if has_formatting_entries:
+            project_name = self.get_current_project_name()
+            asset_name = self.get_current_asset_name()
+            task_name = self.get_current_task_name()
+            system_settings = get_system_settings()
+            formatting_data = get_template_data_with_names(project_name,
+                                                           asset_name,
+                                                           task_name,
+                                                           system_settings)
+            anatomy = Anatomy(project_name)
+            formatting_data["root"] = anatomy.roots
+
+        for name, path in shelves.items():
+            shelf_name = None
+
+            # Allow formatting with anatomy for the paths
+            if "{" in path:
+                path = StringTemplate.format_template(path, formatting_data)
+
+            try:
+                shelf_name = lib.load_shelf(path, name=name)
+            except ValueError as exc:
+                print(f"Failed to load shelf -> {exc}")
+
+            if shelf_name:
+                self.shelves.append(shelf_name)
+
+    def _uninstall_shelves(self):
+        for shelf_name in self.shelves:
+            substance_painter.resource.Shelves.remove(shelf_name)
+        self.shelves.clear()
+
+
+def on_open():
+    log.info("Running callback on open..")
+
+    if any_outdated_containers():
+        from ayon_core.tools.utils import SimplePopup
+
+        log.warning("Scene has outdated content.")
+
+        # Get main window
+        parent = substance_painter.ui.get_main_window()
+        if parent is None:
+            log.info("Skipping outdated content pop-up "
+                     "because Substance window can't be found.")
+        else:
+
+            # Show outdated pop-up
+            def _on_show_inventory():
+                from ayon_core.tools.utils import host_tools
+                host_tools.show_scene_inventory(parent=parent)
+
+            dialog = SimplePopup(parent=parent)
+            dialog.setWindowTitle("Substance scene has outdated content")
+            dialog.set_message("There are outdated containers in "
+                               "your Substance scene.")
+            dialog.on_clicked.connect(_on_show_inventory)
+            dialog.show()
+
+
+def imprint_container(container,
+                      name,
+                      namespace,
+                      context,
+                      loader):
+    """Imprint a loaded container with metadata.
+
+    Containerisation enables tracking of version, author and origin
+    for loaded assets.
+
+    Arguments:
+        container (dict): The (substance metadata) dictionary to imprint into.
+        name (str): Name of resulting assembly
+        namespace (str): Namespace under which to host container
+        context (dict): Asset information
+        loader (load.LoaderPlugin): loader instance used to produce container.
+
+    Returns:
+        None
+
+    """
+
+    data = [
+        ("schema", "openpype:container-2.0"),
+        ("id", AVALON_CONTAINER_ID),
+        ("name", str(name)),
+        ("namespace", str(namespace) if namespace else None),
+        ("loader", str(loader.__class__.__name__)),
+        ("representation", str(context["representation"]["_id"])),
+    ]
+    for key, value in data:
+        container[key] = value
+
+
+def set_container_metadata(object_name, container_data, update=False):
+    """Helper method to directly set the data for a specific container
+
+    Args:
+        object_name (str): The unique object name identifier for the container
+        container_data (dict): The data for the container.
+            Note 'objectName' is derived from `object_name`; any 'objectName'
+            key in `container_data` will be ignored.
+        update (bool): Whether to only update the dict data.
+
+    """
+    # The objectName is derived from the key in the metadata, so it is not
+    # stored in the container's data itself.
+    container_data.pop("objectName", None)
+
+    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
+    containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) or {}
+    if update:
+        existing_data = containers.setdefault(object_name, {})
+        existing_data.update(container_data)  # mutable dict, in-place update
+    else:
+        containers[object_name] = container_data
+    metadata.set("containers", containers)
+
+
+def remove_container_metadata(object_name):
+    """Helper method to remove the data for a specific container"""
+    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
+    containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY)
+    if containers:
+        containers.pop(object_name, None)
+        metadata.set("containers", containers)
+
+
+def set_instance(instance_id, instance_data, update=False):
+    """Helper method to directly set the data for a specific instance
+
+    Args:
+        instance_id (str): Unique identifier for the instance
+        instance_data (dict): The instance data to store in the metadata.
+        update (bool): Whether to only update the dict data.
+    """
+    set_instances({instance_id: instance_data}, update=update)
+
+
+def set_instances(instance_data_by_id, update=False):
+    """Store data for multiple instances at the same time.
+
+    This is more optimal than querying and setting them in the metadata one
+    by one.
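+
+    Example (hypothetical ids and values, for illustration only)::
+
+        set_instances({
+            "uuid-1": {"family": "textureSet", "variant": "Main"},
+            "uuid-2": {"family": "workfile", "variant": "Main"},
+        }, update=True)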
+ """ + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {} + + for instance_id, instance_data in instance_data_by_id.items(): + if update: + existing_data = instances.get(instance_id, {}) + existing_data.update(instance_data) + else: + instances[instance_id] = instance_data + + metadata.set("instances", instances) + + +def remove_instance(instance_id): + """Helper method to remove the data for a specific container""" + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {} + instances.pop(instance_id, None) + metadata.set("instances", instances) + + +def get_instances_by_id(): + """Return all instances stored in the project instances metadata""" + if not substance_painter.project.is_open(): + return {} + + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + return metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {} + + +def get_instances(): + """Return all instances stored in the project instances as a list""" + return list(get_instances_by_id().values()) diff --git a/openpype/hosts/substancepainter/deploy/plugins/openpype_plugin.py b/client/ayon_core/hosts/substancepainter/deploy/plugins/openpype_plugin.py similarity index 84% rename from openpype/hosts/substancepainter/deploy/plugins/openpype_plugin.py rename to client/ayon_core/hosts/substancepainter/deploy/plugins/openpype_plugin.py index e7e1849546..8ced463367 100644 --- a/openpype/hosts/substancepainter/deploy/plugins/openpype_plugin.py +++ b/client/ayon_core/hosts/substancepainter/deploy/plugins/openpype_plugin.py @@ -21,13 +21,13 @@ def cleanup_openpype_qt_widgets(): def start_plugin(): - from openpype.pipeline import install_host - from openpype.hosts.substancepainter.api import SubstanceHost + from ayon_core.pipeline import install_host + from ayon_core.hosts.substancepainter.api import SubstanceHost install_host(SubstanceHost()) def close_plugin(): - from openpype.pipeline import uninstall_host + from ayon_core.pipeline import uninstall_host cleanup_openpype_qt_widgets() uninstall_host() diff --git a/openpype/hosts/substancepainter/deploy/startup/openpype_load_on_first_run.py b/client/ayon_core/hosts/substancepainter/deploy/startup/openpype_load_on_first_run.py similarity index 100% rename from openpype/hosts/substancepainter/deploy/startup/openpype_load_on_first_run.py rename to client/ayon_core/hosts/substancepainter/deploy/startup/openpype_load_on_first_run.py diff --git a/openpype/hosts/substancepainter/plugins/create/create_textures.py b/client/ayon_core/hosts/substancepainter/plugins/create/create_textures.py similarity index 96% rename from openpype/hosts/substancepainter/plugins/create/create_textures.py rename to client/ayon_core/hosts/substancepainter/plugins/create/create_textures.py index 6972ba2794..831ab6bb23 100644 --- a/openpype/hosts/substancepainter/plugins/create/create_textures.py +++ b/client/ayon_core/hosts/substancepainter/plugins/create/create_textures.py @@ -1,21 +1,21 @@ # -*- coding: utf-8 -*- """Creator plugin for creating textures.""" -from openpype.pipeline import CreatedInstance, Creator, CreatorError -from openpype.lib import ( +from ayon_core.pipeline import CreatedInstance, Creator, CreatorError +from ayon_core.lib import ( EnumDef, UILabelDef, NumberDef, BoolDef ) -from openpype.hosts.substancepainter.api.pipeline import ( +from ayon_core.hosts.substancepainter.api.pipeline import ( get_instances, set_instance, 
set_instances, remove_instance ) -from openpype.hosts.substancepainter.api.lib import get_export_presets +from ayon_core.hosts.substancepainter.api.lib import get_export_presets import substance_painter.project diff --git a/client/ayon_core/hosts/substancepainter/plugins/create/create_workfile.py b/client/ayon_core/hosts/substancepainter/plugins/create/create_workfile.py new file mode 100644 index 0000000000..a51b7d859b --- /dev/null +++ b/client/ayon_core/hosts/substancepainter/plugins/create/create_workfile.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating workfiles.""" + +from ayon_core.pipeline import CreatedInstance, AutoCreator +from ayon_core.client import get_asset_by_name + +from ayon_core.hosts.substancepainter.api.pipeline import ( + set_instances, + set_instance, + get_instances +) + +import substance_painter.project + + +class CreateWorkfile(AutoCreator): + """Workfile auto-creator.""" + identifier = "io.openpype.creators.substancepainter.workfile" + label = "Workfile" + family = "workfile" + icon = "document" + + default_variant = "Main" + + def create(self): + + if not substance_painter.project.is_open(): + return + + variant = self.default_variant + project_name = self.project_name + asset_name = self.create_context.get_current_asset_name() + task_name = self.create_context.get_current_task_name() + host_name = self.create_context.host_name + + # Workfile instance should always exist and must only exist once. + # As such we'll first check if it already exists and is collected. + current_instance = next( + ( + instance for instance in self.create_context.instances + if instance.creator_identifier == self.identifier + ), None) + + if current_instance is None: + current_instance_asset = None + else: + current_instance_asset = current_instance["folderPath"] + + if current_instance is None: + self.log.info("Auto-creating workfile instance...") + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "folderPath": asset_name, + "task": task_name, + "variant": variant + } + current_instance = self.create_instance_in_context(subset_name, + data) + elif ( + current_instance_asset != asset_name + or current_instance["task"] != task_name + ): + # Update instance context if is not the same + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + current_instance["folderPath"] = asset_name + current_instance["task"] = task_name + current_instance["subset"] = subset_name + + set_instance( + instance_id=current_instance.get("instance_id"), + instance_data=current_instance.data_to_store() + ) + + def collect_instances(self): + for instance in get_instances(): + if (instance.get("creator_identifier") == self.identifier or + instance.get("family") == self.family): + self.create_instance_in_context_from_existing(instance) + + def update_instances(self, update_list): + instance_data_by_id = {} + for instance, _changes in update_list: + # Persist the data + instance_id = instance.get("instance_id") + instance_data = instance.data_to_store() + instance_data_by_id[instance_id] = instance_data + set_instances(instance_data_by_id, update=True) + + # Helper methods (this might get moved into Creator class) + def create_instance_in_context(self, subset_name, data): + instance = CreatedInstance( + self.family, subset_name, data, self + ) + 
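+        # Add the instance to the shared create context so other tools,
+        # e.g. the publisher UI, can see it immediately.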
self.create_context.creator_adds_instance(instance) + return instance + + def create_instance_in_context_from_existing(self, data): + instance = CreatedInstance.from_existing(data, self) + self.create_context.creator_adds_instance(instance) + return instance diff --git a/openpype/hosts/substancepainter/plugins/load/load_mesh.py b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py similarity index 95% rename from openpype/hosts/substancepainter/plugins/load/load_mesh.py rename to client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py index 08c1d5c391..48aa99d357 100644 --- a/openpype/hosts/substancepainter/plugins/load/load_mesh.py +++ b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py @@ -1,14 +1,14 @@ -from openpype.pipeline import ( +from ayon_core.pipeline import ( load, get_representation_path, ) -from openpype.pipeline.load import LoadError -from openpype.hosts.substancepainter.api.pipeline import ( +from ayon_core.pipeline.load import LoadError +from ayon_core.hosts.substancepainter.api.pipeline import ( imprint_container, set_container_metadata, remove_container_metadata ) -from openpype.hosts.substancepainter.api.lib import prompt_new_file_with_mesh +from ayon_core.hosts.substancepainter.api.lib import prompt_new_file_with_mesh import substance_painter.project import qargparse diff --git a/client/ayon_core/hosts/substancepainter/plugins/publish/collect_current_file.py b/client/ayon_core/hosts/substancepainter/plugins/publish/collect_current_file.py new file mode 100644 index 0000000000..db0edafac0 --- /dev/null +++ b/client/ayon_core/hosts/substancepainter/plugins/publish/collect_current_file.py @@ -0,0 +1,17 @@ +import pyblish.api + +from ayon_core.pipeline import registered_host + + +class CollectCurrentFile(pyblish.api.ContextPlugin): + """Inject the current working file into context""" + + order = pyblish.api.CollectorOrder - 0.49 + label = "Current Workfile" + hosts = ["substancepainter"] + + def process(self, context): + host = registered_host() + path = host.get_current_workfile() + context.data["currentFile"] = path + self.log.debug(f"Current workfile: {path}") diff --git a/openpype/hosts/substancepainter/plugins/publish/collect_textureset_images.py b/client/ayon_core/hosts/substancepainter/plugins/publish/collect_textureset_images.py similarity index 97% rename from openpype/hosts/substancepainter/plugins/publish/collect_textureset_images.py rename to client/ayon_core/hosts/substancepainter/plugins/publish/collect_textureset_images.py index 316f72509e..b8279c99cd 100644 --- a/openpype/hosts/substancepainter/plugins/publish/collect_textureset_images.py +++ b/client/ayon_core/hosts/substancepainter/plugins/publish/collect_textureset_images.py @@ -2,15 +2,15 @@ import copy import pyblish.api -from openpype.pipeline import publish +from ayon_core.pipeline import publish import substance_painter.textureset -from openpype.hosts.substancepainter.api.lib import ( +from ayon_core.hosts.substancepainter.api.lib import ( get_parsed_export_maps, strip_template ) -from openpype.pipeline.create import get_subset_name -from openpype.client import get_asset_by_name +from ayon_core.pipeline.create import get_subset_name +from ayon_core.client import get_asset_by_name class CollectTextureSet(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/substancepainter/plugins/publish/collect_workfile_representation.py b/client/ayon_core/hosts/substancepainter/plugins/publish/collect_workfile_representation.py similarity index 100% rename from 
openpype/hosts/substancepainter/plugins/publish/collect_workfile_representation.py rename to client/ayon_core/hosts/substancepainter/plugins/publish/collect_workfile_representation.py diff --git a/openpype/hosts/substancepainter/plugins/publish/extract_textures.py b/client/ayon_core/hosts/substancepainter/plugins/publish/extract_textures.py similarity index 97% rename from openpype/hosts/substancepainter/plugins/publish/extract_textures.py rename to client/ayon_core/hosts/substancepainter/plugins/publish/extract_textures.py index bb6f15ead9..0fa7b52f45 100644 --- a/openpype/hosts/substancepainter/plugins/publish/extract_textures.py +++ b/client/ayon_core/hosts/substancepainter/plugins/publish/extract_textures.py @@ -1,6 +1,6 @@ import substance_painter.export -from openpype.pipeline import KnownPublishError, publish +from ayon_core.pipeline import KnownPublishError, publish class ExtractTextures(publish.Extractor, diff --git a/client/ayon_core/hosts/substancepainter/plugins/publish/increment_workfile.py b/client/ayon_core/hosts/substancepainter/plugins/publish/increment_workfile.py new file mode 100644 index 0000000000..521a28130b --- /dev/null +++ b/client/ayon_core/hosts/substancepainter/plugins/publish/increment_workfile.py @@ -0,0 +1,23 @@ +import pyblish.api + +from ayon_core.lib import version_up +from ayon_core.pipeline import registered_host + + +class IncrementWorkfileVersion(pyblish.api.ContextPlugin): + """Increment current workfile version.""" + + order = pyblish.api.IntegratorOrder + 1 + label = "Increment Workfile Version" + optional = True + hosts = ["substancepainter"] + + def process(self, context): + + assert all(result["success"] for result in context.data["results"]), ( + "Publishing not successful so version is not increased.") + + host = registered_host() + path = context.data["currentFile"] + self.log.info(f"Incrementing current workfile to: {path}") + host.save_workfile(version_up(path)) diff --git a/openpype/hosts/substancepainter/plugins/publish/save_workfile.py b/client/ayon_core/hosts/substancepainter/plugins/publish/save_workfile.py similarity index 96% rename from openpype/hosts/substancepainter/plugins/publish/save_workfile.py rename to client/ayon_core/hosts/substancepainter/plugins/publish/save_workfile.py index 517f5fd17f..627fb991aa 100644 --- a/openpype/hosts/substancepainter/plugins/publish/save_workfile.py +++ b/client/ayon_core/hosts/substancepainter/plugins/publish/save_workfile.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( registered_host, KnownPublishError ) diff --git a/openpype/hosts/substancepainter/plugins/publish/validate_ouput_maps.py b/client/ayon_core/hosts/substancepainter/plugins/publish/validate_ouput_maps.py similarity index 98% rename from openpype/hosts/substancepainter/plugins/publish/validate_ouput_maps.py rename to client/ayon_core/hosts/substancepainter/plugins/publish/validate_ouput_maps.py index 252683b6c8..720771994c 100644 --- a/openpype/hosts/substancepainter/plugins/publish/validate_ouput_maps.py +++ b/client/ayon_core/hosts/substancepainter/plugins/publish/validate_ouput_maps.py @@ -5,7 +5,7 @@ import substance_painter.export -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateOutputMaps(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/traypublisher/__init__.py b/client/ayon_core/hosts/traypublisher/__init__.py similarity index 100% rename from openpype/hosts/traypublisher/__init__.py 
rename to client/ayon_core/hosts/traypublisher/__init__.py diff --git a/client/ayon_core/hosts/traypublisher/addon.py b/client/ayon_core/hosts/traypublisher/addon.py new file mode 100644 index 0000000000..d8fc5ed105 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/addon.py @@ -0,0 +1,60 @@ +import os + +from ayon_core.lib import get_ayon_launcher_args +from ayon_core.lib.execute import run_detached_process +from ayon_core.modules import ( + click_wrap, + OpenPypeModule, + ITrayAction, + IHostAddon, +) + +TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction): + label = "Publisher" + name = "traypublisher" + host_name = "traypublisher" + + def initialize(self, modules_settings): + self.enabled = True + self.publish_paths = [ + os.path.join(TRAYPUBLISH_ROOT_DIR, "plugins", "publish") + ] + + def tray_init(self): + return + + def on_action_trigger(self): + self.run_traypublisher() + + def connect_with_addons(self, enabled_modules): + """Collect publish paths from other modules.""" + publish_paths = self.manager.collect_plugin_paths()["publish"] + self.publish_paths.extend(publish_paths) + + def run_traypublisher(self): + args = get_ayon_launcher_args( + "module", self.name, "launch" + ) + run_detached_process(args) + + def cli(self, click_group): + click_group.add_command(cli_main.to_click_obj()) + + +@click_wrap.group( + TrayPublishAddon.name, + help="TrayPublisher related commands.") +def cli_main(): + pass + + +@cli_main.command() +def launch(): + """Launch TrayPublish tool UI.""" + + from ayon_core.tools import traypublisher + + traypublisher.main() diff --git a/openpype/hosts/traypublisher/api/__init__.py b/client/ayon_core/hosts/traypublisher/api/__init__.py similarity index 100% rename from openpype/hosts/traypublisher/api/__init__.py rename to client/ayon_core/hosts/traypublisher/api/__init__.py diff --git a/openpype/hosts/traypublisher/api/editorial.py b/client/ayon_core/hosts/traypublisher/api/editorial.py similarity index 99% rename from openpype/hosts/traypublisher/api/editorial.py rename to client/ayon_core/hosts/traypublisher/api/editorial.py index 613f1de768..d84a7200c8 100644 --- a/openpype/hosts/traypublisher/api/editorial.py +++ b/client/ayon_core/hosts/traypublisher/api/editorial.py @@ -1,8 +1,8 @@ import re from copy import deepcopy -from openpype.client import get_asset_by_id -from openpype.pipeline.create import CreatorError +from ayon_core.client import get_asset_by_id +from ayon_core.pipeline.create import CreatorError class ShotMetadataSolver: diff --git a/client/ayon_core/hosts/traypublisher/api/pipeline.py b/client/ayon_core/hosts/traypublisher/api/pipeline.py new file mode 100644 index 0000000000..87177705c9 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/api/pipeline.py @@ -0,0 +1,183 @@ +import os +import json +import tempfile +import atexit + +import pyblish.api + +from ayon_core.pipeline import ( + register_creator_plugin_path, + legacy_io, +) +from ayon_core.host import HostBase, IPublishHost + + +ROOT_DIR = os.path.dirname(os.path.dirname( + os.path.abspath(__file__) +)) +PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish") +CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create") + + +class TrayPublisherHost(HostBase, IPublishHost): + name = "traypublisher" + + def install(self): + os.environ["AVALON_APP"] = self.name + legacy_io.Session["AVALON_APP"] = self.name + + pyblish.api.register_host("traypublisher") + 
pyblish.api.register_plugin_path(PUBLISH_PATH) + register_creator_plugin_path(CREATE_PATH) + + def get_context_title(self): + return HostContext.get_project_name() + + def get_context_data(self): + return HostContext.get_context_data() + + def update_context_data(self, data, changes): + HostContext.save_context_data(data) + + def set_project_name(self, project_name): + # TODO Deregister project specific plugins and register new project + # plugins + os.environ["AVALON_PROJECT"] = project_name + legacy_io.Session["AVALON_PROJECT"] = project_name + legacy_io.install() + HostContext.set_project_name(project_name) + + +class HostContext: + _context_json_path = None + + @staticmethod + def _on_exit(): + if ( + HostContext._context_json_path + and os.path.exists(HostContext._context_json_path) + ): + os.remove(HostContext._context_json_path) + + @classmethod + def get_context_json_path(cls): + if cls._context_json_path is None: + output_file = tempfile.NamedTemporaryFile( + mode="w", prefix="traypub_", suffix=".json" + ) + output_file.close() + cls._context_json_path = output_file.name + atexit.register(HostContext._on_exit) + print(cls._context_json_path) + return cls._context_json_path + + @classmethod + def _get_data(cls, group=None): + json_path = cls.get_context_json_path() + data = {} + if not os.path.exists(json_path): + with open(json_path, "w") as json_stream: + json.dump(data, json_stream) + else: + with open(json_path, "r") as json_stream: + content = json_stream.read() + if content: + data = json.loads(content) + if group is None: + return data + return data.get(group) + + @classmethod + def _save_data(cls, group, new_data): + json_path = cls.get_context_json_path() + data = cls._get_data() + data[group] = new_data + with open(json_path, "w") as json_stream: + json.dump(data, json_stream) + + @classmethod + def add_instance(cls, instance): + instances = cls.get_instances() + instances.append(instance) + cls.save_instances(instances) + + @classmethod + def get_instances(cls): + return cls._get_data("instances") or [] + + @classmethod + def save_instances(cls, instances): + cls._save_data("instances", instances) + + @classmethod + def get_context_data(cls): + return cls._get_data("context") or {} + + @classmethod + def save_context_data(cls, data): + cls._save_data("context", data) + + @classmethod + def get_project_name(cls): + return cls._get_data("project_name") + + @classmethod + def set_project_name(cls, project_name): + cls._save_data("project_name", project_name) + + @classmethod + def get_data_to_store(cls): + return { + "project_name": cls.get_project_name(), + "instances": cls.get_instances(), + "context": cls.get_context_data(), + } + + +def list_instances(): + return HostContext.get_instances() + + +def update_instances(update_list): + updated_instances = {} + for instance, _changes in update_list: + updated_instances[instance.id] = instance.data_to_store() + + instances = HostContext.get_instances() + for instance_data in instances: + instance_id = instance_data["instance_id"] + if instance_id in updated_instances: + new_instance_data = updated_instances[instance_id] + old_keys = set(instance_data.keys()) + new_keys = set(new_instance_data.keys()) + instance_data.update(new_instance_data) + for key in (old_keys - new_keys): + instance_data.pop(key) + + HostContext.save_instances(instances) + + +def remove_instances(instances): + if not isinstance(instances, (tuple, list)): + instances = [instances] + + current_instances = HostContext.get_instances() + for instance in 
instances:
+        instance_id = instance.data["instance_id"]
+        found_idx = None
+        for idx, _instance in enumerate(current_instances):
+            if instance_id == _instance["instance_id"]:
+                found_idx = idx
+                break
+
+        if found_idx is not None:
+            current_instances.pop(found_idx)
+    HostContext.save_instances(current_instances)
+
+
+def get_context_data():
+    return HostContext.get_context_data()
+
+
+def update_context_data(data, changes):
+    HostContext.save_context_data(data)
diff --git a/client/ayon_core/hosts/traypublisher/api/plugin.py b/client/ayon_core/hosts/traypublisher/api/plugin.py
new file mode 100644
index 0000000000..77a8f23d2e
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/api/plugin.py
@@ -0,0 +1,334 @@
+from ayon_core.client import (
+    get_assets,
+    get_subsets,
+    get_last_versions,
+    get_asset_name_identifier,
+)
+from ayon_core.lib.attribute_definitions import (
+    FileDef,
+    BoolDef,
+    NumberDef,
+    UISeparatorDef,
+)
+from ayon_core.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
+from ayon_core.pipeline.create import (
+    Creator,
+    HiddenCreator,
+    CreatedInstance,
+    cache_and_get_instances,
+    PRE_CREATE_THUMBNAIL_KEY,
+)
+from .pipeline import (
+    list_instances,
+    update_instances,
+    remove_instances,
+    HostContext,
+)
+
+REVIEW_EXTENSIONS = set(IMAGE_EXTENSIONS) | set(VIDEO_EXTENSIONS)
+SHARED_DATA_KEY = "openpype.traypublisher.instances"
+
+
+class HiddenTrayPublishCreator(HiddenCreator):
+    host_name = "traypublisher"
+    settings_category = "traypublisher"
+
+    def collect_instances(self):
+        instances_by_identifier = cache_and_get_instances(
+            self, SHARED_DATA_KEY, list_instances
+        )
+        for instance_data in instances_by_identifier[self.identifier]:
+            instance = CreatedInstance.from_existing(instance_data, self)
+            self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        update_instances(update_list)
+
+    def remove_instances(self, instances):
+        remove_instances(instances)
+        for instance in instances:
+            self._remove_instance_from_context(instance)
+
+    def _store_new_instance(self, new_instance):
+        """Tray publisher specific method to store an instance.
+
+        The instance is stored into the traypublisher "workfile" and also
+        added to the CreateContext.
+
+        Args:
+            new_instance (CreatedInstance): Instance that should be stored.
+        """
+
+        # Host implementation of storing metadata about instance
+        HostContext.add_instance(new_instance.data_to_store())
+        # Add instance to current context
+        self._add_instance_to_context(new_instance)
+
+
+class TrayPublishCreator(Creator):
+    create_allow_context_change = True
+    host_name = "traypublisher"
+    settings_category = "traypublisher"
+
+    def collect_instances(self):
+        instances_by_identifier = cache_and_get_instances(
+            self, SHARED_DATA_KEY, list_instances
+        )
+        for instance_data in instances_by_identifier[self.identifier]:
+            instance = CreatedInstance.from_existing(instance_data, self)
+            self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        update_instances(update_list)
+
+    def remove_instances(self, instances):
+        remove_instances(instances)
+        for instance in instances:
+            self._remove_instance_from_context(instance)
+
+    def _store_new_instance(self, new_instance):
+        """Tray publisher specific method to store an instance.
+
+        The instance is stored into the traypublisher "workfile" and also
+        added to the CreateContext.
+
+        Args:
+            new_instance (CreatedInstance): Instance that should be stored.
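+
+        Example (minimal sketch, hypothetical names)::
+
+            new_instance = CreatedInstance(
+                self.family, subset_name, data, self
+            )
+            self._store_new_instance(new_instance)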
+ """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + new_instance.mark_as_stored() + + # Add instance to current context + self._add_instance_to_context(new_instance) + + +class SettingsCreator(TrayPublishCreator): + create_allow_context_change = True + create_allow_thumbnail = True + allow_version_control = False + + extensions = [] + + def create(self, subset_name, data, pre_create_data): + # Pass precreate data to creator attributes + thumbnail_path = pre_create_data.pop(PRE_CREATE_THUMBNAIL_KEY, None) + + # Fill 'version_to_use' if version control is enabled + if self.allow_version_control: + asset_name = data["folderPath"] + subset_docs_by_asset_id = self._prepare_next_versions( + [asset_name], [subset_name]) + version = subset_docs_by_asset_id[asset_name].get(subset_name) + pre_create_data["version_to_use"] = version + data["_previous_last_version"] = version + + data["creator_attributes"] = pre_create_data + data["settings_creator"] = True + + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, data, self) + + self._store_new_instance(new_instance) + + if thumbnail_path: + self.set_instance_thumbnail_path(new_instance.id, thumbnail_path) + + def _prepare_next_versions(self, asset_names, subset_names): + """Prepare next versions for given asset and subset names. + + Todos: + Expect combination of subset names by asset name to avoid + unnecessary server calls for unused subsets. + + Args: + asset_names (Iterable[str]): Asset names. + subset_names (Iterable[str]): Subset names. + + Returns: + dict[str, dict[str, int]]: Last versions by asset + and subset names. + """ + + # Prepare all versions for all combinations to '1' + subset_docs_by_asset_id = { + asset_name: { + subset_name: 1 + for subset_name in subset_names + } + for asset_name in asset_names + } + if not asset_names or not subset_names: + return subset_docs_by_asset_id + + asset_docs = get_assets( + self.project_name, + asset_names=asset_names, + fields=["_id", "name", "data.parents"] + ) + asset_names_by_id = { + asset_doc["_id"]: get_asset_name_identifier(asset_doc) + for asset_doc in asset_docs + } + subset_docs = list(get_subsets( + self.project_name, + asset_ids=asset_names_by_id.keys(), + subset_names=subset_names, + fields=["_id", "name", "parent"] + )) + + subset_ids = {subset_doc["_id"] for subset_doc in subset_docs} + last_versions = get_last_versions( + self.project_name, + subset_ids, + fields=["name", "parent"]) + + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + asset_name = asset_names_by_id[asset_id] + subset_name = subset_doc["name"] + subset_id = subset_doc["_id"] + last_version = last_versions.get(subset_id) + version = 0 + if last_version is not None: + version = last_version["name"] + subset_docs_by_asset_id[asset_name][subset_name] += version + return subset_docs_by_asset_id + + def _fill_next_versions(self, instances_data): + """Fill next version for instances. + + Instances have also stored previous next version to be able to + recognize if user did enter different version. If version was + not changed by user, or user set it to '0' the next version will be + updated by current database state. 
+ """ + + filtered_instance_data = [] + for instance in instances_data: + previous_last_version = instance.get("_previous_last_version") + creator_attributes = instance["creator_attributes"] + use_next_version = creator_attributes.get( + "use_next_version", True) + version = creator_attributes.get("version_to_use", 0) + if ( + use_next_version + or version == 0 + or version == previous_last_version + ): + filtered_instance_data.append(instance) + + asset_names = { + instance["folderPath"] + for instance in filtered_instance_data + } + subset_names = { + instance["subset"] + for instance in filtered_instance_data} + subset_docs_by_asset_id = self._prepare_next_versions( + asset_names, subset_names + ) + for instance in filtered_instance_data: + asset_name = instance["folderPath"] + subset_name = instance["subset"] + version = subset_docs_by_asset_id[asset_name][subset_name] + instance["creator_attributes"]["version_to_use"] = version + instance["_previous_last_version"] = version + + def collect_instances(self): + """Collect instances from host. + + Overriden to be able to manage version control attributes. If version + control is disabled, the attributes will be removed from instances, + and next versions are filled if is version control enabled. + """ + + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + instances = instances_by_identifier[self.identifier] + if not instances: + return + + if self.allow_version_control: + self._fill_next_versions(instances) + + for instance_data in instances: + # Make sure that there are not data related to version control + # if plugin does not support it + if not self.allow_version_control: + instance_data.pop("_previous_last_version", None) + creator_attributes = instance_data["creator_attributes"] + creator_attributes.pop("version_to_use", None) + creator_attributes.pop("use_next_version", None) + + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def get_instance_attr_defs(self): + defs = self.get_pre_create_attr_defs() + if self.allow_version_control: + defs += [ + UISeparatorDef(), + BoolDef( + "use_next_version", + default=True, + label="Use next version", + ), + NumberDef( + "version_to_use", + default=1, + minimum=0, + maximum=999, + label="Version to use", + ) + ] + return defs + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return [ + FileDef( + "representation_files", + folders=False, + extensions=self.extensions, + allow_sequences=self.allow_sequences, + single_item=not self.allow_multiple_items, + label="Representations", + ), + FileDef( + "reviewable", + folders=False, + extensions=REVIEW_EXTENSIONS, + allow_sequences=True, + single_item=True, + label="Reviewable representations", + extensions_label="Single reviewable item" + ) + ] + + @classmethod + def from_settings(cls, item_data): + identifier = item_data["identifier"] + family = item_data["family"] + if not identifier: + identifier = "settings_{}".format(family) + return type( + "{}{}".format(cls.__name__, identifier), + (cls, ), + { + "family": family, + "identifier": identifier, + "label": item_data["label"].strip(), + "icon": item_data["icon"], + "description": item_data["description"], + "detailed_description": item_data["detailed_description"], + "extensions": item_data["extensions"], + "allow_sequences": item_data["allow_sequences"], + "allow_multiple_items": item_data["allow_multiple_items"], + "allow_version_control": 
item_data.get( + "allow_version_control", False), + "default_variants": item_data["default_variants"], + } + ) diff --git a/openpype/hosts/traypublisher/batch_parsing.py b/client/ayon_core/hosts/traypublisher/batch_parsing.py similarity index 97% rename from openpype/hosts/traypublisher/batch_parsing.py rename to client/ayon_core/hosts/traypublisher/batch_parsing.py index 3ce3b095b9..fdb3021a20 100644 --- a/openpype/hosts/traypublisher/batch_parsing.py +++ b/client/ayon_core/hosts/traypublisher/batch_parsing.py @@ -2,8 +2,8 @@ import os import re -from openpype.lib import Logger -from openpype.client import get_assets, get_asset_by_name +from ayon_core.lib import Logger +from ayon_core.client import get_assets, get_asset_by_name def get_asset_doc_from_file_name(source_filename, project_name, diff --git a/openpype/hosts/traypublisher/plugins/create/create_colorspace_look.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py similarity index 92% rename from openpype/hosts/traypublisher/plugins/create/create_colorspace_look.py rename to client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py index ac4c72a0ce..3969294f1e 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_colorspace_look.py +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py @@ -6,17 +6,16 @@ """ from pathlib import Path -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_asset_by_name -from openpype.lib.attribute_definitions import ( +from ayon_core.client import get_asset_by_name +from ayon_core.lib.attribute_definitions import ( FileDef, EnumDef, TextDef, UISeparatorDef ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( CreatedInstance, CreatorError ) -from openpype.pipeline import colorspace -from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator +from ayon_core.pipeline import colorspace +from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator class CreateColorspaceLook(TrayPublishCreator): @@ -55,10 +54,7 @@ def create(self, subset_name, instance_data, pre_create_data): # this should never happen raise CreatorError("Missing files from representation") - if AYON_SERVER_ENABLED: - asset_name = instance_data["folderPath"] - else: - asset_name = instance_data["asset"] + asset_name = instance_data["folderPath"] asset_doc = get_asset_by_name( self.project_name, asset_name) diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial.py similarity index 90% rename from openpype/hosts/traypublisher/plugins/create/create_editorial.py rename to client/ayon_core/hosts/traypublisher/plugins/create/create_editorial.py index dce4a051fd..51a67a871e 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_editorial.py +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial.py @@ -1,20 +1,20 @@ import os from copy import deepcopy import opentimelineio as otio -from openpype import AYON_SERVER_ENABLED -from openpype.client import ( + +from ayon_core.client import ( get_asset_by_name, get_project ) -from openpype.hosts.traypublisher.api.plugin import ( +from ayon_core.hosts.traypublisher.api.plugin import ( TrayPublishCreator, HiddenTrayPublishCreator ) -from openpype.hosts.traypublisher.api.editorial import ( +from ayon_core.hosts.traypublisher.api.editorial import ( ShotMetadataSolver ) -from openpype.pipeline import CreatedInstance -from openpype.lib import ( 
+from ayon_core.pipeline import CreatedInstance +from ayon_core.lib import ( get_ffprobe_data, convert_ffprobe_fps_value, @@ -102,21 +102,12 @@ class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase): label = "Editorial Shot" def get_instance_attr_defs(self): - instance_attributes = [] - if AYON_SERVER_ENABLED: - instance_attributes.append( - TextDef( - "folderPath", - label="Folder path" - ) - ) - else: - instance_attributes.append( - TextDef( - "shotName", - label="Shot name" - ) + instance_attributes = [ + TextDef( + "folderPath", + label="Folder path" ) + ] instance_attributes.extend(CLIP_ATTR_DEFS) return instance_attributes @@ -224,11 +215,8 @@ def create(self, subset_name, instance_data, pre_create_data): i["family"] for i in self._creator_settings["family_presets"] ] } - if AYON_SERVER_ENABLED: - asset_name = instance_data["folderPath"] - else: - asset_name = instance_data["asset"] + asset_name = instance_data["folderPath"] asset_doc = get_asset_by_name(self.project_name, asset_name) if pre_create_data["fps"] == "from_selection": @@ -379,17 +367,20 @@ def _get_clip_instances( instance_data (dict): clip instance data family_presets (list): list of dict settings subset presets """ - self.asset_name_check = [] - tracks = otio_timeline.each_child( - descended_from_type=otio.schema.Track - ) + tracks = [ + track for track in otio_timeline.each_child( + descended_from_type=otio.schema.Track) + if track.kind == "Video" + ] - # media data for audio sream and reference solving + # media data for audio stream and reference solving media_data = self._get_media_source_metadata(media_path) for track in tracks: + # set track name track.name = f"{sequence_file_name} - {otio_timeline.name}" + try: track_start_frame = ( abs(track.source_range.start_time.value) @@ -398,19 +389,19 @@ def _get_clip_instances( except AttributeError: track_start_frame = 0 - - for clip in track.each_child(): - if not self._validate_clip_for_processing(clip): + for otio_clip in track.each_child(): + if not self._validate_clip_for_processing(otio_clip): continue + # get available frames info to clip data - self._create_otio_reference(clip, media_path, media_data) + self._create_otio_reference(otio_clip, media_path, media_data) # convert timeline range to source range - self._restore_otio_source_range(clip) + self._restore_otio_source_range(otio_clip) base_instance_data = self._get_base_instance_data( - clip, + otio_clip, instance_data, track_start_frame ) @@ -429,7 +420,7 @@ def _get_clip_instances( continue instance = self._make_subset_instance( - clip, + otio_clip, _fpreset, deepcopy(base_instance_data), parenting_data @@ -608,10 +599,7 @@ def _make_subset_naming( Returns: str: label string """ - if AYON_SERVER_ENABLED: - asset_name = instance_data["creator_attributes"]["folderPath"] - else: - asset_name = instance_data["creator_attributes"]["shotName"] + asset_name = instance_data["creator_attributes"]["folderPath"] variant_name = instance_data["variant"] family = preset["family"] @@ -683,11 +671,6 @@ def _get_base_instance_data( } ) - # It should be validated only in openpype since we are supporting - # publishing to AYON with folder path and uniqueness is not an issue - if not AYON_SERVER_ENABLED: - self._validate_name_uniqueness(shot_name) - timing_data = self._get_timing_data( otio_clip, timeline_offset, @@ -720,18 +703,9 @@ def _get_base_instance_data( } # update base instance data with context data # and also update creator attributes with context data - if AYON_SERVER_ENABLED: - # TODO: this is here 
just to be able to publish - # to AYON with folder path - creator_attributes["folderPath"] = shot_metadata.pop("folderPath") - base_instance_data["folderPath"] = parent_asset_name - else: - creator_attributes.update({ - "shotName": shot_name, - "Parent hierarchy path": shot_metadata["hierarchy"] - }) + creator_attributes["folderPath"] = shot_metadata.pop("folderPath") + base_instance_data["folderPath"] = parent_asset_name - base_instance_data["asset"] = parent_asset_name # add creator attributes to shared instance data base_instance_data["creator_attributes"] = creator_attributes # add hierarchy shot metadata @@ -834,22 +808,6 @@ def _validate_clip_for_processing(self, otio_clip): return True - def _validate_name_uniqueness(self, name): - """ Validating name uniqueness. - - In context of other clip names in sequence file. - - Args: - name (str): shot name string - """ - if name not in self.asset_name_check: - self.asset_name_check.append(name) - else: - self.log.warning( - f"Duplicate shot name: {name}! " - "Please check names in the input sequence files." - ) - def get_pre_create_attr_defs(self): """ Creating pre-create attributes at creator plugin. diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_from_settings.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_from_settings.py new file mode 100644 index 0000000000..20f8dd792a --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_from_settings.py @@ -0,0 +1,23 @@ +import os +from ayon_core.lib import Logger +from ayon_core.settings import get_project_settings + +log = Logger.get_logger(__name__) + + +def initialize(): + from ayon_core.hosts.traypublisher.api.plugin import SettingsCreator + + project_name = os.environ["AVALON_PROJECT"] + project_settings = get_project_settings(project_name) + + simple_creators = project_settings["traypublisher"]["simple_creators"] + + global_variables = globals() + for item in simple_creators: + + dynamic_plugin = SettingsCreator.from_settings(item) + global_variables[dynamic_plugin.__name__] = dynamic_plugin + + +initialize() diff --git a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_movie_batch.py similarity index 90% rename from openpype/hosts/traypublisher/plugins/create/create_movie_batch.py rename to client/ayon_core/hosts/traypublisher/plugins/create/create_movie_batch.py index 8fa65c7fff..274495855b 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_movie_batch.py @@ -2,22 +2,21 @@ import os import re -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_asset_name_identifier -from openpype.lib import ( +from ayon_core.client import get_asset_name_identifier +from ayon_core.lib import ( FileDef, BoolDef, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( CreatedInstance, ) -from openpype.pipeline.create import ( +from ayon_core.pipeline.create import ( get_subset_name, TaskNotSetError, ) -from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator -from openpype.hosts.traypublisher.batch_parsing import ( +from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator +from ayon_core.hosts.traypublisher.batch_parsing import ( get_asset_doc_from_file_name ) @@ -68,11 +67,8 @@ def create(self, subset_name, data, pre_create_data): asset_name = get_asset_name_identifier(asset_doc) + instance_data["folderPath"] = 
asset_name instance_data["task"] = task_name - if AYON_SERVER_ENABLED: - instance_data["folderPath"] = asset_name - else: - instance_data["asset"] = asset_name # Create new instance new_instance = CreatedInstance(self.family, subset_name, diff --git a/openpype/hosts/traypublisher/plugins/create/create_online.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_online.py similarity index 93% rename from openpype/hosts/traypublisher/plugins/create/create_online.py rename to client/ayon_core/hosts/traypublisher/plugins/create/create_online.py index 199fae6d2c..db11d30afe 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_online.py +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_online.py @@ -7,13 +7,13 @@ """ from pathlib import Path -# from openpype.client import get_subset_by_name, get_asset_by_name -from openpype.lib.attribute_definitions import FileDef, BoolDef -from openpype.pipeline import ( +# from ayon_core.client import get_subset_by_name, get_asset_by_name +from ayon_core.lib.attribute_definitions import FileDef, BoolDef +from ayon_core.pipeline import ( CreatedInstance, CreatorError ) -from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator +from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator class OnlineCreator(TrayPublishCreator): diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_app_name.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_app_name.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_app_name.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_clip_instances.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_clip_instances.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_colorspace_look.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_colorspace_look.py similarity index 95% rename from openpype/hosts/traypublisher/plugins/publish/collect_colorspace_look.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_colorspace_look.py index 6aede099bf..b194e9bf00 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_colorspace_look.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_colorspace_look.py @@ -1,12 +1,12 @@ import os from pprint import pformat import pyblish.api -from openpype.pipeline import publish -from openpype.pipeline import colorspace +from ayon_core.pipeline import publish +from ayon_core.pipeline import colorspace class CollectColorspaceLook(pyblish.api.InstancePlugin, - publish.OpenPypePyblishPluginMixin): + publish.AYONPyblishPluginMixin): """Collect OCIO colorspace look from LUT file """ diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_instances.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_instances.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py 
b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py similarity index 93% rename from openpype/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py index 75c26ac958..8e29a0048d 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py @@ -1,15 +1,15 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( publish, registered_host ) -from openpype.lib import EnumDef -from openpype.pipeline import colorspace -from openpype.pipeline.publish import KnownPublishError +from ayon_core.lib import EnumDef +from ayon_core.pipeline import colorspace +from ayon_core.pipeline.publish import KnownPublishError class CollectColorspace(pyblish.api.InstancePlugin, - publish.OpenPypePyblishPluginMixin, + publish.AYONPyblishPluginMixin, publish.ColormanagedPyblishPluginMixin): """Collect explicit user defined representation colorspaces""" diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_movie_batch.py similarity index 91% rename from openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_movie_batch.py index 5f8b2878b7..c4f2915ef8 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_movie_batch.py @@ -1,11 +1,11 @@ import os import pyblish.api -from openpype.pipeline import OpenPypePyblishPluginMixin +from ayon_core.pipeline import AYONPyblishPluginMixin class CollectMovieBatch( - pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin + pyblish.api.InstancePlugin, AYONPyblishPluginMixin ): """Collect file url for batch movies and create representation. 
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_online_file.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_online_file.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_online_file.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_review_frames.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_review_frames.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_review_frames.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_review_frames.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_sequence_frame_data.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_sequence_frame_data.py similarity index 97% rename from openpype/hosts/traypublisher/plugins/publish/collect_sequence_frame_data.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_sequence_frame_data.py index 5e60a94927..7eded0f6f5 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_sequence_frame_data.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_sequence_frame_data.py @@ -1,7 +1,7 @@ import pyblish.api import clique -from openpype.pipeline import OptionalPyblishPluginMixin +from ayon_core.pipeline import OptionalPyblishPluginMixin class CollectSequenceFrameData( diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_shot_instances.py similarity index 95% rename from openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_shot_instances.py index b99b634da1..b19eb36168 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_shot_instances.py @@ -2,8 +2,6 @@ import pyblish.api import opentimelineio as otio -from openpype import AYON_SERVER_ENABLED - class CollectShotInstance(pyblish.api.InstancePlugin): """ Collect shot instances @@ -79,6 +77,7 @@ def _get_otio_clip(self, instance): clip for clip in otio_timeline.each_child( descended_from_type=otio.schema.Clip) if clip.name == otio_clip.name + if clip.parent().kind == "Video" ] otio_clip = clips.pop() @@ -121,7 +120,7 @@ def _solve_inputs_to_data(self, instance): frame_end = _cr_attrs["frameEnd"] frame_dur = frame_end - frame_start - data = { + return { "fps": float(_cr_attrs["fps"]), "handleStart": _cr_attrs["handle_start"], "handleEnd": _cr_attrs["handle_end"], @@ -132,14 +131,9 @@ def _solve_inputs_to_data(self, instance): "clipDuration": _cr_attrs["clipDuration"], "sourceIn": _cr_attrs["sourceIn"], "sourceOut": _cr_attrs["sourceOut"], - "workfileFrameStart": workfile_start_frame + "workfileFrameStart": workfile_start_frame, + "asset": _cr_attrs["folderPath"], } - if AYON_SERVER_ENABLED: - data["asset"] = _cr_attrs["folderPath"] - else: - data["asset"] = _cr_attrs["shotName"] - - return data def _solve_hierarchy_context(self, instance): """ Adding hierarchy data to context shared data. 
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_simple_instances.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_simple_instances.py diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_source.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_source.py similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/collect_source.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_source.py diff --git a/openpype/hosts/traypublisher/plugins/publish/extract_colorspace_look.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_colorspace_look.py similarity index 92% rename from openpype/hosts/traypublisher/plugins/publish/extract_colorspace_look.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/extract_colorspace_look.py index f94bbc7a49..f7be322e74 100644 --- a/openpype/hosts/traypublisher/plugins/publish/extract_colorspace_look.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_colorspace_look.py @@ -1,11 +1,11 @@ import os import json import pyblish.api -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractColorspaceLook(publish.Extractor, - publish.OpenPypePyblishPluginMixin): + publish.AYONPyblishPluginMixin): """Extract OCIO colorspace look from LUT file """ diff --git a/openpype/hosts/traypublisher/plugins/publish/help/validate_existing_version.xml b/client/ayon_core/hosts/traypublisher/plugins/publish/help/validate_existing_version.xml similarity index 100% rename from openpype/hosts/traypublisher/plugins/publish/help/validate_existing_version.xml rename to client/ayon_core/hosts/traypublisher/plugins/publish/help/validate_existing_version.xml diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml b/client/ayon_core/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml similarity index 100% rename from openpype/hosts/standalonepublisher/plugins/publish/help/validate_frame_ranges.xml rename to client/ayon_core/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_colorspace.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_colorspace.py similarity index 94% rename from openpype/hosts/traypublisher/plugins/publish/validate_colorspace.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/validate_colorspace.py index 58c40938d2..9f629f78dd 100644 --- a/openpype/hosts/traypublisher/plugins/publish/validate_colorspace.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_colorspace.py @@ -1,17 +1,17 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( publish, PublishValidationError ) -from openpype.pipeline.colorspace import ( +from ayon_core.pipeline.colorspace import ( get_ocio_config_colorspaces ) class ValidateColorspace(pyblish.api.InstancePlugin, - publish.OpenPypePyblishPluginMixin, + publish.AYONPyblishPluginMixin, publish.ColormanagedPyblishPluginMixin): """Validate representation colorspaces""" diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_colorspace_look.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_colorspace_look.py similarity index 96% rename 
from openpype/hosts/traypublisher/plugins/publish/validate_colorspace_look.py
rename to client/ayon_core/hosts/traypublisher/plugins/publish/validate_colorspace_look.py
index 548ce9d15a..91c7632376 100644
--- a/openpype/hosts/traypublisher/plugins/publish/validate_colorspace_look.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_colorspace_look.py
@@ -1,13 +1,13 @@
 import pyblish.api
 
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     publish,
     PublishValidationError
 )
 
 
 class ValidateColorspaceLook(pyblish.api.InstancePlugin,
-                             publish.OpenPypePyblishPluginMixin):
+                             publish.AYONPyblishPluginMixin):
     """Validate colorspace look attributes"""
 
     label = "Validate colorspace look attributes"
diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_existing_version.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
similarity index 97%
rename from openpype/hosts/traypublisher/plugins/publish/validate_existing_version.py
rename to client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
index 1fb27acdeb..6a85f92ce1 100644
--- a/openpype/hosts/traypublisher/plugins/publish/validate_existing_version.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py
@@ -1,6 +1,6 @@
 import pyblish.api
 
-from openpype.pipeline.publish import (
+from ayon_core.pipeline.publish import (
     ValidateContentsOrder,
     PublishXmlValidationError,
     OptionalPyblishPluginMixin,
diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_filepaths.py
similarity index 97%
rename from openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py
rename to client/ayon_core/hosts/traypublisher/plugins/publish/validate_filepaths.py
index b67e47d213..4a4f3dae69 100644
--- a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_filepaths.py
@@ -1,6 +1,6 @@
 import os
 import pyblish.api
-from openpype.pipeline import PublishValidationError
+from ayon_core.pipeline import PublishValidationError
 
 
 class ValidateFilePath(pyblish.api.InstancePlugin):
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
new file mode 100644
index 0000000000..cd4a98b84d
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
@@ -0,0 +1,81 @@
+import re
+
+import pyblish.api
+
+from ayon_core.pipeline.publish import (
+    ValidateContentsOrder,
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin,
+)
+
+
+class ValidateFrameRange(OptionalPyblishPluginMixin,
+                         pyblish.api.InstancePlugin):
+    """Validating frame range of rendered files against state in DB."""
+
+    label = "Validate Frame Range"
+    hosts = ["traypublisher"]
+    families = ["render", "plate"]
+    order = ValidateContentsOrder
+
+    optional = True
+    # published data might be a sequence (.mov, .mp4); in that case counting
+    #   files doesn't make sense
+    check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga",
+                        "gif", "svg"]
+    skip_timelines_check = []  # skip for specific task names (regex)
+
+    def process(self, instance):
+        # Skip the instance if it is not active by data on the instance
+        if not self.is_active(instance.data):
+            return
+
+        # editorial would fail since the assets might not be in database yet
+        new_asset_publishing =
instance.data.get("newAssetPublishing")
+        if new_asset_publishing:
+            self.log.debug("Instance is creating new asset. Skipping.")
+            return
+
+        if (self.skip_timelines_check and
+                any(re.search(pattern, instance.data["task"])
+                    for pattern in self.skip_timelines_check)):
+            self.log.info("Skipping for {} task".format(instance.data["task"]))
+            return
+
+        asset_doc = instance.data["assetEntity"]
+        asset_data = asset_doc["data"]
+        frame_start = asset_data["frameStart"]
+        frame_end = asset_data["frameEnd"]
+        handle_start = asset_data["handleStart"]
+        handle_end = asset_data["handleEnd"]
+        duration = (frame_end - frame_start + 1) + handle_start + handle_end
+
+        repres = instance.data.get("representations")
+        if not repres:
+            self.log.info("No representations, skipping.")
+            return
+
+        first_repre = repres[0]
+        ext = first_repre['ext'].replace(".", '')
+
+        if not ext or ext.lower() not in self.check_extensions:
+            self.log.warning("Cannot check for extension {}".format(ext))
+            return
+
+        files = first_repre["files"]
+        if isinstance(files, str):
+            files = [files]
+        frames = len(files)
+
+        msg = (
+            "Frame duration from DB:'{}' doesn't match number of files:'{}'"
+            " Please change frame range for Asset or limit no. of files"
+        ).format(int(duration), frames)
+
+        formatting_data = {"duration": duration,
+                           "found": frames}
+        if frames != duration:
+            raise PublishXmlValidationError(self, msg,
+                                            formatting_data=formatting_data)
+
+        self.log.debug("Valid ranges expected '{}' - found '{}'".
+                       format(int(duration), frames))
diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_online_file.py
similarity index 91%
rename from openpype/hosts/traypublisher/plugins/publish/validate_online_file.py
rename to client/ayon_core/hosts/traypublisher/plugins/publish/validate_online_file.py
index 2db865ca2b..e655578095 100644
--- a/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_online_file.py
@@ -1,12 +1,12 @@
 # -*- coding: utf-8 -*-
 import pyblish.api
 
-from openpype.pipeline.publish import (
+from ayon_core.pipeline.publish import (
     ValidateContentsOrder,
     PublishValidationError,
     OptionalPyblishPluginMixin,
 )
-from openpype.client import get_subset_by_name
+from ayon_core.client import get_subset_by_name
 
 
 class ValidateOnlineFile(OptionalPyblishPluginMixin,
diff --git a/openpype/hosts/tvpaint/__init__.py b/client/ayon_core/hosts/tvpaint/__init__.py
similarity index 100%
rename from openpype/hosts/tvpaint/__init__.py
rename to client/ayon_core/hosts/tvpaint/__init__.py
diff --git a/client/ayon_core/hosts/tvpaint/addon.py b/client/ayon_core/hosts/tvpaint/addon.py
new file mode 100644
index 0000000000..375f7266ae
--- /dev/null
+++ b/client/ayon_core/hosts/tvpaint/addon.py
@@ -0,0 +1,40 @@
+import os
+from ayon_core.modules import OpenPypeModule, IHostAddon
+
+TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def get_launch_script_path():
+    return os.path.join(
+        TVPAINT_ROOT_DIR,
+        "api",
+        "launch_script.py"
+    )
+
+
+class TVPaintAddon(OpenPypeModule, IHostAddon):
+    name = "tvpaint"
+    host_name = "tvpaint"
+
+    def initialize(self, module_settings):
+        self.enabled = True
+
+    def add_implementation_envs(self, env, _app):
+        """Modify environments to contain everything required for implementation."""
+
+        defaults = {
+            "AYON_LOG_NO_COLORS": "1"
+        }
+        for key, value in defaults.items():
+            if not env.get(key):
+                env[key] = value
+
+    def
get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(TVPAINT_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".tvpp"] diff --git a/openpype/hosts/tvpaint/api/__init__.py b/client/ayon_core/hosts/tvpaint/api/__init__.py similarity index 100% rename from openpype/hosts/tvpaint/api/__init__.py rename to client/ayon_core/hosts/tvpaint/api/__init__.py diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/client/ayon_core/hosts/tvpaint/api/communication_server.py similarity index 98% rename from openpype/hosts/tvpaint/api/communication_server.py rename to client/ayon_core/hosts/tvpaint/api/communication_server.py index 2c4d8160a6..d185bdf685 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/client/ayon_core/hosts/tvpaint/api/communication_server.py @@ -21,9 +21,8 @@ ) from aiohttp_json_rpc.exceptions import RpcError -from openpype import AYON_SERVER_ENABLED -from openpype.lib import emit_event -from openpype.hosts.tvpaint.tvpaint_plugin import get_plugin_files_path +from ayon_core.lib import emit_event +from ayon_core.hosts.tvpaint.tvpaint_plugin import get_plugin_files_path log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) @@ -301,7 +300,7 @@ class QtTVPaintRpc(BaseTVPaintRpc): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - from openpype.tools.utils import host_tools + from ayon_core.tools.utils import host_tools self.tools_helper = host_tools.HostToolsHelper() route_name = self.route_name @@ -835,9 +834,7 @@ def execute_george_through_file(self, george_script): class QtCommunicator(BaseCommunicator): - label = os.getenv("AVALON_LABEL") - if not label: - label = "AYON" if AYON_SERVER_ENABLED else "OpenPype" + label = os.getenv("AYON_MENU_LABEL") or "AYON" title = "{} Tools".format(label) menu_definitions = { "title": title, diff --git a/openpype/hosts/tvpaint/api/launch_script.py b/client/ayon_core/hosts/tvpaint/api/launch_script.py similarity index 95% rename from openpype/hosts/tvpaint/api/launch_script.py rename to client/ayon_core/hosts/tvpaint/api/launch_script.py index 614dbe8a6e..bcc92d8b6d 100644 --- a/openpype/hosts/tvpaint/api/launch_script.py +++ b/client/ayon_core/hosts/tvpaint/api/launch_script.py @@ -8,9 +8,9 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype import style -from openpype.pipeline import install_host -from openpype.hosts.tvpaint.api import ( +from ayon_core import style +from ayon_core.pipeline import install_host +from ayon_core.hosts.tvpaint.api import ( TVPaintHost, CommunicationWrapper, ) diff --git a/openpype/hosts/tvpaint/api/lib.py b/client/ayon_core/hosts/tvpaint/api/lib.py similarity index 100% rename from openpype/hosts/tvpaint/api/lib.py rename to client/ayon_core/hosts/tvpaint/api/lib.py diff --git a/client/ayon_core/hosts/tvpaint/api/pipeline.py b/client/ayon_core/hosts/tvpaint/api/pipeline.py new file mode 100644 index 0000000000..1360b423b3 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/api/pipeline.py @@ -0,0 +1,511 @@ +import os +import json +import tempfile +import logging + +import requests + +import pyblish.api + +from ayon_core.client import get_asset_by_name +from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost +from ayon_core.hosts.tvpaint import TVPAINT_ROOT_DIR +from ayon_core.settings import get_current_project_settings +from ayon_core.lib import register_event_callback +from ayon_core.pipeline import ( + legacy_io, + register_loader_plugin_path, + 
register_creator_plugin_path,
+    AVALON_CONTAINER_ID,
+)
+from ayon_core.pipeline.context_tools import get_global_context
+
+from .lib import (
+    execute_george,
+    execute_george_through_file
+)
+
+log = logging.getLogger(__name__)
+
+
+METADATA_SECTION = "avalon"
+SECTION_NAME_CONTEXT = "context"
+SECTION_NAME_CREATE_CONTEXT = "create_context"
+SECTION_NAME_INSTANCES = "instances"
+SECTION_NAME_CONTAINERS = "containers"
+# Maximum length of metadata chunk string
+# TODO find out the max (500 is safe enough)
+TVPAINT_CHUNK_LENGTH = 500
+
+"""TVPaint's Metadata
+
+Metadata are stored in TVPaint's workfile.
+
+The workfile works similarly to an .ini file but has a few limitations. The
+most important limitation is that the value stored under a key has a limited
+length. Due to this limitation, each metadata section/key stores the number
+of "subkeys" that are related to the section.
+
+Example:
+The metadata key `"instances"` may store the value "2". In that case it is
+expected that there are also keys `["instances0", "instances1"]`.
+
+Workfile data looks like:
+```
+[avalon]
+instances0=[{{__dq__}id{__dq__}: {__dq__}pyblish.avalon.instance{__dq__...
+instances1=...more data...
+instances=2
+```
+"""
+
+
+class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
+    name = "tvpaint"
+
+    def install(self):
+        """Install TVPaint-specific functionality."""
+
+        log.info("AYON - Installing TVPaint integration")
+        legacy_io.install()
+
+        # Create workdir folder if it does not exist yet
+        workdir = legacy_io.Session["AVALON_WORKDIR"]
+        if not os.path.exists(workdir):
+            os.makedirs(workdir)
+
+        plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins")
+        publish_dir = os.path.join(plugins_dir, "publish")
+        load_dir = os.path.join(plugins_dir, "load")
+        create_dir = os.path.join(plugins_dir, "create")
+
+        pyblish.api.register_host("tvpaint")
+        pyblish.api.register_plugin_path(publish_dir)
+        register_loader_plugin_path(load_dir)
+        register_creator_plugin_path(create_dir)
+
+        register_event_callback("application.launched", self.initial_launch)
+        register_event_callback("application.exit", self.application_exit)
+
+    def get_current_project_name(self):
+        """
+        Returns:
+            Union[str, None]: Current project name.
+        """
+
+        return self.get_current_context().get("project_name")
+
+    def get_current_asset_name(self):
+        """
+        Returns:
+            Union[str, None]: Current asset name.
+        """
+
+        return self.get_current_context().get("asset_name")
+
+    def get_current_task_name(self):
+        """
+        Returns:
+            Union[str, None]: Current task name.
+        """
+
+        return self.get_current_context().get("task_name")
+
+    def get_current_context(self):
+        context = get_current_workfile_context()
+        if not context:
+            return get_global_context()
+
+        if "project_name" in context:
+            return context
+        # This is the legacy way the context was stored
+        return {
+            "project_name": context.get("project"),
+            "asset_name": context.get("asset"),
+            "task_name": context.get("task")
+        }
+
+    # --- Create ---
+    def get_context_data(self):
+        return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {})
+
+    def update_context_data(self, data, changes):
+        return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data)
+
+    def list_instances(self):
+        """List all created instances from current workfile."""
+        return list_instances()
+
+    def write_instances(self, data):
+        return write_instances(data)
+
+    # --- Workfile ---
+    def open_workfile(self, filepath):
+        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
+            filepath.replace("\\", "/")
+        )
+        return execute_george_through_file(george_script)
+
+    def save_workfile(self, filepath=None):
+        if not filepath:
+            filepath = self.get_current_workfile()
+        context = get_global_context()
+        save_current_workfile_context(context)
+
+        # Execute george script to save workfile.
+        george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
+        return execute_george(george_script)
+
+    def work_root(self, session):
+        return session["AVALON_WORKDIR"]
+
+    def get_current_workfile(self):
+        return execute_george("tv_GetProjectName")
+
+    def workfile_has_unsaved_changes(self):
+        return None
+
+    def get_workfile_extensions(self):
+        return [".tvpp"]
+
+    # --- Load ---
+    def get_containers(self):
+        return get_containers()
+
+    def initial_launch(self):
+        # Setup project settings if it's the template that's launched.
+        # TODO also check for template creation when it's possible to define
+        #   templates
+        last_workfile = os.environ.get("AVALON_LAST_WORKFILE")
+        if not last_workfile or os.path.exists(last_workfile):
+            return
+
+        log.info("Setting up project...")
+        global_context = get_global_context()
+        project_name = global_context.get("project_name")
+        asset_name = global_context.get("asset_name")
+        if not project_name or not asset_name:
+            return
+
+        asset_doc = get_asset_by_name(project_name, asset_name)
+
+        set_context_settings(project_name, asset_doc)
+
+    def application_exit(self):
+        """Logic related to TimerManager.
+
+        Todo:
+            This should be handled out of TVPaint integration logic.
+        """
+
+        data = get_current_project_settings()
+        stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
+
+        if not stop_timer:
+            return
+
+        # Stop application timer.
+        webserver_url = os.environ.get("AYON_WEBSERVER_URL")
+        rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
+        requests.post(rest_api_url)
+
+
+def containerise(
+    name, namespace, members, context, loader, current_containers=None
+):
+    """Add new container to metadata.
+
+    Args:
+        name (str): Container name.
+        namespace (str): Container namespace.
+        members (list): List of members that were loaded and belong
+            to the container (layer names).
+        context (dict): Representation context containing the representation
+            document (its "_id" is stored to the container).
+        loader (LoaderPlugin): Loader which created the container. Stored
+            as its string representation.
+        current_containers (list): Preloaded containers. Should be used only
+            on update/switch when containers were modified during the process.
+
+    Returns:
+        dict: Container data stored to workfile metadata.
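+
+    Note:
+        The container is appended to the "containers" section of the
+        workfile metadata.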
+    """
+
+    container_data = {
+        "schema": "openpype:container-2.0",
+        "id": AVALON_CONTAINER_ID,
+        "members": members,
+        "name": name,
+        "namespace": namespace,
+        "loader": str(loader),
+        "representation": str(context["representation"]["_id"])
+    }
+    if current_containers is None:
+        current_containers = get_containers()
+
+    # Add container to containers list
+    current_containers.append(container_data)
+
+    # Store data to metadata
+    write_workfile_metadata(SECTION_NAME_CONTAINERS, current_containers)
+
+    return container_data
+
+
+def split_metadata_string(text, chunk_length=None):
+    """Split string by length.
+
+    Split text into chunks of the entered length.
+    Example:
+        ```python
+        text = "ABCDEFGHIJKLM"
+        result = split_metadata_string(text, 3)
+        print(result)
+        >>> ['ABC', 'DEF', 'GHI', 'JKL', 'M']
+        ```
+
+    Args:
+        text (str): Text that will be split into chunks.
+        chunk_length (int): Single chunk size. Default chunk_length is
+            set to global variable `TVPAINT_CHUNK_LENGTH`.
+
+    Returns:
+        list: List of strings with at least one item.
+    """
+    if chunk_length is None:
+        chunk_length = TVPAINT_CHUNK_LENGTH
+    chunks = []
+    for idx in range(chunk_length, len(text) + chunk_length, chunk_length):
+        start_idx = idx - chunk_length
+        chunks.append(text[start_idx:idx])
+    return chunks
+
+
+def get_workfile_metadata_string_for_keys(metadata_keys):
+    """Read metadata for specific keys from current project workfile.
+
+    All values from the entered keys are concatenated into a single string
+    without a separator.
+
+    The function is designed to help get all values for one metadata key at
+    once, so the order of passed keys matters.
+
+    Args:
+        metadata_keys (list, str): Metadata keys for which data should be
+            retrieved. Order of keys matters! It is possible to enter a
+            single key as a string.
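+
+    Returns:
+        str: Concatenated values of all entered keys in the given order.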
+    """
+    # Add ability to pass only single key
+    if isinstance(metadata_keys, str):
+        metadata_keys = [metadata_keys]
+
+    output_file = tempfile.NamedTemporaryFile(
+        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
+    )
+    output_file.close()
+    output_filepath = output_file.name.replace("\\", "/")
+
+    george_script_parts = []
+    george_script_parts.append(
+        "output_path = \"{}\"".format(output_filepath)
+    )
+    # Store data for each index of metadata key
+    for metadata_key in metadata_keys:
+        george_script_parts.append(
+            "tv_readprojectstring \"{}\" \"{}\" \"\"".format(
+                METADATA_SECTION, metadata_key
+            )
+        )
+        george_script_parts.append(
+            "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result"
+        )
+
+    # Execute the script
+    george_script = "\n".join(george_script_parts)
+    execute_george_through_file(george_script)
+
+    # Load data from temp file
+    with open(output_filepath, "r") as stream:
+        file_content = stream.read()
+
+    # Remove `\n` from content
+    output_string = file_content.replace("\n", "")
+
+    # Delete temp file
+    os.remove(output_filepath)
+
+    return output_string
+
+
+def get_workfile_metadata_string(metadata_key):
+    """Read metadata for specific key from current project workfile."""
+    result = get_workfile_metadata_string_for_keys([metadata_key])
+    if not result:
+        return None
+
+    stripped_result = result.strip()
+    if not stripped_result:
+        return None
+
+    # NOTE Backwards compatibility for when the metadata key did not store
+    #   the count of key indexes but the value itself
+    # NOTE We don't have to care about negative values with `isdecimal` check
+    if not stripped_result.isdecimal():
+        metadata_string = result
+    else:
+        keys = []
+        for idx in range(int(stripped_result)):
+            keys.append("{}{}".format(metadata_key, idx))
+        metadata_string = get_workfile_metadata_string_for_keys(keys)
+
+    # Replace quote placeholders with their values
+    metadata_string = (
+        metadata_string
+        .replace("{__sq__}", "'")
+        .replace("{__dq__}", "\"")
+    )
+    return metadata_string
+
+
+def get_workfile_metadata(metadata_key, default=None):
+    """Read and parse metadata for specific key from current project workfile.
+
+    The pipeline uses this function to store loaded and created instances
+    under the keys stored in the `SECTION_NAME_INSTANCES` and
+    `SECTION_NAME_CONTAINERS` constants.
+
+    Args:
+        metadata_key (str): Key defining which metadata should be read. The
+            stored value is expected to be a JSON serializable string.
+    """
+    if default is None:
+        default = []
+
+    json_string = get_workfile_metadata_string(metadata_key)
+    if json_string:
+        try:
+            return json.loads(json_string)
+        except json.decoder.JSONDecodeError:
+            # TODO remove when backwards compatibility of storing metadata
+            #   will be removed
+            print((
+                "Fixed invalid metadata in workfile."
+                " Not serializable string was: {}"
+            ).format(json_string))
+            write_workfile_metadata(metadata_key, default)
+    return default
+
+
+def write_workfile_metadata(metadata_key, value):
+    """Write metadata for specific key into current project workfile.
+
+    George scripts have a specific way of working with quotes, which this
+    function handles automatically.
+
+    Args:
+        metadata_key (str): Key under which the value will be stored.
+        value (dict, list, str): Data to store; must be JSON serializable.
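+
+    Returns:
+        Result of the george script execution.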
+ """ + if isinstance(value, (dict, list)): + value = json.dumps(value) + + if not value: + value = "" + + # Handle quotes in dumped json string + # - replace single and double quotes with placeholders + value = ( + value + .replace("'", "{__sq__}") + .replace("\"", "{__dq__}") + ) + chunks = split_metadata_string(value) + chunks_len = len(chunks) + + write_template = "tv_writeprojectstring \"{}\" \"{}\" \"{}\"" + george_script_parts = [] + # Add information about chunks length to metadata key itself + george_script_parts.append( + write_template.format(METADATA_SECTION, metadata_key, chunks_len) + ) + # Add chunk values to indexed metadata keys + for idx, chunk_value in enumerate(chunks): + sub_key = "{}{}".format(metadata_key, idx) + george_script_parts.append( + write_template.format(METADATA_SECTION, sub_key, chunk_value) + ) + + george_script = "\n".join(george_script_parts) + + return execute_george_through_file(george_script) + + +def get_current_workfile_context(): + """Return context in which was workfile saved.""" + return get_workfile_metadata(SECTION_NAME_CONTEXT, {}) + + +def save_current_workfile_context(context): + """Save context which was used to create a workfile.""" + return write_workfile_metadata(SECTION_NAME_CONTEXT, context) + + +def list_instances(): + """List all created instances from current workfile.""" + return get_workfile_metadata(SECTION_NAME_INSTANCES) + + +def write_instances(data): + return write_workfile_metadata(SECTION_NAME_INSTANCES, data) + + +def get_containers(): + output = get_workfile_metadata(SECTION_NAME_CONTAINERS) + if output: + for item in output: + if "objectName" not in item and "members" in item: + members = item["members"] + if isinstance(members, list): + members = "|".join([str(member) for member in members]) + item["objectName"] = members + return output + + +def set_context_settings(project_name, asset_doc): + """Set workfile settings by asset document data. + + Change fps, resolution and frame start/end. 
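+
+    Args:
+        project_name (str): Name of the project.
+        asset_doc (dict): Asset document with "data" containing fps,
+            resolution, frame range and handles information.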
+ """ + + width_key = "resolutionWidth" + height_key = "resolutionHeight" + + width = asset_doc["data"].get(width_key) + height = asset_doc["data"].get(height_key) + if width is None or height is None: + print("Resolution was not found!") + else: + execute_george( + "tv_resizepage {} {} 0".format(width, height) + ) + + framerate = asset_doc["data"].get("fps") + + if framerate is not None: + execute_george( + "tv_framerate {} \"timestretch\"".format(framerate) + ) + else: + print("Framerate was not found!") + + frame_start = asset_doc["data"].get("frameStart") + frame_end = asset_doc["data"].get("frameEnd") + + if frame_start is None or frame_end is None: + print("Frame range was not found!") + return + + handle_start = asset_doc["data"].get("handleStart") + handle_end = asset_doc["data"].get("handleEnd") + + # Always start from 0 Mark In and set only Mark Out + mark_in = 0 + mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end + + execute_george("tv_markin {} set".format(mark_in)) + execute_george("tv_markout {} set".format(mark_out)) diff --git a/client/ayon_core/hosts/tvpaint/api/plugin.py b/client/ayon_core/hosts/tvpaint/api/plugin.py new file mode 100644 index 0000000000..88a0e74528 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/api/plugin.py @@ -0,0 +1,189 @@ +import re + +from ayon_core.pipeline import LoaderPlugin +from ayon_core.pipeline.create import ( + CreatedInstance, + get_subset_name, + AutoCreator, + Creator, +) +from ayon_core.pipeline.create.creator_plugins import cache_and_get_instances + +from .lib import get_layers_data + + +SHARED_DATA_KEY = "openpype.tvpaint.instances" + + +class TVPaintCreatorCommon: + @property + def subset_template_family_filter(self): + return self.family + + def _cache_and_get_instances(self): + return cache_and_get_instances( + self, SHARED_DATA_KEY, self.host.list_instances + ) + + def _collect_create_instances(self): + instances_by_identifier = self._cache_and_get_instances() + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def _update_create_instances(self, update_list): + if not update_list: + return + + cur_instances = self.host.list_instances() + cur_instances_by_id = {} + for instance_data in cur_instances: + instance_id = instance_data.get("instance_id") + if instance_id: + cur_instances_by_id[instance_id] = instance_data + + for instance, changes in update_list: + instance_data = changes.new_value + cur_instance_data = cur_instances_by_id.get(instance.id) + if cur_instance_data is None: + cur_instances.append(instance_data) + continue + for key in set(cur_instance_data) - set(instance_data): + cur_instance_data.pop(key) + cur_instance_data.update(instance_data) + self.host.write_instances(cur_instances) + + def _custom_get_subset_name( + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None + ): + dynamic_data = self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name, instance + ) + + return get_subset_name( + self.family, + variant, + task_name, + asset_doc, + project_name, + host_name, + dynamic_data=dynamic_data, + project_settings=self.project_settings, + family_filter=self.subset_template_family_filter + ) + + +class TVPaintCreator(Creator, TVPaintCreatorCommon): + def collect_instances(self): + self._collect_create_instances() + + def update_instances(self, update_list): + self._update_create_instances(update_list) + + 
def remove_instances(self, instances):
+        ids_to_remove = {
+            instance.id
+            for instance in instances
+        }
+        cur_instances = self.host.list_instances()
+        changed = False
+        new_instances = []
+        for instance_data in cur_instances:
+            if instance_data.get("instance_id") in ids_to_remove:
+                changed = True
+            else:
+                new_instances.append(instance_data)
+
+        if changed:
+            self.host.write_instances(new_instances)
+
+        for instance in instances:
+            self._remove_instance_from_context(instance)
+
+    def get_dynamic_data(self, *args, **kwargs):
+        # Change asset and task name by current workfile context
+        create_context = self.create_context
+        asset_name = create_context.get_current_asset_name()
+        task_name = create_context.get_current_task_name()
+        output = {}
+        if asset_name:
+            output["asset"] = asset_name
+        if task_name:
+            output["task"] = task_name
+        return output
+
+    def get_subset_name(self, *args, **kwargs):
+        return self._custom_get_subset_name(*args, **kwargs)
+
+    def _store_new_instance(self, new_instance):
+        instances_data = self.host.list_instances()
+        instances_data.append(new_instance.data_to_store())
+        self.host.write_instances(instances_data)
+        self._add_instance_to_context(new_instance)
+
+
+class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
+    def collect_instances(self):
+        self._collect_create_instances()
+
+    def update_instances(self, update_list):
+        self._update_create_instances(update_list)
+
+    def get_subset_name(self, *args, **kwargs):
+        return self._custom_get_subset_name(*args, **kwargs)
+
+
+class Loader(LoaderPlugin):
+    hosts = ["tvpaint"]
+
+    @staticmethod
+    def get_members_from_container(container):
+        if "members" not in container and "objectName" in container:
+            # Backwards compatibility
+            layer_ids_str = container.get("objectName")
+            return [
+                int(layer_id) for layer_id in layer_ids_str.split("|")
+            ]
+        return container["members"]
+
+    def get_unique_layer_name(self, asset_name, name):
+        """Layer name with counter as suffix.
+
+        Find the highest 3-digit suffix among all layer names in the scene
+        matching the pattern `{asset_name}_{name}_{suffix}`. The highest
+        found suffix is used as the base for the next number; if the scene
+        does not contain a matching layer, `0` is used as the base.
+
+        Args:
+            asset_name (str): Name of subset's parent asset document.
+            name (str): Name of loaded subset.
+
+        Returns:
+            (str): `{asset_name}_{name}_{highest suffix + 1}`
+        """
+        layer_name_base = "{}_{}".format(asset_name, name)
+
+        counter_regex = re.compile(r"_(\d{3})$")
+
+        higher_counter = 0
+        for layer in get_layers_data():
+            layer_name = layer["name"]
+            if not layer_name.startswith(layer_name_base):
+                continue
+            number_subpart = layer_name[len(layer_name_base):]
+            groups = counter_regex.findall(number_subpart)
+            if len(groups) != 1:
+                continue
+
+            counter = int(groups[0])
+            if counter > higher_counter:
+                higher_counter = counter
+                continue
+
+        return "{}_{:0>3d}".format(layer_name_base, higher_counter + 1)
diff --git a/openpype/hosts/tvpaint/hooks/pre_launch_args.py b/client/ayon_core/hosts/tvpaint/hooks/pre_launch_args.py
similarity index 84%
rename from openpype/hosts/tvpaint/hooks/pre_launch_args.py
rename to client/ayon_core/hosts/tvpaint/hooks/pre_launch_args.py
index a1c946b60b..25e324c5cc 100644
--- a/openpype/hosts/tvpaint/hooks/pre_launch_args.py
+++ b/client/ayon_core/hosts/tvpaint/hooks/pre_launch_args.py
@@ -1,5 +1,5 @@
-from openpype.lib import get_openpype_execute_args
-from openpype.lib.applications import PreLaunchHook, LaunchTypes
+from ayon_core.lib import get_ayon_launcher_args
+from ayon_core.lib.applications import PreLaunchHook, LaunchTypes
 
 
 class TvpaintPrelaunchHook(PreLaunchHook):
@@ -23,7 +23,7 @@ def execute(self):
         while self.launch_context.launch_args:
             remainders.append(self.launch_context.launch_args.pop(0))
 
-        new_launch_args = get_openpype_execute_args(
+        new_launch_args = get_ayon_launcher_args(
            "run", self.launch_script_path(), executable_path
         )
 
@@ -37,6 +37,6 @@ def execute(self):
         self.launch_context.launch_args.extend(remainders)
 
     def launch_script_path(self):
-        from openpype.hosts.tvpaint import get_launch_script_path
+        from ayon_core.hosts.tvpaint import get_launch_script_path
 
         return get_launch_script_path()
diff --git a/openpype/hosts/tvpaint/lib.py b/client/ayon_core/hosts/tvpaint/lib.py
similarity index 100%
rename from openpype/hosts/tvpaint/lib.py
rename to client/ayon_core/hosts/tvpaint/lib.py
diff --git a/client/ayon_core/hosts/tvpaint/plugins/create/convert_legacy.py b/client/ayon_core/hosts/tvpaint/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..d3c6c06c8a
--- /dev/null
+++ b/client/ayon_core/hosts/tvpaint/plugins/create/convert_legacy.py
@@ -0,0 +1,150 @@
+import collections
+
+from ayon_core.pipeline.create.creator_plugins import (
+    SubsetConvertorPlugin,
+    cache_and_get_instances,
+)
+from ayon_core.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
+from ayon_core.hosts.tvpaint.api.lib import get_groups_data
+
+
+class TVPaintLegacyConverted(SubsetConvertorPlugin):
+    """Conversion of legacy instances in scene to new creators.
+
+    This convertor handles only instances created by core creators.
+
+    All instances that would be created using auto-creators are removed,
+    since by the time this convertor finds them the auto-created instances
+    would already exist.
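+
+    Render Layer and Render Pass instances are converted in place. Render
+    scene, workfile and review instances are only removed.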
+    """
+
+    identifier = "tvpaint.legacy.converter"
+
+    def find_instances(self):
+        instances_by_identifier = cache_and_get_instances(
+            self, SHARED_DATA_KEY, self.host.list_instances
+        )
+        if instances_by_identifier[None]:
+            self.add_convertor_item("Convert legacy instances")
+
+    def convert(self):
+        current_instances = self.host.list_instances()
+        to_convert = collections.defaultdict(list)
+        converted = False
+        for instance in current_instances:
+            if instance.get("creator_identifier") is not None:
+                continue
+            converted = True
+
+            family = instance.get("family")
+            if family in (
+                "renderLayer",
+                "renderPass",
+                "renderScene",
+                "review",
+                "workfile",
+            ):
+                to_convert[family].append(instance)
+            else:
+                instance["keep"] = False
+
+        # Skip if nothing was changed
+        if not converted:
+            self.remove_convertor_item()
+            return
+
+        self._convert_render_layers(
+            to_convert["renderLayer"], current_instances)
+        self._convert_render_passes(
+            to_convert["renderPass"], current_instances)
+        self._convert_render_scenes(
+            to_convert["renderScene"], current_instances)
+        self._convert_workfiles(
+            to_convert["workfile"], current_instances)
+        self._convert_reviews(
+            to_convert["review"], current_instances)
+
+        new_instances = [
+            instance
+            for instance in current_instances
+            if instance.get("keep") is not False
+        ]
+        self.host.write_instances(new_instances)
+        # remove legacy item if all is fine
+        self.remove_convertor_item()
+
+    def _convert_render_layers(self, render_layers, current_instances):
+        if not render_layers:
+            return
+
+        # Look for possible existing render layers in scene
+        render_layers_by_group_id = {}
+        for instance in current_instances:
+            if instance.get("creator_identifier") == "render.layer":
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+
+        groups_by_id = {
+            group["group_id"]: group
+            for group in get_groups_data()
+        }
+        for render_layer in render_layers:
+            group_id = render_layer.pop("group_id")
+            # Just remove legacy instance if group is already occupied
+            if group_id in render_layers_by_group_id:
+                render_layer["keep"] = False
+                continue
+            # Add identifier
+            render_layer["creator_identifier"] = "render.layer"
+            # Change 'uuid' to 'instance_id'
+            render_layer["instance_id"] = render_layer.pop("uuid")
+            # Fill creator attributes
+            render_layer["creator_attributes"] = {
+                "group_id": group_id
+            }
+            render_layer["family"] = "render"
+            group = groups_by_id[group_id]
+            # Use group name for variant
+            render_layer["variant"] = group["name"]
+
+    def _convert_render_passes(self, render_passes, current_instances):
+        if not render_passes:
+            return
+
+        # Render passes must have available render layers so we look for render
+        # layers first
+        # - '_convert_render_layers' must be called before this method
+        render_layers_by_group_id = {}
+        for instance in current_instances:
+            if instance.get("creator_identifier") == "render.layer":
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+
+        for render_pass in render_passes:
+            group_id = render_pass.pop("group_id")
+            render_layer = render_layers_by_group_id.get(group_id)
+            if not render_layer:
+                render_pass["keep"] = False
+                continue
+
+            render_pass["creator_identifier"] = "render.pass"
+            render_pass["instance_id"] = render_pass.pop("uuid")
+            render_pass["family"] = "render"
+
+            render_pass["creator_attributes"] = {
+                "render_layer_instance_id": render_layer["instance_id"]
+            }
+            render_pass["variant"] = render_pass.pop("pass")
+            render_pass.pop("renderlayer")
+
+    # Rest of instances are just marked for deletion
+    def _convert_render_scenes(self, render_scenes, current_instances):
+        for render_scene in render_scenes:
+            render_scene["keep"] = False
+
+    def _convert_workfiles(self, workfiles, current_instances):
+        for workfile in workfiles:
+            workfile["keep"] = False
+
+    def _convert_reviews(self, reviews, current_instances):
+        for review in reviews:
+            review["keep"] = False
diff --git a/client/ayon_core/hosts/tvpaint/plugins/create/create_render.py b/client/ayon_core/hosts/tvpaint/plugins/create/create_render.py
new file mode 100644
index 0000000000..7d908e8018
--- /dev/null
+++ b/client/ayon_core/hosts/tvpaint/plugins/create/create_render.py
@@ -0,0 +1,1158 @@
+"""Render Layer and Passes creators.
+
+A Render Layer is the main part and is represented by a color group in
+TVPaint. All TVPaint layers marked with that group color are part of the
+render layer. To be more specific about some parts of the layer it is
+possible to create sub-sets of the layer which are called passes. A Render
+Pass consists of layers in the same color group as the render layer but
+defines a more specific part.
+
+For example a render layer could be 'Bob' which consists of 5 TVPaint layers.
+- Bob has a 'head' which consists of 2 TVPaint layers -> Render pass 'head'
+- Bob has a 'body' which consists of 1 TVPaint layer -> Render pass 'body'
+- Bob has an 'arm' which consists of 1 TVPaint layer -> Render pass 'arm'
+- The last layer does not belong to any render pass
+
+Bob will be rendered as the 'beauty' of Bob (all visible layers in the group).
+His head will be rendered too but without any other parts. The same applies
+to the body and arm.
+
+What is this good for? Compositing gets more control over how the renders
+are used. It can do transforms on each render pass without the need to
+modify and re-render them using TVPaint.
+
+The workflow may hit issues when blending modes other than the default
+'color' blend mode are used. In that case it is not recommended to use this
+workflow at all, as other blend modes may affect all layers in the clip,
+which can't be reproduced by separate passes.
+
+There is a special case for simple publishing of the whole scene which is
+called 'render.scene'. It uses all visible layers and renders them as one
+big sequence.
+
+Todos:
+    Add option to extract marked layers and passes as json output format for
+    AfterEffects.
+"""
+
+import collections
+from typing import Any, Optional, Union
+
+from ayon_core.client import get_asset_by_name, get_asset_name_identifier
+from ayon_core.lib import (
+    prepare_template_data,
+    AbstractAttrDef,
+    UILabelDef,
+    UISeparatorDef,
+    EnumDef,
+    TextDef,
+    BoolDef,
+)
+from ayon_core.pipeline.create import (
+    CreatedInstance,
+    CreatorError,
+)
+from ayon_core.hosts.tvpaint.api.plugin import (
+    TVPaintCreator,
+    TVPaintAutoCreator,
+)
+from ayon_core.hosts.tvpaint.api.lib import (
+    get_layers_data,
+    get_groups_data,
+    execute_george_through_file,
+)
+
+RENDER_LAYER_DETAILED_DESCRIPTIONS = (
+    """Render Layer is "a group of TVPaint layers"
+
+Be aware that a Render Layer is not a TVPaint layer.
+
+All TVPaint layers in the scene with the color group id are rendered in the
+beauty pass. To create sub passes use the Render Pass creator, which depends
+on the existence of a Render Layer instance.
+
+The group can represent an asset (tree) or a different part of the scene
+that consists of one or more TVPaint layers and can be used as a single item
+during compositing (for example).
+
+In some cases it may be needed to have sub parts of the layer. For example 'Bob'
For example 'Bob' +could be Render Layer which has 'Arm', 'Head' and 'Body' as Render Passes. +""" +) + + +RENDER_PASS_DETAILED_DESCRIPTIONS = ( + """Render Pass is sub part of Render Layer. + +Render Pass can consist of one or more TVPaint layers. Render Pass must +belong to a Render Layer. Marked TVPaint layers will change it's group color +to match group color of Render Layer. +""" +) + + +AUTODETECT_RENDER_DETAILED_DESCRIPTION = ( + """Semi-automated Render Layer and Render Pass creation. + +Based on information in TVPaint scene will be created Render Layers and Render +Passes. All color groups used in scene will be used for Render Layer creation. +Name of the group is used as a variant. + +All TVPaint layers under the color group will be created as Render Pass where +layer name is used as variant. + +The plugin will use all used color groups and layers, or can skip those that +are not visible. + +There is option to auto-rename color groups before Render Layer creation. That +is based on settings template where is filled index of used group from bottom +to top. +""" +) + +class CreateRenderlayer(TVPaintCreator): + """Mark layer group as Render layer instance. + + All TVPaint layers in the scene with the color group id are rendered in the + beauty pass. To create sub passes use Render Layer creator which is + dependent on existence of render layer instance. + """ + + label = "Render Layer" + family = "render" + subset_template_family_filter = "renderLayer" + identifier = "render.layer" + icon = "fa5.images" + + # George script to change color group + rename_script_template = ( + "tv_layercolor \"setcolor\"" + " {clip_id} {group_id} {r} {g} {b} \"{name}\"" + ) + # Order to be executed before Render Pass creator + order = 90 + description = "Mark TVPaint color group as one Render Layer." 
+
+
+class CreateRenderlayer(TVPaintCreator):
+    """Mark a layer group as a Render Layer instance.
+
+    All TVPaint layers in the scene with the color group id are rendered in
+    the beauty pass. To create sub passes use the Render Pass creator, which
+    is dependent on the existence of a Render Layer instance.
+    """
+
+    label = "Render Layer"
+    family = "render"
+    subset_template_family_filter = "renderLayer"
+    identifier = "render.layer"
+    icon = "fa5.images"
+
+    # George script to change color group
+    rename_script_template = (
+        "tv_layercolor \"setcolor\""
+        " {clip_id} {group_id} {r} {g} {b} \"{name}\""
+    )
+    # Order to be executed before Render Pass creator
+    order = 90
+    description = "Mark TVPaint color group as one Render Layer."
+    detailed_description = RENDER_LAYER_DETAILED_DESCRIPTIONS
+
+    # Settings
+    # - Default render pass name for beauty
+    default_pass_name = "beauty"
+    # - Mark instance for review by default
+    mark_for_review = True
+
+    def apply_settings(self, project_settings):
+        plugin_settings = (
+            project_settings["tvpaint"]["create"]["create_render_layer"]
+        )
+        self.default_variant = plugin_settings["default_variant"]
+        self.default_variants = plugin_settings["default_variants"]
+        self.default_pass_name = plugin_settings["default_pass_name"]
+        self.mark_for_review = plugin_settings["mark_for_review"]
+
+    def get_dynamic_data(
+        self, variant, task_name, asset_doc, project_name, host_name, instance
+    ):
+        dynamic_data = super().get_dynamic_data(
+            variant, task_name, asset_doc, project_name, host_name, instance
+        )
+        dynamic_data["renderpass"] = self.default_pass_name
+        dynamic_data["renderlayer"] = variant
+        return dynamic_data
+
+    def _get_selected_group_ids(self):
+        return {
+            layer["group_id"]
+            for layer in get_layers_data()
+            if layer["selected"]
+        }
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        self.log.debug("Query data from workfile.")
+
+        group_name = instance_data["variant"]
+        group_id = pre_create_data.get("group_id")
+        # This creator should run only on one group
+        if group_id is None or group_id == -1:
+            selected_groups = self._get_selected_group_ids()
+            selected_groups.discard(0)
+            if len(selected_groups) > 1:
+                raise CreatorError("You have selected more than one group")
+
+            if len(selected_groups) == 0:
+                raise CreatorError("You don't have any group selected")
+            group_id = tuple(selected_groups)[0]
+
+        self.log.debug("Querying groups data from workfile.")
+        groups_data = get_groups_data()
+        group_item = None
+        for group_data in groups_data:
+            if group_data["group_id"] == group_id:
+                group_item = group_data
+                break
+
+        for instance in self.create_context.instances:
+            if (
+                instance.creator_identifier == self.identifier
+                and instance["creator_attributes"]["group_id"] == group_id
+            ):
+                raise CreatorError((
+                    f"Group \"{group_item.get('name')}\" is already used"
+                    f" by another render layer \"{instance['subset']}\""
+                ))
+
+        self.log.debug(f"Selected group id is \"{group_id}\".")
+        if "creator_attributes" not in instance_data:
+            instance_data["creator_attributes"] = {}
+        creator_attributes = instance_data["creator_attributes"]
+        mark_for_review = pre_create_data.get("mark_for_review")
+        if mark_for_review is None:
+            mark_for_review = self.mark_for_review
+        creator_attributes["group_id"] = group_id
+        creator_attributes["mark_for_review"] = mark_for_review
+
+        self.log.info(f"Subset name is {subset_name}")
+        new_instance = CreatedInstance(
+            self.family,
+            subset_name,
+            instance_data,
+            self
+        )
+        self._store_new_instance(new_instance)
+
+        if not group_id or group_item["name"] == group_name:
+            return new_instance
+
+        self.log.debug("Changing name of the group.")
+        # Rename TVPaint group (keep the color the same)
+        # - group names can't contain spaces
+        rename_script = self.rename_script_template.format(
+            clip_id=group_item["clip_id"],
+            group_id=group_item["group_id"],
+            r=group_item["red"],
+            g=group_item["green"],
+            b=group_item["blue"],
+            name=group_name
+        )
+        execute_george_through_file(rename_script)
+
+        self.log.info((
+            f"Name of group with index {group_id}"
+            f" was changed to \"{group_name}\"."
+        ))
+        return new_instance
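+
+    # Example 'pre_create_data' for 'create' above (values are illustrative;
+    # a 'group_id' of None or -1 falls back to the current selection):
+    #     {"group_id": 2, "mark_for_review": True}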
+
+    def _get_groups_enum(self):
+        groups_enum = []
+        empty_groups = []
+        for group in get_groups_data():
+            group_name = group["name"]
+            item = {
+                "label": group_name,
+                "value": group["group_id"]
+            }
+            # TVPaint has a defined number of available color groups, but
+            #   the count is not consistent across versions. It is not
+            #   possible to know how many groups there are.
+            #
+            if group_name and group_name != "0":
+                if empty_groups:
+                    groups_enum.extend(empty_groups)
+                    empty_groups = []
+                groups_enum.append(item)
+            else:
+                empty_groups.append(item)
+        return groups_enum
+
+    def get_pre_create_attr_defs(self):
+        groups_enum = self._get_groups_enum()
+        groups_enum.insert(0, {"label": "<Use selection>", "value": -1})
+
+        return [
+            EnumDef(
+                "group_id",
+                label="Group",
+                items=groups_enum
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+    def get_instance_attr_defs(self):
+        groups_enum = self._get_groups_enum()
+        return [
+            EnumDef(
+                "group_id",
+                label="Group",
+                items=groups_enum
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+    def update_instances(self, update_list):
+        self._update_color_groups()
+        self._update_renderpass_groups()
+
+        super().update_instances(update_list)
+
+    def _update_color_groups(self):
+        render_layer_instances = []
+        for instance in self.create_context.instances:
+            if instance.creator_identifier == self.identifier:
+                render_layer_instances.append(instance)
+
+        if not render_layer_instances:
+            return
+
+        groups_by_id = {
+            group["group_id"]: group
+            for group in get_groups_data()
+        }
+        grg_script_lines = []
+        for instance in render_layer_instances:
+            group_id = instance["creator_attributes"]["group_id"]
+            variant = instance["variant"]
+            group = groups_by_id[group_id]
+            if group["name"] == variant:
+                continue
+
+            grg_script_lines.append(self.rename_script_template.format(
+                clip_id=group["clip_id"],
+                group_id=group["group_id"],
+                r=group["red"],
+                g=group["green"],
+                b=group["blue"],
+                name=variant
+            ))
+
+        if grg_script_lines:
+            execute_george_through_file("\n".join(grg_script_lines))
+
+    def _update_renderpass_groups(self):
+        render_layer_instances = {}
+        render_pass_instances = collections.defaultdict(list)
+
+        for instance in self.create_context.instances:
+            if instance.creator_identifier == CreateRenderPass.identifier:
+                render_layer_id = (
+                    instance["creator_attributes"]["render_layer_instance_id"]
+                )
+                render_pass_instances[render_layer_id].append(instance)
+            elif instance.creator_identifier == self.identifier:
+                render_layer_instances[instance.id] = instance
+
+        if not render_pass_instances or not render_layer_instances:
+            return
+
+        layers_data = get_layers_data()
+        layers_by_name = collections.defaultdict(list)
+        for layer in layers_data:
+            layers_by_name[layer["name"]].append(layer)
+
+        george_lines = []
+        for render_layer_id, instances in render_pass_instances.items():
+            render_layer_inst = render_layer_instances.get(render_layer_id)
+            if render_layer_inst is None:
+                continue
+            group_id = render_layer_inst["creator_attributes"]["group_id"]
+            layer_names = set()
+            for instance in instances:
+                layer_names |= set(instance["layer_names"])
+
+            for layer_name in layer_names:
+                george_lines.extend(
+                    f"tv_layercolor \"set\" {layer['layer_id']} {group_id}"
+                    for layer in layers_by_name[layer_name]
+                    if layer["group_id"] != group_id
+                )
+        if george_lines:
+            execute_george_through_file("\n".join(george_lines))
+
+
+class CreateRenderPass(TVPaintCreator):
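+    """Mark selected TVPaint layers as a Render Pass of a Render Layer.
+
+    The Render Pass keeps the names of the marked layers and the id of the
+    Render Layer instance it belongs to in its creator attributes.
+    """
+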
+    family = "render"
+    subset_template_family_filter = "renderPass"
+    identifier = "render.pass"
+    label = "Render Pass"
+    icon = "fa5.image"
+    description = "Mark selected TVPaint layers as a pass of a Render Layer."
+    detailed_description = RENDER_PASS_DETAILED_DESCRIPTIONS
+
+    order = CreateRenderlayer.order + 10
+
+    # Settings
+    mark_for_review = True
+
+    def apply_settings(self, project_settings):
+        plugin_settings = (
+            project_settings["tvpaint"]["create"]["create_render_pass"]
+        )
+        self.default_variant = plugin_settings["default_variant"]
+        self.default_variants = plugin_settings["default_variants"]
+        self.mark_for_review = plugin_settings["mark_for_review"]
+
+    def collect_instances(self):
+        instances_by_identifier = self._cache_and_get_instances()
+        render_layers = {
+            instance_data["instance_id"]: {
+                "variant": instance_data["variant"],
+                "template_data": prepare_template_data({
+                    "renderlayer": instance_data["variant"]
+                })
+            }
+            for instance_data in (
+                instances_by_identifier[CreateRenderlayer.identifier]
+            )
+        }
+
+        for instance_data in instances_by_identifier[self.identifier]:
+            render_layer_instance_id = (
+                instance_data
+                .get("creator_attributes", {})
+                .get("render_layer_instance_id")
+            )
+            render_layer_info = render_layers.get(render_layer_instance_id, {})
+            self.update_instance_labels(
+                instance_data,
+                render_layer_info.get("variant"),
+                render_layer_info.get("template_data")
+            )
+            instance = CreatedInstance.from_existing(instance_data, self)
+            self._add_instance_to_context(instance)
+
+    def get_dynamic_data(
+        self, variant, task_name, asset_doc, project_name, host_name, instance
+    ):
+        dynamic_data = super().get_dynamic_data(
+            variant, task_name, asset_doc, project_name, host_name, instance
+        )
+        dynamic_data["renderpass"] = variant
+        dynamic_data["renderlayer"] = "{renderlayer}"
+        return dynamic_data
+
+    def update_instance_labels(
+        self, instance, render_layer_variant, render_layer_data=None
+    ):
+        old_label = instance.get("label")
+        old_group = instance.get("group")
+        new_label = None
+        new_group = None
+        if render_layer_variant is not None:
+            if render_layer_data is None:
+                render_layer_data = prepare_template_data({
+                    "renderlayer": render_layer_variant
+                })
+            try:
+                new_label = instance["subset"].format(**render_layer_data)
+            except (KeyError, ValueError):
+                pass
+
+            new_group = f"{self.get_group_label()} ({render_layer_variant})"
+
+        instance["label"] = new_label
+        instance["group"] = new_group
+        return old_group != new_group or old_label != new_label
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        render_layer_instance_id = pre_create_data.get(
+            "render_layer_instance_id"
+        )
+        if not render_layer_instance_id:
+            raise CreatorError((
+                "You cannot create a Render Pass without a Render Layer."
+                " Please select one first."
+            ))
+
+        render_layer_instance = self.create_context.instances_by_id.get(
+            render_layer_instance_id
+        )
+        if render_layer_instance is None:
+            raise CreatorError((
+                "RenderLayer instance was not found"
+                f" by id \"{render_layer_instance_id}\""
+            ))
+
+        group_id = render_layer_instance["creator_attributes"]["group_id"]
+        self.log.debug("Query data from workfile.")
+        layers_data = get_layers_data()
+
+        self.log.debug("Checking selection.")
+        # Get all selected layers and their group ids
+        marked_layer_names = pre_create_data.get("layer_names")
+        if marked_layer_names is not None:
+            layers_by_name = {layer["name"]: layer for layer in layers_data}
+            marked_layers = []
+            for layer_name in marked_layer_names:
+                layer = layers_by_name.get(layer_name)
+                if layer is None:
+                    raise CreatorError(
+                        f"Layer with name \"{layer_name}\" was not found")
+                marked_layers.append(layer)
+
+        else:
+            marked_layers = [
+                layer
+                for layer in layers_data
+                if layer["selected"]
+            ]
+
+            # Raise if nothing is selected
+            if not marked_layers:
+                raise CreatorError(
+                    "Nothing is selected. Please select layers.")
+
+            marked_layer_names = {layer["name"] for layer in marked_layers}
+
+        marked_layer_names = set(marked_layer_names)
+
+        instances_to_remove = []
+        for instance in self.create_context.instances:
+            if instance.creator_identifier != self.identifier:
+                continue
+            cur_layer_names = set(instance["layer_names"])
+            if not cur_layer_names.intersection(marked_layer_names):
+                continue
+            new_layer_names = cur_layer_names - marked_layer_names
+            if new_layer_names:
+                instance["layer_names"] = list(new_layer_names)
+            else:
+                instances_to_remove.append(instance)
+
+        render_layer = render_layer_instance["variant"]
+        subset_name_fill_data = {"renderlayer": render_layer}
+
+        # Format dynamic keys in subset name
+        label = subset_name
+        try:
+            label = label.format(
+                **prepare_template_data(subset_name_fill_data)
+            )
+        except (KeyError, ValueError):
+            pass
+
+        self.log.info(f"New subset name is \"{label}\".")
+        instance_data["label"] = label
+        instance_data["group"] = f"{self.get_group_label()} ({render_layer})"
+        instance_data["layer_names"] = list(marked_layer_names)
+        if "creator_attributes" not in instance_data:
+            instance_data["creator_attributes"] = {}
+
+        creator_attributes = instance_data["creator_attributes"]
+        mark_for_review = pre_create_data.get("mark_for_review")
+        if mark_for_review is None:
+            mark_for_review = self.mark_for_review
+        creator_attributes["mark_for_review"] = mark_for_review
+        creator_attributes["render_layer_instance_id"] = (
+            render_layer_instance_id
+        )
+
+        new_instance = CreatedInstance(
+            self.family,
+            subset_name,
+            instance_data,
+            self
+        )
+        instances_data = self._remove_and_filter_instances(
+            instances_to_remove
+        )
+        instances_data.append(new_instance.data_to_store())
+
+        self.host.write_instances(instances_data)
+        self._add_instance_to_context(new_instance)
+        self._change_layers_group(marked_layers, group_id)
+
+        return new_instance
+
+    def _change_layers_group(self, layers, group_id):
+        filtered_layers = [
+            layer
+            for layer in layers
+            if layer["group_id"] != group_id
+        ]
+        if filtered_layers:
+            self.log.info((
+                "Changing group of "
+                f"{','.join([l['name'] for l in filtered_layers])}"
+                f" to {group_id}"
+            ))
+            george_lines = [
+                f"tv_layercolor \"set\" {layer['layer_id']} {group_id}"
+                for layer in filtered_layers
+            ]
+            execute_george_through_file("\n".join(george_lines))
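+
+    # Example of the bookkeeping in 'create' above (names are hypothetical):
+    # if an existing pass owns layers ["L1", "L2"] and the user marks "L2"
+    # for a new pass, the existing pass keeps ["L1"] and the new pass gets
+    # ["L2"]. When all of its layers are taken over, the old instance is
+    # removed entirely.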
+
+    def _remove_and_filter_instances(self, instances_to_remove):
+        instances_data = self.host.list_instances()
+        if not instances_to_remove:
+            return instances_data
+
+        removed_ids = set()
+        for instance in instances_to_remove:
+            removed_ids.add(instance.id)
+            self._remove_instance_from_context(instance)
+
+        return [
+            instance_data
+            for instance_data in instances_data
+            if instance_data.get("instance_id") not in removed_ids
+        ]
+
+    def get_pre_create_attr_defs(self):
+        # Find available Render Layers
+        # - instances are created after creators reset
+        current_instances = self.host.list_instances()
+        render_layers = [
+            {
+                "value": inst["instance_id"],
+                "label": inst["subset"]
+            }
+            for inst in current_instances
+            if inst.get("creator_identifier") == CreateRenderlayer.identifier
+        ]
+        if not render_layers:
+            render_layers.append({"value": None, "label": "N/A"})
+
+        return [
+            EnumDef(
+                "render_layer_instance_id",
+                label="Render Layer",
+                items=render_layers
+            ),
+            UILabelDef(
+                "NOTE: Try to hit refresh if you don't see a Render Layer"
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+    def get_instance_attr_defs(self):
+        # Find available Render Layers
+        current_instances = self.create_context.instances
+        render_layers = [
+            {
+                "value": instance.id,
+                "label": instance.label
+            }
+            for instance in current_instances
+            if instance.creator_identifier == CreateRenderlayer.identifier
+        ]
+        if not render_layers:
+            render_layers.append({"value": None, "label": "N/A"})
+
+        return [
+            EnumDef(
+                "render_layer_instance_id",
+                label="Render Layer",
+                items=render_layers
+            ),
+            UILabelDef(
+                "NOTE: Try to hit refresh if you don't see a Render Layer"
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+
+class TVPaintAutoDetectRenderCreator(TVPaintCreator):
+    """Create Render Layer and Render Pass instances based on scene data.
+
+    This is an auto-detection creator which can be triggered by the user to
+    create instances based on information in the scene. Each color group used
+    in the scene is created as a Render Layer, where the group name is used
+    as a variant, and each TVPaint layer as a Render Pass, where the layer
+    name is used as a variant.
+
+    This creator never keeps any instances of its own; all created instances
+    belong to the two creators above.
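+
+    For example (illustrative scene): a color group named "G1" containing
+    TVPaint layers "head" and "body" results in one Render Layer instance
+    with variant "G1" and two Render Pass instances with variants "head"
+    and "body".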
+    """
+
+    family = "render"
+    label = "Render Layer/Passes"
+    identifier = "render.auto.detect.creator"
+    order = CreateRenderPass.order + 10
+    description = (
+        "Create Render Layers and Render Passes based on scene setup"
+    )
+    detailed_description = AUTODETECT_RENDER_DETAILED_DESCRIPTION
+
+    # Settings
+    enabled = False
+    allow_group_rename = True
+    group_name_template = "L{group_index}"
+    group_idx_offset = 10
+    group_idx_padding = 3
+
+    def apply_settings(self, project_settings):
+        plugin_settings = (
+            project_settings
+            ["tvpaint"]
+            ["create"]
+            ["auto_detect_render"]
+        )
+        self.enabled = plugin_settings.get("enabled", False)
+        self.allow_group_rename = plugin_settings["allow_group_rename"]
+        self.group_name_template = plugin_settings["group_name_template"]
+        self.group_idx_offset = plugin_settings["group_idx_offset"]
+        self.group_idx_padding = plugin_settings["group_idx_padding"]
+
+    def _rename_groups(
+        self,
+        groups_order: list[int],
+        scene_groups: list[dict[str, Any]]
+    ):
+        new_group_name_by_id: dict[int, str] = {}
+        groups_by_id: dict[int, dict[str, Any]] = {
+            group["group_id"]: group
+            for group in scene_groups
+        }
+        # Count only renamed groups
+        for idx, group_id in enumerate(groups_order):
+            group_index_value: str = (
+                "{{:0>{}}}"
+                .format(self.group_idx_padding)
+                .format((idx + 1) * self.group_idx_offset)
+            )
+            group_name_fill_values: dict[str, str] = {
+                "groupIdx": group_index_value,
+                "groupidx": group_index_value,
+                "group_idx": group_index_value,
+                "group_index": group_index_value,
+            }
+
+            group_name: str = self.group_name_template.format(
+                **group_name_fill_values
+            )
+            group: dict[str, Any] = groups_by_id[group_id]
+            if group["name"] != group_name:
+                new_group_name_by_id[group_id] = group_name
+
+        grg_lines: list[str] = []
+        for group_id, group_name in new_group_name_by_id.items():
+            group: dict[str, Any] = groups_by_id[group_id]
+            grg_line: str = (
+                "tv_layercolor \"setcolor\" {} {} {} {} {} \"{}\""
+            ).format(
+                group["clip_id"],
+                group_id,
+                group["red"],
+                group["green"],
+                group["blue"],
+                group_name
+            )
+            grg_lines.append(grg_line)
+            group["name"] = group_name
+
+        if grg_lines:
+            execute_george_through_file("\n".join(grg_lines))
+
+    def _prepare_render_layer(
+        self,
+        project_name: str,
+        asset_doc: dict[str, Any],
+        task_name: str,
+        group_id: int,
+        groups: list[dict[str, Any]],
+        mark_for_review: bool,
+        existing_instance: Optional[CreatedInstance] = None,
+    ) -> Union[CreatedInstance, None]:
+        match_group: Union[dict[str, Any], None] = next(
+            (
+                group
+                for group in groups
+                if group["group_id"] == group_id
+            ),
+            None
+        )
+        if not match_group:
+            return None
+
+        variant: str = match_group["name"]
+        creator: CreateRenderlayer = (
+            self.create_context.creators[CreateRenderlayer.identifier]
+        )
+
+        subset_name: str = creator.get_subset_name(
+            variant,
+            task_name,
+            asset_doc,
+            project_name,
+            host_name=self.create_context.host_name,
+        )
+        asset_name = get_asset_name_identifier(asset_doc)
+        if existing_instance is not None:
+            existing_instance["folderPath"] = asset_name
+            existing_instance["task"] = task_name
+            existing_instance["subset"] = subset_name
+            return existing_instance
+
+        instance_data: dict[str, str] = {
+            "folderPath": asset_name,
+            "task": task_name,
+            "family": creator.family,
+            "variant": variant,
+        }
+        pre_create_data: dict[str, str] = {
+            "group_id": group_id,
+            "mark_for_review": mark_for_review
+        }
+        return creator.create(subset_name, instance_data, pre_create_data)
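+
+    # With the default settings above, the template "L{group_index}" with
+    # offset 10 and padding 3 renames groups from bottom to top as:
+    #     1st group -> "L010", 2nd group -> "L020", 3rd group -> "L030", ...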
+
+    def _prepare_render_passes(
+        self,
+        project_name: str,
+        asset_doc: dict[str, Any],
+        task_name: str,
+        render_layer_instance: CreatedInstance,
+        layers: list[dict[str, Any]],
+        mark_for_review: bool,
+        existing_render_passes: list[CreatedInstance]
+    ):
+        creator: CreateRenderPass = (
+            self.create_context.creators[CreateRenderPass.identifier]
+        )
+        render_pass_by_layer_name = {}
+        for render_pass in existing_render_passes:
+            for layer_name in render_pass["layer_names"]:
+                render_pass_by_layer_name[layer_name] = render_pass
+
+        asset_name = get_asset_name_identifier(asset_doc)
+
+        for layer in layers:
+            layer_name = layer["name"]
+            variant = layer_name
+            render_pass = render_pass_by_layer_name.get(layer_name)
+            if render_pass is not None:
+                if len(render_pass["layer_names"]) > 1:
+                    variant = render_pass["variant"]
+
+            subset_name = creator.get_subset_name(
+                variant,
+                task_name,
+                asset_doc,
+                project_name,
+                host_name=self.create_context.host_name,
+                instance=render_pass
+            )
+
+            if render_pass is not None:
+                render_pass["folderPath"] = asset_name
+                render_pass["task"] = task_name
+                render_pass["subset"] = subset_name
+                continue
+
+            instance_data: dict[str, str] = {
+                "folderPath": asset_name,
+                "task": task_name,
+                "family": creator.family,
+                "variant": variant
+            }
+
+            pre_create_data: dict[str, Any] = {
+                "render_layer_instance_id": render_layer_instance.id,
+                "layer_names": [layer_name],
+                "mark_for_review": mark_for_review
+            }
+            creator.create(subset_name, instance_data, pre_create_data)
+
+    def _filter_groups(
+        self,
+        layers_by_group_id,
+        groups_order,
+        only_visible_groups
+    ):
+        new_groups_order = []
+        for group_id in groups_order:
+            layers: list[dict[str, Any]] = layers_by_group_id[group_id]
+            if not layers:
+                continue
+
+            if (
+                only_visible_groups
+                and not any(
+                    layer
+                    for layer in layers
+                    if layer["visible"]
+                )
+            ):
+                continue
+            new_groups_order.append(group_id)
+        return new_groups_order
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        project_name: str = self.create_context.get_current_project_name()
+        asset_name: str = instance_data["folderPath"]
+        task_name: str = instance_data["task"]
+        asset_doc: dict[str, Any] = get_asset_by_name(
+            project_name, asset_name)
+
+        render_layers_by_group_id: dict[int, CreatedInstance] = {}
+        render_passes_by_render_layer_id: dict[int, list[CreatedInstance]] = (
+            collections.defaultdict(list)
+        )
+        for instance in self.create_context.instances:
+            if instance.creator_identifier == CreateRenderlayer.identifier:
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+            elif instance.creator_identifier == CreateRenderPass.identifier:
+                render_layer_id = (
+                    instance
+                    ["creator_attributes"]
+                    ["render_layer_instance_id"]
+                )
+                render_passes_by_render_layer_id[render_layer_id].append(
+                    instance
+                )
+
+        layers_by_group_id: dict[int, list[dict[str, Any]]] = (
+            collections.defaultdict(list)
+        )
+        scene_layers: list[dict[str, Any]] = get_layers_data()
+        scene_groups: list[dict[str, Any]] = get_groups_data()
+        groups_order: list[int] = []
+        for layer in scene_layers:
+            group_id: int = layer["group_id"]
+            # Skip 'default' group
+            if group_id == 0:
+                continue
+
+            layers_by_group_id[group_id].append(layer)
+            if group_id not in groups_order:
+                groups_order.append(group_id)
+
+        groups_order.reverse()
+
+        mark_layers_for_review = pre_create_data.get(
+            "mark_layers_for_review", False
+        )
+        mark_passes_for_review = pre_create_data.get(
+            "mark_passes_for_review", False
+        )
+        rename_groups = pre_create_data.get("rename_groups", False)
+        
only_visible_groups = pre_create_data.get("only_visible_groups", False) + groups_order = self._filter_groups( + layers_by_group_id, + groups_order, + only_visible_groups + ) + if not groups_order: + return + + if rename_groups: + self._rename_groups(groups_order, scene_groups) + + # Make sure all render layers are created + for group_id in groups_order: + instance: Union[CreatedInstance, None] = ( + self._prepare_render_layer( + project_name, + asset_doc, + task_name, + group_id, + scene_groups, + mark_layers_for_review, + render_layers_by_group_id.get(group_id), + ) + ) + if instance is not None: + render_layers_by_group_id[group_id] = instance + + for group_id in groups_order: + layers: list[dict[str, Any]] = layers_by_group_id[group_id] + render_layer_instance: Union[CreatedInstance, None] = ( + render_layers_by_group_id.get(group_id) + ) + if not layers or render_layer_instance is None: + continue + + self._prepare_render_passes( + project_name, + asset_doc, + task_name, + render_layer_instance, + layers, + mark_passes_for_review, + render_passes_by_render_layer_id[render_layer_instance.id] + ) + + def get_pre_create_attr_defs(self) -> list[AbstractAttrDef]: + render_layer_creator: CreateRenderlayer = ( + self.create_context.creators[CreateRenderlayer.identifier] + ) + render_pass_creator: CreateRenderPass = ( + self.create_context.creators[CreateRenderPass.identifier] + ) + output = [] + if self.allow_group_rename: + output.extend([ + BoolDef( + "rename_groups", + label="Rename color groups", + tooltip="Will rename color groups using studio template", + default=True + ), + BoolDef( + "only_visible_groups", + label="Only visible color groups", + tooltip=( + "Render Layers and rename will happen only on color" + " groups with visible layers." + ), + default=True + ), + UISeparatorDef() + ]) + output.extend([ + BoolDef( + "mark_layers_for_review", + label="Mark RenderLayers for review", + default=render_layer_creator.mark_for_review + ), + BoolDef( + "mark_passes_for_review", + label="Mark RenderPasses for review", + default=render_pass_creator.mark_for_review + ) + ]) + return output + + +class TVPaintSceneRenderCreator(TVPaintAutoCreator): + family = "render" + subset_template_family_filter = "renderScene" + identifier = "render.scene" + label = "Scene Render" + icon = "fa.file-image-o" + + # Settings + default_pass_name = "beauty" + mark_for_review = True + active_on_create = False + + def apply_settings(self, project_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_render_scene"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + self.mark_for_review = plugin_settings["mark_for_review"] + self.active_on_create = plugin_settings["active_on_create"] + self.default_pass_name = plugin_settings["default_pass_name"] + + def get_dynamic_data(self, variant, *args, **kwargs): + dynamic_data = super().get_dynamic_data(variant, *args, **kwargs) + dynamic_data["renderpass"] = "{renderpass}" + dynamic_data["renderlayer"] = variant + return dynamic_data + + def _create_new_instance(self): + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, 
+ host_name + ) + data = { + "folderPath": asset_name, + "task": task_name, + "variant": self.default_variant, + "creator_attributes": { + "render_pass_name": self.default_pass_name, + "mark_for_review": True + }, + "label": self._get_label( + subset_name, + self.default_pass_name + ) + } + if not self.active_on_create: + data["active"] = False + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + return new_instance + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + if existing_instance is None: + return self._create_new_instance() + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + existing_name = existing_instance.get("folderPath") + if ( + existing_name != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["folderPath"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name + + existing_instance["label"] = self._get_label( + existing_instance["subset"], + existing_instance["creator_attributes"]["render_pass_name"] + ) + + def _get_label(self, subset_name, render_pass_name): + try: + subset_name = subset_name.format(**prepare_template_data({ + "renderpass": render_pass_name + })) + except (KeyError, ValueError): + pass + + return subset_name + + def get_instance_attr_defs(self): + return [ + TextDef( + "render_pass_name", + label="Pass Name", + default=self.default_pass_name, + tooltip=( + "Value is calculated during publishing and UI will update" + " label after refresh." 
+ ) + ), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] diff --git a/client/ayon_core/hosts/tvpaint/plugins/create/create_review.py b/client/ayon_core/hosts/tvpaint/plugins/create/create_review.py new file mode 100644 index 0000000000..773b85c1f5 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/plugins/create/create_review.py @@ -0,0 +1,82 @@ +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import CreatedInstance +from ayon_core.hosts.tvpaint.api.plugin import TVPaintAutoCreator + + +class TVPaintReviewCreator(TVPaintAutoCreator): + family = "review" + identifier = "scene.review" + label = "Review" + icon = "ei.video" + + # Settings + active_on_create = True + + def apply_settings(self, project_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_review"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + self.active_on_create = plugin_settings["active_on_create"] + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if existing_instance is None: + existing_asset_name = None + else: + existing_asset_name = existing_instance["folderPath"] + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "folderPath": asset_name, + "task": task_name, + "variant": self.default_variant, + } + + if not self.active_on_create: + data["active"] = False + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + + elif ( + existing_asset_name != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["folderPath"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/client/ayon_core/hosts/tvpaint/plugins/create/create_workfile.py b/client/ayon_core/hosts/tvpaint/plugins/create/create_workfile.py new file mode 100644 index 0000000000..f0d1c7bae6 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/plugins/create/create_workfile.py @@ -0,0 +1,75 @@ +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import CreatedInstance +from ayon_core.hosts.tvpaint.api.plugin import TVPaintAutoCreator + + +class TVPaintWorkfileCreator(TVPaintAutoCreator): + family = "workfile" + identifier = "workfile" + label = "Workfile" + icon = "fa.file-o" + + def apply_settings(self, project_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_workfile"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + + def 
create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if existing_instance is None: + existing_asset_name = None + else: + existing_asset_name = existing_instance["folderPath"] + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "folderPath": asset_name, + "task": task_name, + "variant": self.default_variant + } + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + + elif ( + existing_asset_name != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["folderPath"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/client/ayon_core/hosts/tvpaint/plugins/load/load_image.py b/client/ayon_core/hosts/tvpaint/plugins/load/load_image.py new file mode 100644 index 0000000000..924c0f2835 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/plugins/load/load_image.py @@ -0,0 +1,86 @@ +from ayon_core.lib.attribute_definitions import BoolDef +from ayon_core.hosts.tvpaint.api import plugin +from ayon_core.hosts.tvpaint.api.lib import execute_george_through_file + + +class ImportImage(plugin.Loader): + """Load image or image sequence to TVPaint as new layer.""" + + families = ["render", "image", "background", "plate", "review"] + representations = ["*"] + + label = "Import Image" + order = 1 + icon = "image" + color = "white" + + import_script = ( + "filepath = \"{}\"\n" + "layer_name = \"{}\"\n" + "tv_loadsequence filepath {}PARSE layer_id\n" + "tv_layerrename layer_id layer_name" + ) + + defaults = { + "stretch": True, + "timestretch": True, + "preload": True + } + + @classmethod + def get_options(cls, contexts): + return [ + BoolDef( + "stretch", + label="Stretch to project size", + default=cls.defaults["stretch"], + tooltip="Stretch loaded image/s to project resolution?" + ), + BoolDef( + "timestretch", + label="Stretch to timeline length", + default=cls.defaults["timestretch"], + tooltip="Clip loaded image/s to timeline length?" + ), + BoolDef( + "preload", + label="Preload loaded image/s", + default=cls.defaults["preload"], + tooltip="Preload image/s?" 
+            )
+        ]
+
+    def load(self, context, name, namespace, options):
+        stretch = options.get("stretch", self.defaults["stretch"])
+        timestretch = options.get("timestretch", self.defaults["timestretch"])
+        preload = options.get("preload", self.defaults["preload"])
+
+        load_options = []
+        if stretch:
+            load_options.append("\"STRETCH\"")
+        if timestretch:
+            load_options.append("\"TIMESTRETCH\"")
+        if preload:
+            load_options.append("\"PRELOAD\"")
+
+        load_options_str = ""
+        for load_option in load_options:
+            load_options_str += (load_option + " ")
+
+        # Prepare layer name
+        asset_name = context["asset"]["name"]
+        version_name = context["version"]["name"]
+        layer_name = "{}_{}_v{:0>3}".format(
+            asset_name,
+            name,
+            version_name
+        )
+        # Fill import script with filename and layer name
+        # - the filename must not contain backslashes
+        path = self.filepath_from_context(context).replace("\\", "/")
+        george_script = self.import_script.format(
+            path,
+            layer_name,
+            load_options_str
+        )
+        return execute_george_through_file(george_script)
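+
+    # For illustration, loading "C:/renders/bob.0001.png" as layer
+    # "bob_image_v001" with all options enabled builds roughly this George
+    # script from 'import_script' above (path and names are made up):
+    #
+    #     filepath = "C:/renders/bob.0001.png"
+    #     layer_name = "bob_image_v001"
+    #     tv_loadsequence filepath "STRETCH" "TIMESTRETCH" "PRELOAD" PARSE layer_id
+    #     tv_layerrename layer_id layer_name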
diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/client/ayon_core/hosts/tvpaint/plugins/load/load_reference_image.py
similarity index 97%
rename from openpype/hosts/tvpaint/plugins/load/load_reference_image.py
rename to client/ayon_core/hosts/tvpaint/plugins/load/load_reference_image.py
index 53061c6885..0a12e93f44 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py
+++ b/client/ayon_core/hosts/tvpaint/plugins/load/load_reference_image.py
@@ -1,16 +1,16 @@
 import collections
-from openpype.lib.attribute_definitions import BoolDef
-from openpype.pipeline import (
+from ayon_core.lib.attribute_definitions import BoolDef
+from ayon_core.pipeline import (
     get_representation_context,
     register_host,
 )
-from openpype.hosts.tvpaint.api import plugin
-from openpype.hosts.tvpaint.api.lib import (
+from ayon_core.hosts.tvpaint.api import plugin
+from ayon_core.hosts.tvpaint.api.lib import (
     get_layers_data,
     execute_george_through_file,
 )
-from openpype.hosts.tvpaint.api.pipeline import (
+from ayon_core.hosts.tvpaint.api.pipeline import (
     write_workfile_metadata,
     SECTION_NAME_CONTAINERS,
     containerise,
diff --git a/openpype/hosts/tvpaint/plugins/load/load_sound.py b/client/ayon_core/hosts/tvpaint/plugins/load/load_sound.py
similarity index 97%
rename from openpype/hosts/tvpaint/plugins/load/load_sound.py
rename to client/ayon_core/hosts/tvpaint/plugins/load/load_sound.py
index 3003280eef..86f3e6857f 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_sound.py
+++ b/client/ayon_core/hosts/tvpaint/plugins/load/load_sound.py
@@ -1,7 +1,7 @@
 import os
 import tempfile
-from openpype.hosts.tvpaint.api import plugin
-from openpype.hosts.tvpaint.api.lib import (
+from ayon_core.hosts.tvpaint.api import plugin
+from ayon_core.hosts.tvpaint.api.lib import (
     execute_george_through_file,
 )
diff --git a/client/ayon_core/hosts/tvpaint/plugins/load/load_workfile.py b/client/ayon_core/hosts/tvpaint/plugins/load/load_workfile.py
new file mode 100644
index 0000000000..e29ecfd442
--- /dev/null
+++ b/client/ayon_core/hosts/tvpaint/plugins/load/load_workfile.py
@@ -0,0 +1,115 @@
+import os
+
+from ayon_core.lib import StringTemplate
+from ayon_core.pipeline import (
+    registered_host,
+    get_current_context,
+    Anatomy,
+)
+from ayon_core.pipeline.workfile import (
+    get_workfile_template_key_from_context,
+    get_last_workfile_with_version,
+)
+from ayon_core.pipeline.template_data import get_template_data_with_names
+from ayon_core.hosts.tvpaint.api import plugin
+from ayon_core.hosts.tvpaint.api.lib import (
+    execute_george_through_file,
+)
+from ayon_core.hosts.tvpaint.api.pipeline import (
+    get_current_workfile_context,
+)
+from ayon_core.pipeline.version_start import get_versioning_start
+
+
+class LoadWorkfile(plugin.Loader):
+    """Load workfile."""
+
+    families = ["workfile"]
+    representations = ["tvpp"]
+
+    label = "Load Workfile"
+
+    def load(self, context, name, namespace, options):
+        # Load the context of the current workfile first
+        # - it tells which context and extension the file has
+        filepath = self.filepath_from_context(context)
+        filepath = filepath.replace("\\", "/")
+
+        if not os.path.exists(filepath):
+            raise FileNotFoundError(
+                "The loaded file does not exist. Try downloading it first."
+            )
+
+        host = registered_host()
+        current_file = host.get_current_workfile()
+        work_context = get_current_workfile_context()
+
+        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
+            filepath
+        )
+        execute_george_through_file(george_script)
+
+        # Save workfile.
+        host_name = "tvpaint"
+        project_name = work_context.get("project")
+        asset_name = work_context.get("asset")
+        task_name = work_context.get("task")
+        # For cases when there is a workfile without a work_context
+        if not asset_name:
+            context = get_current_context()
+            project_name = context["project_name"]
+            asset_name = context["asset_name"]
+            task_name = context["task_name"]
+
+        template_key = get_workfile_template_key_from_context(
+            asset_name,
+            task_name,
+            host_name,
+            project_name=project_name
+        )
+        anatomy = Anatomy(project_name)
+
+        data = get_template_data_with_names(
+            project_name, asset_name, task_name, host_name
+        )
+        data["root"] = anatomy.roots
+
+        file_template = anatomy.templates[template_key]["file"]
+
+        # Define saving file extension
+        extensions = host.get_workfile_extensions()
+        if current_file:
+            # Match the extension of current file
+            _, extension = os.path.splitext(current_file)
+        else:
+            # Fall back to the first extension supported for this host.
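+            # e.g. [".tvpp"] - assuming 'get_workfile_extensions' returns
+            # the extensions matching the "tvpp" representation above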
+ extension = extensions[0] + + data["ext"] = extension + + folder_template = anatomy.templates[template_key]["folder"] + work_root = StringTemplate.format_strict_template( + folder_template, data + ) + version = get_last_workfile_with_version( + work_root, file_template, data, extensions + )[1] + + if version is None: + version = get_versioning_start( + project_name, + "tvpaint", + task_name=task_name, + task_type=data["task"]["type"], + family="workfile" + ) + else: + version += 1 + + data["version"] = version + + filename = StringTemplate.format_strict_template( + file_template, data + ) + path = os.path.join(work_root, filename) + host.save_workfile(path) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/client/ayon_core/hosts/tvpaint/plugins/publish/collect_instance_frames.py similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/collect_instance_frames.py diff --git a/client/ayon_core/hosts/tvpaint/plugins/publish/collect_render_instances.py b/client/ayon_core/hosts/tvpaint/plugins/publish/collect_render_instances.py new file mode 100644 index 0000000000..029c4b7e18 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/collect_render_instances.py @@ -0,0 +1,114 @@ +import copy +import pyblish.api +from ayon_core.lib import prepare_template_data + + +class CollectRenderInstances(pyblish.api.InstancePlugin): + label = "Collect Render Instances" + order = pyblish.api.CollectorOrder - 0.4 + hosts = ["tvpaint"] + families = ["render", "review"] + + ignore_render_pass_transparency = False + + def process(self, instance): + context = instance.context + creator_identifier = instance.data["creator_identifier"] + if creator_identifier == "render.layer": + self._collect_data_for_render_layer(instance) + + elif creator_identifier == "render.pass": + self._collect_data_for_render_pass(instance) + + elif creator_identifier == "render.scene": + self._collect_data_for_render_scene(instance) + + else: + if creator_identifier == "scene.review": + self._collect_data_for_review(instance) + return + + subset_name = instance.data["subset"] + instance.data["name"] = subset_name + instance.data["label"] = "{} [{}-{}]".format( + subset_name, + context.data["sceneMarkIn"] + 1, + context.data["sceneMarkOut"] + 1 + ) + + def _collect_data_for_render_layer(self, instance): + instance.data["families"].append("renderLayer") + creator_attributes = instance.data["creator_attributes"] + group_id = creator_attributes["group_id"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + layers_data = instance.context.data["layersData"] + instance.data["layers"] = [ + copy.deepcopy(layer) + for layer in layers_data + if layer["group_id"] == group_id + ] + + def _collect_data_for_render_pass(self, instance): + instance.data["families"].append("renderPass") + + layer_names = set(instance.data["layer_names"]) + layers_data = instance.context.data["layersData"] + + creator_attributes = instance.data["creator_attributes"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + instance.data["layers"] = [ + copy.deepcopy(layer) + for layer in layers_data + if layer["name"] in layer_names + ] + instance.data["ignoreLayersTransparency"] = ( + self.ignore_render_pass_transparency + ) + + render_layer_data = None + render_layer_id = creator_attributes["render_layer_instance_id"] + for in_data in 
instance.context.data["workfileInstances"]: + if ( + in_data.get("creator_identifier") == "render.layer" + and in_data["instance_id"] == render_layer_id + ): + render_layer_data = in_data + break + + instance.data["renderLayerData"] = copy.deepcopy(render_layer_data) + # Invalid state + if render_layer_data is None: + return + render_layer_name = render_layer_data["variant"] + subset_name = instance.data["subset"] + instance.data["subset"] = subset_name.format( + **prepare_template_data({"renderlayer": render_layer_name}) + ) + + def _collect_data_for_render_scene(self, instance): + instance.data["families"].append("renderScene") + + creator_attributes = instance.data["creator_attributes"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + instance.data["layers"] = copy.deepcopy( + instance.context.data["layersData"] + ) + + render_pass_name = ( + instance.data["creator_attributes"]["render_pass_name"] + ) + subset_name = instance.data["subset"] + instance.data["subset"] = subset_name.format( + **prepare_template_data({"renderpass": render_pass_name}) + ) + + def _collect_data_for_review(self, instance): + instance.data["layers"] = copy.deepcopy( + instance.context.data["layersData"] + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/client/ayon_core/hosts/tvpaint/plugins/publish/collect_workfile.py similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/collect_workfile.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/collect_workfile.py diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/client/ayon_core/hosts/tvpaint/plugins/publish/collect_workfile_data.py similarity index 98% rename from openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/collect_workfile_data.py index 56b51c812a..9fbf67863a 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -4,14 +4,14 @@ import pyblish.api -from openpype.pipeline import legacy_io -from openpype.hosts.tvpaint.api.lib import ( +from ayon_core.pipeline import legacy_io +from ayon_core.hosts.tvpaint.api.lib import ( execute_george, execute_george_through_file, get_layers_data, get_groups_data, ) -from openpype.hosts.tvpaint.api.pipeline import ( +from ayon_core.hosts.tvpaint.api.pipeline import ( SECTION_NAME_CONTEXT, SECTION_NAME_INSTANCES, SECTION_NAME_CONTAINERS, diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py b/client/ayon_core/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py similarity index 97% rename from openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py index c10fc4de97..d1bc68ef35 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py @@ -8,12 +8,12 @@ import json import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( get_oiio_tool_args, ToolNotFoundError, run_subprocess, ) -from openpype.pipeline import KnownPublishError +from ayon_core.pipeline import KnownPublishError class ExtractConvertToEXR(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/client/ayon_core/hosts/tvpaint/plugins/publish/extract_sequence.py similarity index 99% rename from 
openpype/hosts/tvpaint/plugins/publish/extract_sequence.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/extract_sequence.py index fd568b2826..6d54d8ec32 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -6,17 +6,17 @@ import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( KnownPublishError, get_publish_instance_families, ) -from openpype.hosts.tvpaint.api.lib import ( +from ayon_core.hosts.tvpaint.api.lib import ( execute_george, execute_george_through_file, get_layers_pre_post_behavior, get_layers_exposure_frames, ) -from openpype.hosts.tvpaint.lib import ( +from ayon_core.hosts.tvpaint.lib import ( calculate_layers_extraction_data, get_frame_filename_template, fill_reference_frames, diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_asset_name.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_duplicated_layer_names.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_marks.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_marks.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_marks.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_missing_layer_names.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_render_pass_group.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml 
b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_scene_settings.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_start_frame.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml b/client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml similarity index 100% rename from openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml rename to client/ayon_core/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml diff --git a/client/ayon_core/hosts/tvpaint/plugins/publish/increment_workfile_version.py b/client/ayon_core/hosts/tvpaint/plugins/publish/increment_workfile_version.py new file mode 100644 index 0000000000..5dd6110bc7 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/increment_workfile_version.py @@ -0,0 +1,23 @@ +import pyblish.api + +from ayon_core.lib import version_up +from ayon_core.pipeline import registered_host + + +class IncrementWorkfileVersion(pyblish.api.ContextPlugin): + """Increment current workfile version.""" + + order = pyblish.api.IntegratorOrder + 1 + label = "Increment Workfile Version" + optional = True + hosts = ["tvpaint"] + + def process(self, context): + + assert all(result["success"] for result in context.data["results"]), ( + "Publishing not successful so version is not increased.") + + host = registered_host() + path = context.data["currentFile"] + host.save_workfile(version_up(path)) + self.log.info('Incrementing workfile version') diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_asset_name.py similarity index 80% rename from openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_asset_name.py index dc29e6c278..62603a460b 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_asset_name.py @@ -1,10 +1,9 @@ import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishXmlValidationError, OptionalPyblishPluginMixin, ) -from openpype.hosts.tvpaint.api.pipeline import ( +from ayon_core.hosts.tvpaint.api.pipeline import ( list_instances, write_instances, ) @@ -25,19 +24,12 @@ def process(self, context, plugin): old_instance_items = list_instances() new_instance_items = [] for instance_item in old_instance_items: - if AYON_SERVER_ENABLED: - instance_asset_name = instance_item.get("folderPath") - else: - instance_asset_name = instance_item.get("asset") - + instance_asset_name 
= instance_item.get("folderPath") if ( instance_asset_name and instance_asset_name != context_asset_name ): - if AYON_SERVER_ENABLED: - instance_item["folderPath"] = context_asset_name - else: - instance_item["asset"] = context_asset_name + instance_item["folderPath"] = context_asset_name new_instance_items.append(instance_item) write_instances(new_instance_items) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py similarity index 96% rename from openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py index 722d76b4d2..aab0557bdd 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from ayon_core.pipeline import PublishXmlValidationError class ValidateLayersGroup(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_layers_visibility.py similarity index 95% rename from openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_layers_visibility.py index 8e52a636f4..1bcdf7baa1 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_layers_visibility.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from ayon_core.pipeline import PublishXmlValidationError # TODO @iLLiCiTiT add repair action to disable instances? 
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_marks.py similarity index 97% rename from openpype/hosts/tvpaint/plugins/publish/validate_marks.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_marks.py index 7b2cc62bb5..6bfbe840bb 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_marks.py @@ -1,11 +1,11 @@ import json import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishXmlValidationError, OptionalPyblishPluginMixin, ) -from openpype.hosts.tvpaint.api.lib import execute_george +from ayon_core.hosts.tvpaint.api.lib import execute_george class ValidateMarksRepair(pyblish.api.Action): diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py similarity index 96% rename from openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py index 294ce6cf4f..3fc80f6e78 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_missing_layer_names.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from ayon_core.pipeline import PublishXmlValidationError class ValidateMissingLayers(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_render_layer_group.py similarity index 97% rename from openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_render_layer_group.py index bb0a9a4ffe..66793cbc7f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_render_layer_group.py @@ -1,6 +1,6 @@ import collections import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from ayon_core.pipeline import PublishXmlValidationError class ValidateRenderLayerGroups(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_render_pass_group.py similarity index 98% rename from openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_render_pass_group.py index 2a3173c698..874af38dd4 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_render_pass_group.py @@ -1,6 +1,6 @@ import collections import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from ayon_core.pipeline import PublishXmlValidationError class ValidateLayersGroup(pyblish.api.InstancePlugin): diff --git a/client/ayon_core/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_scene_settings.py new file mode 100644 index 0000000000..2268e59d88 --- /dev/null +++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_scene_settings.py @@ -0,0 +1,58 @@ +import json + +import pyblish.api +from ayon_core.pipeline import ( + PublishXmlValidationError, + 
OptionalPyblishPluginMixin,
+)
+
+
+# TODO @iLLiCiTiT add fix action for fps
+class ValidateProjectSettings(
+    OptionalPyblishPluginMixin,
+    pyblish.api.ContextPlugin
+):
+    """Validate scene settings against database."""
+
+    label = "Validate Scene Settings"
+    order = pyblish.api.ValidatorOrder
+    optional = True
+
+    def process(self, context):
+        if not self.is_active(context.data):
+            return
+
+        expected_data = context.data["assetEntity"]["data"]
+        scene_data = {
+            "fps": context.data.get("sceneFps"),
+            "resolutionWidth": context.data.get("sceneWidth"),
+            "resolutionHeight": context.data.get("sceneHeight"),
+            "pixelAspect": context.data.get("scenePixelAspect")
+        }
+        invalid = {}
+        for k in scene_data.keys():
+            expected_value = expected_data[k]
+            if scene_data[k] != expected_value:
+                invalid[k] = {
+                    "current": scene_data[k], "expected": expected_value
+                }
+
+        if not invalid:
+            return
+
+        raise PublishXmlValidationError(
+            self,
+            "Scene settings do not match the database:\n{}".format(
+                json.dumps(invalid, sort_keys=True, indent=4)
+            ),
+            formatting_data={
+                "expected_fps": expected_data["fps"],
+                "current_fps": scene_data["fps"],
+                "expected_width": expected_data["resolutionWidth"],
+                "expected_height": expected_data["resolutionHeight"],
+                "current_width": scene_data["resolutionWidth"],
+                "current_height": scene_data["resolutionHeight"],
+                "expected_pixel_ratio": expected_data["pixelAspect"],
+                "current_pixel_ratio": scene_data["pixelAspect"]
+            }
+        )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_start_frame.py
similarity index 91%
rename from openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_start_frame.py
index 229ccfcd18..fea64bd6a8 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
+++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_start_frame.py
@@ -1,9 +1,9 @@
 import pyblish.api
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     PublishXmlValidationError,
     OptionalPyblishPluginMixin,
 )
-from openpype.hosts.tvpaint.api.lib import execute_george
+from ayon_core.hosts.tvpaint.api.lib import execute_george
 
 
 class RepairStartFrame(pyblish.api.Action):
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
similarity index 98%
rename from openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
index b38231e208..bdc46d02cd 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
+++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py
@@ -1,5 +1,5 @@
 import pyblish.api
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     PublishXmlValidationError,
     PublishValidationError,
     registered_host,
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
similarity index 97%
rename from openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
rename to client/ayon_core/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
index 2ed5afa11c..be3259bfd8 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
+++ b/client/ayon_core/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
@@ -1,5 +1,5 @@
 import pyblish.api
-from openpype.pipeline import PublishXmlValidationError
+from ayon_core.pipeline import PublishXmlValidationError
 
 
 class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
diff --git a/openpype/hosts/tvpaint/resources/template.tvpp b/client/ayon_core/hosts/tvpaint/resources/template.tvpp
similarity index 100%
rename from openpype/hosts/tvpaint/resources/template.tvpp
rename to client/ayon_core/hosts/tvpaint/resources/template.tvpp
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/__init__.py b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/__init__.py
similarity index 100%
rename from openpype/hosts/tvpaint/tvpaint_plugin/__init__.py
rename to client/ayon_core/hosts/tvpaint/tvpaint_plugin/__init__.py
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/CMakeLists.txt b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/CMakeLists.txt
similarity index 100%
rename from openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/CMakeLists.txt
rename to client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/CMakeLists.txt
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md
similarity index 100%
rename from openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md
rename to client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp
similarity index 99%
rename from openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp
rename to client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp
index ec45a45123..c6c8ff244e 100644
--- a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp
+++ b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp
@@ -488,9 +488,9 @@ static char* GetLocalString( PIFilter* iFilter, int iNum, char* iDefault )
 std::string label_from_evn()
 {
     std::string _plugin_label = "OpenPype";
-    if (std::getenv("AVALON_LABEL") && std::getenv("AVALON_LABEL") != "")
+    if (std::getenv("AYON_MENU_LABEL") && std::getenv("AYON_MENU_LABEL")[0] != '\0')
     {
-        _plugin_label = std::getenv("AVALON_LABEL");
+        _plugin_label = std::getenv("AYON_MENU_LABEL");
     }
     return _plugin_label;
 }
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.def b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/library.def
similarity index 100%
rename from openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.def
rename to client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_code/library.def
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll
similarity index 100%
rename from openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll
rename to client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll b/client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll
similarity index 100%
rename from openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll
rename to client/ayon_core/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll
diff --git a/openpype/hosts/tvpaint/worker/__init__.py b/client/ayon_core/hosts/tvpaint/worker/__init__.py
similarity index 100%
rename from openpype/hosts/tvpaint/worker/__init__.py
rename to client/ayon_core/hosts/tvpaint/worker/__init__.py
diff --git a/openpype/hosts/tvpaint/worker/init_file.tvpp b/client/ayon_core/hosts/tvpaint/worker/init_file.tvpp
similarity index 100%
rename from openpype/hosts/tvpaint/worker/init_file.tvpp
rename to client/ayon_core/hosts/tvpaint/worker/init_file.tvpp
diff --git a/openpype/hosts/tvpaint/worker/worker.py b/client/ayon_core/hosts/tvpaint/worker/worker.py
similarity index 98%
rename from openpype/hosts/tvpaint/worker/worker.py
rename to client/ayon_core/hosts/tvpaint/worker/worker.py
index 9295c8afb4..3d9b1ef2b8 100644
--- a/openpype/hosts/tvpaint/worker/worker.py
+++ b/client/ayon_core/hosts/tvpaint/worker/worker.py
@@ -5,7 +5,7 @@ import shutil
 
 import asyncio
 
-from openpype.hosts.tvpaint.api.communication_server import (
+from ayon_core.hosts.tvpaint.api.communication_server import (
     BaseCommunicator,
     CommunicationWrapper
 )
diff --git a/openpype/hosts/tvpaint/worker/worker_job.py b/client/ayon_core/hosts/tvpaint/worker/worker_job.py
similarity index 98%
rename from openpype/hosts/tvpaint/worker/worker_job.py
rename to client/ayon_core/hosts/tvpaint/worker/worker_job.py
index 95c0a678bc..f111ed369a 100644
--- a/openpype/hosts/tvpaint/worker/worker_job.py
+++ b/client/ayon_core/hosts/tvpaint/worker/worker_job.py
@@ -9,8 +9,8 @@
 
 import six
 
-from openpype.lib import Logger
-from openpype.modules import ModulesManager
+from ayon_core.lib import Logger
+from ayon_core.addons import AddonsManager
 
 TMP_FILE_PREFIX = "opw_tvp_"
 
@@ -256,7 +256,7 @@ class CollectSceneData(BaseCommand):
     name = "collect_scene_data"
 
     def execute(self):
-        from openpype.hosts.tvpaint.api.lib import (
+        from ayon_core.hosts.tvpaint.api.lib import (
             get_layers_data,
             get_groups_data,
             get_layers_pre_post_behavior,
@@ -309,8 +309,8 @@ def __init__(self, workfile, job_queue_module=None):
         self._commands = []
         self._command_classes_by_name = None
         if job_queue_module is None:
-            manager = ModulesManager()
-            job_queue_module = manager.modules_by_name["job_queue"]
+            manager = AddonsManager()
+            job_queue_module = manager["job_queue"]
         self._job_queue_module = job_queue_module
 
         self._workfile = self._prepare_workfile(workfile)
diff --git a/openpype/hosts/unreal/README.md b/client/ayon_core/hosts/unreal/README.md
similarity index 100%
rename from openpype/hosts/unreal/README.md
rename to client/ayon_core/hosts/unreal/README.md
diff --git a/openpype/hosts/unreal/__init__.py b/client/ayon_core/hosts/unreal/__init__.py
similarity index 100%
rename from openpype/hosts/unreal/__init__.py
rename to client/ayon_core/hosts/unreal/__init__.py
diff --git a/client/ayon_core/hosts/unreal/addon.py b/client/ayon_core/hosts/unreal/addon.py
new file mode 100644
index 0000000000..745df951c1
--- /dev/null
+++ b/client/ayon_core/hosts/unreal/addon.py
@@ -0,0 +1,77 @@
+import os
+import re
+from ayon_core.modules import IHostAddon, OpenPypeModule
+
+UNREAL_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class UnrealAddon(OpenPypeModule, IHostAddon):
+    name = "unreal"
+    host_name = "unreal"
+
+    def initialize(self, module_settings):
+        self.enabled = True
+
+    def get_global_environments(self):
+        return {
+            "AYON_UNREAL_ROOT": UNREAL_ROOT_DIR,
+        }
+
+    def add_implementation_envs(self, env, app):
to contain all required for implementation.""" + # Set AYON_UNREAL_PLUGIN required for Unreal implementation + # Imports are in this method for Python 2 compatiblity of an addon + from pathlib import Path + + from .lib import get_compatible_integration + + from ayon_core.tools.utils import show_message_dialog + + pattern = re.compile(r'^\d+-\d+$') + + if not pattern.match(app.name): + msg = ( + "Unreal application key in the settings must be in format" + "'5-0' or '5-1'" + ) + show_message_dialog( + parent=None, + title="Unreal application name format", + message=msg, + level="critical") + raise ValueError(msg) + + ue_version = app.name.replace("-", ".") + unreal_plugin_path = os.path.join( + UNREAL_ROOT_DIR, "integration", "UE_{}".format(ue_version), "Ayon" + ) + if not Path(unreal_plugin_path).exists(): + compatible_versions = get_compatible_integration( + ue_version, Path(UNREAL_ROOT_DIR) / "integration" + ) + if compatible_versions: + unreal_plugin_path = compatible_versions[-1] / "Ayon" + unreal_plugin_path = unreal_plugin_path.as_posix() + + if not env.get("AYON_UNREAL_PLUGIN") or \ + env.get("AYON_UNREAL_PLUGIN") != unreal_plugin_path: + env["AYON_UNREAL_PLUGIN"] = unreal_plugin_path + + # Set default environments if are not set via settings + defaults = { + "AYON_LOG_NO_COLORS": "1", + "UE_PYTHONPATH": os.environ.get("PYTHONPATH", ""), + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(UNREAL_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".uproject"] diff --git a/openpype/hosts/unreal/api/__init__.py b/client/ayon_core/hosts/unreal/api/__init__.py similarity index 100% rename from openpype/hosts/unreal/api/__init__.py rename to client/ayon_core/hosts/unreal/api/__init__.py diff --git a/openpype/hosts/unreal/api/helpers.py b/client/ayon_core/hosts/unreal/api/helpers.py similarity index 100% rename from openpype/hosts/unreal/api/helpers.py rename to client/ayon_core/hosts/unreal/api/helpers.py diff --git a/client/ayon_core/hosts/unreal/api/pipeline.py b/client/ayon_core/hosts/unreal/api/pipeline.py new file mode 100644 index 0000000000..922fc8abd8 --- /dev/null +++ b/client/ayon_core/hosts/unreal/api/pipeline.py @@ -0,0 +1,802 @@ +# -*- coding: utf-8 -*- +import os +import json +import logging +from typing import List +from contextlib import contextmanager +import semver +import time + +import pyblish.api + +from ayon_core.client import get_asset_by_name, get_assets +from ayon_core.pipeline import ( + register_loader_plugin_path, + register_creator_plugin_path, + register_inventory_action_path, + deregister_loader_plugin_path, + deregister_creator_plugin_path, + deregister_inventory_action_path, + AYON_CONTAINER_ID, + get_current_project_name, +) +from ayon_core.tools.utils import host_tools +import ayon_core.hosts.unreal +from ayon_core.host import HostBase, ILoadHost, IPublishHost + +import unreal # noqa + +# Rename to Ayon once parent module renames +logger = logging.getLogger("ayon_core.hosts.unreal") + +AYON_CONTAINERS = "AyonContainers" +AYON_ASSET_DIR = "/Game/Ayon/Assets" +CONTEXT_CONTAINER = "Ayon/context.json" +UNREAL_VERSION = semver.VersionInfo( + *os.getenv("AYON_UNREAL_VERSION").split(".") +) + +HOST_DIR = os.path.dirname(os.path.abspath(ayon_core.hosts.unreal.__file__)) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = 
os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + + +class UnrealHost(HostBase, ILoadHost, IPublishHost): + """Unreal host implementation. + + For some time this class will re-use functions from module based + implementation for backwards compatibility of older unreal projects. + """ + + name = "unreal" + + def install(self): + install() + + def get_containers(self): + return ls() + + @staticmethod + def show_tools_popup(): + """Show tools popup with actions leading to show other tools.""" + show_tools_popup() + + @staticmethod + def show_tools_dialog(): + """Show tools dialog with actions leading to show other tools.""" + show_tools_dialog() + + def update_context_data(self, data, changes): + content_path = unreal.Paths.project_content_dir() + op_ctx = content_path + CONTEXT_CONTAINER + attempts = 3 + for i in range(attempts): + try: + with open(op_ctx, "w+") as f: + json.dump(data, f) + break + except IOError as e: + if i == attempts - 1: + raise Exception( + "Failed to write context data. Aborting.") from e + unreal.log_warning("Failed to write context data. Retrying...") + i += 1 + time.sleep(3) + continue + + def get_context_data(self): + content_path = unreal.Paths.project_content_dir() + op_ctx = content_path + CONTEXT_CONTAINER + if not os.path.isfile(op_ctx): + return {} + with open(op_ctx, "r") as fp: + data = json.load(fp) + return data + + +def install(): + """Install Unreal configuration for OpenPype.""" + print("-=" * 40) + logo = '''. +. + ยท + โ”‚ + ยทโˆ™/ + ยท-โˆ™โ€ขโˆ™-ยท + / \\ /โˆ™ยท / \\ + โˆ™ \\ โ”‚ / โˆ™ + \\ \\ ยท / / + \\\\ โˆ™ โˆ™ // + \\\\/ \\// + ___ + โ”‚ โ”‚ + โ”‚ โ”‚ + โ”‚ โ”‚ + โ”‚___โ”‚ + -ยท + + ยท-โ”€โ•โ”€-โˆ™ A Y O N โˆ™-โ”€โ•โ”€-ยท + by YNPUT +. +''' + print(logo) + print("installing Ayon for Unreal ...") + print("-=" * 40) + logger.info("installing Ayon for Unreal") + pyblish.api.register_host("unreal") + pyblish.api.register_plugin_path(str(PUBLISH_PATH)) + register_loader_plugin_path(str(LOAD_PATH)) + register_creator_plugin_path(str(CREATE_PATH)) + register_inventory_action_path(str(INVENTORY_PATH)) + _register_callbacks() + _register_events() + + +def uninstall(): + """Uninstall Unreal configuration for Ayon.""" + pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) + deregister_loader_plugin_path(str(LOAD_PATH)) + deregister_creator_plugin_path(str(CREATE_PATH)) + deregister_inventory_action_path(str(INVENTORY_PATH)) + + +def _register_callbacks(): + """ + TODO: Implement callbacks if supported by UE + """ + pass + + +def _register_events(): + """ + TODO: Implement callbacks if supported by UE + """ + pass + + +def ls(): + """List all containers. + + List all found in *Content Manager* of Unreal and return + metadata from them. Adding `objectName` to set. + + """ + ar = unreal.AssetRegistryHelpers.get_asset_registry() + # UE 5.1 changed how class name is specified + class_name = ["/Script/Ayon", "AyonAssetContainer"] if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0 else "AyonAssetContainer" # noqa + ayon_containers = ar.get_assets_by_class(class_name, True) + + # get_asset_by_class returns AssetData. To get all metadata we need to + # load asset. get_tag_values() work only on metadata registered in + # Asset Registry Project settings (and there is no way to set it with + # python short of editing ini configuration file). 
+    for asset_data in ayon_containers:
+        asset = asset_data.get_asset()
+        data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
+        data["objectName"] = asset_data.asset_name
+        yield cast_map_to_str_dict(data)
+
+
+def ls_inst():
+    ar = unreal.AssetRegistryHelpers.get_asset_registry()
+    # UE 5.1 changed how class name is specified
+    class_name = [
+        "/Script/Ayon",
+        "AyonPublishInstance"
+    ] if (
+        UNREAL_VERSION.major == 5
+        and UNREAL_VERSION.minor > 0
+    ) else "AyonPublishInstance"  # noqa
+    instances = ar.get_assets_by_class(class_name, True)
+
+    # get_asset_by_class returns AssetData. To get all metadata we need to
+    # load asset. get_tag_values() work only on metadata registered in
+    # Asset Registry Project settings (and there is no way to set it with
+    # python short of editing ini configuration file).
+    for asset_data in instances:
+        asset = asset_data.get_asset()
+        data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
+        data["objectName"] = asset_data.asset_name
+        yield cast_map_to_str_dict(data)
+
+
+def parse_container(container):
+    """To get data from container, AyonAssetContainer must be loaded.
+
+    Args:
+        container(str): path to container
+
+    Returns:
+        dict: metadata stored on container
+    """
+    asset = unreal.EditorAssetLibrary.load_asset(container)
+    data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
+    data["objectName"] = asset.get_name()
+    data = cast_map_to_str_dict(data)
+
+    return data
+
+
+def publish():
+    """Shorthand to publish from within host."""
+    import pyblish.util
+
+    return pyblish.util.publish()
+
+
+def containerise(name, namespace, nodes, context, loader=None, suffix="_CON"):
+    """Bundles *nodes* (assets) into a *container* and adds metadata to it.
+
+    Unreal doesn't support *groups* of assets that you can add metadata to.
+    But it does support folders that help to organize assets. Unfortunately
+    those folders are just that - you cannot add any additional information
+    to them. The AYON Integration Plugin provides a way out by implementing
+    the `AssetContainer` Blueprint class. When added to a folder, this class
+    can handle metadata on it using the standard
+    :func:`unreal.EditorAssetLibrary.set_metadata_tag()` and
+    :func:`unreal.EditorAssetLibrary.get_metadata_tag_values()`. It also
+    stores and monitors all changes to assets in the path where it resides.
+    The list of those assets is available as the `assets` property.
+
+    This is a list of strings starting with the asset type and ending with
+    its path:
+    `Material /Game/Ayon/Test/TestMaterial.TestMaterial`
+
+    """
+    # 1 - create directory for container
+    root = "/Game"
+    container_name = f"{name}{suffix}"
+    new_name = move_assets_to_path(root, container_name, nodes)
+
+    # 2 - create Asset Container there
+    path = f"{root}/{new_name}"
+    create_container(container=container_name, path=path)
+
+    namespace = path
+
+    data = {
+        "schema": "ayon:container-2.0",
+        "id": AYON_CONTAINER_ID,
+        "name": new_name,
+        "namespace": namespace,
+        "loader": str(loader),
+        "representation": context["representation"]["_id"],
+    }
+    # 3 - imprint data
+    imprint(f"{path}/{container_name}", data)
+    return path
+
+
+def instantiate(root, name, data, assets=None, suffix="_INS"):
+    """Bundles *nodes* into a *container*.
+
+    Marks it with metadata as a publishable instance. If assets are
+    provided, they are moved to a new path where an `AyonPublishInstance`
+    class asset is created and imprinted with metadata.
+
+    This can then be collected for publishing by Pyblish for example.
+
+    Args:
+        root (str): root path where to create instance container
+        name (str): name of the container
+        data (dict): data to imprint on container
+        assets (list of str): list of asset paths to include in publish
+            instance
+        suffix (str): suffix string to append to instance name
+
+    """
+    container_name = f"{name}{suffix}"
+
+    # if we specify assets, create new folder and move them there. If not,
+    # just create empty folder
+    if assets:
+        new_name = move_assets_to_path(root, container_name, assets)
+    else:
+        new_name = create_folder(root, name)
+
+    path = f"{root}/{new_name}"
+    create_publish_instance(instance=container_name, path=path)
+
+    imprint(f"{path}/{container_name}", data)
+
+
+def imprint(node, data):
+    loaded_asset = unreal.EditorAssetLibrary.load_asset(node)
+    for key, value in data.items():
+        # Support values evaluated at imprint
+        if callable(value):
+            value = value()
+        # Unreal doesn't support NoneType in metadata values
+        if value is None:
+            value = ""
+        unreal.EditorAssetLibrary.set_metadata_tag(
+            loaded_asset, key, str(value)
+        )
+
+    with unreal.ScopedEditorTransaction("Ayon containerising"):
+        unreal.EditorAssetLibrary.save_asset(node)
+
+
+def show_tools_popup():
+    """Show popup with tools.
+
+    Popup will disappear on click or losing focus.
+    """
+    from ayon_core.hosts.unreal.api import tools_ui
+
+    tools_ui.show_tools_popup()
+
+
+def show_tools_dialog():
+    """Show dialog with tools.
+
+    Dialog will stay visible.
+    """
+    from ayon_core.hosts.unreal.api import tools_ui
+
+    tools_ui.show_tools_dialog()
+
+
+def show_creator():
+    host_tools.show_creator()
+
+
+def show_loader():
+    host_tools.show_loader(use_context=True)
+
+
+def show_publisher():
+    host_tools.show_publish()
+
+
+def show_manager():
+    host_tools.show_scene_inventory()
+
+
+def show_experimental_tools():
+    host_tools.show_experimental_tools_dialog()
+
+
+def create_folder(root: str, name: str) -> str:
+    """Create new folder.
+
+    If the folder exists, append a number at the end and try again,
+    incrementing if needed.
+
+    Args:
+        root (str): path root
+        name (str): folder name
+
+    Returns:
+        str: folder name
+
+    Example:
+        >>> create_folder("/Game", "Foo")
+        Foo
+        >>> create_folder("/Game", "Foo")
+        Foo1
+
+    """
+    eal = unreal.EditorAssetLibrary
+    index = 1
+    while True:
+        if eal.does_directory_exist(f"{root}/{name}"):
+            name = f"{name}{index}"
+            index += 1
+        else:
+            eal.make_directory(f"{root}/{name}")
+            break
+
+    return name
+
+
+def move_assets_to_path(root: str, name: str, assets: List[str]) -> str:
+    """Move (rename) a list of asset paths to a new destination.
+
+    Args:
+        root (str): root of the path (eg. `/Game`)
+        name (str): name of destination directory (eg. `Foo` )
+        assets (list of str): list of asset paths
+
+    Returns:
+        str: folder name
+
+    Example:
+        This will get paths of all assets under `/Game/Test` and move them
+        to `/Game/NewTest`. If `/Game/NewTest` already exists, then resulting
+        path will be `/Game/NewTest1`
+
+        >>> assets = unreal.EditorAssetLibrary.list_assets("/Game/Test")
+        >>> move_assets_to_path("/Game", "NewTest", assets)
+        NewTest
+
+    """
+    eal = unreal.EditorAssetLibrary
+    name = create_folder(root, name)
+
+    unreal.log(assets)
+    for asset in assets:
+        loaded = eal.load_asset(asset)
+        eal.rename_asset(asset, f"{root}/{name}/{loaded.get_name()}")
+
+    return name
+
+
+def create_container(container: str, path: str) -> unreal.Object:
+    """Helper function to create an Asset Container class on the given path.
+
+    This Asset Class helps to mark the given path as a Container
+    and enables asset version control on it.
+
+    Args:
+        container (str): Asset Container name
+        path (str): Path where to create the Asset Container. This path
+            should point into the container folder
+
+    Returns:
+        :class:`unreal.Object`: instance of created asset
+
+    Example:
+
+        create_container(
+            "modelingFooCharacter_CON",
+            "/Game/modelingFooCharacter_CON"
+        )
+
+    """
+    factory = unreal.AyonAssetContainerFactory()
+    tools = unreal.AssetToolsHelpers().get_asset_tools()
+
+    return tools.create_asset(container, path, None, factory)
+
+
+def create_publish_instance(instance: str, path: str) -> unreal.Object:
+    """Helper function to create an Ayon Publish Instance on the given path.
+
+    This behaves similarly as :func:`create_container`.
+
+    Args:
+        path (str): Path where to create the Publish Instance.
+            This path should point into the container folder
+        instance (str): Publish Instance name
+
+    Returns:
+        :class:`unreal.Object`: instance of created asset
+
+    Example:
+
+        create_publish_instance(
+            "modelingFooCharacter_INST",
+            "/Game/modelingFooCharacter_INST"
+        )
+
+    """
+    factory = unreal.AyonPublishInstanceFactory()
+    tools = unreal.AssetToolsHelpers().get_asset_tools()
+    return tools.create_asset(instance, path, None, factory)
+
+
+def cast_map_to_str_dict(umap) -> dict:
+    """Cast Unreal Map to dict.
+
+    Helper function to cast an Unreal Map object to a plain old python
+    dict. This will also cast values and keys to str. Useful for
+    metadata dicts.
+
+    Args:
+        umap: Unreal Map object
+
+    Returns:
+        dict
+
+    """
+    return {str(key): str(value) for (key, value) in umap.items()}
+
+
+def get_subsequences(sequence: unreal.LevelSequence):
+    """Get list of subsequences from sequence.
+
+    Args:
+        sequence (unreal.LevelSequence): Sequence
+
+    Returns:
+        list(unreal.LevelSequence): List of subsequences
+
+    """
+    tracks = sequence.get_master_tracks()
+    subscene_track = next(
+        (
+            t
+            for t in tracks
+            if t.get_class() == unreal.MovieSceneSubTrack.static_class()
+        ),
+        None,
+    )
+    if subscene_track is not None and subscene_track.get_sections():
+        return subscene_track.get_sections()
+    return []
+
+
+def set_sequence_hierarchy(
+    seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths
+):
+    # Get existing sequencer tracks or create them if they don't exist
+    tracks = seq_i.get_master_tracks()
+    subscene_track = None
+    visibility_track = None
+    for t in tracks:
+        if t.get_class() == unreal.MovieSceneSubTrack.static_class():
+            subscene_track = t
+        if (t.get_class() ==
+                unreal.MovieSceneLevelVisibilityTrack.static_class()):
+            visibility_track = t
+    if not subscene_track:
+        subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
+    if not visibility_track:
+        visibility_track = seq_i.add_master_track(
+            unreal.MovieSceneLevelVisibilityTrack)
+
+    # Create the sub-scene section
+    subscenes = subscene_track.get_sections()
+    subscene = None
+    for s in subscenes:
+        if s.get_editor_property('sub_sequence') == seq_j:
+            subscene = s
+            break
+    if not subscene:
+        subscene = subscene_track.add_section()
+        subscene.set_row_index(len(subscene_track.get_sections()))
+        subscene.set_editor_property('sub_sequence', seq_j)
+        subscene.set_range(
+            min_frame_j,
+            max_frame_j + 1)
+
+    # Create the visibility section
+    ar = unreal.AssetRegistryHelpers.get_asset_registry()
+    maps = []
+    for m in map_paths:
+        # Unreal requires loading the level to get the map name
+        unreal.EditorLevelLibrary.save_all_dirty_levels()
+        unreal.EditorLevelLibrary.load_level(m)
+
maps.append(str(ar.get_asset_by_object_path(m).asset_name)) + + vis_section = visibility_track.add_section() + index = len(visibility_track.get_sections()) + + vis_section.set_range( + min_frame_j, + max_frame_j + 1) + vis_section.set_visibility(unreal.LevelVisibility.VISIBLE) + vis_section.set_row_index(index) + vis_section.set_level_names(maps) + + if min_frame_j > 1: + hid_section = visibility_track.add_section() + hid_section.set_range( + 1, + min_frame_j) + hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + if max_frame_j < max_frame_i: + hid_section = visibility_track.add_section() + hid_section.set_range( + max_frame_j + 1, + max_frame_i + 1) + hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + + +def generate_sequence(h, h_dir): + tools = unreal.AssetToolsHelpers().get_asset_tools() + + sequence = tools.create_asset( + asset_name=h, + package_path=h_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + project_name = get_current_project_name() + asset_data = get_asset_by_name( + project_name, + h_dir.split('/')[-1], + fields=["_id", "data.fps"] + ) + + start_frames = [] + end_frames = [] + + elements = list(get_assets( + project_name, + parent_ids=[asset_data["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + for e in elements: + start_frames.append(e.get('data').get('clipIn')) + end_frames.append(e.get('data').get('clipOut')) + + elements.extend(get_assets( + project_name, + parent_ids=[e["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + + min_frame = min(start_frames) + max_frame = max(end_frames) + + fps = asset_data.get('data').get("fps") + + sequence.set_display_rate( + unreal.FrameRate(fps, 1.0)) + sequence.set_playback_start(min_frame) + sequence.set_playback_end(max_frame) + + sequence.set_work_range_start(min_frame / fps) + sequence.set_work_range_end(max_frame / fps) + sequence.set_view_range_start(min_frame / fps) + sequence.set_view_range_end(max_frame / fps) + + tracks = sequence.get_master_tracks() + track = None + for t in tracks: + if (t.get_class() == + unreal.MovieSceneCameraCutTrack.static_class()): + track = t + break + if not track: + track = sequence.add_master_track( + unreal.MovieSceneCameraCutTrack) + + return sequence, (min_frame, max_frame) + + +def _get_comps_and_assets( + component_class, asset_class, old_assets, new_assets, selected +): + eas = unreal.get_editor_subsystem(unreal.EditorActorSubsystem) + + components = [] + if selected: + sel_actors = eas.get_selected_level_actors() + for actor in sel_actors: + comps = actor.get_components_by_class(component_class) + components.extend(comps) + else: + comps = eas.get_all_level_actors_components() + components = [ + c for c in comps if isinstance(c, component_class) + ] + + # Get all the static meshes among the old assets in a dictionary with + # the name as key + selected_old_assets = {} + for a in old_assets: + asset = unreal.EditorAssetLibrary.load_asset(a) + if isinstance(asset, asset_class): + selected_old_assets[asset.get_name()] = asset + + # Get all the static meshes among the new assets in a dictionary with + # the name as key + selected_new_assets = {} + for a in new_assets: + asset = unreal.EditorAssetLibrary.load_asset(a) + if isinstance(asset, asset_class): + selected_new_assets[asset.get_name()] = asset + + return components, selected_old_assets, selected_new_assets + + 
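Editorial aside: `_get_comps_and_assets` above pairs old and new assets by name so that the `replace_*_actors` helpers defined right below can swap meshes on existing level actors. A usage sketch, runnable only inside the Unreal editor's Python session; the version paths are invented for illustration:

```python
import unreal

from ayon_core.hosts.unreal.api.pipeline import replace_static_mesh_actors

# Hypothetical paths: two published versions of the same asset.
old_assets = unreal.EditorAssetLibrary.list_assets(
    "/Game/Ayon/Assets/heroTree/v001")
new_assets = unreal.EditorAssetLibrary.list_assets(
    "/Game/Ayon/Assets/heroTree/v002")

# Swap meshes on all level actors; pass selected=True to limit the swap
# to the currently selected actors.
replace_static_mesh_actors(old_assets, new_assets, selected=False)
```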
+def replace_static_mesh_actors(old_assets, new_assets, selected): + smes = unreal.get_editor_subsystem(unreal.StaticMeshEditorSubsystem) + + static_mesh_comps, old_meshes, new_meshes = _get_comps_and_assets( + unreal.StaticMeshComponent, + unreal.StaticMesh, + old_assets, + new_assets, + selected + ) + + for old_name, old_mesh in old_meshes.items(): + new_mesh = new_meshes.get(old_name) + + if not new_mesh: + continue + + smes.replace_mesh_components_meshes( + static_mesh_comps, old_mesh, new_mesh) + + +def replace_skeletal_mesh_actors(old_assets, new_assets, selected): + skeletal_mesh_comps, old_meshes, new_meshes = _get_comps_and_assets( + unreal.SkeletalMeshComponent, + unreal.SkeletalMesh, + old_assets, + new_assets, + selected + ) + + for old_name, old_mesh in old_meshes.items(): + new_mesh = new_meshes.get(old_name) + + if not new_mesh: + continue + + for comp in skeletal_mesh_comps: + if comp.get_skeletal_mesh_asset() == old_mesh: + comp.set_skeletal_mesh_asset(new_mesh) + + +def replace_geometry_cache_actors(old_assets, new_assets, selected): + geometry_cache_comps, old_caches, new_caches = _get_comps_and_assets( + unreal.GeometryCacheComponent, + unreal.GeometryCache, + old_assets, + new_assets, + selected + ) + + for old_name, old_mesh in old_caches.items(): + new_mesh = new_caches.get(old_name) + + if not new_mesh: + continue + + for comp in geometry_cache_comps: + if comp.get_editor_property("geometry_cache") == old_mesh: + comp.set_geometry_cache(new_mesh) + + +def delete_asset_if_unused(container, asset_content): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + references = set() + + for asset_path in asset_content: + asset = ar.get_asset_by_object_path(asset_path) + refs = ar.get_referencers( + asset.package_name, + unreal.AssetRegistryDependencyOptions( + include_soft_package_references=False, + include_hard_package_references=True, + include_searchable_names=False, + include_soft_management_references=False, + include_hard_management_references=False + )) + if not refs: + continue + references = references.union(set(refs)) + + # Filter out references that are in the Temp folder + cleaned_references = { + ref for ref in references if not str(ref).startswith("/Temp/")} + + # Check which of the references are Levels + for ref in cleaned_references: + loaded_asset = unreal.EditorAssetLibrary.load_asset(ref) + if isinstance(loaded_asset, unreal.World): + # If there is at least a level, we can stop, we don't want to + # delete the container + return + + unreal.log("Previous version unused, deleting...") + + # No levels, delete the asset + unreal.EditorAssetLibrary.delete_directory(container["namespace"]) + + +@contextmanager +def maintained_selection(): + """Stub to be either implemented or replaced. + + This is needed for old publisher implementation, but + it is not supported (yet) in UE. 
+ """ + try: + yield + finally: + pass diff --git a/client/ayon_core/hosts/unreal/api/plugin.py b/client/ayon_core/hosts/unreal/api/plugin.py new file mode 100644 index 0000000000..ddf54f6c79 --- /dev/null +++ b/client/ayon_core/hosts/unreal/api/plugin.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +import ast +import collections +import sys +import six +from abc import ( + ABC, + ABCMeta, +) + +import unreal + +from .pipeline import ( + create_publish_instance, + imprint, + ls_inst, + UNREAL_VERSION +) +from ayon_core.lib import ( + BoolDef, + UILabelDef +) +from ayon_core.pipeline import ( + Creator, + LoaderPlugin, + CreatorError, + CreatedInstance +) + + +@six.add_metaclass(ABCMeta) +class UnrealBaseCreator(Creator): + """Base class for Unreal creator plugins.""" + root = "/Game/Ayon/AyonPublishInstances" + suffix = "_INS" + + @staticmethod + def cache_subsets(shared_data): + """Cache instances for Creators to shared data. + + Create `unreal_cached_subsets` key when needed in shared data and + fill it with all collected instances from the scene under its + respective creator identifiers. + + If legacy instances are detected in the scene, create + `unreal_cached_legacy_subsets` there and fill it with + all legacy subsets under family as a key. + + Args: + Dict[str, Any]: Shared data. + + Return: + Dict[str, Any]: Shared data dictionary. + + """ + if shared_data.get("unreal_cached_subsets") is None: + unreal_cached_subsets = collections.defaultdict(list) + unreal_cached_legacy_subsets = collections.defaultdict(list) + for instance in ls_inst(): + creator_id = instance.get("creator_identifier") + if creator_id: + unreal_cached_subsets[creator_id].append(instance) + else: + family = instance.get("family") + unreal_cached_legacy_subsets[family].append(instance) + + shared_data["unreal_cached_subsets"] = unreal_cached_subsets + shared_data["unreal_cached_legacy_subsets"] = ( + unreal_cached_legacy_subsets + ) + return shared_data + + def create(self, subset_name, instance_data, pre_create_data): + try: + instance_name = f"{subset_name}{self.suffix}" + pub_instance = create_publish_instance(instance_name, self.root) + + instance_data["subset"] = subset_name + instance_data["instance_path"] = f"{self.root}/{instance_name}" + + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self) + self._add_instance_to_context(instance) + + pub_instance.set_editor_property('add_external_assets', True) + assets = pub_instance.get_editor_property('asset_data_external') + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for member in pre_create_data.get("members", []): + obj = ar.get_asset_by_object_path(member).get_asset() + assets.add(obj) + + imprint(f"{self.root}/{instance_name}", instance.data_to_store()) + + return instance + + except Exception as er: + six.reraise( + CreatorError, + CreatorError(f"Creator error: {er}"), + sys.exc_info()[2]) + + def collect_instances(self): + # cache instances if missing + self.cache_subsets(self.collection_shared_data) + for instance in self.collection_shared_data[ + "unreal_cached_subsets"].get(self.identifier, []): + # Unreal saves metadata as string, so we need to convert it back + instance['creator_attributes'] = ast.literal_eval( + instance.get('creator_attributes', '{}')) + instance['publish_attributes'] = ast.literal_eval( + instance.get('publish_attributes', '{}')) + created_instance = CreatedInstance.from_existing(instance, self) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + 
for created_inst, changes in update_list: + instance_node = created_inst.get("instance_path", "") + + if not instance_node: + unreal.log_warning( + f"Instance node not found for {created_inst}") + continue + + new_values = { + key: changes[key].new_value + for key in changes.changed_keys + } + imprint( + instance_node, + new_values + ) + + def remove_instances(self, instances): + for instance in instances: + instance_node = instance.data.get("instance_path", "") + if instance_node: + unreal.EditorAssetLibrary.delete_asset(instance_node) + + self._remove_instance_from_context(instance) + + +@six.add_metaclass(ABCMeta) +class UnrealAssetCreator(UnrealBaseCreator): + """Base class for Unreal creator plugins based on assets.""" + + def create(self, subset_name, instance_data, pre_create_data): + """Create instance of the asset. + + Args: + subset_name (str): Name of the subset. + instance_data (dict): Data for the instance. + pre_create_data (dict): Data for the instance. + + Returns: + CreatedInstance: Created instance. + """ + try: + # Check if instance data has members, filled by the plugin. + # If not, use selection. + if not pre_create_data.get("members"): + pre_create_data["members"] = [] + + if pre_create_data.get("use_selection"): + utilib = unreal.EditorUtilityLibrary + sel_objects = utilib.get_selected_assets() + pre_create_data["members"] = [ + a.get_path_name() for a in sel_objects] + + super(UnrealAssetCreator, self).create( + subset_name, + instance_data, + pre_create_data) + + except Exception as er: + six.reraise( + CreatorError, + CreatorError(f"Creator error: {er}"), + sys.exc_info()[2]) + + def get_pre_create_attr_defs(self): + return [ + BoolDef("use_selection", label="Use selection", default=True) + ] + + +@six.add_metaclass(ABCMeta) +class UnrealActorCreator(UnrealBaseCreator): + """Base class for Unreal creator plugins based on actors.""" + + def create(self, subset_name, instance_data, pre_create_data): + """Create instance of the asset. + + Args: + subset_name (str): Name of the subset. + instance_data (dict): Data for the instance. + pre_create_data (dict): Data for the instance. + + Returns: + CreatedInstance: Created instance. + """ + try: + if UNREAL_VERSION.major == 5: + world = unreal.UnrealEditorSubsystem().get_editor_world() + else: + world = unreal.EditorLevelLibrary.get_editor_world() + + # Check if the level is saved + if world.get_path_name().startswith("/Temp/"): + raise CreatorError( + "Level must be saved before creating instances.") + + # Check if instance data has members, filled by the plugin. + # If not, use selection. 
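+        # (Editorial note, illustrative) "members" holds Unreal object paths,
+        # e.g. ["/Game/Ayon/Assets/heroTree/heroTree.heroTree"] - here they
+        # come from the selected level actors, while in UnrealAssetCreator
+        # they come from the selected content-browser assets.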
+ if not instance_data.get("members"): + actor_subsystem = unreal.EditorActorSubsystem() + sel_actors = actor_subsystem.get_selected_level_actors() + selection = [a.get_path_name() for a in sel_actors] + + instance_data["members"] = selection + + instance_data["level"] = world.get_path_name() + + super(UnrealActorCreator, self).create( + subset_name, + instance_data, + pre_create_data) + + except Exception as er: + six.reraise( + CreatorError, + CreatorError(f"Creator error: {er}"), + sys.exc_info()[2]) + + def get_pre_create_attr_defs(self): + return [ + UILabelDef("Select actors to create instance from them.") + ] + + +class Loader(LoaderPlugin, ABC): + """This serves as skeleton for future Ayon specific functionality""" + pass diff --git a/openpype/hosts/unreal/api/rendering.py b/client/ayon_core/hosts/unreal/api/rendering.py similarity index 96% rename from openpype/hosts/unreal/api/rendering.py rename to client/ayon_core/hosts/unreal/api/rendering.py index efe6fc54ad..8717788732 100644 --- a/openpype/hosts/unreal/api/rendering.py +++ b/client/ayon_core/hosts/unreal/api/rendering.py @@ -2,10 +2,10 @@ import unreal -from openpype.settings import get_project_settings -from openpype.pipeline import Anatomy -from openpype.hosts.unreal.api import pipeline -from openpype.widgets.message_window import Window +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import Anatomy +from ayon_core.hosts.unreal.api import pipeline +from ayon_core.tools.utils import show_message_dialog queue = None @@ -40,8 +40,7 @@ def start_rendering(): assets = unreal.EditorUtilityLibrary.get_selected_assets() if not assets: - Window( - parent=None, + show_message_dialog( title="No assets selected", message="No assets selected. Select a render instance.", level="warning") diff --git a/openpype/hosts/unreal/api/tools_ui.py b/client/ayon_core/hosts/unreal/api/tools_ui.py similarity index 95% rename from openpype/hosts/unreal/api/tools_ui.py rename to client/ayon_core/hosts/unreal/api/tools_ui.py index 5a4c689918..084da9a0f0 100644 --- a/openpype/hosts/unreal/api/tools_ui.py +++ b/client/ayon_core/hosts/unreal/api/tools_ui.py @@ -1,13 +1,13 @@ import sys from qtpy import QtWidgets, QtCore, QtGui -from openpype import ( +from ayon_core import ( resources, style ) -from openpype.tools.utils import host_tools -from openpype.tools.utils.lib import qt_app_context -from openpype.hosts.unreal.api import rendering +from ayon_core.tools.utils import host_tools +from ayon_core.tools.utils.lib import qt_app_context +from ayon_core.hosts.unreal.api import rendering class ToolsBtnsWidget(QtWidgets.QWidget): @@ -65,7 +65,7 @@ def __init__(self, *args, **kwargs): super(ToolsDialog, self).__init__(*args, **kwargs) self.setWindowTitle("Ayon tools") - icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) self.setWindowIcon(icon) self.setWindowFlags( diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/client/ayon_core/hosts/unreal/hooks/pre_workfile_preparation.py similarity index 97% rename from openpype/hosts/unreal/hooks/pre_workfile_preparation.py rename to client/ayon_core/hosts/unreal/hooks/pre_workfile_preparation.py index a635bd4cab..4317844ca5 100644 --- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py +++ b/client/ayon_core/hosts/unreal/hooks/pre_workfile_preparation.py @@ -8,19 +8,19 @@ from qtpy import QtCore -from openpype import resources -from openpype.lib.applications import ( +from ayon_core import 
resources
+from ayon_core.lib.applications import (
     PreLaunchHook,
     ApplicationLaunchFailed,
     LaunchTypes,
 )
-from openpype.pipeline.workfile import get_workfile_template_key
-import openpype.hosts.unreal.lib as unreal_lib
-from openpype.hosts.unreal.ue_workers import (
+from ayon_core.pipeline.workfile import get_workfile_template_key
+import ayon_core.hosts.unreal.lib as unreal_lib
+from ayon_core.hosts.unreal.ue_workers import (
     UEProjectGenerationWorker,
     UEPluginInstallWorker
 )
-from openpype.hosts.unreal.ui import SplashScreen
+from ayon_core.hosts.unreal.ui import SplashScreen
 
 
 class UnrealPrelaunchHook(PreLaunchHook):
diff --git a/client/ayon_core/hosts/unreal/integration b/client/ayon_core/hosts/unreal/integration
new file mode 160000
index 0000000000..6d2793170e
--- /dev/null
+++ b/client/ayon_core/hosts/unreal/integration
@@ -0,0 +1 @@
+Subproject commit 6d2793170ed57187842f683a943593973abcc337
diff --git a/client/ayon_core/hosts/unreal/lib.py b/client/ayon_core/hosts/unreal/lib.py
new file mode 100644
index 0000000000..fe9e239ed5
--- /dev/null
+++ b/client/ayon_core/hosts/unreal/lib.py
@@ -0,0 +1,539 @@
+# -*- coding: utf-8 -*-
+"""Unreal launching and project tools."""
+
+import json
+import os
+import platform
+import re
+import subprocess
+from collections import OrderedDict
+from distutils import dir_util
+from pathlib import Path
+from typing import List
+
+from ayon_core.settings import get_project_settings
+
+
+def get_engine_versions(env=None):
+    """Detect Unreal Engine versions.
+
+    This will try to detect the location and versions of installed Unreal
+    Engine. The location can be overridden by the `UNREAL_ENGINE_LOCATION`
+    environment variable.
+
+    .. deprecated:: 3.15.4
+
+    Args:
+        env (dict, optional): Environment to use.
+
+    Returns:
+        OrderedDict: Dictionary with version as a key and directory as
+            value, sorted by version in ascending order (the highest
+            version comes last).
+
+    Example:
+        >>> get_engine_versions()
+        {
+            "4.23": "C:/Epic Games/UE_4.23",
+            "4.24": "C:/Epic Games/UE_4.24"
+        }
+
+    """
+    env = env or os.environ
+    engine_locations = {}
+    try:
+        root, dirs, _ = next(os.walk(env["UNREAL_ENGINE_LOCATION"]))
+
+        for directory in dirs:
+            if directory.startswith("UE"):
+                try:
+                    ver = re.split(r"[-_]", directory)[1]
+                except IndexError:
+                    continue
+                engine_locations[ver] = os.path.join(root, directory)
+    except KeyError:
+        # environment variable not set
+        pass
+    except OSError:
+        # specified directory doesn't exist
+        pass
+    except StopIteration:
+        # specified directory doesn't exist
+        pass
+
+    # if we've got something, terminate auto-detection process
+    if engine_locations:
+        return OrderedDict(sorted(engine_locations.items()))
+
+    # else kick in platform specific detection
+    if platform.system().lower() == "windows":
+        return OrderedDict(sorted(_win_get_engine_versions().items()))
+    if platform.system().lower() == "linux":
+        # on linux, there is no installation and getting Unreal Engine
+        # involves a git clone, so we'll probably depend on
+        # `UNREAL_ENGINE_LOCATION`.
+ pass + if platform.system().lower() == "darwin": + return OrderedDict(sorted(_darwin_get_engine_version().items())) + + return OrderedDict() + + +def get_editor_exe_path(engine_path: Path, engine_version: str) -> Path: + """Get UE Editor executable path.""" + ue_path = engine_path / "Engine/Binaries" + if platform.system().lower() == "windows": + if engine_version.split(".")[0] == "4": + ue_path /= "Win64/UE4Editor.exe" + elif engine_version.split(".")[0] == "5": + ue_path /= "Win64/UnrealEditor.exe" + + elif platform.system().lower() == "linux": + ue_path /= "Linux/UE4Editor" + + elif platform.system().lower() == "darwin": + ue_path /= "Mac/UE4Editor" + + return ue_path + + +def _win_get_engine_versions(): + """Get Unreal Engine versions on Windows. + + If engines are installed via Epic Games Launcher then there is: + `%PROGRAMDATA%/Epic/UnrealEngineLauncher/LauncherInstalled.dat` + This file is JSON file listing installed stuff, Unreal engines + are marked with `"AppName" = "UE_X.XX"`` like `UE_4.24` + + .. deprecated:: 3.15.4 + + Returns: + dict: version as a key and path as a value. + + """ + install_json_path = os.path.join( + os.getenv("PROGRAMDATA"), + "Epic", + "UnrealEngineLauncher", + "LauncherInstalled.dat", + ) + + return _parse_launcher_locations(install_json_path) + + +def _darwin_get_engine_version() -> dict: + """Get Unreal Engine versions on MacOS. + + It works the same as on Windows, just JSON file location is different. + + .. deprecated:: 3.15.4 + + Returns: + dict: version as a key and path as a value. + + See Also: + :func:`_win_get_engine_versions`. + + """ + install_json_path = os.path.join( + os.getenv("HOME"), + "Library", + "Application Support", + "Epic", + "UnrealEngineLauncher", + "LauncherInstalled.dat", + ) + + return _parse_launcher_locations(install_json_path) + + +def _parse_launcher_locations(install_json_path: str) -> dict: + """This will parse locations from json file. + + .. deprecated:: 3.15.4 + + Args: + install_json_path (str): Path to `LauncherInstalled.dat`. + + Returns: + dict: with unreal engine versions as keys and + paths to those engine installations as value. + + """ + engine_locations = {} + if os.path.isfile(install_json_path): + with open(install_json_path, "r") as ilf: + try: + install_data = json.load(ilf) + except json.JSONDecodeError as e: + raise Exception( + "Invalid `LauncherInstalled.dat file. `" + "Cannot determine Unreal Engine location." + ) from e + + for installation in install_data.get("InstallationList", []): + if installation.get("AppName").startswith("UE_"): + ver = installation.get("AppName").split("_")[1] + engine_locations[ver] = installation.get("InstallLocation") + + return engine_locations + + +def create_unreal_project(project_name: str, + unreal_project_name: str, + ue_version: str, + pr_dir: Path, + engine_path: Path, + dev_mode: bool = False, + env: dict = None) -> None: + """This will create `.uproject` file at specified location. + + As there is no way I know to create a project via command line, this is + easiest option. Unreal project file is basically a JSON file. If we find + the `AYON_UNREAL_PLUGIN` environment variable we assume this is the + location of the Integration Plugin and we copy its content to the project + folder and enable this plugin. + + Args: + project_name (str): Name of the project in AYON. + unreal_project_name (str): Name of the project in Unreal. + ue_version (str): Unreal engine version (like 4.23). + pr_dir (Path): Path to directory where project will be created. 
+ engine_path (Path): Path to Unreal Engine installation. + dev_mode (bool, optional): Flag to trigger C++ style Unreal project + needing Visual Studio and other tools to compile plugins from + sources. This will trigger automatically if `Binaries` + directory is not found in plugin folders as this indicates + this is only source distribution of the plugin. Dev mode + is also set in Settings. + env (dict, optional): Environment to use. If not set, `os.environ`. + + Throws: + NotImplementedError: For unsupported platforms. + + Returns: + None + + Deprecated: + since 3.16.0 + + """ + env = env or os.environ + + preset = get_project_settings(project_name)["unreal"]["project_setup"] + ue_id = ".".join(ue_version.split(".")[:2]) + # get unreal engine identifier + # ------------------------------------------------------------------------- + # FIXME (antirotor): As of 4.26 this is problem with UE4 built from + # sources. In that case Engine ID is calculated per machine/user and not + # from Engine files as this code then reads. This then prevents UE4 + # to directly open project as it will complain about project being + # created in different UE4 version. When user convert such project + # to his UE4 version, Engine ID is replaced in uproject file. If some + # other user tries to open it, it will present him with similar error. + + # engine_path should be the location of UE_X.X folder + + ue_editor_exe: Path = get_editor_exe_path(engine_path, ue_version) + cmdlet_project: Path = get_path_to_cmdlet_project(ue_version) + + project_file = pr_dir / f"{unreal_project_name}.uproject" + + print("--- Generating a new project ...") + commandlet_cmd = [f'{ue_editor_exe.as_posix()}', + f'{cmdlet_project.as_posix()}', + f'-run=AyonGenerateProject', + f'{project_file.resolve().as_posix()}'] + + if dev_mode or preset["dev_mode"]: + commandlet_cmd.append('-GenerateCode') + + gen_process = subprocess.Popen(commandlet_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + for line in gen_process.stdout: + print(line.decode(), end='') + gen_process.stdout.close() + return_code = gen_process.wait() + + if return_code and return_code != 0: + raise RuntimeError( + (f"Failed to generate '{unreal_project_name}' project! 
" + f"Exited with return code {return_code}")) + + print("--- Project has been generated successfully.") + + with open(project_file.as_posix(), mode="r+") as pf: + pf_json = json.load(pf) + pf_json["EngineAssociation"] = get_build_id(engine_path, ue_version) + pf.seek(0) + json.dump(pf_json, pf, indent=4) + pf.truncate() + print(f'--- Engine ID has been written into the project file') + + if dev_mode or preset["dev_mode"]: + u_build_tool = get_path_to_ubt(engine_path, ue_version) + + arch = "Win64" + if platform.system().lower() == "windows": + arch = "Win64" + elif platform.system().lower() == "linux": + arch = "Linux" + elif platform.system().lower() == "darwin": + # we need to test this out + arch = "Mac" + + command1 = [u_build_tool.as_posix(), "-projectfiles", + f"-project={project_file}", "-progress"] + + subprocess.run(command1) + + command2 = [u_build_tool.as_posix(), + f"-ModuleWithSuffix={unreal_project_name},3555", arch, + "Development", "-TargetType=Editor", + f'-Project={project_file}', + f'{project_file}', + "-IgnoreJunk"] + + subprocess.run(command2) + + # ensure we have PySide2 installed in engine + python_path = None + if platform.system().lower() == "windows": + python_path = engine_path / ("Engine/Binaries/ThirdParty/" + "Python3/Win64/python.exe") + + if platform.system().lower() == "linux": + python_path = engine_path / ("Engine/Binaries/ThirdParty/" + "Python3/Linux/bin/python3") + + if platform.system().lower() == "darwin": + python_path = engine_path / ("Engine/Binaries/ThirdParty/" + "Python3/Mac/bin/python3") + + if not python_path: + raise NotImplementedError("Unsupported platform") + if not python_path.exists(): + raise RuntimeError(f"Unreal Python not found at {python_path}") + subprocess.check_call( + [python_path.as_posix(), "-m", "pip", "install", "pyside2"]) + + +def get_path_to_uat(engine_path: Path) -> Path: + if platform.system().lower() == "windows": + return engine_path / "Engine/Build/BatchFiles/RunUAT.bat" + + if platform.system().lower() in ["linux", "darwin"]: + return engine_path / "Engine/Build/BatchFiles/RunUAT.sh" + + +def get_compatible_integration( + ue_version: str, integration_root: Path) -> List[Path]: + """Get path to compatible version of integration plugin. + + This will try to get the closest compatible versions to the one + specified in sorted list. + + Args: + ue_version (str): version of the current Unreal Engine. + integration_root (Path): path to built-in integration plugins. + + Returns: + list of Path: Sorted list of paths closest to the specified + version. + + """ + major, minor = ue_version.split(".") + integration_paths = [p for p in integration_root.iterdir() + if p.is_dir()] + + compatible_versions = [] + for i in integration_paths: + # parse version from path + try: + i_major, i_minor = re.search( + r"(?P\d+).(?P\d+)$", i.name).groups() + except AttributeError: + # in case there is no match, just skip to next + continue + + # consider versions with different major so different that they + # are incompatible + if int(major) != int(i_major): + continue + + compatible_versions.append(i) + + sorted(set(compatible_versions)) + return compatible_versions + + +def get_path_to_cmdlet_project(ue_version: str) -> Path: + cmd_project = Path( + os.path.dirname(os.path.abspath(__file__))) + + # For now, only tested on Windows (For Linux and Mac + # it has to be implemented) + cmd_project /= f"integration/UE_{ue_version}" + + # if the integration doesn't exist for current engine version + # try to find the closest to it. 
+ if cmd_project.exists(): + return cmd_project / "CommandletProject/CommandletProject.uproject" + + if compatible_versions := get_compatible_integration( + ue_version, cmd_project.parent + ): + return compatible_versions[-1] / "CommandletProject/CommandletProject.uproject" # noqa: E501 + else: + raise RuntimeError( + ("There are no compatible versions of Unreal " + "integration plugin compatible with running version " + f"of Unreal Engine {ue_version}")) + + +def get_path_to_ubt(engine_path: Path, ue_version: str) -> Path: + u_build_tool_path = engine_path / "Engine/Binaries/DotNET" + + if ue_version.split(".")[0] == "4": + u_build_tool_path /= "UnrealBuildTool.exe" + elif ue_version.split(".")[0] == "5": + u_build_tool_path /= "UnrealBuildTool/UnrealBuildTool.exe" + + return Path(u_build_tool_path) + + +def get_build_id(engine_path: Path, ue_version: str) -> str: + ue_modules = Path() + if platform.system().lower() == "windows": + ue_modules_path = engine_path / "Engine/Binaries/Win64" + if ue_version.split(".")[0] == "4": + ue_modules_path /= "UE4Editor.modules" + elif ue_version.split(".")[0] == "5": + ue_modules_path /= "UnrealEditor.modules" + ue_modules = Path(ue_modules_path) + + if platform.system().lower() == "linux": + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + "Linux", "UE4Editor.modules")) + + if platform.system().lower() == "darwin": + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + "Mac", "UE4Editor.modules")) + + if ue_modules.exists(): + print("--- Loading Engine ID from modules file ...") + with open(ue_modules, "r") as mp: + loaded_modules = json.load(mp) + + if loaded_modules.get("BuildId"): + return "{" + loaded_modules.get("BuildId") + "}" + + +def check_built_plugin_existance(plugin_path) -> bool: + if not plugin_path: + return False + + integration_plugin_path = Path(plugin_path) + + if not integration_plugin_path.is_dir(): + raise RuntimeError("Path to the integration plugin is null!") + + if not (integration_plugin_path / "Binaries").is_dir() \ + or not (integration_plugin_path / "Intermediate").is_dir(): + return False + + return True + + +def copy_built_plugin(engine_path: Path, plugin_path: Path) -> None: + ayon_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" + + if not ayon_plugin_path.is_dir(): + ayon_plugin_path.mkdir(parents=True, exist_ok=True) + + engine_plugin_config_path: Path = ayon_plugin_path / "Config" + engine_plugin_config_path.mkdir(exist_ok=True) + + dir_util._path_created = {} + + dir_util.copy_tree(plugin_path.as_posix(), ayon_plugin_path.as_posix()) + + +def check_plugin_existence(engine_path: Path, env: dict = None) -> bool: + env = env or os.environ + integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) + + if not os.path.isdir(integration_plugin_path): + raise RuntimeError("Path to the integration plugin is null!") + + # Create a path to the plugin in the engine + op_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" + + if not op_plugin_path.is_dir(): + return False + + if not (op_plugin_path / "Binaries").is_dir() \ + or not (op_plugin_path / "Intermediate").is_dir(): + return False + + return True + + +def try_installing_plugin(engine_path: Path, env: dict = None) -> None: + env = env or os.environ + + integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) + + if not os.path.isdir(integration_plugin_path): + raise RuntimeError("Path to the integration plugin is null!") + + # Create a path to the plugin in the engine + 
op_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" + + if not op_plugin_path.is_dir(): + op_plugin_path.mkdir(parents=True, exist_ok=True) + + engine_plugin_config_path: Path = op_plugin_path / "Config" + engine_plugin_config_path.mkdir(exist_ok=True) + + dir_util._path_created = {} + + if not (op_plugin_path / "Binaries").is_dir() \ + or not (op_plugin_path / "Intermediate").is_dir(): + _build_and_move_plugin(engine_path, op_plugin_path, env) + + +def _build_and_move_plugin(engine_path: Path, + plugin_build_path: Path, + env: dict = None) -> None: + uat_path: Path = get_path_to_uat(engine_path) + + env = env or os.environ + integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) + + if uat_path.is_file(): + temp_dir: Path = integration_plugin_path.parent / "Temp" + temp_dir.mkdir(exist_ok=True) + uplugin_path: Path = integration_plugin_path / "Ayon.uplugin" + + # in order to successfully build the plugin, + # It must be built outside the Engine directory and then moved + build_plugin_cmd: List[str] = [f'{uat_path.as_posix()}', + 'BuildPlugin', + f'-Plugin={uplugin_path.as_posix()}', + f'-Package={temp_dir.as_posix()}'] + subprocess.run(build_plugin_cmd) + + # Copy the contents of the 'Temp' dir into the + # 'Ayon' directory in the engine + dir_util.copy_tree(temp_dir.as_posix(), plugin_build_path.as_posix()) + + # We need to also copy the config folder. + # The UAT doesn't include the Config folder in the build + plugin_install_config_path: Path = plugin_build_path / "Config" + integration_plugin_config_path = integration_plugin_path / "Config" + + dir_util.copy_tree(integration_plugin_config_path.as_posix(), + plugin_install_config_path.as_posix()) + + dir_util.remove_tree(temp_dir.as_posix()) diff --git a/openpype/hosts/resolve/otio/__init__.py b/client/ayon_core/hosts/unreal/plugins/__init__.py similarity index 100% rename from openpype/hosts/resolve/otio/__init__.py rename to client/ayon_core/hosts/unreal/plugins/__init__.py diff --git a/client/ayon_core/hosts/unreal/plugins/create/create_camera.py b/client/ayon_core/hosts/unreal/plugins/create/create_camera.py new file mode 100644 index 0000000000..f78de00f44 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/create/create_camera.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +import unreal + +from ayon_core.pipeline import CreatorError +from ayon_core.hosts.unreal.api.pipeline import UNREAL_VERSION +from ayon_core.hosts.unreal.api.plugin import ( + UnrealAssetCreator, +) + + +class CreateCamera(UnrealAssetCreator): + """Create Camera.""" + + identifier = "io.ayon.creators.unreal.camera" + label = "Camera" + family = "camera" + icon = "fa.camera" + + def create(self, subset_name, instance_data, pre_create_data): + if pre_create_data.get("use_selection"): + sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() + selection = [a.get_path_name() for a in sel_objects] + + if len(selection) != 1: + raise CreatorError("Please select only one object.") + + # Add the current level path to the metadata + if UNREAL_VERSION.major == 5: + world = unreal.UnrealEditorSubsystem().get_editor_world() + else: + world = unreal.EditorLevelLibrary.get_editor_world() + + instance_data["level"] = world.get_path_name() + + super(CreateCamera, self).create( + subset_name, + instance_data, + pre_create_data) diff --git a/client/ayon_core/hosts/unreal/plugins/create/create_layout.py b/client/ayon_core/hosts/unreal/plugins/create/create_layout.py new file mode 100644 index 0000000000..0ec8a8d445 --- /dev/null 
+++ b/client/ayon_core/hosts/unreal/plugins/create/create_layout.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +from ayon_core.hosts.unreal.api.plugin import ( + UnrealActorCreator, +) + + +class CreateLayout(UnrealActorCreator): + """Layout output for character rigs.""" + + identifier = "io.ayon.creators.unreal.layout" + label = "Layout" + family = "layout" + icon = "cubes" diff --git a/client/ayon_core/hosts/unreal/plugins/create/create_look.py b/client/ayon_core/hosts/unreal/plugins/create/create_look.py new file mode 100644 index 0000000000..ecc0783c35 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/create/create_look.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +import unreal + +from ayon_core.pipeline import CreatorError +from ayon_core.hosts.unreal.api.pipeline import ( + create_folder +) +from ayon_core.hosts.unreal.api.plugin import ( + UnrealAssetCreator +) +from ayon_core.lib import UILabelDef + + +class CreateLook(UnrealAssetCreator): + """Shader connections defining shape look.""" + + identifier = "io.ayon.creators.unreal.look" + label = "Look" + family = "look" + icon = "paint-brush" + + def create(self, subset_name, instance_data, pre_create_data): + # We need to set this to True for the parent class to work + pre_create_data["use_selection"] = True + sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() + selection = [a.get_path_name() for a in sel_objects] + + if len(selection) != 1: + raise CreatorError("Please select only one asset.") + + selected_asset = selection[0] + + look_directory = "/Game/Ayon/Looks" + + # Create the folder + folder_name = create_folder(look_directory, subset_name) + path = f"{look_directory}/{folder_name}" + + instance_data["look"] = path + + # Create a new cube static mesh + ar = unreal.AssetRegistryHelpers.get_asset_registry() + cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube") + + # Get the mesh of the selected object + original_mesh = ar.get_asset_by_object_path(selected_asset).get_asset() + materials = original_mesh.get_editor_property('static_materials') + + pre_create_data["members"] = [] + + # Add the materials to the cube + for material in materials: + mat_name = material.get_editor_property('material_slot_name') + object_path = f"{path}/{mat_name}.{mat_name}" + unreal_object = unreal.EditorAssetLibrary.duplicate_loaded_asset( + cube.get_asset(), object_path + ) + + # Remove the default material of the cube object + unreal_object.get_editor_property('static_materials').pop() + + unreal_object.add_material( + material.get_editor_property('material_interface')) + + pre_create_data["members"].append(object_path) + + unreal.EditorAssetLibrary.save_asset(object_path) + + super(CreateLook, self).create( + subset_name, + instance_data, + pre_create_data) + + def get_pre_create_attr_defs(self): + return [ + UILabelDef("Select the asset from which to create the look.") + ] diff --git a/client/ayon_core/hosts/unreal/plugins/create/create_render.py b/client/ayon_core/hosts/unreal/plugins/create/create_render.py new file mode 100644 index 0000000000..5bb782e7ea --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/create/create_render.py @@ -0,0 +1,276 @@ +# -*- coding: utf-8 -*- +from pathlib import Path + +import unreal + +from ayon_core.hosts.unreal.api.pipeline import ( + UNREAL_VERSION, + create_folder, + get_subsequences, +) +from ayon_core.hosts.unreal.api.plugin import ( + UnrealAssetCreator +) +from ayon_core.lib import ( + UILabelDef, + UISeparatorDef, + BoolDef, + NumberDef +) + + +class 
CreateRender(UnrealAssetCreator): + """Create instance for sequence for rendering""" + + identifier = "io.ayon.creators.unreal.render" + label = "Render" + family = "render" + icon = "eye" + + def create_instance( + self, instance_data, subset_name, pre_create_data, + selected_asset_path, master_seq, master_lvl, seq_data + ): + instance_data["members"] = [selected_asset_path] + instance_data["sequence"] = selected_asset_path + instance_data["master_sequence"] = master_seq + instance_data["master_level"] = master_lvl + instance_data["output"] = seq_data.get('output') + instance_data["frameStart"] = seq_data.get('frame_range')[0] + instance_data["frameEnd"] = seq_data.get('frame_range')[1] + + super(CreateRender, self).create( + subset_name, + instance_data, + pre_create_data) + + def create_with_new_sequence( + self, subset_name, instance_data, pre_create_data + ): + # If the option to create a new level sequence is selected, + # create a new level sequence and a master level. + + root = f"/Game/Ayon/Sequences" + + # Create a new folder for the sequence in root + sequence_dir_name = create_folder(root, subset_name) + sequence_dir = f"{root}/{sequence_dir_name}" + + unreal.log_warning(f"sequence_dir: {sequence_dir}") + + # Create the level sequence + asset_tools = unreal.AssetToolsHelpers.get_asset_tools() + seq = asset_tools.create_asset( + asset_name=subset_name, + package_path=sequence_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew()) + + seq.set_playback_start(pre_create_data.get("start_frame")) + seq.set_playback_end(pre_create_data.get("end_frame")) + + pre_create_data["members"] = [seq.get_path_name()] + + unreal.EditorAssetLibrary.save_asset(seq.get_path_name()) + + # Create the master level + if UNREAL_VERSION.major >= 5: + curr_level = unreal.LevelEditorSubsystem().get_current_level() + else: + world = unreal.EditorLevelLibrary.get_editor_world() + levels = unreal.EditorLevelUtils.get_levels(world) + curr_level = levels[0] if len(levels) else None + if not curr_level: + raise RuntimeError("No level loaded.") + curr_level_path = curr_level.get_outer().get_path_name() + + # If the level path does not start with "/Game/", the current + # level is a temporary, unsaved level. 
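The sequence created above uses the editor's AssetTools-plus-factory pattern; condensed into a reusable, editor-only sketch (names and range values are illustrative) before the save-guard below:

```python
import unreal


def make_level_sequence(name, package_path, start=0, end=150):
    """Create and save a LevelSequence asset in the given folder."""
    tools = unreal.AssetToolsHelpers.get_asset_tools()
    seq = tools.create_asset(
        asset_name=name,
        package_path=package_path,
        asset_class=unreal.LevelSequence,
        factory=unreal.LevelSequenceFactoryNew())
    # Playback range is expressed in frames at the sequence display rate.
    seq.set_playback_start(start)
    seq.set_playback_end(end)
    unreal.EditorAssetLibrary.save_asset(seq.get_path_name())
    return seq
```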
+        if curr_level_path.startswith("/Game/"):
+            if UNREAL_VERSION.major >= 5:
+                unreal.LevelEditorSubsystem().save_current_level()
+            else:
+                unreal.EditorLevelLibrary.save_current_level()
+
+        ml_path = f"{sequence_dir}/{subset_name}_MasterLevel"
+
+        if UNREAL_VERSION.major >= 5:
+            unreal.LevelEditorSubsystem().new_level(ml_path)
+        else:
+            unreal.EditorLevelLibrary.new_level(ml_path)
+
+        seq_data = {
+            "sequence": seq,
+            "output": f"{seq.get_name()}",
+            "frame_range": (
+                seq.get_playback_start(),
+                seq.get_playback_end())}
+
+        self.create_instance(
+            instance_data, subset_name, pre_create_data,
+            seq.get_path_name(), seq.get_path_name(), ml_path, seq_data)
+
+    def create_from_existing_sequence(
+        self, subset_name, instance_data, pre_create_data
+    ):
+        ar = unreal.AssetRegistryHelpers.get_asset_registry()
+
+        sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
+        selection = [
+            a.get_path_name() for a in sel_objects
+            if a.get_class().get_name() == "LevelSequence"]
+
+        if len(selection) == 0:
+            raise RuntimeError("Please select at least one Level Sequence.")
+
+        seq_data = None
+
+        for sel in selection:
+            selected_asset = ar.get_asset_by_object_path(sel).get_asset()
+            selected_asset_path = selected_asset.get_path_name()
+
+            # Check if the selected asset is a level sequence asset.
+            if selected_asset.get_class().get_name() != "LevelSequence":
+                unreal.log_warning(
+                    f"Skipping {selected_asset.get_name()}. It isn't a Level "
+                    "Sequence.")
+                continue
+
+            if pre_create_data.get("use_hierarchy"):
+                # The asset name is the third element of the path which
+                # contains the map.
+                # To take the asset name, we remove from the path the prefix
+                # "/Game/Ayon/" and then we split the path by "/".
+                sel_path = selected_asset_path
+                asset_name = sel_path.replace(
+                    "/Game/Ayon/", "").split("/")[0]
+
+                search_path = f"/Game/Ayon/{asset_name}"
+            else:
+                search_path = Path(selected_asset_path).parent.as_posix()
+
+            # Get the master sequence and the master level.
+            # There should be only one sequence and one level in the directory.
+            try:
+                ar_filter = unreal.ARFilter(
+                    class_names=["LevelSequence"],
+                    package_paths=[search_path],
+                    recursive_paths=False)
+                sequences = ar.get_assets(ar_filter)
+                master_seq = sequences[0].get_asset().get_path_name()
+                master_seq_obj = sequences[0].get_asset()
+                ar_filter = unreal.ARFilter(
+                    class_names=["World"],
+                    package_paths=[search_path],
+                    recursive_paths=False)
+                levels = ar.get_assets(ar_filter)
+                master_lvl = levels[0].get_asset().get_path_name()
+            except IndexError:
+                raise RuntimeError(
+                    "Could not find the hierarchy for the selected sequence.")
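The two registry queries in the `try` block above share one pattern: filter by class inside a single folder and take the only hit. In isolation (the folder path is illustrative):

```python
import unreal


def find_one(class_name, folder):
    """Return the single asset of a class directly inside a folder."""
    ar = unreal.AssetRegistryHelpers.get_asset_registry()
    ar_filter = unreal.ARFilter(
        class_names=[class_name],
        package_paths=[folder],
        recursive_paths=False)
    assets = ar.get_assets(ar_filter)
    if not assets:
        raise RuntimeError(f"No {class_name} found in {folder}")
    return assets[0].get_asset()


# e.g. find_one("LevelSequence", "/Game/Ayon/sh010")
```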
+            # If the selected asset is the master sequence, we get its data
+            # and then we create the instance for the master sequence.
+            # Otherwise, we walk down from the master sequence to find the
+            # selected sequence and we get its data. This data will be used
+            # to create the instance for the selected sequence. In particular,
+            # we get the frame range of the selected sequence and its final
+            # output path.
+            master_seq_data = {
+                "sequence": master_seq_obj,
+                "output": f"{master_seq_obj.get_name()}",
+                "frame_range": (
+                    master_seq_obj.get_playback_start(),
+                    master_seq_obj.get_playback_end())}
+
+            if (selected_asset_path == master_seq or
+                    pre_create_data.get("use_hierarchy")):
+                seq_data = master_seq_data
+            else:
+                seq_data_list = [master_seq_data]
+
+                for seq in seq_data_list:
+                    subscenes = get_subsequences(seq.get('sequence'))
+
+                    for sub_seq in subscenes:
+                        sub_seq_obj = sub_seq.get_sequence()
+                        curr_data = {
+                            "sequence": sub_seq_obj,
+                            "output": (f"{seq.get('output')}/"
+                                       f"{sub_seq_obj.get_name()}"),
+                            "frame_range": (
+                                sub_seq.get_start_frame(),
+                                sub_seq.get_end_frame() - 1)}
+
+                        # If the selected asset is the current sub-sequence,
+                        # we get its data and we break the loop.
+                        # Otherwise, we add the current sub-sequence data to
+                        # the list of sequences to check.
+                        if sub_seq_obj.get_path_name() == selected_asset_path:
+                            seq_data = curr_data
+                            break
+
+                        seq_data_list.append(curr_data)
+
+                    # If we found the selected asset, we break the loop.
+                    if seq_data is not None:
+                        break
+
+            # If we didn't find the selected asset, we don't create the
+            # instance.
+            if not seq_data:
+                unreal.log_warning(
+                    f"Skipping {selected_asset.get_name()}. It isn't a "
+                    "sub-sequence of the master sequence.")
+                continue
+
+            self.create_instance(
+                instance_data, subset_name, pre_create_data,
+                selected_asset_path, master_seq, master_lvl, seq_data)
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        if pre_create_data.get("create_seq"):
+            self.create_with_new_sequence(
+                subset_name, instance_data, pre_create_data)
+        else:
+            self.create_from_existing_sequence(
+                subset_name, instance_data, pre_create_data)
+
+    def get_pre_create_attr_defs(self):
+        return [
+            UILabelDef(
+                "Select a Level Sequence to render or create a new one."
+            ),
+            BoolDef(
+                "create_seq",
+                label="Create a new Level Sequence",
+                default=False
+            ),
+            UILabelDef(
+                "WARNING: If you create a new Level Sequence, the current\n"
+                "level will be saved and a new Master Level will be created."
+            ),
+            NumberDef(
+                "start_frame",
+                label="Start Frame",
+                default=0,
+                minimum=-999999,
+                maximum=999999
+            ),
+            NumberDef(
+                "end_frame",
+                label="End Frame",
+                default=150,
+                minimum=-999999,
+                maximum=999999
+            ),
+            UISeparatorDef(),
+            UILabelDef(
+                "The following settings are valid only if you are not\n"
+                "creating a new sequence."
+ ), + BoolDef( + "use_hierarchy", + label="Use Hierarchy", + default=False + ), + ] diff --git a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py b/client/ayon_core/hosts/unreal/plugins/create/create_staticmeshfbx.py similarity index 85% rename from openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py rename to client/ayon_core/hosts/unreal/plugins/create/create_staticmeshfbx.py index 80816d8386..7fcd6f165a 100644 --- a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py +++ b/client/ayon_core/hosts/unreal/plugins/create/create_staticmeshfbx.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from openpype.hosts.unreal.api.plugin import ( +from ayon_core.hosts.unreal.api.plugin import ( UnrealAssetCreator, ) diff --git a/openpype/hosts/unreal/plugins/create/create_uasset.py b/client/ayon_core/hosts/unreal/plugins/create/create_uasset.py similarity index 95% rename from openpype/hosts/unreal/plugins/create/create_uasset.py rename to client/ayon_core/hosts/unreal/plugins/create/create_uasset.py index f70ecc55b3..500726497d 100644 --- a/openpype/hosts/unreal/plugins/create/create_uasset.py +++ b/client/ayon_core/hosts/unreal/plugins/create/create_uasset.py @@ -3,8 +3,8 @@ import unreal -from openpype.pipeline import CreatorError -from openpype.hosts.unreal.api.plugin import ( +from ayon_core.pipeline import CreatorError +from ayon_core.hosts.unreal.api.plugin import ( UnrealAssetCreator, ) diff --git a/openpype/hosts/unreal/plugins/inventory/delete_unused_assets.py b/client/ayon_core/hosts/unreal/plugins/inventory/delete_unused_assets.py similarity index 81% rename from openpype/hosts/unreal/plugins/inventory/delete_unused_assets.py rename to client/ayon_core/hosts/unreal/plugins/inventory/delete_unused_assets.py index 8320e3c92d..1f63a1697a 100644 --- a/openpype/hosts/unreal/plugins/inventory/delete_unused_assets.py +++ b/client/ayon_core/hosts/unreal/plugins/inventory/delete_unused_assets.py @@ -1,8 +1,8 @@ import unreal -from openpype.hosts.unreal.api.tools_ui import qt_app_context -from openpype.hosts.unreal.api.pipeline import delete_asset_if_unused -from openpype.pipeline import InventoryAction +from ayon_core.hosts.unreal.api.tools_ui import qt_app_context +from ayon_core.hosts.unreal.api.pipeline import delete_asset_if_unused +from ayon_core.pipeline import InventoryAction class DeleteUnusedAssets(InventoryAction): @@ -34,21 +34,21 @@ def _delete_unused_assets(self, containers): def _show_confirmation_dialog(self, containers): from qtpy import QtCore - from openpype.widgets import popup - from openpype.style import load_stylesheet + from ayon_core.tools.utils import SimplePopup + from ayon_core.style import load_stylesheet - dialog = popup.Popup() + dialog = SimplePopup() dialog.setWindowFlags( QtCore.Qt.Window | QtCore.Qt.WindowStaysOnTopHint ) dialog.setFocusPolicy(QtCore.Qt.StrongFocus) dialog.setWindowTitle("Delete all unused assets") - dialog.setMessage( + dialog.set_message( "You are about to delete all the assets in the project that \n" "are not used in any level. Are you sure you want to continue?" 
) - dialog.setButtonText("Delete") + dialog.set_button_text("Delete") dialog.on_clicked.connect( lambda: self._delete_unused_assets(containers) diff --git a/openpype/hosts/unreal/plugins/inventory/update_actors.py b/client/ayon_core/hosts/unreal/plugins/inventory/update_actors.py similarity index 96% rename from openpype/hosts/unreal/plugins/inventory/update_actors.py rename to client/ayon_core/hosts/unreal/plugins/inventory/update_actors.py index b0d941ba80..96965d68e6 100644 --- a/openpype/hosts/unreal/plugins/inventory/update_actors.py +++ b/client/ayon_core/hosts/unreal/plugins/inventory/update_actors.py @@ -1,12 +1,12 @@ import unreal -from openpype.hosts.unreal.api.pipeline import ( +from ayon_core.hosts.unreal.api.pipeline import ( ls, replace_static_mesh_actors, replace_skeletal_mesh_actors, replace_geometry_cache_actors, ) -from openpype.pipeline import InventoryAction +from ayon_core.pipeline import InventoryAction def update_assets(containers, selected): diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_animation.py b/client/ayon_core/hosts/unreal/plugins/load/load_alembic_animation.py similarity index 97% rename from openpype/hosts/unreal/plugins/load/load_alembic_animation.py rename to client/ayon_core/hosts/unreal/plugins/load/load_alembic_animation.py index 0328d2ae9f..4d7760e385 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_animation.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_alembic_animation.py @@ -2,12 +2,12 @@ """Load Alembic Animation.""" import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api import pipeline as unreal_pipeline +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa diff --git a/client/ayon_core/hosts/unreal/plugins/load/load_animation.py b/client/ayon_core/hosts/unreal/plugins/load/load_animation.py new file mode 100644 index 0000000000..4d44b6c0c2 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/load/load_animation.py @@ -0,0 +1,330 @@ +# -*- coding: utf-8 -*- +"""Load FBX with animations.""" +import os +import json + +import unreal +from unreal import EditorAssetLibrary +from unreal import MovieSceneSkeletalAnimationTrack +from unreal import MovieSceneSkeletalAnimationSection + +from ayon_core.pipeline.context_tools import get_current_project_asset +from ayon_core.pipeline import ( + get_representation_path, + AYON_CONTAINER_ID +) +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api import pipeline as unreal_pipeline + + +class AnimationFBXLoader(plugin.Loader): + """Load Unreal SkeletalMesh from FBX.""" + + families = ["animation"] + label = "Import FBX Animation" + representations = ["fbx"] + icon = "cube" + color = "orange" + + def _process(self, path, asset_dir, asset_name, instance_name): + automated = False + actor = None + + task = unreal.AssetImportTask() + task.options = unreal.FbxImportUI() + + if instance_name: + automated = True + # Old method to get the actor + # actor_name = 'PersistentLevel.' 
+ instance_name + # actor = unreal.EditorLevelLibrary.get_actor_reference(actor_name) + actors = unreal.EditorLevelLibrary.get_all_level_actors() + for a in actors: + if a.get_class().get_name() != "SkeletalMeshActor": + continue + if a.get_actor_label() == instance_name: + actor = a + break + if not actor: + raise Exception(f"Could not find actor {instance_name}") + skeleton = actor.skeletal_mesh_component.skeletal_mesh.skeleton + task.options.set_editor_property('skeleton', skeleton) + + if not actor: + return None + + asset_doc = get_current_project_asset(fields=["data.fps"]) + + task.set_editor_property('filename', path) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', automated) + task.set_editor_property('save', False) + + # set import options here + task.options.set_editor_property( + 'automated_import_should_detect_type', False) + task.options.set_editor_property( + 'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH) + task.options.set_editor_property( + 'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION) + task.options.set_editor_property('import_mesh', False) + task.options.set_editor_property('import_animations', True) + task.options.set_editor_property('override_full_name', True) + + task.options.anim_sequence_import_data.set_editor_property( + 'animation_length', + unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME + ) + task.options.anim_sequence_import_data.set_editor_property( + 'import_meshes_in_bone_hierarchy', False) + task.options.anim_sequence_import_data.set_editor_property( + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) + task.options.anim_sequence_import_data.set_editor_property( + 'import_custom_attribute', True) + task.options.anim_sequence_import_data.set_editor_property( + 'import_bone_tracks', True) + task.options.anim_sequence_import_data.set_editor_property( + 'remove_redundant_keys', False) + task.options.anim_sequence_import_data.set_editor_property( + 'convert_scene', True) + + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + + asset_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + animation = None + + for a in asset_content: + imported_asset_data = EditorAssetLibrary.find_asset_data(a) + imported_asset = unreal.AssetRegistryHelpers.get_asset( + imported_asset_data) + if imported_asset.__class__ == unreal.AnimSequence: + animation = imported_asset + break + + if animation: + animation.set_editor_property('enable_root_motion', True) + actor.skeletal_mesh_component.set_editor_property( + 'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE) + actor.skeletal_mesh_component.animation_data.set_editor_property( + 'anim_to_play', animation) + + return animation + + def load(self, context, name, namespace, options=None): + """ + Load and containerise representation into Content Browser. + + This is two step process. First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. 
+ This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + # Create directory for asset and Ayon container + hierarchy = context.get('asset').get('data').get('parents') + root = "/Game/Ayon" + asset = context.get('asset').get('name') + suffix = "_CON" + asset_name = f"{asset}_{name}" if asset else f"{name}" + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + f"{root}/Animations/{asset}/{name}", suffix="") + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{hierarchy[0]}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_asset().get_path_name() + + hierarchy_dir = root + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_dir = f"{hierarchy_dir}/{asset}" + + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{hierarchy_dir}/"], + recursive_paths=True) + levels = ar.get_assets(_filter) + level = levels[0].get_asset().get_path_name() + + unreal.EditorLevelLibrary.save_all_dirty_levels() + unreal.EditorLevelLibrary.load_level(level) + + container_name += suffix + + EditorAssetLibrary.make_directory(asset_dir) + + path = self.filepath_from_context(context) + libpath = path.replace(".fbx", ".json") + + with open(libpath, "r") as fp: + data = json.load(fp) + + instance_name = data.get("instance_name") + + animation = self._process(path, asset_dir, asset_name, instance_name) + + asset_content = EditorAssetLibrary.list_assets( + hierarchy_dir, recursive=True, include_folder=False) + + # Get the sequence for the layout, excluding the camera one. 
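A note on the sidecar convention used above: the loader expects a JSON file next to the published FBX, and judging by the key it reads, a minimal payload looks like the sketch below (the file names and any extra keys are assumptions). The hunk then continues by collecting the layout sequences, camera ones excluded.

```python
import json
from pathlib import Path

# Assumed pairing on disk:
#   animation_hero01.fbx
#   animation_hero01.json   ->   {"instance_name": "hero01_rig"}


def read_instance_name(fbx_path):
    """Read the actor label the animation should be bound to."""
    sidecar = Path(fbx_path).with_suffix(".json")
    with open(sidecar, "r") as fp:
        return json.load(fp).get("instance_name")
```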
+ sequences = [a for a in asset_content + if (EditorAssetLibrary.find_asset_data(a).get_class() == + unreal.LevelSequence.static_class() and + "_camera" not in a.split("/")[-1])] + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for s in sequences: + sequence = ar.get_asset_by_object_path(s).get_asset() + possessables = [ + p for p in sequence.get_possessables() + if p.get_display_name() == instance_name] + + for p in possessables: + tracks = [ + t for t in p.get_tracks() + if (t.get_class() == + MovieSceneSkeletalAnimationTrack.static_class())] + + for t in tracks: + sections = [ + s for s in t.get_sections() + if (s.get_class() == + MovieSceneSkeletalAnimationSection.static_class())] + + for s in sections: + s.params.set_editor_property('animation', animation) + + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) + + data = { + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data) + + imported_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=False) + + for a in imported_content: + EditorAssetLibrary.save_asset(a) + + unreal.EditorLevelLibrary.save_current_level() + unreal.EditorLevelLibrary.load_level(master_level) + + def update(self, container, representation): + name = container["asset_name"] + source_path = get_representation_path(representation) + asset_doc = get_current_project_asset(fields=["data.fps"]) + destination_path = container["namespace"] + + task = unreal.AssetImportTask() + task.options = unreal.FbxImportUI() + + task.set_editor_property('filename', source_path) + task.set_editor_property('destination_path', destination_path) + # strip suffix + task.set_editor_property('destination_name', name) + task.set_editor_property('replace_existing', True) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + task.options.set_editor_property( + 'automated_import_should_detect_type', False) + task.options.set_editor_property( + 'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH) + task.options.set_editor_property( + 'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION) + task.options.set_editor_property('import_mesh', False) + task.options.set_editor_property('import_animations', True) + task.options.set_editor_property('override_full_name', True) + + task.options.anim_sequence_import_data.set_editor_property( + 'animation_length', + unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME + ) + task.options.anim_sequence_import_data.set_editor_property( + 'import_meshes_in_bone_hierarchy', False) + task.options.anim_sequence_import_data.set_editor_property( + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) + task.options.anim_sequence_import_data.set_editor_property( + 'import_custom_attribute', True) + task.options.anim_sequence_import_data.set_editor_property( + 'import_bone_tracks', True) + task.options.anim_sequence_import_data.set_editor_property( + 'remove_redundant_keys', False) + 
task.options.anim_sequence_import_data.set_editor_property( + 'convert_scene', True) + + skeletal_mesh = EditorAssetLibrary.load_asset( + container.get('namespace') + "/" + container.get('asset_name')) + skeleton = skeletal_mesh.get_editor_property('skeleton') + task.options.set_editor_property('skeleton', skeleton) + + # do import fbx and replace existing data + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = f'{container["namespace"]}/{container["objectName"]}' + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = EditorAssetLibrary.list_assets( + destination_path, recursive=True, include_folder=True + ) + + for a in asset_content: + EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = os.path.dirname(path) + + EditorAssetLibrary.delete_directory(path) + + asset_content = EditorAssetLibrary.list_assets( + parent_path, recursive=False, include_folder=True + ) + + if len(asset_content) == 0: + EditorAssetLibrary.delete_directory(parent_path) diff --git a/client/ayon_core/hosts/unreal/plugins/load/load_camera.py b/client/ayon_core/hosts/unreal/plugins/load/load_camera.py new file mode 100644 index 0000000000..faba561085 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/load/load_camera.py @@ -0,0 +1,567 @@ +# -*- coding: utf-8 -*- +"""Load camera from FBX.""" +from pathlib import Path + +import unreal +from unreal import ( + EditorAssetLibrary, + EditorLevelLibrary, + EditorLevelUtils, + LevelSequenceEditorBlueprintLibrary as LevelSequenceLib, +) +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import ( + AYON_CONTAINER_ID, + get_current_project_name, +) +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api.pipeline import ( + generate_sequence, + set_sequence_hierarchy, + create_container, + imprint, +) + + +class CameraLoader(plugin.Loader): + """Load Unreal StaticMesh from FBX""" + + families = ["camera"] + label = "Load Camera" + representations = ["fbx"] + icon = "cube" + color = "orange" + + def _import_camera( + self, world, sequence, bindings, import_fbx_settings, import_filename + ): + ue_version = unreal.SystemLibrary.get_engine_version().split('.') + ue_major = int(ue_version[0]) + ue_minor = int(ue_version[1]) + + if ue_major == 4 and ue_minor <= 26: + unreal.SequencerTools.import_fbx( + world, + sequence, + bindings, + import_fbx_settings, + import_filename + ) + elif (ue_major == 4 and ue_minor >= 27) or ue_major == 5: + unreal.SequencerTools.import_level_sequence_fbx( + world, + sequence, + bindings, + import_fbx_settings, + import_filename + ) + else: + raise NotImplementedError( + f"Unreal version {ue_major} not supported") + + def load(self, context, name, namespace, data): + """ + Load and containerise representation into Content Browser. + + This is two step process. First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. 
+ data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + + # Create directory for asset and Ayon container + hierarchy = context.get('asset').get('data').get('parents') + root = "/Game/Ayon" + hierarchy_dir = root + hierarchy_dir_list = [] + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_dir_list.append(hierarchy_dir) + asset = context.get('asset').get('name') + suffix = "_CON" + asset_name = f"{asset}_{name}" if asset else f"{name}" + + tools = unreal.AssetToolsHelpers().get_asset_tools() + + # Create a unique name for the camera directory + unique_number = 1 + if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"): + asset_content = EditorAssetLibrary.list_assets( + f"{root}/{asset}", recursive=False, include_folder=True + ) + + # Get highest number to make a unique name + folders = [a for a in asset_content + if a[-1] == "/" and f"{name}_" in a] + # Get number from folder name. Splits the string by "_" and + # removes the last element (which is a "/"). + f_numbers = [int(f.split("_")[-1][:-1]) for f in folders] + f_numbers.sort() + unique_number = f_numbers[-1] + 1 if f_numbers else 1 + + asset_dir, container_name = tools.create_unique_asset_name( + f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="") + + container_name += suffix + + EditorAssetLibrary.make_directory(asset_dir) + + # Create map for the shot, and create hierarchy of map. If the maps + # already exist, we will use them. + h_dir = hierarchy_dir_list[0] + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + if not EditorAssetLibrary.does_asset_exist(master_level): + EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") + + level = f"{asset_dir}/{asset}_map_camera.{asset}_map_camera" + if not EditorAssetLibrary.does_asset_exist(level): + EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map_camera") + + EditorLevelLibrary.load_level(master_level) + EditorLevelUtils.add_level_to_world( + EditorLevelLibrary.get_editor_world(), + level, + unreal.LevelStreamingDynamic + ) + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(level) + + # Get all the sequences in the hierarchy. It will create them, if + # they don't exist. 
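The numbering scheme above deserves a closer look: camera folders are named `<name>_01`, `<name>_02`, and the next free suffix is one past the highest existing number. A pure-Python sketch of the same rule (the loader works on `EditorAssetLibrary` listings, where folder paths carry a trailing slash that it strips):

```python
import re


def next_unique_number(folder_names, name):
    """Return the next free NN for folders named '<name>_NN'."""
    numbers = []
    for folder in folder_names:
        match = re.fullmatch(rf"{re.escape(name)}_(\d+)", folder)
        if match:
            numbers.append(int(match.group(1)))
    return max(numbers) + 1 if numbers else 1


assert next_unique_number(["camera_01", "camera_03"], "camera") == 4
assert next_unique_number([], "camera") == 1
```

The hunk below then walks the shot hierarchy, reusing each level's sequence when it already exists and generating it otherwise.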
+ frame_ranges = [] + sequences = [] + for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): + root_content = EditorAssetLibrary.list_assets( + h_dir, recursive=False, include_folder=False) + + existing_sequences = [ + EditorAssetLibrary.find_asset_data(asset) + for asset in root_content + if EditorAssetLibrary.find_asset_data( + asset).get_class().get_name() == 'LevelSequence' + ] + + if existing_sequences: + for seq in existing_sequences: + sequences.append(seq.get_asset()) + frame_ranges.append(( + seq.get_asset().get_playback_start(), + seq.get_asset().get_playback_end())) + else: + sequence, frame_range = generate_sequence(h, h_dir) + + sequences.append(sequence) + frame_ranges.append(frame_range) + + EditorAssetLibrary.make_directory(asset_dir) + + cam_seq = tools.create_asset( + asset_name=f"{asset}_camera", + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + # Add sequences data to hierarchy + for i in range(len(sequences) - 1): + set_sequence_hierarchy( + sequences[i], sequences[i + 1], + frame_ranges[i][1], + frame_ranges[i + 1][0], frame_ranges[i + 1][1], + [level]) + + project_name = get_current_project_name() + data = get_asset_by_name(project_name, asset)["data"] + cam_seq.set_display_rate( + unreal.FrameRate(data.get("fps"), 1.0)) + cam_seq.set_playback_start(data.get('clipIn')) + cam_seq.set_playback_end(data.get('clipOut') + 1) + set_sequence_hierarchy( + sequences[-1], cam_seq, + frame_ranges[-1][1], + data.get('clipIn'), data.get('clipOut'), + [level]) + + settings = unreal.MovieSceneUserImportFBXSettings() + settings.set_editor_property('reduce_keys', False) + + if cam_seq: + path = self.filepath_from_context(context) + self._import_camera( + EditorLevelLibrary.get_editor_world(), + cam_seq, + cam_seq.get_bindings(), + settings, + path + ) + + # Set range of all sections + # Changing the range of the section is not enough. We need to change + # the frame of all the keys in the section. 
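The loop below implements what the comment above describes, and the retiming itself is a constant shift: every key moves by `clipIn - frameStart`. A worked example with assumed values:

```python
clip_in = 1001     # first frame of the shot in the edit
frame_start = 1    # first frame stored in the exported FBX
offset = clip_in - frame_start

for old_time in (1, 2, 50):
    print(f"{old_time} -> {old_time + offset}")
# 1 -> 1001
# 2 -> 1002
# 50 -> 1050
```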
+ for possessable in cam_seq.get_possessables(): + for tracks in possessable.get_tracks(): + for section in tracks.get_sections(): + section.set_range( + data.get('clipIn'), + data.get('clipOut') + 1) + for channel in section.get_all_channels(): + for key in channel.get_keys(): + old_time = key.get_time().get_editor_property( + 'frame_number') + old_time_value = old_time.get_editor_property( + 'value') + new_time = old_time_value + ( + data.get('clipIn') - data.get('frameStart') + ) + key.set_time(unreal.FrameNumber(value=new_time)) + + # Create Asset Container + create_container( + container=container_name, path=asset_dir) + + data = { + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + imprint(f"{asset_dir}/{container_name}", data) + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(master_level) + + # Save all assets in the hierarchy + asset_content = EditorAssetLibrary.list_assets( + hierarchy_dir_list[0], recursive=True, include_folder=False + ) + + for a in asset_content: + EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + curr_level_sequence = LevelSequenceLib.get_current_level_sequence() + curr_time = LevelSequenceLib.get_current_time() + is_cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport() + + editor_subsystem = unreal.UnrealEditorSubsystem() + vp_loc, vp_rot = editor_subsystem.get_level_viewport_camera_info() + + asset_dir = container.get('namespace') + + EditorLevelLibrary.save_current_level() + + _filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(_filter) + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[asset_dir], + recursive_paths=True) + maps = ar.get_assets(_filter) + + # There should be only one map in the list + EditorLevelLibrary.load_level(maps[0].get_asset().get_path_name()) + + level_sequence = sequences[0].get_asset() + + display_rate = level_sequence.get_display_rate() + playback_start = level_sequence.get_playback_start() + playback_end = level_sequence.get_playback_end() + + sequence_name = f"{container.get('asset')}_camera" + + # Get the actors in the level sequence. + objs = unreal.SequencerTools.get_bound_objects( + unreal.EditorLevelLibrary.get_editor_world(), + level_sequence, + level_sequence.get_bindings(), + unreal.SequencerScriptingRange( + has_start_value=True, + has_end_value=True, + inclusive_start=level_sequence.get_playback_start(), + exclusive_end=level_sequence.get_playback_end() + ) + ) + + # Delete actors from the map + for o in objs: + if o.bound_objects[0].get_class().get_name() == "CineCameraActor": + actor_path = o.bound_objects[0].get_path_name().split(":")[-1] + actor = EditorLevelLibrary.get_actor_reference(actor_path) + EditorLevelLibrary.destroy_actor(actor) + + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. 
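The traversal the comment above announces (and the hunk below spells out) is effectively a breadth-first walk over subscene tracks; condensed into a standalone, editor-only sketch:

```python
import unreal


def find_subsequence(master_sequence, name):
    """Walk subscene sections breadth-first until `name` is found.

    Returns the owning sequence and the matching section, or (None, None).
    """
    queue = [master_sequence]
    while queue:
        seq = queue.pop(0)
        for track in seq.get_master_tracks():
            if track.get_class() != unreal.MovieSceneSubTrack.static_class():
                continue
            for section in track.get_sections():
                sub_seq = section.get_sequence()
                if sub_seq.get_name() == name:
                    return seq, section
                queue.append(sub_seq)
    return None, None
```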
+ root = "/Game/Ayon" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + _filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(_filter) + master_sequence = sequences[0].get_asset() + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_asset().get_path_name() + + sequences = [master_sequence] + + parent = None + sub_scene = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + sub_scene = ss + break + sequences.append(ss.get_sequence()) + for i, ss in enumerate(sections): + ss.set_row_index(i) + if parent: + break + + assert parent, "Could not find the parent sequence" + + EditorAssetLibrary.delete_asset(level_sequence.get_path_name()) + + settings = unreal.MovieSceneUserImportFBXSettings() + settings.set_editor_property('reduce_keys', False) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + new_sequence = tools.create_asset( + asset_name=sequence_name, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + new_sequence.set_display_rate(display_rate) + new_sequence.set_playback_start(playback_start) + new_sequence.set_playback_end(playback_end) + + sub_scene.set_sequence(new_sequence) + + self._import_camera( + EditorLevelLibrary.get_editor_world(), + new_sequence, + new_sequence.get_bindings(), + settings, + str(representation["data"]["path"]) + ) + + # Set range of all sections + # Changing the range of the section is not enough. We need to change + # the frame of all the keys in the section. 
+ project_name = get_current_project_name() + asset = container.get('asset') + data = get_asset_by_name(project_name, asset)["data"] + + for possessable in new_sequence.get_possessables(): + for tracks in possessable.get_tracks(): + for section in tracks.get_sections(): + section.set_range( + data.get('clipIn'), + data.get('clipOut') + 1) + for channel in section.get_all_channels(): + for key in channel.get_keys(): + old_time = key.get_time().get_editor_property( + 'frame_number') + old_time_value = old_time.get_editor_property( + 'value') + new_time = old_time_value + ( + data.get('clipIn') - data.get('frameStart') + ) + key.set_time(unreal.FrameNumber(value=new_time)) + + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + } + imprint(f"{asset_dir}/{container.get('container_name')}", data) + + EditorLevelLibrary.save_current_level() + + asset_content = EditorAssetLibrary.list_assets( + f"{root}/{ms_asset}", recursive=True, include_folder=False) + + for a in asset_content: + EditorAssetLibrary.save_asset(a) + + EditorLevelLibrary.load_level(master_level) + + if curr_level_sequence: + LevelSequenceLib.open_level_sequence(curr_level_sequence) + LevelSequenceLib.set_current_time(curr_time) + LevelSequenceLib.set_lock_camera_cut_to_viewport(is_cam_lock) + + editor_subsystem.set_level_viewport_camera_info(vp_loc, vp_rot) + + def remove(self, container): + asset_dir = container.get('namespace') + path = Path(asset_dir) + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + _filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(_filter) + + if not sequences: + raise Exception("Could not find sequence.") + + world = ar.get_asset_by_object_path( + EditorLevelLibrary.get_editor_world().get_path_name()) + + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[asset_dir], + recursive_paths=True) + maps = ar.get_assets(_filter) + + # There should be only one map in the list + if not maps: + raise Exception("Could not find map.") + + map = maps[0] + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(map.get_asset().get_path_name()) + + # Remove the camera from the level. + actors = EditorLevelLibrary.get_all_level_actors() + + for a in actors: + if a.__class__ == unreal.CineCameraActor: + EditorLevelLibrary.destroy_actor(a) + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(world.get_asset().get_path_name()) + + # There should be only one sequence in the path. + sequence_name = sequences[0].asset_name + + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. 
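The removal walk below mirrors the update-side traversal, but additionally prunes level-visibility sections and re-packs row indexes so the remaining sections stay contiguous. The visibility half, isolated into an editor-only sketch:

```python
import unreal


def prune_visibility_sections(visibility_track, level_name):
    """Drop sections that reference `level_name`, then re-pack rows."""
    for section in list(visibility_track.get_sections()):
        if unreal.Name(level_name) in section.get_level_names():
            visibility_track.remove_section(section)
    # Consecutive sections sharing the same level names share a row.
    row = -1
    previous_names = []
    for section in visibility_track.get_sections():
        if previous_names != section.get_level_names():
            row += 1
        section.set_row_index(row)
        previous_names = section.get_level_names()
```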
+ root = "/Game/Ayon" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + _filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(_filter) + master_sequence = sequences[0].get_asset() + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_full_name() + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + visibility_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + unreal.MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. + for i, ss in enumerate(sections): + ss.set_row_index(i) + + if visibility_track: + sections = visibility_track.get_sections() + for ss in sections: + if (unreal.Name(f"{container.get('asset')}_map_camera") + in ss.get_level_names()): + visibility_track.remove_section(ss) + # Update visibility sections indexes. + i = -1 + prev_name = [] + for ss in sections: + if prev_name != ss.get_level_names(): + i += 1 + ss.set_row_index(i) + prev_name = ss.get_level_names() + if parent: + break + + assert parent, "Could not find the parent sequence" + + # Create a temporary level to delete the layout level. + EditorLevelLibrary.save_all_dirty_levels() + EditorAssetLibrary.make_directory(f"{root}/tmp") + tmp_level = f"{root}/tmp/temp_map" + if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"): + EditorLevelLibrary.new_level(tmp_level) + else: + EditorLevelLibrary.load_level(tmp_level) + + # Delete the layout directory. + EditorAssetLibrary.delete_directory(asset_dir) + + EditorLevelLibrary.load_level(master_level) + EditorAssetLibrary.delete_directory(f"{root}/tmp") + + # Check if there isn't any more assets in the parent folder, and + # delete it if not. 
+ asset_content = EditorAssetLibrary.list_assets( + path.parent.as_posix(), recursive=False, include_folder=True + ) + + if len(asset_content) == 0: + EditorAssetLibrary.delete_directory(path.parent.as_posix()) diff --git a/openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py b/client/ayon_core/hosts/unreal/plugins/load/load_geometrycache_abc.py similarity index 98% rename from openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py rename to client/ayon_core/hosts/unreal/plugins/load/load_geometrycache_abc.py index ec9c52b9fb..360737cbc5 100644 --- a/openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_geometrycache_abc.py @@ -2,12 +2,12 @@ """Loader for published alembics.""" import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api.pipeline import ( +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api.pipeline import ( AYON_ASSET_DIR, create_container, imprint, diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/client/ayon_core/hosts/unreal/plugins/load/load_layout.py similarity index 99% rename from openpype/hosts/unreal/plugins/load/load_layout.py rename to client/ayon_core/hosts/unreal/plugins/load/load_layout.py index 3b82da5068..a1cc2e785a 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_layout.py @@ -16,8 +16,8 @@ LevelSequenceEditorBlueprintLibrary as LevelSequenceLib, ) -from openpype.client import get_asset_by_name, get_representations -from openpype.pipeline import ( +from ayon_core.client import get_asset_by_name, get_representations +from ayon_core.pipeline import ( discover_loader_plugins, loaders_from_representation, load_container, @@ -25,10 +25,10 @@ AYON_CONTAINER_ID, get_current_project_name, ) -from openpype.pipeline.context_tools import get_current_project_asset -from openpype.settings import get_current_project_settings -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api.pipeline import ( +from ayon_core.pipeline.context_tools import get_current_project_asset +from ayon_core.settings import get_current_project_settings +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api.pipeline import ( generate_sequence, set_sequence_hierarchy, create_container, diff --git a/openpype/hosts/unreal/plugins/load/load_layout_existing.py b/client/ayon_core/hosts/unreal/plugins/load/load_layout_existing.py similarity index 98% rename from openpype/hosts/unreal/plugins/load/load_layout_existing.py rename to client/ayon_core/hosts/unreal/plugins/load/load_layout_existing.py index c53e92930a..6f390b4920 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout_existing.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_layout_existing.py @@ -4,8 +4,8 @@ import unreal from unreal import EditorLevelLibrary -from openpype.client import get_representations -from openpype.pipeline import ( +from ayon_core.client import get_representations +from ayon_core.pipeline import ( discover_loader_plugins, loaders_from_representation, load_container, @@ -13,8 +13,8 @@ AYON_CONTAINER_ID, get_current_project_name, ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api import pipeline as upipeline +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api import pipeline as 
upipeline class ExistingLayoutLoader(plugin.Loader): diff --git a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py b/client/ayon_core/hosts/unreal/plugins/load/load_skeletalmesh_abc.py similarity index 98% rename from openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py rename to client/ayon_core/hosts/unreal/plugins/load/load_skeletalmesh_abc.py index 8ebd9a82b6..225df3b440 100644 --- a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_skeletalmesh_abc.py @@ -2,12 +2,12 @@ """Load Skeletal Mesh alembics.""" import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api.pipeline import ( +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api.pipeline import ( AYON_ASSET_DIR, create_container, imprint, diff --git a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py b/client/ayon_core/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py similarity index 98% rename from openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py rename to client/ayon_core/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py index a5a8730732..1c45c58d02 100644 --- a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py @@ -2,12 +2,12 @@ """Load Skeletal Meshes form FBX.""" import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api.pipeline import ( +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api.pipeline import ( AYON_ASSET_DIR, create_container, imprint, diff --git a/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py b/client/ayon_core/hosts/unreal/plugins/load/load_staticmesh_abc.py similarity index 98% rename from openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py rename to client/ayon_core/hosts/unreal/plugins/load/load_staticmesh_abc.py index 019a95a9bf..a0814b5b07 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_staticmesh_abc.py @@ -2,12 +2,12 @@ """Loader for Static Mesh alembics.""" import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api.pipeline import ( +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api.pipeline import ( AYON_ASSET_DIR, create_container, imprint, diff --git a/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py b/client/ayon_core/hosts/unreal/plugins/load/load_staticmesh_fbx.py similarity index 97% rename from openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py rename to client/ayon_core/hosts/unreal/plugins/load/load_staticmesh_fbx.py index 66088d793c..a78b1bc959 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_staticmesh_fbx.py @@ -2,12 +2,12 @@ """Load Static meshes form FBX.""" import os -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api.pipeline import ( +from ayon_core.hosts.unreal.api import plugin +from 
ayon_core.hosts.unreal.api.pipeline import ( AYON_ASSET_DIR, create_container, imprint, diff --git a/openpype/hosts/unreal/plugins/load/load_uasset.py b/client/ayon_core/hosts/unreal/plugins/load/load_uasset.py similarity index 97% rename from openpype/hosts/unreal/plugins/load/load_uasset.py rename to client/ayon_core/hosts/unreal/plugins/load/load_uasset.py index dfd92d2fe5..048ec8eaba 100644 --- a/openpype/hosts/unreal/plugins/load/load_uasset.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_uasset.py @@ -3,12 +3,12 @@ from pathlib import Path import shutil -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api import pipeline as unreal_pipeline +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa diff --git a/openpype/hosts/unreal/plugins/load/load_yeticache.py b/client/ayon_core/hosts/unreal/plugins/load/load_yeticache.py similarity index 97% rename from openpype/hosts/unreal/plugins/load/load_yeticache.py rename to client/ayon_core/hosts/unreal/plugins/load/load_yeticache.py index 780ed7c484..b643f352b7 100644 --- a/openpype/hosts/unreal/plugins/load/load_yeticache.py +++ b/client/ayon_core/hosts/unreal/plugins/load/load_yeticache.py @@ -3,12 +3,12 @@ import os import json -from openpype.pipeline import ( +from ayon_core.pipeline import ( get_representation_path, AYON_CONTAINER_ID ) -from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api import pipeline as unreal_pipeline +from ayon_core.hosts.unreal.api import plugin +from ayon_core.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa diff --git a/openpype/hosts/unreal/plugins/publish/collect_current_file.py b/client/ayon_core/hosts/unreal/plugins/publish/collect_current_file.py similarity index 100% rename from openpype/hosts/unreal/plugins/publish/collect_current_file.py rename to client/ayon_core/hosts/unreal/plugins/publish/collect_current_file.py diff --git a/openpype/hosts/unreal/plugins/publish/collect_instance_members.py b/client/ayon_core/hosts/unreal/plugins/publish/collect_instance_members.py similarity index 100% rename from openpype/hosts/unreal/plugins/publish/collect_instance_members.py rename to client/ayon_core/hosts/unreal/plugins/publish/collect_instance_members.py diff --git a/openpype/hosts/unreal/plugins/publish/collect_remove_marked.py b/client/ayon_core/hosts/unreal/plugins/publish/collect_remove_marked.py similarity index 100% rename from openpype/hosts/unreal/plugins/publish/collect_remove_marked.py rename to client/ayon_core/hosts/unreal/plugins/publish/collect_remove_marked.py diff --git a/client/ayon_core/hosts/unreal/plugins/publish/collect_render_instances.py b/client/ayon_core/hosts/unreal/plugins/publish/collect_render_instances.py new file mode 100644 index 0000000000..8641094610 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/publish/collect_render_instances.py @@ -0,0 +1,114 @@ +import os +from pathlib import Path + +import unreal + +from ayon_core.pipeline import get_current_project_name +from ayon_core.pipeline import Anatomy +from ayon_core.hosts.unreal.api import pipeline +import pyblish.api + + +class CollectRenderInstances(pyblish.api.InstancePlugin): + """ This collector will try to find all the rendered frames. 
+ + """ + order = pyblish.api.CollectorOrder + hosts = ["unreal"] + families = ["render"] + label = "Collect Render Instances" + + def process(self, instance): + self.log.debug("Preparing Rendering Instances") + + context = instance.context + + data = instance.data + data['remove'] = True + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + sequence = ar.get_asset_by_object_path( + data.get('sequence')).get_asset() + + sequences = [{ + "sequence": sequence, + "output": data.get('output'), + "frame_range": ( + data.get('frameStart'), data.get('frameEnd')) + }] + + for s in sequences: + self.log.debug(f"Processing: {s.get('sequence').get_name()}") + subscenes = pipeline.get_subsequences(s.get('sequence')) + + if subscenes: + for ss in subscenes: + sequences.append({ + "sequence": ss.get_sequence(), + "output": (f"{s.get('output')}/" + f"{ss.get_sequence().get_name()}"), + "frame_range": ( + ss.get_start_frame(), ss.get_end_frame() - 1) + }) + else: + # Avoid creating instances for camera sequences + if "_camera" not in s.get('sequence').get_name(): + seq = s.get('sequence') + seq_name = seq.get_name() + + new_instance = context.create_instance( + f"{data.get('subset')}_" + f"{seq_name}") + new_instance[:] = seq_name + + new_data = new_instance.data + + new_data["asset"] = seq_name + new_data["setMembers"] = seq_name + new_data["family"] = "render" + new_data["families"] = ["render", "review"] + new_data["parent"] = data.get("parent") + new_data["subset"] = f"{data.get('subset')}_{seq_name}" + new_data["level"] = data.get("level") + new_data["output"] = s.get('output') + new_data["fps"] = seq.get_display_rate().numerator + new_data["frameStart"] = int(s.get('frame_range')[0]) + new_data["frameEnd"] = int(s.get('frame_range')[1]) + new_data["sequence"] = seq.get_path_name() + new_data["master_sequence"] = data["master_sequence"] + new_data["master_level"] = data["master_level"] + + self.log.debug(f"new instance data: {new_data}") + + try: + project = get_current_project_name() + anatomy = Anatomy(project) + root = anatomy.roots['renders'] + except Exception as e: + raise Exception(( + "Could not find render root " + "in anatomy settings.")) from e + + render_dir = f"{root}/{project}/{s.get('output')}" + render_path = Path(render_dir) + + frames = [] + + for x in render_path.iterdir(): + if x.is_file() and x.suffix == '.png': + frames.append(str(x.name)) + + if "representations" not in new_instance.data: + new_instance.data["representations"] = [] + + repr = { + 'frameStart': instance.data["frameStart"], + 'frameEnd': instance.data["frameEnd"], + 'name': 'png', + 'ext': 'png', + 'files': frames, + 'stagingDir': render_dir, + 'tags': ['review'] + } + new_instance.data["representations"].append(repr) diff --git a/client/ayon_core/hosts/unreal/plugins/publish/extract_camera.py b/client/ayon_core/hosts/unreal/plugins/publish/extract_camera.py new file mode 100644 index 0000000000..ebc5452011 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/publish/extract_camera.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +"""Extract camera from Unreal.""" +import os + +import unreal + +from ayon_core.pipeline import publish +from ayon_core.hosts.unreal.api.pipeline import UNREAL_VERSION + + +class ExtractCamera(publish.Extractor): + """Extract a camera.""" + + label = "Extract Camera" + hosts = ["unreal"] + families = ["camera"] + optional = True + + def process(self, instance): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + # Define extract output file path + staging_dir = 
self.staging_dir(instance) + fbx_filename = "{}.fbx".format(instance.name) + + # Perform extraction + self.log.info("Performing extraction..") + + # Check if the loaded level is the same of the instance + if UNREAL_VERSION.major == 5: + world = unreal.UnrealEditorSubsystem().get_editor_world() + else: + world = unreal.EditorLevelLibrary.get_editor_world() + current_level = world.get_path_name() + assert current_level == instance.data.get("level"), \ + "Wrong level loaded" + + for member in instance.data.get('members'): + data = ar.get_asset_by_object_path(member) + if UNREAL_VERSION.major == 5: + is_level_sequence = ( + data.asset_class_path.asset_name == "LevelSequence") + else: + is_level_sequence = (data.asset_class == "LevelSequence") + + if is_level_sequence: + sequence = data.get_asset() + if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor >= 1: + params = unreal.SequencerExportFBXParams( + world=world, + root_sequence=sequence, + sequence=sequence, + bindings=sequence.get_bindings(), + master_tracks=sequence.get_master_tracks(), + fbx_file_name=os.path.join(staging_dir, fbx_filename) + ) + unreal.SequencerTools.export_level_sequence_fbx(params) + elif UNREAL_VERSION.major == 4 and UNREAL_VERSION.minor == 26: + unreal.SequencerTools.export_fbx( + world, + sequence, + sequence.get_bindings(), + unreal.FbxExportOption(), + os.path.join(staging_dir, fbx_filename) + ) + else: + # Unreal 5.0 or 4.27 + unreal.SequencerTools.export_level_sequence_fbx( + world, + sequence, + sequence.get_bindings(), + unreal.FbxExportOption(), + os.path.join(staging_dir, fbx_filename) + ) + + if not os.path.isfile(os.path.join(staging_dir, fbx_filename)): + raise RuntimeError("Failed to extract camera") + + if "representations" not in instance.data: + instance.data["representations"] = [] + + fbx_representation = { + 'name': 'fbx', + 'ext': 'fbx', + 'files': fbx_filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(fbx_representation) diff --git a/client/ayon_core/hosts/unreal/plugins/publish/extract_layout.py b/client/ayon_core/hosts/unreal/plugins/publish/extract_layout.py new file mode 100644 index 0000000000..a508f79f18 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/publish/extract_layout.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +import os +import json +import math + +import unreal +from unreal import EditorLevelLibrary as ell +from unreal import EditorAssetLibrary as eal + +from ayon_core.client import get_representation_by_name +from ayon_core.pipeline import publish + + +class ExtractLayout(publish.Extractor): + """Extract a layout.""" + + label = "Extract Layout" + hosts = ["unreal"] + families = ["layout"] + optional = True + + def process(self, instance): + # Define extract output file path + staging_dir = self.staging_dir(instance) + + # Perform extraction + self.log.info("Performing extraction..") + + # Check if the loaded level is the same of the instance + current_level = ell.get_editor_world().get_path_name() + assert current_level == instance.data.get("level"), \ + "Wrong level loaded" + + json_data = [] + project_name = instance.context.data["projectName"] + + for member in instance[:]: + actor = ell.get_actor_reference(member) + mesh = None + + # Check the type of mesh + if actor.get_class().get_name() == 'SkeletalMeshActor': + mesh = actor.skeletal_mesh_component.skeletal_mesh + elif actor.get_class().get_name() == 'StaticMeshActor': + mesh = actor.static_mesh_component.static_mesh + + if mesh: + # Search the reference to the Asset
Container for the object + path = unreal.Paths.get_path(mesh.get_path_name()) + filter = unreal.ARFilter( + class_names=["AyonAssetContainer"], package_paths=[path]) + ar = unreal.AssetRegistryHelpers.get_asset_registry() + try: + asset_container = ar.get_assets(filter)[0].get_asset() + except IndexError: + self.log.error("AssetContainer not found.") + return + + parent_id = eal.get_metadata_tag(asset_container, "parent") + family = eal.get_metadata_tag(asset_container, "family") + + self.log.info("Parent: {}".format(parent_id)) + blend = get_representation_by_name( + project_name, "blend", parent_id, fields=["_id"] + ) + blend_id = blend["_id"] + + json_element = {} + json_element["reference"] = str(blend_id) + json_element["family"] = family + json_element["instance_name"] = actor.get_name() + json_element["asset_name"] = mesh.get_name() + import_data = mesh.get_editor_property("asset_import_data") + json_element["file_path"] = import_data.get_first_filename() + transform = actor.get_actor_transform() + + json_element["transform"] = { + "translation": { + "x": -transform.translation.x, + "y": transform.translation.y, + "z": transform.translation.z + }, + "rotation": { + "x": math.radians(transform.rotation.euler().x), + "y": math.radians(transform.rotation.euler().y), + "z": math.radians(180.0 - transform.rotation.euler().z) + }, + "scale": { + "x": transform.scale3d.x, + "y": transform.scale3d.y, + "z": transform.scale3d.z + } + } + json_data.append(json_element) + + json_filename = "{}.json".format(instance.name) + json_path = os.path.join(staging_dir, json_filename) + + with open(json_path, "w+") as file: + json.dump(json_data, fp=file, indent=2) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + json_representation = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(json_representation) diff --git a/client/ayon_core/hosts/unreal/plugins/publish/extract_look.py b/client/ayon_core/hosts/unreal/plugins/publish/extract_look.py new file mode 100644 index 0000000000..fd1277e302 --- /dev/null +++ b/client/ayon_core/hosts/unreal/plugins/publish/extract_look.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +import json +import os + +import unreal +from unreal import MaterialEditingLibrary as mat_lib + +from ayon_core.pipeline import publish + + +class ExtractLook(publish.Extractor): + """Extract look.""" + + label = "Extract Look" + hosts = ["unreal"] + families = ["look"] + optional = True + + def process(self, instance): + # Define extract output file path + staging_dir = self.staging_dir(instance) + resources_dir = instance.data["resourcesDir"] + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + transfers = [] + + json_data = [] + + for member in instance: + asset = ar.get_asset_by_object_path(member) + obj = asset.get_asset() + + name = asset.get_editor_property('asset_name') + + json_element = {'material': str(name)} + + material_obj = obj.get_editor_property('static_materials')[0] + material = material_obj.material_interface + + base_color = mat_lib.get_material_property_input_node( + material, unreal.MaterialProperty.MP_BASE_COLOR) + + base_color_name = base_color.get_editor_property('parameter_name') + + texture = mat_lib.get_material_default_texture_parameter_value( + material, base_color_name) + + if texture: + # Export Texture + tga_filename = f"{instance.name}_{name}_texture.tga" + + tga_exporter = unreal.TextureExporterTGA() + + tga_export_task = 
unreal.AssetExportTask() + + tga_export_task.set_editor_property('exporter', tga_exporter) + tga_export_task.set_editor_property('automated', True) + tga_export_task.set_editor_property('object', texture) + tga_export_task.set_editor_property( + 'filename', f"{staging_dir}/{tga_filename}") + tga_export_task.set_editor_property('prompt', False) + tga_export_task.set_editor_property('selected', False) + + unreal.Exporter.run_asset_export_task(tga_export_task) + + json_element['tga_filename'] = tga_filename + + transfers.append(( + f"{staging_dir}/{tga_filename}", + f"{resources_dir}/{tga_filename}")) + + fbx_filename = f"{instance.name}_{name}.fbx" + + fbx_exporter = unreal.StaticMeshExporterFBX() + fbx_exporter.set_editor_property('text', False) + + options = unreal.FbxExportOption() + options.set_editor_property('ascii', False) + options.set_editor_property('collision', False) + + task = unreal.AssetExportTask() + task.set_editor_property('exporter', fbx_exporter) + task.set_editor_property('options', options) + task.set_editor_property('automated', True) + task.set_editor_property('object', obj) + task.set_editor_property( + 'filename', f"{staging_dir}/{fbx_filename}") + task.set_editor_property('prompt', False) + task.set_editor_property('selected', False) + + unreal.Exporter.run_asset_export_task(task) + + json_element['fbx_filename'] = fbx_filename + + transfers.append(( + f"{staging_dir}/{fbx_filename}", + f"{resources_dir}/{fbx_filename}")) + + json_data.append(json_element) + + json_filename = f"{instance.name}.json" + json_path = os.path.join(staging_dir, json_filename) + + with open(json_path, "w+") as file: + json.dump(json_data, fp=file, indent=2) + + if "transfers" not in instance.data: + instance.data["transfers"] = [] + if "representations" not in instance.data: + instance.data["representations"] = [] + + json_representation = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": staging_dir, + } + + instance.data["representations"].append(json_representation) + instance.data["transfers"].extend(transfers) diff --git a/openpype/hosts/unreal/plugins/publish/extract_uasset.py b/client/ayon_core/hosts/unreal/plugins/publish/extract_uasset.py similarity index 97% rename from openpype/hosts/unreal/plugins/publish/extract_uasset.py rename to client/ayon_core/hosts/unreal/plugins/publish/extract_uasset.py index 0dd7ff4a0d..fa4fb4c04a 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_uasset.py +++ b/client/ayon_core/hosts/unreal/plugins/publish/extract_uasset.py @@ -3,7 +3,7 @@ import unreal -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractUAsset(publish.Extractor): diff --git a/openpype/hosts/unreal/plugins/publish/validate_no_dependencies.py b/client/ayon_core/hosts/unreal/plugins/publish/validate_no_dependencies.py similarity index 100% rename from openpype/hosts/unreal/plugins/publish/validate_no_dependencies.py rename to client/ayon_core/hosts/unreal/plugins/publish/validate_no_dependencies.py diff --git a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py b/client/ayon_core/hosts/unreal/plugins/publish/validate_sequence_frames.py similarity index 97% rename from openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py rename to client/ayon_core/hosts/unreal/plugins/publish/validate_sequence_frames.py index 06acbf0992..205436ad37 100644 --- a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py +++
b/client/ayon_core/hosts/unreal/plugins/publish/validate_sequence_frames.py @@ -3,7 +3,7 @@ import re import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError class ValidateSequenceFrames(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/unreal/ue_workers.py b/client/ayon_core/hosts/unreal/ue_workers.py similarity index 99% rename from openpype/hosts/unreal/ue_workers.py rename to client/ayon_core/hosts/unreal/ue_workers.py index 386ad877d7..e3f8729c2e 100644 --- a/openpype/hosts/unreal/ue_workers.py +++ b/client/ayon_core/hosts/unreal/ue_workers.py @@ -11,8 +11,8 @@ from qtpy import QtCore -import openpype.hosts.unreal.lib as ue_lib -from openpype.settings import get_project_settings +import ayon_core.hosts.unreal.lib as ue_lib +from ayon_core.settings import get_project_settings def parse_comp_progress(line: str, progress_signal: QtCore.Signal(int)): diff --git a/openpype/hosts/unreal/ui/__init__.py b/client/ayon_core/hosts/unreal/ui/__init__.py similarity index 100% rename from openpype/hosts/unreal/ui/__init__.py rename to client/ayon_core/hosts/unreal/ui/__init__.py diff --git a/openpype/hosts/unreal/ui/splash_screen.py b/client/ayon_core/hosts/unreal/ui/splash_screen.py similarity index 98% rename from openpype/hosts/unreal/ui/splash_screen.py rename to client/ayon_core/hosts/unreal/ui/splash_screen.py index 7ac77821d9..cf34943515 100644 --- a/openpype/hosts/unreal/ui/splash_screen.py +++ b/client/ayon_core/hosts/unreal/ui/splash_screen.py @@ -1,5 +1,5 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype import style, resources +from ayon_core import style, resources class SplashScreen(QtWidgets.QDialog): @@ -37,10 +37,10 @@ def __init__(self, super(SplashScreen, self).__init__() if splash_icon is None: - splash_icon = resources.get_openpype_icon_filepath() + splash_icon = resources.get_ayon_icon_filepath() if window_icon is None: - window_icon = resources.get_openpype_icon_filepath() + window_icon = resources.get_ayon_icon_filepath() self.splash_icon = splash_icon self.setWindowIcon(QtGui.QIcon(window_icon)) diff --git a/client/ayon_core/lib/__init__.py b/client/ayon_core/lib/__init__.py new file mode 100644 index 0000000000..12a5535a1c --- /dev/null +++ b/client/ayon_core/lib/__init__.py @@ -0,0 +1,284 @@ +# -*- coding: utf-8 -*- +# flake8: noqa E402 +"""AYON lib functions.""" +# add vendor to sys path based on Python version +import sys +import os +import site +from ayon_core import AYON_CORE_ROOT + +# Add Python version specific vendor folder +python_version_dir = os.path.join( + AYON_CORE_ROOT, "vendor", "python", "python_{}".format(sys.version[0]) +) +# Prepend path in sys paths +sys.path.insert(0, python_version_dir) +site.addsitedir(python_version_dir) + + +from .events import ( + emit_event, + register_event_callback +) + +from .vendor_bin_utils import ( + ToolNotFoundError, + find_executable, + get_oiio_tools_path, + get_oiio_tool_args, + get_ffmpeg_tool_path, + get_ffmpeg_tool_args, + is_oiio_supported, +) + +from .attribute_definitions import ( + AbstractAttrDef, + + UIDef, + UISeparatorDef, + UILabelDef, + + UnknownDef, + NumberDef, + TextDef, + EnumDef, + BoolDef, + FileDef, + FileDefItem, +) + +from .env_tools import ( + env_value_to_bool, + get_paths_from_environ, +) + +from .terminal import Terminal +from .execute import ( + get_ayon_launcher_args, + get_openpype_execute_args, + get_linux_launcher_args, + execute, + run_subprocess, + run_detached_process, + 
run_ayon_launcher_process, + run_openpype_process, + path_to_subprocess_arg, + CREATE_NO_WINDOW +) +from .log import ( + Logger, +) + +from .path_templates import ( + merge_dict, + TemplateMissingKey, + TemplateUnsolved, + StringTemplate, + TemplatesDict, + FormatObject, +) + +from .dateutils import ( + get_datetime_data, + get_timestamp, + get_formatted_current_time +) + +from .python_module_tools import ( + import_filepath, + modules_from_path, + recursive_bases_from_class, + classes_from_module, + import_module_from_dirpath, + is_func_signature_supported, +) + +from .profiles_filtering import ( + compile_list_of_regexes, + filter_profiles +) + +from .transcoding import ( + get_transcode_temp_directory, + should_convert_for_ffmpeg, + convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, + get_ffprobe_data, + get_ffprobe_streams, + get_ffmpeg_codec_args, + get_ffmpeg_format_args, + convert_ffprobe_fps_value, + convert_ffprobe_fps_to_float, + get_rescaled_command_arguments, +) + +from .local_settings import ( + IniSettingRegistry, + JSONSettingRegistry, + AYONSecureRegistry, + AYONSettingsRegistry, + OpenPypeSecureRegistry, + OpenPypeSettingsRegistry, + get_local_site_id, + get_ayon_username, + get_openpype_username, +) + +from .applications import ( + ApplicationLaunchFailed, + ApplictionExecutableNotFound, + ApplicationNotFound, + ApplicationManager, + + PreLaunchHook, + PostLaunchHook, + + EnvironmentPrepData, + prepare_app_environments, + prepare_context_environments, + get_app_environments_for_context, + apply_project_environments_value +) + +from .plugin_tools import ( + prepare_template_data, + source_hash, +) + +from .path_tools import ( + format_file_size, + collect_frames, + create_hard_link, + version_up, + get_version_from_path, + get_last_version_from_path, +) + +from .ayon_info import ( + is_running_from_build, + is_staging_enabled, + is_dev_mode_enabled, +) + + +from .connections import ( + requests_get, + requests_post +) + +terminal = Terminal + +__all__ = [ + "emit_event", + "register_event_callback", + + "get_ayon_launcher_args", + "get_openpype_execute_args", + "get_linux_launcher_args", + "execute", + "run_subprocess", + "run_detached_process", + "run_ayon_launcher_process", + "run_openpype_process", + "path_to_subprocess_arg", + "CREATE_NO_WINDOW", + + "env_value_to_bool", + "get_paths_from_environ", + + "ToolNotFoundError", + "find_executable", + "get_oiio_tools_path", + "get_oiio_tool_args", + "get_ffmpeg_tool_path", + "get_ffmpeg_tool_args", + "is_oiio_supported", + + "AbstractAttrDef", + + "UIDef", + "UISeparatorDef", + "UILabelDef", + + "UnknownDef", + "NumberDef", + "TextDef", + "EnumDef", + "BoolDef", + "FileDef", + "FileDefItem", + + "import_filepath", + "modules_from_path", + "recursive_bases_from_class", + "classes_from_module", + "import_module_from_dirpath", + "is_func_signature_supported", + + "get_transcode_temp_directory", + "should_convert_for_ffmpeg", + "convert_for_ffmpeg", + "convert_input_paths_for_ffmpeg", + "get_ffprobe_data", + "get_ffprobe_streams", + "get_ffmpeg_codec_args", + "get_ffmpeg_format_args", + "convert_ffprobe_fps_value", + "convert_ffprobe_fps_to_float", + "get_rescaled_command_arguments", + + "IniSettingRegistry", + "JSONSettingRegistry", + "OpenPypeSecureRegistry", + "OpenPypeSettingsRegistry", + "get_local_site_id", + "get_ayon_username", + "get_openpype_username", + + "ApplicationLaunchFailed", + "ApplictionExecutableNotFound", + "ApplicationNotFound", + "ApplicationManager", + "PreLaunchHook", + "PostLaunchHook", + 
"EnvironmentPrepData", + "prepare_app_environments", + "prepare_context_environments", + "get_app_environments_for_context", + "apply_project_environments_value", + + "compile_list_of_regexes", + + "filter_profiles", + + "prepare_template_data", + "source_hash", + + "format_file_size", + "collect_frames", + "create_hard_link", + "version_up", + "get_version_from_path", + "get_last_version_from_path", + + "merge_dict", + "TemplateMissingKey", + "TemplateUnsolved", + "StringTemplate", + "TemplatesDict", + "FormatObject", + + "terminal", + + "get_datetime_data", + "get_formatted_current_time", + + "Logger", + + "is_running_from_build", + "is_staging_enabled", + "is_dev_mode_enabled", + + "requests_get", + "requests_post" +] diff --git a/openpype/lib/applications.py b/client/ayon_core/lib/applications.py similarity index 95% rename from openpype/lib/applications.py rename to client/ayon_core/lib/applications.py index 4d75a01e1d..febdaacdd1 100644 --- a/openpype/lib/applications.py +++ b/client/ayon_core/lib/applications.py @@ -11,20 +11,20 @@ import six -from openpype import AYON_SERVER_ENABLED, PACKAGE_DIR -from openpype.client import get_asset_name_identifier -from openpype.settings import ( +from ayon_core import AYON_CORE_ROOT +from ayon_core.client import get_asset_name_identifier +from ayon_core.settings import ( get_system_settings, get_project_settings, get_local_settings ) -from openpype.settings.constants import ( +from ayon_core.settings.constants import ( METADATA_KEYS, M_DYNAMIC_KEY_LABEL ) from .log import Logger from .profiles_filtering import filter_profiles -from .local_settings import get_openpype_username +from .local_settings import get_ayon_username from .python_module_tools import ( modules_from_path, @@ -868,9 +868,17 @@ def app_group(self): def app_name(self): return getattr(self.application, "full_name", None) + @property + def addons_manager(self): + return getattr(self.launch_context, "addons_manager", None) + @property def modules_manager(self): - return getattr(self.launch_context, "modules_manager", None) + """ + Deprecated: + Use 'addons_wrapper' instead. + """ + return self.addons_manager def validate(self): """Optional validation of launch hook on initialization. @@ -950,12 +958,12 @@ def __init__( launch_type=None, **data ): - from openpype.modules import ModulesManager + from ayon_core.addon import AddonsManager # Application object self.application = application - self.modules_manager = ModulesManager() + self.addons_manager = AddonsManager() # Logger logger_name = "{}-{}".format(self.__class__.__name__, @@ -1042,6 +1050,15 @@ def env(self, value): ) self.kwargs["env"] = value + @property + def modules_manager(self): + """ + Deprecated: + Use 'addons_manager' instead. + + """ + return self.addons_manager + def _collect_addons_launch_hook_paths(self): """Helper to collect application launch hooks from addons. 
@@ -1055,7 +1072,7 @@ def _collect_addons_launch_hook_paths(self): expected_types = (list, tuple, set) output = [] - for module in self.modules_manager.get_enabled_modules(): + for module in self.addons_manager.get_enabled_addons(): # Skip module if does not have implemented 'get_launch_hook_paths' func = getattr(module, "get_launch_hook_paths", None) if func is None: @@ -1105,10 +1122,7 @@ def paths_to_launch_hooks(self): paths = [] # TODO load additional studio paths from settings - import openpype - openpype_dir = os.path.dirname(os.path.abspath(openpype.__file__)) - - global_hooks_dir = os.path.join(openpype_dir, "hooks") + global_hooks_dir = os.path.join(AYON_CORE_ROOT, "hooks") hooks_dirs = [ global_hooks_dir @@ -1117,10 +1131,10 @@ def paths_to_launch_hooks(self): # If host requires launch hooks and is module then launch hooks # should be collected using 'collect_launch_hook_paths' # - module have to implement 'get_launch_hook_paths' - host_module = self.modules_manager.get_host_module(self.host_name) + host_module = self.addons_manager.get_host_addon(self.host_name) if not host_module: hooks_dirs.append(os.path.join( - openpype_dir, "hosts", self.host_name, "hooks" + AYON_CORE_ROOT, "hosts", self.host_name, "hooks" )) for path in hooks_dirs: @@ -1248,7 +1262,7 @@ def _run_process(self): # Linux uses mid process # - it is possible that the mid process executable is not - # available for this version of OpenPype in that case use standard + # available for this version of AYON in that case use standard # launch launch_args = get_linux_launcher_args() if launch_args is None: @@ -1445,7 +1459,7 @@ def get_app_environments_for_context( env_group=None, launch_type=None, env=None, - modules_manager=None + addons_manager=None ): """Prepare environment variables by context. Args: @@ -1460,7 +1474,7 @@ def get_app_environments_for_context( executed. env (Optional[dict[str, str]]): Initial environment variables. `os.environ` is used when not passed. - modules_manager (Optional[ModulesManager]): Initialized modules + addons_manager (Optional[AddonsManager]): Initialized modules manager. Returns: @@ -1477,7 +1491,8 @@ def get_app_environments_for_context( env_group=env_group, launch_type=launch_type, env=env, - modules_manager=modules_manager, + addons_manager=addons_manager, + modules_manager=addons_manager, ) context.run_prelaunch_hooks() return context.env @@ -1495,11 +1510,11 @@ def _merge_env(env, current_env): return result -def _add_python_version_paths(app, env, logger, modules_manager): +def _add_python_version_paths(app, env, logger, addons_manager): """Add vendor packages specific for a Python version.""" - for module in modules_manager.get_enabled_modules(): - module.modify_application_launch_arguments(app, env) + for addon in addons_manager.get_enabled_addons(): + addon.modify_application_launch_arguments(app, env) # Skip adding if host name is not set if not app.host_name: @@ -1507,7 +1522,7 @@ def _add_python_version_paths(app, env, logger, modules_manager): # Add Python 2/3 modules python_vendor_dir = os.path.join( - PACKAGE_DIR, + AYON_CORE_ROOT, "vendor", "python" ) @@ -1532,7 +1547,7 @@ def _add_python_version_paths(app, env, logger, modules_manager): def prepare_app_environments( - data, env_group=None, implementation_envs=True, modules_manager=None + data, env_group=None, implementation_envs=True, addons_manager=None ): """Modify launch environments based on launched app and context. 
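The hunks that follow repeat one replacement pattern: ModulesManager and host-module lookups become AddonsManager and host-addon lookups. A standalone sketch of that pattern under the same imports the migrated code uses (the host name is illustrative; `get_host_addon` may return None when the host is not provided by an addon):

    from ayon_core.addon import AddonsManager

    addons_manager = AddonsManager()
    host_addon = addons_manager.get_host_addon("maya")
    if host_addon is not None:
        # Host addons expose workfile extensions the same way the old
        # host modules did.
        print(host_addon.get_workfile_extensions())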
@@ -1546,12 +1561,12 @@ def prepare_app_environments( log = data["log"] source_env = data["env"].copy() - if modules_manager is None: - from openpype.modules import ModulesManager + if addons_manager is None: + from ayon_core.addon import AddonsManager - modules_manager = ModulesManager() + addons_manager = AddonsManager() - _add_python_version_paths(app, source_env, log, modules_manager) + _add_python_version_paths(app, source_env, log, addons_manager) # Use environments from local settings filtered_local_envs = {} @@ -1631,14 +1646,14 @@ def prepare_app_environments( final_env = None # Add host specific environments if app.host_name and implementation_envs: - host_module = modules_manager.get_host_module(app.host_name) - if not host_module: - module = __import__("openpype.hosts", fromlist=[app.host_name]) + host_addon = addons_manager.get_host_addon(app.host_name) + if not host_addon: + module = __import__("ayon_core.hosts", fromlist=[app.host_name]) host_module = getattr(module, app.host_name, None) add_implementation_envs = None - if host_module: + if host_addon: add_implementation_envs = getattr( - host_module, "add_implementation_envs", None + host_addon, "add_implementation_envs", None ) if add_implementation_envs: # Function may only modify passed dict without returning value @@ -1693,7 +1708,7 @@ def apply_project_environments_value( return env -def prepare_context_environments(data, env_group=None, modules_manager=None): +def prepare_context_environments(data, env_group=None, addons_manager=None): """Modify launch environments with context data for launched host. Args: @@ -1701,7 +1716,7 @@ def prepare_context_environments(data, env_group=None, modules_manager=None): result will be stored. """ - from openpype.pipeline.template_data import get_template_data + from ayon_core.pipeline.template_data import get_template_data # Context environments log = data["log"] @@ -1772,7 +1787,7 @@ def prepare_context_environments(data, env_group=None, modules_manager=None): data["task_type"] = task_type try: - from openpype.pipeline.workfile import get_workdir_with_workdir_data + from ayon_core.pipeline.workfile import get_workdir_with_workdir_data workdir = get_workdir_with_workdir_data( workdir_data, @@ -1799,10 +1814,10 @@ def prepare_context_environments(data, env_group=None, modules_manager=None): data["env"]["AVALON_WORKDIR"] = workdir - _prepare_last_workfile(data, workdir, modules_manager) + _prepare_last_workfile(data, workdir, addons_manager) -def _prepare_last_workfile(data, workdir, modules_manager): +def _prepare_last_workfile(data, workdir, addons_manager): """last workfile workflow preparation. Function check if should care about last workfile workflow and tries @@ -1818,11 +1833,11 @@ def _prepare_last_workfile(data, workdir, modules_manager): workdir (str): Path to folder where workfiles should be stored. 
""" - from openpype.modules import ModulesManager - from openpype.pipeline import HOST_WORKFILE_EXTENSIONS + from ayon_core.addon import AddonsManager + from ayon_core.pipeline import HOST_WORKFILE_EXTENSIONS - if not modules_manager: - modules_manager = ModulesManager() + if not addons_manager: + addons_manager = AddonsManager() log = data["log"] @@ -1859,7 +1874,7 @@ def _prepare_last_workfile(data, workdir, modules_manager): data["env"]["AVALON_OPEN_LAST_WORKFILE"] = ( str(int(bool(start_last_workfile))) ) - data["env"]["OPENPYPE_WORKFILE_TOOL_ON_START"] = ( + data["env"]["AYON_WORKFILE_TOOL_ON_START"] = ( str(int(bool(workfile_startup))) ) @@ -1871,14 +1886,14 @@ def _prepare_last_workfile(data, workdir, modules_manager): # Last workfile path last_workfile_path = data.get("last_workfile_path") or "" if not last_workfile_path: - host_module = modules_manager.get_host_module(app.host_name) - if host_module: - extensions = host_module.get_workfile_extensions() + host_addon = addons_manager.get_host_addon(app.host_name) + if host_addon: + extensions = host_addon.get_workfile_extensions() else: extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) if extensions: - from openpype.pipeline.workfile import ( + from ayon_core.pipeline.workfile import ( get_workfile_template_key, get_last_workfile ) @@ -1897,7 +1912,7 @@ def _prepare_last_workfile(data, workdir, modules_manager): workdir_data.update({ "version": 1, - "user": get_openpype_username(), + "user": get_ayon_username(), "ext": extensions[0] }) @@ -1924,7 +1939,7 @@ def should_start_last_workfile( """Define if host should start last version workfile if possible. Default output is `False`. Can be overridden with environment variable - `AVALON_OPEN_LAST_WORKFILE`, valid values without case sensitivity are + `AYON_OPEN_LAST_WORKFILE`, valid values without case sensitivity are `"0", "1", "true", "false", "yes", "no"`. Args: @@ -1974,7 +1989,7 @@ def should_workfile_tool_start( """Define if host should start workfile tool at host launch. Default output is `False`. Can be overridden with environment variable - `OPENPYPE_WORKFILE_TOOL_ON_START`, valid values without case sensitivity are + `AYON_WORKFILE_TOOL_ON_START`, valid values without case sensitivity are `"0", "1", "true", "false", "yes", "no"`. Args: @@ -2022,8 +2037,8 @@ def get_non_python_host_kwargs(kwargs, allow_console=True): """Explicit setting of kwargs for Popen for AE/PS/Harmony. 
Expected behavior - - openpype_console opens window with logs - - openpype_gui has stdout/stderr available for capturing + - ayon_console opens window with logs + - ayon has stdout/stderr available for capturing Args: kwargs (dict) or None @@ -2037,20 +2052,13 @@ def get_non_python_host_kwargs(kwargs, allow_console=True): if platform.system().lower() != "windows": return kwargs - if AYON_SERVER_ENABLED: - executable_path = os.environ.get("AYON_EXECUTABLE") - else: - executable_path = os.environ.get("OPENPYPE_EXECUTABLE") + executable_path = os.environ.get("AYON_EXECUTABLE") executable_filename = "" if executable_path: executable_filename = os.path.basename(executable_path) - if AYON_SERVER_ENABLED: - is_gui_executable = "ayon_console" not in executable_filename - else: - is_gui_executable = "openpype_gui" in executable_filename - + is_gui_executable = "ayon_console" not in executable_filename if is_gui_executable: kwargs.update({ "creationflags": subprocess.CREATE_NO_WINDOW, diff --git a/openpype/lib/attribute_definitions.py b/client/ayon_core/lib/attribute_definitions.py similarity index 100% rename from openpype/lib/attribute_definitions.py rename to client/ayon_core/lib/attribute_definitions.py diff --git a/client/ayon_core/lib/ayon_info.py b/client/ayon_core/lib/ayon_info.py new file mode 100644 index 0000000000..725e10fa0e --- /dev/null +++ b/client/ayon_core/lib/ayon_info.py @@ -0,0 +1,119 @@ +import os +import json +import datetime +import platform +import getpass +import socket + +from ayon_core.settings.lib import get_local_settings +from .execute import get_ayon_launcher_args +from .local_settings import get_local_site_id + + +def get_ayon_launcher_version(): + version_filepath = os.path.join(os.environ["AYON_ROOT"], "version.py") + if not os.path.exists(version_filepath): + return None + content = {} + with open(version_filepath, "r") as stream: + exec(stream.read(), content) + return content["__version__"] + + +def is_running_from_build(): + """Determine if current process is running from build or code. + + Returns: + bool: True if running from build. + """ + + executable_path = os.environ["AYON_EXECUTABLE"] + executable_filename = os.path.basename(executable_path) + if "python" in executable_filename.lower(): + return False + return True + + +def is_staging_enabled(): + return os.getenv("AYON_USE_STAGING") == "1" + + +def is_dev_mode_enabled(): + """Dev mode is enabled in AYON. + + Returns: + bool: True if dev mode is enabled. 
+ """ + + return os.getenv("AYON_USE_DEV") == "1" + + +def get_ayon_info(): + executable_args = get_ayon_launcher_args() + if is_running_from_build(): + version_type = "build" + else: + version_type = "code" + return { + "ayon_launcher_version": get_ayon_launcher_version(), + "version_type": version_type, + "executable": executable_args[-1], + "ayon_root": os.environ["AYON_ROOT"], + "server_url": os.environ["AYON_SERVER_URL"] + } + + +def get_workstation_info(): + """Basic information about workstation.""" + host_name = socket.gethostname() + try: + host_ip = socket.gethostbyname(host_name) + except socket.gaierror: + host_ip = "127.0.0.1" + + return { + "hostname": host_name, + "host_ip": host_ip, + "username": getpass.getuser(), + "system_name": platform.system(), + "local_id": get_local_site_id() + } + + +def get_all_current_info(): + """All information about current process in one dictionary.""" + + return { + "workstation": get_workstation_info(), + "env": os.environ.copy(), + "local_settings": get_local_settings(), + "ayon": get_ayon_info(), + } + + +def extract_ayon_info_to_file(dirpath, filename=None): + """Extract all current info to a file. + + It is possible to define only directory path. Filename is concatenated with + pype version, workstation site id and timestamp. + + Args: + dirpath (str): Path to directory where file will be stored. + filename (Optional[str]): Filename. If not defined, it is generated. + + Returns: + filepath (str): Full path to file where data were extracted. + """ + if not filename: + filename = "{}_{}.json".format( + get_local_site_id(), + datetime.datetime.now().strftime("%y%m%d%H%M%S") + ) + filepath = os.path.join(dirpath, filename) + data = get_all_current_info() + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + with open(filepath, "w") as file_stream: + json.dump(data, file_stream, indent=4) + return filepath diff --git a/openpype/lib/connections.py b/client/ayon_core/lib/connections.py similarity index 100% rename from openpype/lib/connections.py rename to client/ayon_core/lib/connections.py diff --git a/openpype/lib/dateutils.py b/client/ayon_core/lib/dateutils.py similarity index 100% rename from openpype/lib/dateutils.py rename to client/ayon_core/lib/dateutils.py diff --git a/openpype/lib/env_tools.py b/client/ayon_core/lib/env_tools.py similarity index 100% rename from openpype/lib/env_tools.py rename to client/ayon_core/lib/env_tools.py diff --git a/openpype/lib/events.py b/client/ayon_core/lib/events.py similarity index 100% rename from openpype/lib/events.py rename to client/ayon_core/lib/events.py diff --git a/openpype/lib/execute.py b/client/ayon_core/lib/execute.py similarity index 77% rename from openpype/lib/execute.py rename to client/ayon_core/lib/execute.py index c54541a116..4e3257c3a0 100644 --- a/openpype/lib/execute.py +++ b/client/ayon_core/lib/execute.py @@ -5,22 +5,14 @@ import json import tempfile -from openpype import AYON_SERVER_ENABLED - from .log import Logger from .vendor_bin_utils import find_executable -from .openpype_version import is_running_from_build - # MSDN process creation flag (Windows only) CREATE_NO_WINDOW = 0x08000000 -def execute(args, - silent=False, - cwd=None, - env=None, - shell=None): +def execute(args, silent=False, cwd=None, env=None, shell=None): """Execute command as process. 
This will execute given command as process, monitor its output @@ -41,10 +33,9 @@ def execute(args, int: return code of process """ + log_levels = ["DEBUG:", "INFO:", "ERROR:", "WARNING:", "CRITICAL:"] - log_levels = ['DEBUG:', 'INFO:', 'ERROR:', 'WARNING:', 'CRITICAL:'] - - log = Logger.get_logger('execute') + log = Logger.get_logger("execute") log.info("Executing ({})".format(" ".join(args))) popen = subprocess.Popen( args, @@ -60,7 +51,7 @@ def execute(args, # Blocks until finished while True: line = popen.stdout.readline() - if line == '': + if line == "": break if silent: continue @@ -100,8 +91,8 @@ def run_subprocess(*args, **kwargs): Raises: RuntimeError: Exception is raised if process finished with nonzero return code. - """ + """ # Modify creation flags on windows to hide console window if in UI mode if ( platform.system().lower() == "windows" @@ -175,29 +166,8 @@ def clean_envs_for_ayon_process(env=None): Returns: dict[str, str]: Environment variables for ayon process. - """ - - if env is None: - env = os.environ - - # Exclude some environment variables from a copy of the environment - env = env.copy() - for key in ["PYTHONPATH", "PYTHONHOME"]: - env.pop(key, None) - - return env - - -def clean_envs_for_openpype_process(env=None): - """Modify environments that may affect OpenPype process. - Main reason to implement this function is to pop PYTHONPATH which may be - affected by in-host environments. """ - - if AYON_SERVER_ENABLED: - return clean_envs_for_ayon_process(env=env) - if env is None: env = os.environ @@ -210,13 +180,13 @@ def clean_envs_for_openpype_process(env=None): def run_ayon_launcher_process(*args, **kwargs): - """Execute OpenPype process with passed arguments and wait. + """Execute AYON process with passed arguments and wait. - Wrapper for 'run_process' which prepends OpenPype executable arguments + Wrapper for 'run_process' which prepends AYON executable arguments before passed arguments and define environments if are not passed. Values from 'os.environ' are used for environments if are not passed. - They are cleaned using 'clean_envs_for_openpype_process' function. + They are cleaned using 'clean_envs_for_ayon_process' function. Example: ``` @@ -229,77 +199,55 @@ def run_ayon_launcher_process(*args, **kwargs): Returns: str: Full output of subprocess concatenated stdout and stderr. - """ + """ args = get_ayon_launcher_args(*args) env = kwargs.pop("env", None) # Keep env untouched if are passed and not empty if not env: - # Skip envs that can affect OpenPype process + # Skip envs that can affect AYON launcher process # - fill more if you find more - env = clean_envs_for_openpype_process(os.environ) - - # Only keep OpenPype version if we are running from build. - if not is_running_from_build(): - env.pop("OPENPYPE_VERSION", None) + env = clean_envs_for_ayon_process(os.environ) return run_subprocess(args, env=env, **kwargs) def run_openpype_process(*args, **kwargs): - """Execute OpenPype process with passed arguments and wait. + """Execute AYON process with passed arguments and wait. - Wrapper for 'run_process' which prepends OpenPype executable arguments + Wrapper for 'run_process' which prepends AYON executable arguments before passed arguments and define environments if are not passed. Values from 'os.environ' are used for environments if are not passed. - They are cleaned using 'clean_envs_for_openpype_process' function. + They are cleaned using 'clean_envs_for_ayon_process' function. 
Example: >>> run_openpype_process("version") Args: - *args (tuple): OpenPype cli arguments. + *args (tuple): AYON cli arguments. **kwargs (dict): Keyword arguments for subprocess.Popen. - """ - - if AYON_SERVER_ENABLED: - return run_ayon_launcher_process(*args, **kwargs) - - args = get_openpype_execute_args(*args) - env = kwargs.pop("env", None) - # Keep env untouched if are passed and not empty - if not env: - # Skip envs that can affect OpenPype process - # - fill more if you find more - env = clean_envs_for_openpype_process(os.environ) - - # Only keep OpenPype version if we are running from build. - if not is_running_from_build(): - env.pop("OPENPYPE_VERSION", None) - return run_subprocess(args, env=env, **kwargs) + """ + return run_ayon_launcher_process(*args, **kwargs) def run_detached_process(args, **kwargs): """Execute process with passed arguments as separated process. - Values from 'os.environ' are used for environments if are not passed. - They are cleaned using 'clean_envs_for_openpype_process' function. - Example: >>> run_detached_process("run", "./path_to.py") Args: - *args (tuple): OpenPype cli arguments. + *args (tuple): AYON cli arguments. **kwargs (dict): Keyword arguments for subprocess.Popen. Returns: subprocess.Popen: Pointer to launched process but it is possible that launched process is already killed (on linux). - """ + """ env = kwargs.pop("env", None) # Keep env untouched if are passed and not empty if not env: @@ -359,6 +307,12 @@ def path_to_subprocess_arg(path): """Prepare path for subprocess arguments. Returned path can be wrapped with quotes or kept as is. + + Args: + path (str): Path to be converted. + + Returns: + str: Converted path. """ return subprocess.list2cmdline([path]) @@ -381,7 +335,6 @@ def get_ayon_launcher_args(*args): Returns: list[str]: List of arguments to run ayon-launcher process. """ - executable = os.environ["AYON_EXECUTABLE"] launch_args = [executable] @@ -396,46 +349,13 @@ def get_ayon_launcher_args(*args): return launch_args -def get_openpype_execute_args(*args): - """Arguments to run pype command. - - Arguments for subprocess when need to spawn new pype process. Which may be - needed when new python process for pype scripts must be executed in build - pype. - - ## Why is this needed? - Pype executed from code has different executable set to virtual env python - and must have path to script as first argument which is not needed for - build pype. - - It is possible to pass any arguments that will be added after pype - executables. - """ - - if AYON_SERVER_ENABLED: - return get_ayon_launcher_args(*args) - - executable = os.environ["OPENPYPE_EXECUTABLE"] - launch_args = [executable] - - executable_filename = os.path.basename(executable) - if "python" in executable_filename.lower(): - filepath = os.path.join(os.environ["OPENPYPE_ROOT"], "start.py") - launch_args.append(filepath) - - if args: - launch_args.extend(args) - - return launch_args - - def get_linux_launcher_args(*args): """Path to application mid process executable. This function should be able as arguments are different when used from code and build. - It is possible that this function is used in OpenPype build which does + It is possible that this function is used in AYON build which does not have yet the new executable. In that case 'None' is returned. Todos: @@ -449,19 +369,12 @@ def get_linux_launcher_args(*args): list: Executables with possible positional argument to script when called from code. 
""" - filename = "app_launcher" - if AYON_SERVER_ENABLED: - executable = os.environ["AYON_EXECUTABLE"] - else: - executable = os.environ["OPENPYPE_EXECUTABLE"] + executable = os.environ["AYON_EXECUTABLE"] executable_filename = os.path.basename(executable) if "python" in executable_filename.lower(): - if AYON_SERVER_ENABLED: - root = os.environ["AYON_ROOT"] - else: - root = os.environ["OPENPYPE_ROOT"] + root = os.environ["AYON_ROOT"] script_path = os.path.join(root, "{}.py".format(filename)) launch_args = [executable, script_path] else: @@ -478,3 +391,21 @@ def get_linux_launcher_args(*args): launch_args.extend(args) return launch_args + + +def get_openpype_execute_args(*args): + """Arguments to run pype command. + + Arguments for subprocess when need to spawn new pype process. Which may be + needed when new python process for pype scripts must be executed in build + pype. + + ## Why is this needed? + Pype executed from code has different executable set to virtual env python + and must have path to script as first argument which is not needed for + build pype. + + It is possible to pass any arguments that will be added after pype + executables. + """ + return get_ayon_launcher_args(*args) diff --git a/openpype/lib/file_transaction.py b/client/ayon_core/lib/file_transaction.py similarity index 99% rename from openpype/lib/file_transaction.py rename to client/ayon_core/lib/file_transaction.py index 80f4e81f2c..81a3b386f6 100644 --- a/openpype/lib/file_transaction.py +++ b/client/ayon_core/lib/file_transaction.py @@ -4,7 +4,7 @@ import errno import six -from openpype.lib import create_hard_link +from ayon_core.lib import create_hard_link # this is needed until speedcopy for linux is fixed if sys.platform == "win32": diff --git a/openpype/lib/local_settings.py b/client/ayon_core/lib/local_settings.py similarity index 79% rename from openpype/lib/local_settings.py rename to client/ayon_core/lib/local_settings.py index ea42d2f0b5..33b3232128 100644 --- a/openpype/lib/local_settings.py +++ b/client/ayon_core/lib/local_settings.py @@ -2,12 +2,10 @@ """Package to deal with saving and retrieving user specific settings.""" import os import json -import getpass import platform from datetime import datetime from abc import ABCMeta, abstractmethod -# TODO Use pype igniter logic instead of using duplicated code # disable lru cache in Python 2 try: from functools import lru_cache @@ -29,19 +27,12 @@ def wrapper(*args, **kwargs): import six import appdirs -from openpype import AYON_SERVER_ENABLED -from openpype.settings import ( - get_local_settings, - get_system_settings -) - -from openpype.client.mongo import validate_mongo_connection -from openpype.client import get_ayon_server_api_connection +from ayon_core.client import get_ayon_server_api_connection _PLACEHOLDER = object() -class OpenPypeSecureRegistry: +class AYONSecureRegistry: """Store information using keyring. Registry should be used for private data that should be available only for @@ -68,8 +59,8 @@ def __init__(self, name): keyring.set_keyring(Windows.WinVaultKeyring()) - # Force "OpenPype" prefix - self._name = "/".join(("OpenPype", name)) + # Force "AYON" prefix + self._name = "/".join(("AYON", name)) def set_item(self, name, value): # type: (str, str) -> None @@ -225,7 +216,7 @@ def _delete_item(self, name): """Delete item from settings. 
Note: - see :meth:`openpype.lib.user_settings.ARegistrySettings.delete_item` + see :meth:`ayon_core.lib.user_settings.ARegistrySettings.delete_item` """ pass @@ -246,17 +237,14 @@ def __init__(self, name, path): # type: (str, str) -> IniSettingRegistry super(IniSettingRegistry, self).__init__(name) # get registry file - version = os.getenv("OPENPYPE_VERSION", "N/A") self._registry_file = os.path.join(path, "{}.ini".format(name)) if not os.path.exists(self._registry_file): with open(self._registry_file, mode="w") as cfg: print("# Settings registry", cfg) - print("# Generated by OpenPype {}".format(version), cfg) now = datetime.now().strftime("%d/%m/%Y %H:%M:%S") print("# {}".format(now), cfg) - def set_item_section( - self, section, name, value): + def set_item_section(self, section, name, value): # type: (str, str, str) -> None """Set item to specific section of ini registry. @@ -385,7 +373,7 @@ def _delete_item(self, name): """Delete item from default section. Note: - See :meth:`~openpype.lib.IniSettingsRegistry.delete_item_from_section` + See :meth:`~ayon_core.lib.IniSettingsRegistry.delete_item_from_section` """ self.delete_item_from_section("MAIN", name) @@ -401,10 +389,7 @@ def __init__(self, name, path): self._registry_file = os.path.join(path, "{}.json".format(name)) now = datetime.now().strftime("%d/%m/%Y %H:%M:%S") header = { - "__metadata__": { - "openpype-version": os.getenv("OPENPYPE_VERSION", "N/A"), - "generated": now - }, + "__metadata__": {"generated": now}, "registry": {} } @@ -420,7 +405,7 @@ def _get_item(self, name): """Get item value from registry json. Note: - See :meth:`openpype.lib.JSONSettingRegistry.get_item` + See :meth:`ayon_core.lib.JSONSettingRegistry.get_item` """ with open(self._registry_file, mode="r") as cfg: @@ -453,7 +438,7 @@ def _set_item(self, name, value): """Set item value to registry json. Note: - See :meth:`openpype.lib.JSONSettingRegistry.set_item` + See :meth:`ayon_core.lib.JSONSettingRegistry.set_item` """ with open(self._registry_file, "r+") as cfg: @@ -485,30 +470,24 @@ def _delete_item(self, name): json.dump(data, cfg, indent=4) -class OpenPypeSettingsRegistry(JSONSettingRegistry): - """Class handling OpenPype general settings registry. +class AYONSettingsRegistry(JSONSettingRegistry): + """Class handling AYON general settings registry. Attributes: vendor (str): Name used for path construction. product (str): Additional name used for path construction. + Args: + name (Optional[str]): Name of the registry. """ def __init__(self, name=None): - if AYON_SERVER_ENABLED: - vendor = "Ynput" - product = "AYON" - default_name = "AYON_settings" - else: - vendor = "pypeclub" - product = "openpype" - default_name = "openpype_settings" - self.vendor = vendor - self.product = product + self.vendor = "Ynput" + self.product = "AYON" if not name: - name = default_name + name = "AYON_settings" path = appdirs.user_data_dir(self.product, self.vendor) - super(OpenPypeSettingsRegistry, self).__init__(name, path) + super(AYONSettingsRegistry, self).__init__(name, path) def _create_local_site_id(registry=None): @@ -516,7 +495,7 @@ def _create_local_site_id(registry=None): from coolname import generate_slug if registry is None: - registry = OpenPypeSettingsRegistry() + registry = AYONSettingsRegistry() new_id = generate_slug(3) @@ -543,7 +522,11 @@ def get_ayon_appdirs(*args): ) -def _get_ayon_local_site_id(): +def get_local_site_id(): + """Get local site identifier. + + Identifier is created if it does not exist yet.
+ """ # used for background syncing site_id = os.environ.get("AYON_SITE_ID") if site_id: @@ -566,78 +549,22 @@ def _get_ayon_local_site_id(): return site_id -def get_local_site_id(): - """Get local site identifier. - - Identifier is created if does not exists yet. - """ - - if AYON_SERVER_ENABLED: - return _get_ayon_local_site_id() - - # override local id from environment - # used for background syncing - if os.environ.get("OPENPYPE_LOCAL_ID"): - return os.environ["OPENPYPE_LOCAL_ID"] - - registry = OpenPypeSettingsRegistry() - try: - return registry.get_item("localId") - except ValueError: - return _create_local_site_id() +def get_ayon_username(): + """AYON username used for templates and publishing. + Uses curet ayon api username. -def change_openpype_mongo_url(new_mongo_url): - """Change mongo url in pype registry. - - Change of OpenPype mongo URL require restart of running pype processes or - processes using pype. + Returns: + str: Username. """ - validate_mongo_connection(new_mongo_url) - key = "openPypeMongo" - registry = OpenPypeSecureRegistry("mongodb") - existing_value = registry.get_item(key, None) - if existing_value is not None: - registry.delete_item(key) - registry.set_item(key, new_mongo_url) + con = get_ayon_server_api_connection() + return con.get_user()["name"] def get_openpype_username(): - """OpenPype username used for templates and publishing. + return get_ayon_username() - May be different than machine's username. - Always returns "OPENPYPE_USERNAME" environment if is set then tries local - settings and last option is to use `getpass.getuser()` which returns - machine username. - """ - - if AYON_SERVER_ENABLED: - con = get_ayon_server_api_connection() - return con.get_user()["name"] - - username = os.environ.get("OPENPYPE_USERNAME") - if not username: - local_settings = get_local_settings() - username = ( - local_settings - .get("general", {}) - .get("username") - ) - if not username: - username = getpass.getuser() - return username - - -def is_admin_password_required(): - system_settings = get_system_settings() - password = system_settings["general"].get("admin_password") - if not password: - return False - - local_settings = get_local_settings() - is_admin = local_settings.get("general", {}).get("is_admin", False) - if is_admin: - return False - return True +OpenPypeSecureRegistry = AYONSecureRegistry +OpenPypeSettingsRegistry = AYONSettingsRegistry diff --git a/client/ayon_core/lib/log.py b/client/ayon_core/lib/log.py new file mode 100644 index 0000000000..cbb1e41bae --- /dev/null +++ b/client/ayon_core/lib/log.py @@ -0,0 +1,274 @@ +import os +import sys +import uuid +import getpass +import logging +import platform +import socket +import time +import threading +import copy + +from . import Terminal + +# Check for `unicode` in builtins +USE_UNICODE = hasattr(__builtins__, "unicode") + + +class LogStreamHandler(logging.StreamHandler): + """ StreamHandler class designed to handle utf errors in python 2.x hosts. + + """ + + def __init__(self, stream=None): + super(LogStreamHandler, self).__init__(stream) + self.enabled = True + + def enable(self): + """ Enable StreamHandler + + Used to silence output + """ + self.enabled = True + + def disable(self): + """ Disable StreamHandler + + Make StreamHandler output again + """ + self.enabled = False + + def emit(self, record): + if not self.enable: + return + try: + msg = self.format(record) + msg = Terminal.log(msg) + stream = self.stream + if stream is None: + return + fs = "%s\n" + # if no unicode support... 
+ if not USE_UNICODE: + stream.write(fs % msg) + else: + try: + if (isinstance(msg, unicode) and # noqa: F821 + getattr(stream, 'encoding', None)): + ufs = u'%s\n' + try: + stream.write(ufs % msg) + except UnicodeEncodeError: + stream.write((ufs % msg).encode(stream.encoding)) + else: + if (getattr(stream, 'encoding', 'utf-8')): + ufs = u'%s\n' + stream.write(ufs % unicode(msg)) # noqa: F821 + else: + stream.write(fs % msg) + except UnicodeError: + stream.write(fs % msg.encode("UTF-8")) + self.flush() + except (KeyboardInterrupt, SystemExit): + raise + + except OSError: + self.handleError(record) + + except Exception: + print(repr(record)) + self.handleError(record) + + +class LogFormatter(logging.Formatter): + + DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ]' + default_formatter = logging.Formatter(DFT) + + def __init__(self, formats): + super(LogFormatter, self).__init__() + self.formatters = {} + for loglevel in formats: + self.formatters[loglevel] = logging.Formatter(formats[loglevel]) + + def format(self, record): + formatter = self.formatters.get(record.levelno, self.default_formatter) + + _exc_info = record.exc_info + record.exc_info = None + + out = formatter.format(record) + record.exc_info = _exc_info + + if record.exc_info is not None: + line_len = len(str(record.exc_info[1])) + if line_len > 30: + line_len = 30 + out = "{}\n{}\n{}\n{}\n{}".format( + out, + line_len * "=", + str(record.exc_info[1]), + line_len * "=", + self.formatException(record.exc_info) + ) + return out + + +class Logger: + DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] ' + DBG = " - { %(name)s }: [ %(message)s ] " + INF = ">>> [ %(message)s ] " + WRN = "*** WRN: >>> { %(name)s }: [ %(message)s ] " + ERR = "!!! ERR: %(asctime)s >>> { %(name)s }: [ %(message)s ] " + CRI = "!!! 
CRI: %(asctime)s >>> { %(name)s }: [ %(message)s ] "
+
+    FORMAT_FILE = {
+        logging.INFO: INF,
+        logging.DEBUG: DBG,
+        logging.WARNING: WRN,
+        logging.ERROR: ERR,
+        logging.CRITICAL: CRI,
+    }
+
+    # Is static class initialized
+    initialized = False
+    _init_lock = threading.Lock()
+
+    # Logging level - AYON_LOG_LEVEL
+    log_level = None
+
+    # Data same for all log records
+    process_data = None
+    # Cached process name or ability to set different process name
+    _process_name = None
+    # TODO Remove 'mongo_process_id' in 1.x.x
+    mongo_process_id = uuid.uuid4().hex
+
+    @classmethod
+    def get_logger(cls, name=None):
+        if not cls.initialized:
+            cls.initialize()
+
+        logger = logging.getLogger(name or "__main__")
+
+        logger.setLevel(cls.log_level)
+
+        add_console_handler = True
+
+        for handler in logger.handlers:
+            if isinstance(handler, LogStreamHandler):
+                add_console_handler = False
+
+        if add_console_handler:
+            logger.addHandler(cls._get_console_handler())
+
+        # Do not propagate logs to root logger
+        logger.propagate = False
+
+        return logger
+
+    @classmethod
+    def _get_console_handler(cls):
+        formatter = LogFormatter(cls.FORMAT_FILE)
+        console_handler = LogStreamHandler()
+
+        console_handler.set_name("LogStreamHandler")
+        console_handler.setFormatter(formatter)
+        return console_handler
+
+    @classmethod
+    def initialize(cls):
+        # TODO update already created loggers on re-initialization
+        if not cls._init_lock.locked():
+            with cls._init_lock:
+                cls._initialize()
+        else:
+            # If the lock is locked, wait until initialization is finished
+            while cls._init_lock.locked():
+                time.sleep(0.1)
+
+    @classmethod
+    def _initialize(cls):
+        # Change initialization state to prevent runtime changes
+        # if this is executed during runtime
+        cls.initialized = False
+
+        # Define the logging level
+        log_level = os.getenv("AYON_LOG_LEVEL")
+        if not log_level:
+            # Check AYON_DEBUG for debug level
+            op_debug = os.getenv("AYON_DEBUG")
+            if op_debug and int(op_debug) > 0:
+                log_level = 10
+            else:
+                log_level = 20
+        cls.log_level = int(log_level)
+
+        # Mark as initialized
+        cls.initialized = True
+
+    @classmethod
+    def get_process_data(cls):
+        """Data about the current process that is the same for all records.
+
+        Process data are attached to each log record.
+        """
+        if cls.process_data is not None:
+            return copy.deepcopy(cls.process_data)
+
+        if not cls.initialized:
+            cls.initialize()
+
+        host_name = socket.gethostname()
+        try:
+            host_ip = socket.gethostbyname(host_name)
+        except socket.gaierror:
+            host_ip = "127.0.0.1"
+
+        process_name = cls.get_process_name()
+
+        cls.process_data = {
+            "hostname": host_name,
+            "hostip": host_ip,
+            "username": getpass.getuser(),
+            "system_name": platform.system(),
+            "process_name": process_name
+        }
+        return copy.deepcopy(cls.process_data)
+
+    @classmethod
+    def set_process_name(cls, process_name):
+        """Set process name used in log records."""
+        # Just change the attribute
+        cls._process_name = process_name
+        # Update process data if they are already set
+        if cls.process_data is not None:
+            cls.process_data["process_name"] = process_name
+
+    @classmethod
+    def get_process_name(cls):
+        """Process name that acts like a "label" of the process.
+
+        AYON logging can be used from AYON itself or from hosts. Even in
+        an AYON process it is good to know if logs come from the tray or
+        from other CLI commands. The process name helps to identify that.
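+
+        The name is resolved in this order: a name set explicitly via
+        'set_process_name', the 'AVALON_APP_NAME' environment variable,
+        the process name reported by 'psutil' (when available) and
+        finally the basename of the running executable.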
+ """ + if cls._process_name is not None: + return cls._process_name + + # Get process name + process_name = os.environ.get("AVALON_APP_NAME") + if not process_name: + try: + import psutil + process = psutil.Process(os.getpid()) + process_name = process.name() + + except ImportError: + pass + + if not process_name: + process_name = os.path.basename(sys.executable) + + cls._process_name = process_name + return cls._process_name diff --git a/openpype/lib/path_templates.py b/client/ayon_core/lib/path_templates.py similarity index 100% rename from openpype/lib/path_templates.py rename to client/ayon_core/lib/path_templates.py diff --git a/openpype/lib/path_tools.py b/client/ayon_core/lib/path_tools.py similarity index 100% rename from openpype/lib/path_tools.py rename to client/ayon_core/lib/path_tools.py diff --git a/openpype/lib/plugin_tools.py b/client/ayon_core/lib/plugin_tools.py similarity index 100% rename from openpype/lib/plugin_tools.py rename to client/ayon_core/lib/plugin_tools.py diff --git a/openpype/lib/profiles_filtering.py b/client/ayon_core/lib/profiles_filtering.py similarity index 100% rename from openpype/lib/profiles_filtering.py rename to client/ayon_core/lib/profiles_filtering.py diff --git a/openpype/lib/profiling.py b/client/ayon_core/lib/profiling.py similarity index 100% rename from openpype/lib/profiling.py rename to client/ayon_core/lib/profiling.py diff --git a/openpype/lib/python_2_comp.py b/client/ayon_core/lib/python_2_comp.py similarity index 100% rename from openpype/lib/python_2_comp.py rename to client/ayon_core/lib/python_2_comp.py diff --git a/openpype/lib/python_module_tools.py b/client/ayon_core/lib/python_module_tools.py similarity index 100% rename from openpype/lib/python_module_tools.py rename to client/ayon_core/lib/python_module_tools.py diff --git a/openpype/lib/terminal.py b/client/ayon_core/lib/terminal.py similarity index 95% rename from openpype/lib/terminal.py rename to client/ayon_core/lib/terminal.py index f6072ed209..f822a37286 100644 --- a/openpype/lib/terminal.py +++ b/client/ayon_core/lib/terminal.py @@ -21,7 +21,7 @@ class Terminal: If :mod:`Colorama` is not found, it will still work, but without colors. Depends on :mod:`Colorama` - Using **OPENPYPE_LOG_NO_COLORS** environment variable. + Using **AYON_LOG_NO_COLORS** environment variable. """ # Is Terminal initialized @@ -38,7 +38,7 @@ def _initialize(): """Initialize Terminal class as object. First check if colorized output is disabled by environment variable - `OPENPYPE_LOG_NO_COLORS` value. By default is colorized output turned + `AYON_LOG_NO_COLORS` value. By default is colorized output turned on. Then tries to import python module that do the colors magic and create @@ -48,9 +48,9 @@ def _initialize(): Set `_initialized` attribute to `True` when is done. """ - from openpype.lib import env_value_to_bool + from ayon_core.lib import env_value_to_bool log_no_colors = env_value_to_bool( - "OPENPYPE_LOG_NO_COLORS", default=None + "AYON_LOG_NO_COLORS", default=None ) if log_no_colors is not None: Terminal.use_colors = not log_no_colors @@ -169,7 +169,7 @@ def echo(message): def log(message): """Return color formatted message. - If environment variable `OPENPYPE_LOG_NO_COLORS` is set to + If environment variable `AYON_LOG_NO_COLORS` is set to whatever value, message will be formatted but not colorized. 
Args: diff --git a/openpype/lib/transcoding.py b/client/ayon_core/lib/transcoding.py similarity index 99% rename from openpype/lib/transcoding.py rename to client/ayon_core/lib/transcoding.py index 1cfe9ac14b..6c6837dcf9 100644 --- a/openpype/lib/transcoding.py +++ b/client/ayon_core/lib/transcoding.py @@ -558,9 +558,9 @@ def convert_for_ffmpeg( logger = logging.getLogger(__name__) logger.warning(( - "DEPRECATED: 'openpype.lib.transcoding.convert_for_ffmpeg' is" + "DEPRECATED: 'ayon_core.lib.transcoding.convert_for_ffmpeg' is" " deprecated function of conversion for FFMpeg. Please replace usage" - " with 'openpype.lib.transcoding.convert_input_paths_for_ffmpeg'" + " with 'ayon_core.lib.transcoding.convert_input_paths_for_ffmpeg'" )) ext = os.path.splitext(first_input_path)[1].lower() diff --git a/openpype/lib/usdlib.py b/client/ayon_core/lib/usdlib.py similarity index 98% rename from openpype/lib/usdlib.py rename to client/ayon_core/lib/usdlib.py index c166feb3a6..2a5a317d72 100644 --- a/openpype/lib/usdlib.py +++ b/client/ayon_core/lib/usdlib.py @@ -8,8 +8,8 @@ # Allow to fall back on Multiverse 6.3.0+ pxr usd library from mvpxr import Usd, UsdGeom, Sdf, Kind -from openpype.client import get_project, get_asset_by_name -from openpype.pipeline import Anatomy, get_current_project_name +from ayon_core.client import get_project, get_asset_by_name +from ayon_core.pipeline import Anatomy, get_current_project_name log = logging.getLogger(__name__) diff --git a/openpype/lib/vendor_bin_utils.py b/client/ayon_core/lib/vendor_bin_utils.py similarity index 84% rename from openpype/lib/vendor_bin_utils.py rename to client/ayon_core/lib/vendor_bin_utils.py index dc8bb7435e..41654476c2 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/client/ayon_core/lib/vendor_bin_utils.py @@ -3,8 +3,6 @@ import platform import subprocess -from openpype import AYON_SERVER_ENABLED - log = logging.getLogger("Vendor utils") @@ -140,30 +138,6 @@ def find_executable(executable): return None -def get_vendor_bin_path(bin_app): - """Path to OpenPype vendorized binaries. - - Vendorized executables are expected in specific hierarchy inside build or - in code source. - - "{OPENPYPE_ROOT}/vendor/bin/{name of vendorized app}/{platform}" - - Args: - bin_app (str): Name of vendorized application. - - Returns: - str: Path to vendorized binaries folder. - """ - - return os.path.join( - os.environ["OPENPYPE_ROOT"], - "vendor", - "bin", - bin_app, - platform.system().lower() - ) - - def find_tool_in_custom_paths(paths, tool, validation_func=None): """Find a tool executable in custom paths. @@ -322,32 +296,23 @@ def get_oiio_tools_path(tool="oiiotool"): if CachedToolPaths.is_tool_cached(tool): return CachedToolPaths.get_executable_path(tool) - if AYON_SERVER_ENABLED: - args = _get_ayon_oiio_tool_args(tool) - if args: - if len(args) > 1: - raise ValueError( - "AYON oiio arguments consist of multiple arguments." - ) - tool_executable_path = args[0] - CachedToolPaths.cache_executable_path(tool, tool_executable_path) - return tool_executable_path + args = _get_ayon_oiio_tool_args(tool) + if args: + if len(args) > 1: + raise ValueError( + "AYON oiio arguments consist of multiple arguments." 
+ ) + tool_executable_path = args[0] + CachedToolPaths.cache_executable_path(tool, tool_executable_path) + return tool_executable_path - custom_paths_str = os.environ.get("OPENPYPE_OIIO_PATHS") or "" + custom_paths_str = os.environ.get("AYON_OIIO_PATHS") or "" tool_executable_path = find_tool_in_custom_paths( custom_paths_str.split(os.pathsep), tool, _oiio_executable_validation ) - if not tool_executable_path: - oiio_dir = get_vendor_bin_path("oiio") - if platform.system().lower() == "linux": - oiio_dir = os.path.join(oiio_dir, "bin") - default_path = find_executable(os.path.join(oiio_dir, tool)) - if default_path and _oiio_executable_validation(default_path): - tool_executable_path = default_path - # Look to PATH for the tool if not tool_executable_path: from_path = find_executable(tool) @@ -371,10 +336,9 @@ def get_oiio_tool_args(tool_name, *extra_args): extra_args = list(extra_args) - if AYON_SERVER_ENABLED: - args = _get_ayon_oiio_tool_args(tool_name) - if args: - return args + extra_args + args = _get_ayon_oiio_tool_args(tool_name) + if args: + return args + extra_args path = get_oiio_tools_path(tool_name) if path: @@ -449,32 +413,23 @@ def get_ffmpeg_tool_path(tool="ffmpeg"): if CachedToolPaths.is_tool_cached(tool): return CachedToolPaths.get_executable_path(tool) - if AYON_SERVER_ENABLED: - args = _get_ayon_ffmpeg_tool_args(tool) - if args is not None: - if len(args) > 1: - raise ValueError( - "AYON ffmpeg arguments consist of multiple arguments." - ) - tool_executable_path = args[0] - CachedToolPaths.cache_executable_path(tool, tool_executable_path) - return tool_executable_path + args = _get_ayon_ffmpeg_tool_args(tool) + if args is not None: + if len(args) > 1: + raise ValueError( + "AYON ffmpeg arguments consist of multiple arguments." + ) + tool_executable_path = args[0] + CachedToolPaths.cache_executable_path(tool, tool_executable_path) + return tool_executable_path - custom_paths_str = os.environ.get("OPENPYPE_FFMPEG_PATHS") or "" + custom_paths_str = os.environ.get("AYON_FFMPEG_PATHS") or "" tool_executable_path = find_tool_in_custom_paths( custom_paths_str.split(os.pathsep), tool, _ffmpeg_executable_validation ) - if not tool_executable_path: - ffmpeg_dir = get_vendor_bin_path("ffmpeg") - if platform.system().lower() == "windows": - ffmpeg_dir = os.path.join(ffmpeg_dir, "bin") - tool_path = find_executable(os.path.join(ffmpeg_dir, tool)) - if tool_path and _ffmpeg_executable_validation(tool_path): - tool_executable_path = tool_path - # Look to PATH for the tool if not tool_executable_path: from_path = find_executable(tool) @@ -498,10 +453,9 @@ def get_ffmpeg_tool_args(tool_name, *extra_args): extra_args = list(extra_args) - if AYON_SERVER_ENABLED: - args = _get_ayon_ffmpeg_tool_args(tool_name) - if args: - return args + extra_args + args = _get_ayon_ffmpeg_tool_args(tool_name) + if args: + return args + extra_args executable_path = get_ffmpeg_tool_path(tool_name) if executable_path: diff --git a/client/ayon_core/modules/__init__.py b/client/ayon_core/modules/__init__.py new file mode 100644 index 0000000000..0dfd7d663c --- /dev/null +++ b/client/ayon_core/modules/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +from . 
import click_wrap +from .interfaces import ( + IPluginPaths, + ITrayAddon, + ITrayModule, + ITrayAction, + ITrayService, + IHostAddon, +) + +from .base import ( + AYONAddon, + OpenPypeModule, + OpenPypeAddOn, + + load_modules, + + ModulesManager, + TrayModulesManager, +) + + +__all__ = ( + "click_wrap", + + "IPluginPaths", + "ITrayAddon", + "ITrayModule", + "ITrayAction", + "ITrayService", + "IHostAddon", + + "AYONAddon", + "OpenPypeModule", + "OpenPypeAddOn", + + "load_modules", + + "ModulesManager", + "TrayModulesManager", +) diff --git a/client/ayon_core/modules/base.py b/client/ayon_core/modules/base.py new file mode 100644 index 0000000000..8a78edf961 --- /dev/null +++ b/client/ayon_core/modules/base.py @@ -0,0 +1,14 @@ +from ayon_core.addon import ( + AYONAddon, + AddonsManager, + TrayAddonsManager, + load_addons, +) +from ayon_core.addon.base import ( + OpenPypeModule, + OpenPypeAddOn, +) + +ModulesManager = AddonsManager +TrayModulesManager = TrayAddonsManager +load_modules = load_addons diff --git a/client/ayon_core/modules/click_wrap.py b/client/ayon_core/modules/click_wrap.py new file mode 100644 index 0000000000..8f68de187a --- /dev/null +++ b/client/ayon_core/modules/click_wrap.py @@ -0,0 +1 @@ +from ayon_core.addon.click_wrap import * diff --git a/openpype/modules/clockify/__init__.py b/client/ayon_core/modules/clockify/__init__.py similarity index 100% rename from openpype/modules/clockify/__init__.py rename to client/ayon_core/modules/clockify/__init__.py diff --git a/openpype/modules/clockify/clockify_api.py b/client/ayon_core/modules/clockify/clockify_api.py similarity index 98% rename from openpype/modules/clockify/clockify_api.py rename to client/ayon_core/modules/clockify/clockify_api.py index 47af002f7a..f8c9c537ee 100644 --- a/openpype/modules/clockify/clockify_api.py +++ b/client/ayon_core/modules/clockify/clockify_api.py @@ -9,8 +9,8 @@ ADMIN_PERMISSION_NAMES, ) -from openpype.lib.local_settings import OpenPypeSecureRegistry -from openpype.lib import Logger +from ayon_core.lib.local_settings import AYONSecureRegistry +from ayon_core.lib import Logger class ClockifyAPI: @@ -27,7 +27,7 @@ def __init__(self, api_key=None, master_parent=None): @property def secure_registry(self): if self._secure_registry is None: - self._secure_registry = OpenPypeSecureRegistry("clockify") + self._secure_registry = AYONSecureRegistry("clockify") return self._secure_registry @property diff --git a/openpype/modules/clockify/clockify_module.py b/client/ayon_core/modules/clockify/clockify_module.py similarity index 98% rename from openpype/modules/clockify/clockify_module.py rename to client/ayon_core/modules/clockify/clockify_module.py index b6efec7907..adb7eb66af 100644 --- a/openpype/modules/clockify/clockify_module.py +++ b/client/ayon_core/modules/clockify/clockify_module.py @@ -2,8 +2,8 @@ import threading import time -from openpype.modules import OpenPypeModule, ITrayModule, IPluginPaths -from openpype.client import get_asset_by_name +from ayon_core.modules import OpenPypeModule, ITrayModule, IPluginPaths +from ayon_core.client import get_asset_by_name from .constants import CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH diff --git a/openpype/modules/clockify/constants.py b/client/ayon_core/modules/clockify/constants.py similarity index 100% rename from openpype/modules/clockify/constants.py rename to client/ayon_core/modules/clockify/constants.py diff --git a/openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py 
b/client/ayon_core/modules/clockify/ftrack/server/action_clockify_sync_server.py similarity index 100% rename from openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py rename to client/ayon_core/modules/clockify/ftrack/server/action_clockify_sync_server.py diff --git a/openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py b/client/ayon_core/modules/clockify/ftrack/user/action_clockify_sync_local.py similarity index 100% rename from openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py rename to client/ayon_core/modules/clockify/ftrack/user/action_clockify_sync_local.py diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/client/ayon_core/modules/clockify/launcher_actions/ClockifyStart.py similarity index 95% rename from openpype/modules/clockify/launcher_actions/ClockifyStart.py rename to client/ayon_core/modules/clockify/launcher_actions/ClockifyStart.py index 4a653c1b8d..19aa2ef195 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/client/ayon_core/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,5 +1,5 @@ -from openpype.client import get_asset_by_name -from openpype.pipeline import LauncherAction +from ayon_core.client import get_asset_by_name +from ayon_core.pipeline import LauncherAction from openpype_modules.clockify.clockify_api import ClockifyAPI diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/client/ayon_core/modules/clockify/launcher_actions/ClockifySync.py similarity index 96% rename from openpype/modules/clockify/launcher_actions/ClockifySync.py rename to client/ayon_core/modules/clockify/launcher_actions/ClockifySync.py index cbd2519a04..30f5ae698f 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/client/ayon_core/modules/clockify/launcher_actions/ClockifySync.py @@ -1,6 +1,6 @@ -from openpype.client import get_projects, get_project +from ayon_core.client import get_projects, get_project from openpype_modules.clockify.clockify_api import ClockifyAPI -from openpype.pipeline import LauncherAction +from ayon_core.pipeline import LauncherAction class ClockifyPermissionsCheckFailed(Exception): diff --git a/client/ayon_core/modules/clockify/widgets.py b/client/ayon_core/modules/clockify/widgets.py new file mode 100644 index 0000000000..e64b64601d --- /dev/null +++ b/client/ayon_core/modules/clockify/widgets.py @@ -0,0 +1,207 @@ +from qtpy import QtCore, QtGui, QtWidgets +from ayon_core import resources, style + + +class MessageWidget(QtWidgets.QWidget): + + SIZE_W = 300 + SIZE_H = 130 + + closed = QtCore.Signal() + + def __init__(self, messages, title): + super(MessageWidget, self).__init__() + + # Icon + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.WindowCloseButtonHint | + QtCore.Qt.WindowMinimizeButtonHint + ) + + # Size setting + self.resize(self.SIZE_W, self.SIZE_H) + self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) + self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) + + # Style + self.setStyleSheet(style.load_stylesheet()) + + self.setLayout(self._ui_layout(messages)) + self.setWindowTitle(title) + + def _ui_layout(self, messages): + if not messages: + messages = ["*Missing messages (This is a bug)*", ] + + elif not isinstance(messages, (tuple, list)): + messages = [messages, ] + + main_layout = QtWidgets.QVBoxLayout(self) + + labels = [] + for message in messages: + label = QtWidgets.QLabel(message) + 
label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
+            label.setTextFormat(QtCore.Qt.RichText)
+            label.setWordWrap(True)
+
+            labels.append(label)
+            main_layout.addWidget(label)
+
+        btn_close = QtWidgets.QPushButton("Close")
+        btn_close.setToolTip('Close this window')
+        btn_close.clicked.connect(self.on_close_clicked)
+
+        btn_group = QtWidgets.QHBoxLayout()
+        btn_group.addStretch(1)
+        btn_group.addWidget(btn_close)
+
+        main_layout.addLayout(btn_group)
+
+        self.labels = labels
+        self.btn_group = btn_group
+        self.btn_close = btn_close
+        self.main_layout = main_layout
+
+        return main_layout
+
+    def on_close_clicked(self):
+        self.close()
+
+    def close(self, *args, **kwargs):
+        self.closed.emit()
+        super(MessageWidget, self).close(*args, **kwargs)
+
+
+class ClockifySettings(QtWidgets.QWidget):
+    SIZE_W = 500
+    SIZE_H = 130
+
+    loginSignal = QtCore.Signal(object, object, object)
+
+    def __init__(self, clockify_api, optional=True):
+        super(ClockifySettings, self).__init__()
+
+        self.clockify_api = clockify_api
+        self.optional = optional
+        self.validated = False
+
+        # Icon
+        icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
+        self.setWindowIcon(icon)
+
+        self.setWindowTitle("Clockify settings")
+        self.setWindowFlags(
+            QtCore.Qt.WindowCloseButtonHint |
+            QtCore.Qt.WindowMinimizeButtonHint
+        )
+
+        # Size setting
+        self.resize(self.SIZE_W, self.SIZE_H)
+        self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
+        self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100))
+        self.setStyleSheet(style.load_stylesheet())
+
+        self._ui_init()
+
+    def _ui_init(self):
+        label_api_key = QtWidgets.QLabel("Clockify API key:")
+
+        input_api_key = QtWidgets.QLineEdit()
+        input_api_key.setFrame(True)
+        input_api_key.setPlaceholderText("e.g. XX1XxXX2x3x4xXxx")
+
+        error_label = QtWidgets.QLabel("")
+        error_label.setTextFormat(QtCore.Qt.RichText)
+        error_label.setWordWrap(True)
+        error_label.hide()
+
+        form_layout = QtWidgets.QFormLayout()
+        form_layout.setContentsMargins(10, 15, 10, 5)
+        form_layout.addRow(label_api_key, input_api_key)
+        form_layout.addRow(error_label)
+
+        btn_ok = QtWidgets.QPushButton("Ok")
+        btn_ok.setToolTip('Sets Clockify API key so you can start/stop the timer')
+
+        btn_cancel = QtWidgets.QPushButton("Cancel")
+        cancel_tooltip = 'Application won\'t start'
+        if self.optional:
+            cancel_tooltip = 'Close this window'
+        btn_cancel.setToolTip(cancel_tooltip)
+
+        btn_group = QtWidgets.QHBoxLayout()
+        btn_group.addStretch(1)
+        btn_group.addWidget(btn_ok)
+        btn_group.addWidget(btn_cancel)
+
+        main_layout = QtWidgets.QVBoxLayout(self)
+        main_layout.addLayout(form_layout)
+        main_layout.addLayout(btn_group)
+
+        btn_ok.clicked.connect(self.click_ok)
+        btn_cancel.clicked.connect(self._close_widget)
+
+        self.label_api_key = label_api_key
+        self.input_api_key = input_api_key
+        self.error_label = error_label
+
+        self.btn_ok = btn_ok
+        self.btn_cancel = btn_cancel
+
+    def setError(self, msg):
+        self.error_label.setText(msg)
+        self.error_label.show()
+
+    def invalid_input(self, entity):
+        entity.setStyleSheet("border: 1px solid red;")
+
+    def click_ok(self):
+        api_key = self.input_api_key.text().strip()
+        if self.optional is True and api_key == '':
+            self.clockify_api.save_api_key(None)
+            self.clockify_api.set_api(api_key)
+            self.validated = False
+            self._close_widget()
+            return
+
+        validation = self.clockify_api.validate_api_key(api_key)
+
+        if validation:
+            self.clockify_api.save_api_key(api_key)
+            self.clockify_api.set_api(api_key)
+            self.validated = True
+            self._close_widget()
+        else:
+
self.invalid_input(self.input_api_key) + self.validated = False + self.setError( + "Entered invalid API key" + ) + + def showEvent(self, event): + super(ClockifySettings, self).showEvent(event) + + # Make btns same width + max_width = max( + self.btn_ok.sizeHint().width(), + self.btn_cancel.sizeHint().width() + ) + self.btn_ok.setMinimumWidth(max_width) + self.btn_cancel.setMinimumWidth(max_width) + + def closeEvent(self, event): + if self.optional is True: + event.ignore() + self._close_widget() + else: + self.validated = False + + def _close_widget(self): + if self.optional is True: + self.hide() + else: + self.close() diff --git a/openpype/modules/deadline/__init__.py b/client/ayon_core/modules/deadline/__init__.py similarity index 100% rename from openpype/modules/deadline/__init__.py rename to client/ayon_core/modules/deadline/__init__.py diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/client/ayon_core/modules/deadline/abstract_submit_deadline.py similarity index 97% rename from openpype/modules/deadline/abstract_submit_deadline.py rename to client/ayon_core/modules/deadline/abstract_submit_deadline.py index b0d5d7f222..b2da4d1398 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/client/ayon_core/modules/deadline/abstract_submit_deadline.py @@ -17,15 +17,14 @@ import requests import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( AbstractMetaInstancePlugin, KnownPublishError, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ) -from openpype.pipeline.publish.lib import ( +from ayon_core.pipeline.publish.lib import ( replace_with_published_scene_path ) -from openpype import AYON_SERVER_ENABLED JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) @@ -400,17 +399,14 @@ def update(self, data): def add_render_job_env_var(self): """Check if in OP or AYON mode and use appropriate env var.""" - if AYON_SERVER_ENABLED: - self.EnvironmentKeyValue["AYON_RENDER_JOB"] = "1" - self.EnvironmentKeyValue["AYON_BUNDLE_NAME"] = ( - os.environ["AYON_BUNDLE_NAME"]) - else: - self.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" + self.EnvironmentKeyValue["AYON_RENDER_JOB"] = "1" + self.EnvironmentKeyValue["AYON_BUNDLE_NAME"] = ( + os.environ["AYON_BUNDLE_NAME"]) @six.add_metaclass(AbstractMetaInstancePlugin) class AbstractSubmitDeadline(pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin): + AYONPyblishPluginMixin): """Class abstracting access to Deadline.""" label = "Submit to Deadline" diff --git a/openpype/modules/deadline/deadline_module.py b/client/ayon_core/modules/deadline/deadline_module.py similarity index 95% rename from openpype/modules/deadline/deadline_module.py rename to client/ayon_core/modules/deadline/deadline_module.py index 9855f8c1b1..c98d04759e 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/client/ayon_core/modules/deadline/deadline_module.py @@ -3,8 +3,8 @@ import six import sys -from openpype.lib import requests_get, Logger -from openpype.modules import OpenPypeModule, IPluginPaths +from ayon_core.lib import requests_get, Logger +from ayon_core.modules import OpenPypeModule, IPluginPaths class DeadlineWebserviceError(Exception): diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py similarity index 98% rename from openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py rename to 
client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
index 1d3dad769f..0cfe7c9b39 100644
--- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
@@ -6,7 +6,7 @@
 """
 import pyblish.api

-from openpype.pipeline.publish import KnownPublishError
+from ayon_core.pipeline.publish import KnownPublishError


 class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
new file mode 100644
index 0000000000..8123409052
--- /dev/null
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+"""Collect default Deadline server."""
+import pyblish.api
+
+
+class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
+    """Collect default Deadline Webservice URL.
+
+    Deadline webservice addresses must be configured in System Settings
+    first for the project settings enum to work.
+
+    The default webservice can be overridden by
+    `project_settings/deadline/deadline_servers`. Currently only a single
+    url is expected.
+
+    This url can be overridden by some hosts directly on instances with
+    `CollectDeadlineServerFromInstance`.
+    """
+
+    # Run before collect_deadline_server_instance.
+    order = pyblish.api.CollectorOrder + 0.0025
+    label = "Default Deadline Webservice"
+
+    pass_mongo_url = False
+
+    def process(self, context):
+        try:
+            deadline_module = context.data["ayonAddonsManager"]["deadline"]
+        except (AttributeError, KeyError):
+            self.log.error("Cannot get AYON Deadline addon.")
+            raise AssertionError("AYON Deadline addon not found.")
+
+        deadline_settings = context.data["project_settings"]["deadline"]
+        deadline_server_name = deadline_settings["deadline_server"]
+
+        deadline_webservice = None
+        if deadline_server_name:
+            deadline_webservice = deadline_module.deadline_urls.get(
+                deadline_server_name)
+
+        default_deadline_webservice = deadline_module.deadline_urls["default"]
+        deadline_webservice = (
+            deadline_webservice
+            or default_deadline_webservice
+        )
+
+        context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/")  # noqa
diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/client/ayon_core/modules/deadline/plugins/publish/collect_pools.py
similarity index 95%
rename from openpype/modules/deadline/plugins/publish/collect_pools.py
rename to client/ayon_core/modules/deadline/plugins/publish/collect_pools.py
index 9ee079b892..6c35012173 100644
--- a/openpype/modules/deadline/plugins/publish/collect_pools.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_pools.py
@@ -1,11 +1,11 @@
 # -*- coding: utf-8 -*-
 import pyblish.api

-from openpype.lib import TextDef
-from openpype.pipeline.publish import OpenPypePyblishPluginMixin
+from ayon_core.lib import TextDef
+from ayon_core.pipeline.publish import AYONPyblishPluginMixin


 class CollectDeadlinePools(pyblish.api.InstancePlugin,
-                           OpenPypePyblishPluginMixin):
+                           AYONPyblishPluginMixin):
     """Collect pools from instance or Publisher attributes, from Setting
     otherwise.
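For quick review, the webservice fallback implemented by the new collect_default_deadline_server.py plugin above reduces to the standalone sketch below; the `deadline_urls` mapping and server names are hypothetical stand-ins for the addon's real configuration, not part of the patch:

```python
# Hypothetical stand-in for the Deadline addon's 'deadline_urls' mapping;
# the plugin reads the real mapping from the addon's settings.
deadline_urls = {
    "default": "http://deadline.local:8082/",
    "farm_b": "http://farm-b.local:8082",
}


def resolve_default_webservice(deadline_urls, server_name):
    """Mirror the plugin's fallback: named server first, then 'default'."""
    webservice = None
    if server_name:
        # An unknown name silently falls through to the default entry.
        webservice = deadline_urls.get(server_name)
    webservice = webservice or deadline_urls["default"]
    # Normalize the value the same way the plugin does before storing it
    # in context.data["defaultDeadline"].
    return webservice.strip().rstrip("/")


print(resolve_default_webservice(deadline_urls, "farm_b"))   # http://farm-b.local:8082
print(resolve_default_webservice(deadline_urls, "missing"))  # http://deadline.local:8082
```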
diff --git a/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py b/client/ayon_core/modules/deadline/plugins/publish/collect_publishable_instances.py similarity index 96% rename from openpype/modules/deadline/plugins/publish/collect_publishable_instances.py rename to client/ayon_core/modules/deadline/plugins/publish/collect_publishable_instances.py index b00381b6cf..347da86360 100644 --- a/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py +++ b/client/ayon_core/modules/deadline/plugins/publish/collect_publishable_instances.py @@ -5,7 +5,7 @@ import os import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class CollectDeadlinePublishableInstances(pyblish.api.InstancePlugin): diff --git a/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_pools.xml similarity index 92% rename from openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml rename to client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_pools.xml index aa21df3734..879adcee97 100644 --- a/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml +++ b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_pools.xml @@ -25,7 +25,7 @@ Available deadline pools: This error is shown when a configured pool is not available on Deadline. It can happen when publishing old workfiles which were created with previous deadline pools, or someone changed the available pools in Deadline, -but didn't modify Openpype Settings to match the changes. +but didn't modify AYON Settings to match the changes. \ No newline at end of file diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_aftereffects_deadline.py similarity index 90% rename from openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index 009375e87e..f7bc5529fb 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -4,15 +4,14 @@ import pyblish.api from datetime import datetime -from openpype.lib import ( +from ayon_core.lib import ( env_value_to_bool, collect_frames, ) -from openpype.pipeline import legacy_io +from ayon_core.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo -from openpype.tests.lib import is_in_tests -from openpype.lib import is_running_from_build +from ayon_core.tests.lib import is_in_tests @attr.s @@ -86,19 +85,10 @@ def get_job_info(self): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS", + "AYON_LOG_NO_COLORS", "IS_TEST" ] - # Add OpenPype version if we are running from build. 
- if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if self._instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) for key in keys: diff --git a/openpype/modules/deadline/plugins/publish/submit_blender_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py similarity index 90% rename from openpype/modules/deadline/plugins/publish/submit_blender_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py index 8f9e9a7425..c8b72ca52b 100644 --- a/openpype/modules/deadline/plugins/publish/submit_blender_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py @@ -6,16 +6,15 @@ import attr from datetime import datetime -from openpype.lib import ( - is_running_from_build, +from ayon_core.lib import ( BoolDef, NumberDef, TextDef, ) -from openpype.pipeline import legacy_io -from openpype.pipeline.publish import OpenPypePyblishPluginMixin -from openpype.pipeline.farm.tools import iter_expected_files -from openpype.tests.lib import is_in_tests +from ayon_core.pipeline import legacy_io +from ayon_core.pipeline.publish import AYONPyblishPluginMixin +from ayon_core.pipeline.farm.tools import iter_expected_files +from ayon_core.tests.lib import is_in_tests from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -29,7 +28,7 @@ class BlenderPluginInfo(): class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, - OpenPypePyblishPluginMixin): + AYONPyblishPluginMixin): label = "Submit Render to Deadline" hosts = ["blender"] families = ["render"] @@ -108,18 +107,9 @@ def get_job_info(self): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_DEV" "IS_TEST" ] - # Add OpenPype version if we are running from build. - if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if self._instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) @@ -131,7 +121,7 @@ def get_job_info(self): # to recognize job from PYPE for turning Event On/Off job_info.add_render_job_env_var() - job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" + job_info.EnvironmentKeyValue["AYON_LOG_NO_COLORS"] = "1" # Adding file dependencies. 
if self.asset_dependencies: diff --git a/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py similarity index 100% rename from openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py diff --git a/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py similarity index 90% rename from openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py index 9a718aa089..77505eb623 100644 --- a/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py @@ -6,21 +6,19 @@ import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import legacy_io -from openpype.pipeline.publish import ( - OpenPypePyblishPluginMixin +from ayon_core.pipeline import legacy_io +from ayon_core.pipeline.publish import ( + AYONPyblishPluginMixin ) -from openpype.lib import ( +from ayon_core.lib import ( BoolDef, NumberDef, - is_running_from_build ) class FusionSubmitDeadline( pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ): """Submit current Comp to Deadline @@ -95,7 +93,7 @@ def process(self, instance): else: context.data[key] = True - from openpype.hosts.fusion.api.lib import get_frame_path + from ayon_core.hosts.fusion.api.lib import get_frame_path # get default deadline webservice url from deadline module deadline_url = instance.context.data["defaultDeadline"] @@ -227,26 +225,16 @@ def process(self, instance): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS", - "IS_TEST" + "AYON_LOG_NO_COLORS", + "IS_TEST", + "AYON_BUNDLE_NAME", ] - # Add OpenPype version if we are running from build. 
-        if is_running_from_build():
-            keys.append("OPENPYPE_VERSION")
-
         environment = dict({key: os.environ[key] for key in keys
                             if key in os.environ}, **legacy_io.Session)

         # to recognize render jobs
-        if AYON_SERVER_ENABLED:
-            environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"]
-            render_job_label = "AYON_RENDER_JOB"
-        else:
-            render_job_label = "OPENPYPE_RENDER_JOB"
-
-        environment[render_job_label] = "1"
+        environment["AYON_RENDER_JOB"] = "1"

         payload["JobInfo"].update({
             "EnvironmentKeyValue%d" % index: "{key}={value}".format(
diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_harmony_deadline.py
similarity index 96%
rename from openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
rename to client/ayon_core/modules/deadline/plugins/publish/submit_harmony_deadline.py
index 17e672334c..f2f1c90559 100644
--- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_harmony_deadline.py
@@ -10,11 +10,10 @@
 import attr
 import pyblish.api

-from openpype.pipeline import legacy_io
+from ayon_core.pipeline import legacy_io
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
-from openpype.tests.lib import is_in_tests
-from openpype.lib import is_running_from_build
+from ayon_core.tests.lib import is_in_tests


 class _ZipFile(ZipFile):
@@ -253,7 +252,7 @@ class HarmonySubmitDeadline(
     def get_job_info(self):
         job_info = DeadlineJobInfo("Harmony")
         job_info.Name = self._instance.data["name"]
-        job_info.Plugin = "HarmonyOpenPype"
+        job_info.Plugin = "HarmonyAYON"
         job_info.Frames = "{}-{}".format(
             self._instance.data["frameStartHandle"],
             self._instance.data["frameEndHandle"]
@@ -279,19 +278,10 @@ def get_job_info(self):
             "AVALON_ASSET",
             "AVALON_TASK",
             "AVALON_APP_NAME",
-            "OPENPYPE_DEV",
-            "OPENPYPE_LOG_NO_COLORS"
+            "AYON_LOG_NO_COLORS",
             "IS_TEST"
         ]

-        # Add OpenPype version if we are running from build.
- if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if self._instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) for key in keys: diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_cache_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_cache_deadline.py similarity index 85% rename from openpype/modules/deadline/plugins/publish/submit_houdini_cache_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_houdini_cache_deadline.py index ada69575a8..eed930e372 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_cache_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_cache_deadline.py @@ -4,16 +4,15 @@ import attr import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( TextDef, NumberDef, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( legacy_io, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ) -from openpype.tests.lib import is_in_tests -from openpype.lib import is_running_from_build +from ayon_core.tests.lib import is_in_tests from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -32,16 +31,12 @@ class HoudiniPluginInfo(object): class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, # noqa - OpenPypePyblishPluginMixin): + AYONPyblishPluginMixin): """Submit Houdini scene to perform a local publish in Deadline. Publishing in Deadline can be helpful for scenes that publish very slow. This way it can process in the background on another machine without the Artist having to wait for the publish to finish on their local machine. - - Submission is done through the Deadline Web Service as - supplied via the environment variable AVALON_DEADLINE. - """ label = "Submit Scene to Deadline" @@ -65,12 +60,6 @@ def get_job_info(self): result["success"] for result in context.data["results"] ), "Errors found, aborting integration.." - # Deadline connection - AVALON_DEADLINE = legacy_io.Session.get( - "AVALON_DEADLINE", "http://localhost:8082" - ) - assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" - project_name = instance.context.data["projectName"] filepath = context.data["currentFile"] scenename = os.path.basename(filepath) @@ -114,17 +103,9 @@ def get_job_info(self): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS", + "AYON_LOG_NO_COLORS", ] - # Add OpenPype version if we are running from build. 
- if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - # Add mongo url if it's enabled - if self._instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py similarity index 94% rename from openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index bf7fb45a8b..9988248957 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -5,12 +5,11 @@ import pyblish.api -from openpype.pipeline import legacy_io, OpenPypePyblishPluginMixin -from openpype.tests.lib import is_in_tests +from ayon_core.pipeline import legacy_io, AYONPyblishPluginMixin +from ayon_core.tests.lib import is_in_tests from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo -from openpype.lib import ( - is_running_from_build, +from ayon_core.lib import ( BoolDef, NumberDef ) @@ -50,7 +49,7 @@ class RedshiftRenderPluginInfo(): class HoudiniSubmitDeadline( abstract_submit_deadline.AbstractSubmitDeadline, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ): """Submit Render ROPs to Deadline. @@ -209,18 +208,9 @@ def get_job_info(self, dependency_job_ids=None): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS", + "AYON_LOG_NO_COLORS", ] - # Add OpenPype version if we are running from build. 
- if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if self._instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py similarity index 94% rename from openpype/modules/deadline/plugins/publish/submit_max_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py index f06bd4dbe6..0a7c96008e 100644 --- a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py @@ -3,27 +3,26 @@ import copy import attr -from openpype.lib import ( +from ayon_core.lib import ( TextDef, BoolDef, NumberDef, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( legacy_io, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ) -from openpype.pipeline.publish.lib import ( +from ayon_core.pipeline.publish.lib import ( replace_with_published_scene_path ) -from openpype.pipeline.publish import KnownPublishError -from openpype.hosts.max.api.lib import ( +from ayon_core.pipeline.publish import KnownPublishError +from ayon_core.hosts.max.api.lib import ( get_current_renderer, get_multipass_setting ) -from openpype.hosts.max.api.lib_rendersettings import RenderSettings +from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo -from openpype.lib import is_running_from_build @attr.s @@ -35,7 +34,7 @@ class MaxPluginInfo(object): class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, - OpenPypePyblishPluginMixin): + AYONPyblishPluginMixin): label = "Submit Render to Deadline" hosts = ["max"] @@ -112,18 +111,9 @@ def get_job_info(self): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_DEV", "IS_TEST" ] - # Add OpenPype version if we are running from build. - if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if self._instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) @@ -135,7 +125,7 @@ def get_job_info(self): # to recognize render jobs job_info.add_render_job_env_var() - job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" + job_info.EnvironmentKeyValue["AYON_LOG_NO_COLORS"] = "1" # Add list of expected files to job # --------------------------------- @@ -200,11 +190,11 @@ def process_submission(self): def _use_published_name(self, data, project_settings): # Not all hosts can import these modules. 
- from openpype.hosts.max.api.lib import ( + from ayon_core.hosts.max.api.lib import ( get_current_renderer, get_multipass_setting ) - from openpype.hosts.max.api.lib_rendersettings import RenderSettings + from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings instance = self._instance job_info = copy.deepcopy(self.job_info) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py similarity index 97% rename from openpype/modules/deadline/plugins/publish/submit_maya_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py index 5591db151a..84e6e93e6a 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -28,24 +28,23 @@ import attr -from openpype.pipeline import ( +from ayon_core.pipeline import ( legacy_io, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ) -from openpype.lib import ( +from ayon_core.lib import ( BoolDef, NumberDef, TextDef, EnumDef ) -from openpype.hosts.maya.api.lib_rendersettings import RenderSettings -from openpype.hosts.maya.api.lib import get_attr_in_layer +from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings +from ayon_core.hosts.maya.api.lib import get_attr_in_layer from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo -from openpype.tests.lib import is_in_tests -from openpype.lib import is_running_from_build -from openpype.pipeline.farm.tools import iter_expected_files +from ayon_core.tests.lib import is_in_tests +from ayon_core.pipeline.farm.tools import iter_expected_files def _validate_deadline_bool_value(instance, attribute, value): @@ -100,7 +99,7 @@ class ArnoldPluginInfo(object): class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, - OpenPypePyblishPluginMixin): + AYONPyblishPluginMixin): label = "Submit Render to Deadline" hosts = ["maya"] @@ -205,18 +204,9 @@ def get_job_info(self): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_DEV" "IS_TEST" ] - # Add OpenPype version if we are running from build. - if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if self._instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) @@ -228,7 +218,7 @@ def get_job_info(self): # to recognize render jobs job_info.add_render_job_env_var() - job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" + job_info.EnvironmentKeyValue["AYON_LOG_NO_COLORS"] = "1" # Adding file dependencies. 
if not bool(os.environ.get("IS_TEST")) and self.asset_dependencies: diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py similarity index 87% rename from openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index 41a2a64ab5..02338c5c32 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -2,10 +2,8 @@ import attr from datetime import datetime -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import legacy_io, PublishXmlValidationError -from openpype.tests.lib import is_in_tests -from openpype.lib import is_running_from_build +from ayon_core.pipeline import legacy_io, PublishXmlValidationError +from ayon_core.tests.lib import is_in_tests from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -100,10 +98,6 @@ def get_job_info(self): "FTRACK_SERVER" ] - # Add OpenPype version if we are running from build. - if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) @@ -112,15 +106,11 @@ def get_job_info(self): environment["AVALON_ASSET"] = instance.context.data["asset"] environment["AVALON_TASK"] = instance.context.data["task"] environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") - environment["OPENPYPE_LOG_NO_COLORS"] = "1" - environment["OPENPYPE_USERNAME"] = instance.context.data["user"] environment["OPENPYPE_PUBLISH_SUBSET"] = instance.data["subset"] - environment["OPENPYPE_REMOTE_PUBLISH"] = "1" + environment["AYON_LOG_NO_COLORS"] = "1" + environment["AYON_USERNAME"] = instance.context.data["user"] + environment["AYON_REMOTE_PUBLISH"] = "1" - if AYON_SERVER_ENABLED: - environment["AYON_REMOTE_PUBLISH"] = "1" - else: - environment["OPENPYPE_REMOTE_PUBLISH"] = "1" for key, value in environment.items(): job_info.EnvironmentKeyValue[key] = value diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py similarity index 95% rename from openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py index 746b009255..9fff8edee6 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -7,21 +7,19 @@ import requests import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import legacy_io -from openpype.pipeline.publish import ( - OpenPypePyblishPluginMixin +from ayon_core.pipeline import legacy_io +from ayon_core.pipeline.publish import ( + AYONPyblishPluginMixin ) -from openpype.tests.lib import is_in_tests -from openpype.lib import ( - is_running_from_build, +from ayon_core.tests.lib import is_in_tests +from ayon_core.lib import ( BoolDef, NumberDef ) class NukeSubmitDeadline(pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin): + AYONPyblishPluginMixin): """Submit write to Deadline Renders are submitted to a Deadline Web Service as @@ -388,16 +386,9 @@ def 
payload_submit( "TOOL_ENV", "FOUNDRY_LICENSE", "OPENPYPE_SG_USER", + "AYON_BUNDLE_NAME", ] - # Add OpenPype version if we are running from build. - if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - # add allowed keys from preset if any if self.env_allowed_keys: keys += self.env_allowed_keys @@ -406,13 +397,7 @@ def payload_submit( if key in os.environ}, **legacy_io.Session) # to recognize render jobs - if AYON_SERVER_ENABLED: - environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"] - render_job_label = "AYON_RENDER_JOB" - else: - render_job_label = "OPENPYPE_RENDER_JOB" - - environment[render_job_label] = "1" + environment["AYON_RENDER_JOB"] = "1" # finally search replace in values of any key if self.env_search_replace_values: diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_cache_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py similarity index 89% rename from openpype/modules/deadline/plugins/publish/submit_publish_cache_job.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py index 1bb45b77cc..a90397baa2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_cache_job.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py @@ -8,16 +8,15 @@ import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.client import ( +from ayon_core.client import ( get_last_version_by_subset_name, ) -from openpype.pipeline import publish, legacy_io -from openpype.lib import EnumDef, is_running_from_build -from openpype.tests.lib import is_in_tests -from openpype.pipeline.version_start import get_versioning_start +from ayon_core.pipeline import publish, legacy_io +from ayon_core.lib import EnumDef, is_running_from_build +from ayon_core.tests.lib import is_in_tests +from ayon_core.pipeline.version_start import get_versioning_start -from openpype.pipeline.farm.pyblish_functions import ( +from ayon_core.pipeline.farm.pyblish_functions import ( create_skeleton_instance_cache, create_instances_for_cache, attach_instances_to_subset, @@ -27,7 +26,7 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, - publish.OpenPypePyblishPluginMixin, + publish.AYONPyblishPluginMixin, publish.ColormanagedPyblishPluginMixin): """Process Cache Job submitted on farm This is replicated version of submit publish job @@ -65,16 +64,12 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, families = ["publish.hou"] - environ_job_filter = [ - "OPENPYPE_METADATA_FILE" - ] - environ_keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", + "AYON_USERNAME", "OPENPYPE_SG_USER", "KITSU_LOGIN", "KITSU_PWD" @@ -134,43 +129,20 @@ def _submit_deadline_post_job(self, instance, job): "AVALON_PROJECT": instance.context.data["projectName"], "AVALON_ASSET": instance.context.data["asset"], "AVALON_TASK": instance.context.data["task"], - "OPENPYPE_USERNAME": instance.context.data["user"], - "OPENPYPE_LOG_NO_COLORS": "1", - "IS_TEST": str(int(is_in_tests())) + "AYON_USERNAME": instance.context.data["user"], + "AYON_LOG_NO_COLORS": "1", + "IS_TEST": str(int(is_in_tests())), + "AYON_PUBLISH_JOB": "1", + "AYON_RENDER_JOB": "0", + "AYON_REMOTE_PUBLISH": "0", + "AYON_BUNDLE_NAME": os.environ["AYON_BUNDLE_NAME"], } - if AYON_SERVER_ENABLED: - environment["AYON_PUBLISH_JOB"] = "1" 
- environment["AYON_RENDER_JOB"] = "0" - environment["AYON_REMOTE_PUBLISH"] = "0" - environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"] - deadline_plugin = "Ayon" - else: - environment["OPENPYPE_PUBLISH_JOB"] = "1" - environment["OPENPYPE_RENDER_JOB"] = "0" - environment["OPENPYPE_REMOTE_PUBLISH"] = "0" - deadline_plugin = "OpenPype" - # Add OpenPype version if we are running from build. - if is_running_from_build(): - self.environ_keys.append("OPENPYPE_VERSION") - # add environments from self.environ_keys for env_key in self.environ_keys: if os.getenv(env_key): environment[env_key] = os.environ[env_key] - # pass environment keys from self.environ_job_filter - job_environ = job["Props"].get("Env", {}) - for env_j_key in self.environ_job_filter: - if job_environ.get(env_j_key): - environment[env_j_key] = job_environ[env_j_key] - - # Add mongo url if it's enabled - if instance.context.data.get("deadlinePassMongoUrl"): - mongo_url = os.environ.get("OPENPYPE_MONGO") - if mongo_url: - environment["OPENPYPE_MONGO"] = mongo_url - priority = self.deadline_priority or instance.data.get("priority", 50) instance_settings = self.get_attr_values_from_data(instance.data) @@ -196,7 +168,7 @@ def _submit_deadline_post_job(self, instance, job): ) payload = { "JobInfo": { - "Plugin": deadline_plugin, + "Plugin": "Ayon", "BatchName": job["Props"]["Batch"], "Name": job_name, "UserName": job["Props"]["User"], @@ -424,7 +396,7 @@ def _get_publish_folder(self, anatomy, template_data, get publish_path Args: - anatomy (openpype.pipeline.anatomy.Anatomy): + anatomy (ayon_core.pipeline.anatomy.Anatomy): template_data (dict): pre-calculated collected data for process asset (string): asset name subset (string): subset name (actually group name of subset) diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py similarity index 92% rename from openpype/modules/deadline/plugins/publish/submit_publish_job.py rename to client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py index 82971daee5..bd343e103a 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py @@ -9,16 +9,15 @@ import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.client import ( +from ayon_core.client import ( get_last_version_by_subset_name, ) -from openpype.pipeline import publish, legacy_io -from openpype.lib import EnumDef, is_running_from_build -from openpype.tests.lib import is_in_tests -from openpype.pipeline.version_start import get_versioning_start +from ayon_core.pipeline import publish, legacy_io +from ayon_core.lib import EnumDef, is_running_from_build +from ayon_core.tests.lib import is_in_tests +from ayon_core.pipeline.version_start import get_versioning_start -from openpype.pipeline.farm.pyblish_functions import ( +from ayon_core.pipeline.farm.pyblish_functions import ( create_skeleton_instance, create_instances_for_aov, attach_instances_to_subset, @@ -55,7 +54,7 @@ def get_resource_files(resources, frame_range=None): class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, - publish.OpenPypePyblishPluginMixin, + publish.AYONPyblishPluginMixin, publish.ColormanagedPyblishPluginMixin): """Process Job submitted on farm. 
@@ -107,16 +106,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, "celaction": [r".*"], "max": [r".*"]} - environ_job_filter = [ - "OPENPYPE_METADATA_FILE" - ] - environ_keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", + "AYON_USERNAME", "OPENPYPE_SG_USER", "KITSU_LOGIN", "KITSU_PWD" @@ -190,43 +185,20 @@ def _submit_deadline_post_job(self, instance, job, instances): "AVALON_PROJECT": instance.context.data["projectName"], "AVALON_ASSET": instance.context.data["asset"], "AVALON_TASK": instance.context.data["task"], - "OPENPYPE_USERNAME": instance.context.data["user"], - "OPENPYPE_LOG_NO_COLORS": "1", - "IS_TEST": str(int(is_in_tests())) + "AYON_USERNAME": instance.context.data["user"], + "AYON_LOG_NO_COLORS": "1", + "IS_TEST": str(int(is_in_tests())), + "AYON_PUBLISH_JOB": "1", + "AYON_RENDER_JOB": "0", + "AYON_REMOTE_PUBLISH": "0", + "AYON_BUNDLE_NAME": os.environ["AYON_BUNDLE_NAME"], } - if AYON_SERVER_ENABLED: - environment["AYON_PUBLISH_JOB"] = "1" - environment["AYON_RENDER_JOB"] = "0" - environment["AYON_REMOTE_PUBLISH"] = "0" - environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"] - deadline_plugin = "Ayon" - else: - environment["OPENPYPE_PUBLISH_JOB"] = "1" - environment["OPENPYPE_RENDER_JOB"] = "0" - environment["OPENPYPE_REMOTE_PUBLISH"] = "0" - deadline_plugin = "OpenPype" - # Add OpenPype version if we are running from build. - if is_running_from_build(): - self.environ_keys.append("OPENPYPE_VERSION") - # add environments from self.environ_keys for env_key in self.environ_keys: if os.getenv(env_key): environment[env_key] = os.environ[env_key] - # pass environment keys from self.environ_job_filter - job_environ = job["Props"].get("Env", {}) - for env_j_key in self.environ_job_filter: - if job_environ.get(env_j_key): - environment[env_j_key] = job_environ[env_j_key] - - # Add mongo url if it's enabled - if instance.context.data.get("deadlinePassMongoUrl"): - mongo_url = os.environ.get("OPENPYPE_MONGO") - if mongo_url: - environment["OPENPYPE_MONGO"] = mongo_url - priority = self.deadline_priority or instance.data.get("priority", 50) instance_settings = self.get_attr_values_from_data(instance.data) @@ -252,7 +224,7 @@ def _submit_deadline_post_job(self, instance, job, instances): ) payload = { "JobInfo": { - "Plugin": deadline_plugin, + "Plugin": "Ayon", "BatchName": job["Props"]["Batch"], "Name": job_name, "UserName": job["Props"]["User"], @@ -663,7 +635,7 @@ def _get_publish_folder(self, anatomy, template_data, get publish_path Args: - anatomy (openpype.pipeline.anatomy.Anatomy): + anatomy (ayon_core.pipeline.anatomy.Anatomy): template_data (dict): pre-calculated collected data for process asset (string): asset name subset (string): subset name (actually group name of subset) diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_connection.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py similarity index 100% rename from openpype/modules/deadline/plugins/publish/validate_deadline_connection.py rename to client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py similarity index 96% rename from openpype/modules/deadline/plugins/publish/validate_deadline_pools.py rename to client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py index 
90b8241803..2feb044cf1 100644 --- a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py +++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -1,10 +1,10 @@ import pyblish.api -from openpype.pipeline import ( +from ayon_core.pipeline import ( PublishXmlValidationError, OptionalPyblishPluginMixin ) -from openpype.modules.deadline.deadline_module import DeadlineModule +from ayon_core.modules.deadline.deadline_module import DeadlineModule class ValidateDeadlinePools(OptionalPyblishPluginMixin, diff --git a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py similarity index 99% rename from openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py rename to client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index 5d37e7357e..a666c5c2dc 100644 --- a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -3,7 +3,7 @@ import pyblish.api -from openpype.lib import collect_frames +from ayon_core.lib import collect_frames from openpype_modules.deadline.abstract_submit_deadline import requests_get diff --git a/openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.ico b/client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.ico similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.ico rename to client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.ico diff --git a/openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.options b/client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.options similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.options rename to client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.options diff --git a/openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.param b/client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.param similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.param rename to client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.param diff --git a/openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.py b/client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.py similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/Ayon/Ayon.py rename to client/ayon_core/modules/deadline/repository/custom/plugins/Ayon/Ayon.py diff --git a/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.ico b/client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/CelAction.ico similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.ico rename to client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/CelAction.ico diff --git a/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.param b/client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/CelAction.param similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.param rename to client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/CelAction.param diff --git 
a/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.py b/client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/CelAction.py similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.py rename to client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/CelAction.py diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/client/ayon_core/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py similarity index 89% rename from openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py rename to client/ayon_core/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index 96f131b922..81aab00b93 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/client/ayon_core/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -323,7 +323,7 @@ def inject_openpype_environment(deadlinePlugin): # tempfile.TemporaryFile cannot be used because of locking temp_file_name = "{}_{}.json".format( - datetime.utcnow().strftime('%Y%m%d%H%M%S%f'), + datetime.utcnow().strftime("%Y%m%d%H%M%S%f"), str(uuid.uuid1()) ) export_url = os.path.join(tempfile.gettempdir(), temp_file_name) @@ -343,7 +343,7 @@ def inject_openpype_environment(deadlinePlugin): "envgroup": "farm" } - if job.GetJobEnvironmentKeyValue('IS_TEST'): + if job.GetJobEnvironmentKeyValue("IS_TEST"): args.append("--automatic-tests") if all(add_kwargs.values()): @@ -412,13 +412,13 @@ def inject_openpype_environment(deadlinePlugin): def inject_ayon_environment(deadlinePlugin): - """ Pull env vars from Ayon and push them to rendering process. + """ Pull env vars from AYON and push them to rendering process. - Used for correct paths, configuration from OpenPype etc. + Used for correct paths, configuration from AYON etc. 
""" job = deadlinePlugin.GetJob() - print(">>> Injecting Ayon environments ...") + print(">>> Injecting AYON environments ...") try: exe_list = get_ayon_executable() exe = FileUtils.SearchFileList(exe_list) @@ -435,17 +435,18 @@ def inject_ayon_environment(deadlinePlugin): ayon_bundle_name = job.GetJobEnvironmentKeyValue("AYON_BUNDLE_NAME") if not ayon_bundle_name: - raise RuntimeError("Missing env var in job properties " - "AYON_BUNDLE_NAME") + raise RuntimeError( + "Missing env var in job properties AYON_BUNDLE_NAME" + ) config = RepositoryUtils.GetPluginConfig("Ayon") ayon_server_url = ( - job.GetJobEnvironmentKeyValue("AYON_SERVER_URL") or - config.GetConfigEntryWithDefault("AyonServerUrl", "") + job.GetJobEnvironmentKeyValue("AYON_SERVER_URL") or + config.GetConfigEntryWithDefault("AyonServerUrl", "") ) ayon_api_key = ( - job.GetJobEnvironmentKeyValue("AYON_API_KEY") or - config.GetConfigEntryWithDefault("AyonApiKey", "") + job.GetJobEnvironmentKeyValue("AYON_API_KEY") or + config.GetConfigEntryWithDefault("AyonApiKey", "") ) if not all([ayon_server_url, ayon_api_key]): @@ -457,7 +458,7 @@ def inject_ayon_environment(deadlinePlugin): # tempfile.TemporaryFile cannot be used because of locking temp_file_name = "{}_{}.json".format( - datetime.utcnow().strftime('%Y%m%d%H%M%S%f'), + datetime.utcnow().strftime("%Y%m%d%H%M%S%f"), str(uuid.uuid1()) ) export_url = os.path.join(tempfile.gettempdir(), temp_file_name) @@ -477,7 +478,7 @@ def inject_ayon_environment(deadlinePlugin): "envgroup": "farm", } - if job.GetJobEnvironmentKeyValue('IS_TEST'): + if job.GetJobEnvironmentKeyValue("IS_TEST"): args.append("--automatic-tests") if all(add_kwargs.values()): @@ -545,19 +546,23 @@ def inject_ayon_environment(deadlinePlugin): def get_ayon_executable(): - """Return OpenPype Executable from Event Plug-in Settings + """Return AYON Executable from Event Plug-in Settings Returns: - (list) of paths + list[str]: AYON executable paths. + Raises: - (RuntimeError) if no path configured at all + RuntimeError: When no path configured at all. + """ config = RepositoryUtils.GetPluginConfig("Ayon") exe_list = config.GetConfigEntryWithDefault("AyonExecutable", "") if not exe_list: - raise RuntimeError("Path to Ayon executable not configured." - "Please set it in Ayon Deadline Plugin.") + raise RuntimeError( + "Path to AYON executable not configured." + "Please set it in Ayon Deadline Plugin." 
+ ) # clean '\ ' for MacOS pasting if platform.system().lower() == "darwin": @@ -581,8 +586,9 @@ def inject_render_job_id(deadlinePlugin): print(">>> Dependency IDs: {}".format(dependency_ids)) render_job_ids = ",".join(dependency_ids) - deadlinePlugin.SetProcessEnvironmentVariable("RENDER_JOB_IDS", - render_job_ids) + deadlinePlugin.SetProcessEnvironmentVariable( + "RENDER_JOB_IDS", render_job_ids + ) print(">>> Injection end.") @@ -591,34 +597,33 @@ def __main__(deadlinePlugin): print(">>> Getting job ...") job = deadlinePlugin.GetJob() - openpype_render_job = \ - job.GetJobEnvironmentKeyValue('OPENPYPE_RENDER_JOB') or '0' - openpype_publish_job = \ - job.GetJobEnvironmentKeyValue('OPENPYPE_PUBLISH_JOB') or '0' - openpype_remote_job = \ - job.GetJobEnvironmentKeyValue('OPENPYPE_REMOTE_PUBLISH') or '0' + openpype_render_job = job.GetJobEnvironmentKeyValue( + "OPENPYPE_RENDER_JOB") + openpype_publish_job = job.GetJobEnvironmentKeyValue( + "OPENPYPE_PUBLISH_JOB") + openpype_remote_job = job.GetJobEnvironmentKeyValue( + "OPENPYPE_REMOTE_PUBLISH") - if openpype_publish_job == '1' and openpype_render_job == '1': - raise RuntimeError("Misconfiguration. Job couldn't be both " + - "render and publish.") + if openpype_publish_job == "1" and openpype_render_job == "1": + raise RuntimeError( + "Misconfiguration. Job couldn't be both render and publish." + ) - if openpype_publish_job == '1': + if openpype_publish_job == "1": inject_render_job_id(deadlinePlugin) - if openpype_render_job == '1' or openpype_remote_job == '1': + if openpype_render_job == "1" or openpype_remote_job == "1": inject_openpype_environment(deadlinePlugin) - ayon_render_job = \ - job.GetJobEnvironmentKeyValue('AYON_RENDER_JOB') or '0' - ayon_publish_job = \ - job.GetJobEnvironmentKeyValue('AYON_PUBLISH_JOB') or '0' - ayon_remote_job = \ - job.GetJobEnvironmentKeyValue('AYON_REMOTE_PUBLISH') or '0' + ayon_render_job = job.GetJobEnvironmentKeyValue("AYON_RENDER_JOB") + ayon_publish_job = job.GetJobEnvironmentKeyValue("AYON_PUBLISH_JOB") + ayon_remote_job = job.GetJobEnvironmentKeyValue("AYON_REMOTE_PUBLISH") - if ayon_publish_job == '1' and ayon_render_job == '1': - raise RuntimeError("Misconfiguration. Job couldn't be both " + - "render and publish.") + if ayon_publish_job == "1" and ayon_render_job == "1": + raise RuntimeError( + "Misconfiguration. Job couldn't be both render and publish." 
+ ) - if ayon_publish_job == '1': + if ayon_publish_job == "1": inject_render_job_id(deadlinePlugin) - if ayon_render_job == '1' or ayon_remote_job == '1': + if ayon_render_job == "1" or ayon_remote_job == "1": inject_ayon_environment(deadlinePlugin) diff --git a/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.ico b/client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.ico similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.ico rename to client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.ico diff --git a/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.options b/client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.options similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.options rename to client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.options diff --git a/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.param b/client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.param similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.param rename to client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.param diff --git a/client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.py b/client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.py new file mode 100644 index 0000000000..d9fd0b49ef --- /dev/null +++ b/client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/HarmonyAYON.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +from System import * +from System.Diagnostics import * +from System.IO import * +from System.Text import * + +from Deadline.Plugins import * +from Deadline.Scripting import * + +def GetDeadlinePlugin(): + return HarmonyAYONPlugin() + +def CleanupDeadlinePlugin(deadlinePlugin): + deadlinePlugin.Cleanup() + +class HarmonyAYONPlugin(DeadlinePlugin): + + def __init__( self ): + super().__init__() + self.InitializeProcessCallback += self.InitializeProcess + self.RenderExecutableCallback += self.RenderExecutable + self.RenderArgumentCallback += self.RenderArgument + self.CheckExitCodeCallback += self.CheckExitCode + + def Cleanup( self ): + print("Cleanup") + for stdoutHandler in self.StdoutHandlers: + del stdoutHandler.HandleCallback + + del self.InitializeProcessCallback + del self.RenderExecutableCallback + del self.RenderArgumentCallback + + def CheckExitCode( self, exitCode ): + print("check code") + if exitCode != 0: + if exitCode == 100: + self.LogInfo( "Renderer reported an error with error code 100. This will be ignored, since the option to ignore it is specified in the Job Properties." ) + else: + self.FailRender( "Renderer returned non-zero error code %d. Check the renderer's output." 
% exitCode ) + + def InitializeProcess( self ): + self.PluginType = PluginType.Simple + self.StdoutHandling = True + self.PopupHandling = True + + self.AddStdoutHandlerCallback( "Rendered frame ([0-9]+)" ).HandleCallback += self.HandleStdoutProgress + + def HandleStdoutProgress( self ): + startFrame = self.GetStartFrame() + endFrame = self.GetEndFrame() + if( endFrame - startFrame + 1 != 0 ): + self.SetProgress( 100 * ( int(self.GetRegexMatch(1)) - startFrame + 1 ) / ( endFrame - startFrame + 1 ) ) + + def RenderExecutable( self ): + version = int( self.GetPluginInfoEntry( "Version" ) ) + exe = "" + exeList = self.GetConfigEntry( "Harmony_RenderExecutable_" + str(version) ) + exe = FileUtils.SearchFileList( exeList ) + if( exe == "" ): + self.FailRender( "Harmony render executable was not found in the configured separated list \"" + exeList + "\". The path to the render executable can be configured from the Plugin Configuration in the Deadline Monitor." ) + return exe + + def RenderArgument( self ): + renderArguments = "-batch" + + if self.GetBooleanPluginInfoEntryWithDefault( "UsingResPreset", False ): + resName = self.GetPluginInfoEntryWithDefault( "ResolutionName", "HDTV_1080p24" ) + if resName == "Custom": + renderArguments += " -res " + self.GetPluginInfoEntryWithDefault( "PresetName", "HDTV_1080p24" ) + else: + renderArguments += " -res " + resName + else: + resolutionX = self.GetIntegerPluginInfoEntryWithDefault( "ResolutionX", -1 ) + resolutionY = self.GetIntegerPluginInfoEntryWithDefault( "ResolutionY", -1 ) + fov = self.GetFloatPluginInfoEntryWithDefault( "FieldOfView", -1 ) + + if resolutionX > 0 and resolutionY > 0 and fov > 0: + renderArguments += " -res " + str( resolutionX ) + " " + str( resolutionY ) + " " + str( fov ) + + camera = self.GetPluginInfoEntryWithDefault( "Camera", "" ) + + if not camera == "": + renderArguments += " -camera " + camera + + startFrame = str( self.GetStartFrame() ) + endFrame = str( self.GetEndFrame() ) + + renderArguments += " -frames " + startFrame + " " + endFrame + + if not self.GetBooleanPluginInfoEntryWithDefault( "IsDatabase", False ): + sceneFilename = self.GetPluginInfoEntryWithDefault( "SceneFile", self.GetDataFilename() ) + sceneFilename = RepositoryUtils.CheckPathMapping( sceneFilename ) + renderArguments += " \"" + sceneFilename + "\"" + else: + environment = self.GetPluginInfoEntryWithDefault( "Environment", "" ) + renderArguments += " -env " + environment + job = self.GetPluginInfoEntryWithDefault( "Job", "" ) + renderArguments += " -job " + job + scene = self.GetPluginInfoEntryWithDefault( "SceneName", "" ) + renderArguments += " -scene " + scene + version = self.GetPluginInfoEntryWithDefault( "SceneVersion", "" ) + renderArguments += " -version " + version + + #tempSceneDirectory = self.CreateTempDirectory( "thread" + str(self.GetThreadNumber()) ) + #preRenderScript = + rendernodeNum = 0 + scriptBuilder = StringBuilder() + + while True: + nodeName = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Node", "" ) + if nodeName == "": + break + nodeType = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Type", "Image" ) + if nodeType == "Image": + nodePath = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Path", "" ) + nodeLeadingZero = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "LeadingZero", "" ) + nodeFormat = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Format", "" ) + nodeStartFrame = 
self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "StartFrame", "" ) + + if not nodePath == "": + scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"drawingName\", 1, \"" + nodePath + "\" );") + + if not nodeLeadingZero == "": + scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"leadingZeros\", 1, \"" + nodeLeadingZero + "\" );") + + if not nodeFormat == "": + scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"drawingType\", 1, \"" + nodeFormat + "\" );") + + if not nodeStartFrame == "": + scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"start\", 1, \"" + nodeStartFrame + "\" );") + + if nodeType == "Movie": + nodePath = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Path", "" ) + if not nodePath == "": + scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"moviePath\", 1, \"" + nodePath + "\" );") + + rendernodeNum += 1 + + tempDirectory = self.CreateTempDirectory( "thread" + str(self.GetThreadNumber()) ) + preRenderScriptName = Path.Combine( tempDirectory, "preRenderScript.txt" ) + + File.WriteAllText( preRenderScriptName, scriptBuilder.ToString() ) + + preRenderInlineScript = self.GetPluginInfoEntryWithDefault( "PreRenderInlineScript", "" ) + if preRenderInlineScript: + renderArguments += " -preRenderInlineScript \"" + preRenderInlineScript +"\"" + + renderArguments += " -preRenderScript \"" + preRenderScriptName +"\"" + + return renderArguments diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico b/client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico rename to client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.ico diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options b/client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options rename to client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.options diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param b/client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param rename to client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.param diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py b/client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py similarity index 100% rename from openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py rename to client/ayon_core/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py diff --git a/openpype/modules/deadline/repository/readme.md b/client/ayon_core/modules/deadline/repository/readme.md similarity index 
100% rename from openpype/modules/deadline/repository/readme.md rename to client/ayon_core/modules/deadline/repository/readme.md diff --git a/client/ayon_core/modules/interfaces.py b/client/ayon_core/modules/interfaces.py new file mode 100644 index 0000000000..4b114b7a0e --- /dev/null +++ b/client/ayon_core/modules/interfaces.py @@ -0,0 +1,21 @@ +from ayon_core.addon.interfaces import ( + IPluginPaths, + ITrayAddon, + ITrayAction, + ITrayService, + IHostAddon, +) + +ITrayModule = ITrayAddon +ILaunchHookPaths = object + + +__all__ = ( + "IPluginPaths", + "ITrayAddon", + "ITrayAction", + "ITrayService", + "IHostAddon", + "ITrayModule", + "ILaunchHookPaths", +) diff --git a/openpype/modules/job_queue/__init__.py b/client/ayon_core/modules/job_queue/__init__.py similarity index 100% rename from openpype/modules/job_queue/__init__.py rename to client/ayon_core/modules/job_queue/__init__.py diff --git a/openpype/modules/job_queue/job_server/__init__.py b/client/ayon_core/modules/job_queue/job_server/__init__.py similarity index 100% rename from openpype/modules/job_queue/job_server/__init__.py rename to client/ayon_core/modules/job_queue/job_server/__init__.py diff --git a/openpype/modules/job_queue/job_server/job_queue_route.py b/client/ayon_core/modules/job_queue/job_server/job_queue_route.py similarity index 100% rename from openpype/modules/job_queue/job_server/job_queue_route.py rename to client/ayon_core/modules/job_queue/job_server/job_queue_route.py diff --git a/openpype/modules/job_queue/job_server/jobs.py b/client/ayon_core/modules/job_queue/job_server/jobs.py similarity index 100% rename from openpype/modules/job_queue/job_server/jobs.py rename to client/ayon_core/modules/job_queue/job_server/jobs.py diff --git a/openpype/modules/job_queue/job_server/server.py b/client/ayon_core/modules/job_queue/job_server/server.py similarity index 100% rename from openpype/modules/job_queue/job_server/server.py rename to client/ayon_core/modules/job_queue/job_server/server.py diff --git a/openpype/modules/job_queue/job_server/utils.py b/client/ayon_core/modules/job_queue/job_server/utils.py similarity index 100% rename from openpype/modules/job_queue/job_server/utils.py rename to client/ayon_core/modules/job_queue/job_server/utils.py diff --git a/openpype/modules/job_queue/job_server/workers.py b/client/ayon_core/modules/job_queue/job_server/workers.py similarity index 100% rename from openpype/modules/job_queue/job_server/workers.py rename to client/ayon_core/modules/job_queue/job_server/workers.py diff --git a/openpype/modules/job_queue/job_server/workers_rpc_route.py b/client/ayon_core/modules/job_queue/job_server/workers_rpc_route.py similarity index 100% rename from openpype/modules/job_queue/job_server/workers_rpc_route.py rename to client/ayon_core/modules/job_queue/job_server/workers_rpc_route.py diff --git a/openpype/modules/job_queue/job_workers/__init__.py b/client/ayon_core/modules/job_queue/job_workers/__init__.py similarity index 100% rename from openpype/modules/job_queue/job_workers/__init__.py rename to client/ayon_core/modules/job_queue/job_workers/__init__.py diff --git a/openpype/modules/job_queue/job_workers/base_worker.py b/client/ayon_core/modules/job_queue/job_workers/base_worker.py similarity index 100% rename from openpype/modules/job_queue/job_workers/base_worker.py rename to client/ayon_core/modules/job_queue/job_workers/base_worker.py diff --git a/client/ayon_core/modules/job_queue/module.py b/client/ayon_core/modules/job_queue/module.py new file mode 100644 index 
0000000000..1cecd62de5 --- /dev/null +++ b/client/ayon_core/modules/job_queue/module.py @@ -0,0 +1,244 @@ +"""Job queue OpenPype module was created for remote execution of commands. + +## Why it is needed +Primarily created for hosts which are not easily controlled from the command +line or in headless mode, where it is easier to keep one host process running +and listening for jobs to do. + +### Example +One example is TVPaint, which does not have a headless mode, can run only one +process at a time, and it is impossible to know what should be executed inside +TVPaint before we know all data about the file that should be processed. + +## Idea +The idea is that there is a server, workers and workstations which need to +process something on a worker. + +Workers and workstations must have access to the server through the address of +its running instance. Workers use WebSockets and workstations use HTTP calls. +Both of them must also have access to the job queue root which is set in +settings. The root is used as a temp location where files needed for a job can +be stored before sending the job, or where result files are stored when the +job is done. + +The server's address must be set in settings while the server is running so +workers and workstations know where to send or receive jobs. + +## Command line commands +### start_server +- start the server which handles jobs +- it is possible to specify port and host address (default is localhost:8079) + +### start_worker +- start a worker which will process jobs +- has a required positional argument which is the application name from +  OpenPype settings e.g. 'tvpaint/11-5' ('tvpaint' is the group, '11-5' is the +  variant) +- it is possible to specify the server url, but the url from settings is used +  when not passed (this is added mainly for development purposes) +""" + +import sys +import json +import copy +import platform + +from ayon_core.addon import click_wrap +from ayon_core.modules import OpenPypeModule +from ayon_core.settings import get_system_settings + + +class JobQueueModule(OpenPypeModule): + name = "job_queue" + + def initialize(self, modules_settings): + module_settings = modules_settings.get(self.name) or {} + server_url = module_settings.get("server_url") or "" + + self._server_url = self.url_conversion(server_url) + jobs_root_mapping = self._roots_mapping_conversion( + module_settings.get("jobs_root") + ) + + self._jobs_root_mapping = jobs_root_mapping + + # Is always enabled + # - the module does nothing until it is used + self.enabled = True + + @classmethod + def _root_conversion(cls, root_path): + """Make sure root path does not end with slash.""" + # Return empty string if path is invalid + if not root_path: + return "" + + # Remove all slashes + while root_path.endswith("/") or root_path.endswith("\\"): + root_path = root_path[:-1] + return root_path + + @classmethod + def _roots_mapping_conversion(cls, roots_mapping): + roots_mapping = roots_mapping or {} + for platform_name in ("windows", "linux", "darwin"): + roots_mapping[platform_name] = cls._root_conversion( + roots_mapping.get(platform_name) + ) + return roots_mapping + + @staticmethod + def url_conversion(url, ws=False): + if sys.version_info[0] == 2: + from urlparse import urlsplit, urlunsplit + else: + from urllib.parse import urlsplit, urlunsplit + + if not url: + return url + + url_parts = list(urlsplit(url)) + scheme = url_parts[0] + if not scheme: + if ws: + url = "ws://{}".format(url) + else: + url = "http://{}".format(url) + url_parts = list(urlsplit(url)) + + elif ws: + if scheme not in ("ws", "wss"): + if scheme == "https": + url_parts[0] =
"wss" + else: + url_parts[0] = "ws" + + elif scheme not in ("http", "https"): + if scheme == "wss": + url_parts[0] = "https" + else: + url_parts[0] = "http" + + return urlunsplit(url_parts) + + def get_jobs_root_mapping(self): + return copy.deepcopy(self._jobs_root_mapping) + + def get_jobs_root(self): + return self._jobs_root_mapping.get(platform.system().lower()) + + @classmethod + def get_jobs_root_from_settings(cls): + module_settings = get_system_settings()["modules"] + jobs_root_mapping = module_settings.get(cls.name, {}).get("jobs_root") + converted_mapping = cls._roots_mapping_conversion(jobs_root_mapping) + + return converted_mapping[platform.system().lower()] + + @property + def server_url(self): + return self._server_url + + def send_job(self, host_name, job_data): + import requests + + job_data = job_data or {} + job_data["host_name"] = host_name + api_path = "{}/api/jobs".format(self._server_url) + post_request = requests.post(api_path, data=json.dumps(job_data)) + return str(post_request.content.decode()) + + def get_job_status(self, job_id): + import requests + + api_path = "{}/api/jobs/{}".format(self._server_url, job_id) + return requests.get(api_path).json() + + def cli(self, click_group): + click_group.add_command(cli_main.to_click_obj()) + + @classmethod + def get_server_url_from_settings(cls): + module_settings = get_system_settings()["modules"] + return cls.url_conversion( + module_settings + .get(cls.name, {}) + .get("server_url") + ) + + @classmethod + def start_server(cls, port=None, host=None): + from .job_server import main + + return main(port, host) + + @classmethod + def start_worker(cls, app_name, server_url=None): + import requests + from ayon_core.lib import ApplicationManager + + if not server_url: + server_url = cls.get_server_url_from_settings() + + if not server_url: + raise ValueError("Server url is not set.") + + http_server_url = cls.url_conversion(server_url) + + # Validate url + requests.get(http_server_url) + + ws_server_url = cls.url_conversion(server_url) + "/ws" + + app_manager = ApplicationManager() + app = app_manager.applications.get(app_name) + if app is None: + raise ValueError( + "Didn't find application \"{}\" in settings.".format(app_name) + ) + + if app.host_name == "tvpaint": + return cls._start_tvpaint_worker(app, ws_server_url) + raise ValueError("Unknown host \"{}\"".format(app.host_name)) + + @classmethod + def _start_tvpaint_worker(cls, app, server_url): + from ayon_core.hosts.tvpaint.worker import main + + executable = app.find_executable() + if not executable: + raise ValueError(( + "Executable for app \"{}\" is not set" + " or accessible on this workstation." + ).format(app.full_name)) + + return main(str(executable), server_url) + + +@click_wrap.group( + JobQueueModule.name, + help="Application job server. Can be used as render farm." +) +def cli_main(): + pass + + +@cli_main.command( + "start_server", + help="Start server handling workers and their jobs." +) +@click_wrap.option("--port", help="Server port") +@click_wrap.option("--host", help="Server host (ip address)") +def cli_start_server(port, host): + JobQueueModule.start_server(port, host) + + +@cli_main.command( + "start_worker", help=( + "Start a worker for a specific application. (e.g. 
\"tvpaint/11.5\")" + ) +) +@click_wrap.argument("app_name") +@click_wrap.option( + "--server_url", + help="Server url which handle workers and jobs.") +def cli_start_worker(app_name, server_url): + JobQueueModule.start_worker(app_name, server_url) diff --git a/openpype/modules/launcher_action.py b/client/ayon_core/modules/launcher_action.py similarity index 81% rename from openpype/modules/launcher_action.py rename to client/ayon_core/modules/launcher_action.py index 4f0674c94f..c0266e3a57 100644 --- a/openpype/modules/launcher_action.py +++ b/client/ayon_core/modules/launcher_action.py @@ -1,7 +1,7 @@ import os -from openpype import PLUGINS_DIR, AYON_SERVER_ENABLED -from openpype.modules import ( +from ayon_core import AYON_CORE_ROOT +from ayon_core.modules import ( OpenPypeModule, ITrayAction, ) @@ -26,14 +26,14 @@ def tray_init(self): def tray_start(self): return - def connect_with_modules(self, enabled_modules): + def connect_with_addons(self, enabled_modules): # Register actions if not self.tray_initialized: return - from openpype.pipeline.actions import register_launcher_action_path + from ayon_core.pipeline.actions import register_launcher_action_path - actions_dir = os.path.join(PLUGINS_DIR, "actions") + actions_dir = os.path.join(AYON_CORE_ROOT, "plugins", "actions") if os.path.exists(actions_dir): register_launcher_action_path(actions_dir) @@ -67,10 +67,7 @@ def on_action_trigger(self): def _create_window(self): if self._window: return - if AYON_SERVER_ENABLED: - from openpype.tools.ayon_launcher.ui import LauncherWindow - else: - from openpype.tools.launcher import LauncherWindow + from ayon_core.tools.launcher.ui import LauncherWindow self._window = LauncherWindow() def _show_launcher(self): diff --git a/client/ayon_core/modules/library_loader_action.py b/client/ayon_core/modules/library_loader_action.py new file mode 100644 index 0000000000..524c4f7144 --- /dev/null +++ b/client/ayon_core/modules/library_loader_action.py @@ -0,0 +1,67 @@ +from ayon_core.modules import AYONAddon, ITrayModule + + +class LibraryLoaderAddon(AYONAddon, ITrayModule): + name = "library_tool" + + def initialize(self, modules_settings): + # Tray attributes + self._library_loader_imported = None + self._library_loader_window = None + + def tray_init(self): + # Add library tool + self._library_loader_imported = False + try: + from ayon_core.tools.loader.ui import LoaderWindow + + self._library_loader_imported = True + except Exception: + self.log.warning( + "Couldn't load Library loader tool for tray.", + exc_info=True + ) + + # Definition of Tray menu + def tray_menu(self, tray_menu): + if not self._library_loader_imported: + return + + from qtpy import QtWidgets + # Actions + action_library_loader = QtWidgets.QAction( + "Loader", tray_menu + ) + + action_library_loader.triggered.connect(self.show_library_loader) + + tray_menu.addAction(action_library_loader) + + def tray_start(self, *_a, **_kw): + return + + def tray_exit(self, *_a, **_kw): + return + + def show_library_loader(self): + if self._library_loader_window is None: + from ayon_core.pipeline import install_ayon_plugins + + self._init_library_loader() + + install_ayon_plugins() + + self._library_loader_window.show() + + # Raise and activate the window + # for MacOS + self._library_loader_window.raise_() + # for Windows + self._library_loader_window.activateWindow() + + def _init_library_loader(self): + from ayon_core.tools.loader.ui import LoaderWindow + + libraryloader = LoaderWindow() + + self._library_loader_window = libraryloader diff --git 
a/openpype/modules/python_console_interpreter/__init__.py b/client/ayon_core/modules/python_console_interpreter/__init__.py similarity index 100% rename from openpype/modules/python_console_interpreter/__init__.py rename to client/ayon_core/modules/python_console_interpreter/__init__.py diff --git a/client/ayon_core/modules/python_console_interpreter/module.py b/client/ayon_core/modules/python_console_interpreter/module.py new file mode 100644 index 0000000000..7819c9cbf3 --- /dev/null +++ b/client/ayon_core/modules/python_console_interpreter/module.py @@ -0,0 +1,42 @@ +from ayon_core.modules import OpenPypeModule, ITrayAction + + +class PythonInterpreterAction(OpenPypeModule, ITrayAction): + label = "Console" + name = "python_interpreter" + admin_action = True + + def initialize(self, modules_settings): + self.enabled = True + self._interpreter_window = None + + def tray_init(self): + self.create_interpreter_window() + + def tray_exit(self): + if self._interpreter_window is not None: + self._interpreter_window.save_registry() + + def create_interpreter_window(self): + """Initialize the Python interpreter Qt window.""" + if self._interpreter_window: + return + + from openpype_modules.python_console_interpreter.window import ( + PythonInterpreterWidget + ) + + self._interpreter_window = PythonInterpreterWidget() + + def on_action_trigger(self): + self.show_interpreter_window() + + def show_interpreter_window(self): + self.create_interpreter_window() + + if self._interpreter_window.isVisible(): + self._interpreter_window.activateWindow() + self._interpreter_window.raise_() + return + + self._interpreter_window.show() diff --git a/openpype/modules/python_console_interpreter/window/__init__.py b/client/ayon_core/modules/python_console_interpreter/window/__init__.py similarity index 100% rename from openpype/modules/python_console_interpreter/window/__init__.py rename to client/ayon_core/modules/python_console_interpreter/window/__init__.py diff --git a/client/ayon_core/modules/python_console_interpreter/window/widgets.py b/client/ayon_core/modules/python_console_interpreter/window/widgets.py new file mode 100644 index 0000000000..628a2e72ff --- /dev/null +++ b/client/ayon_core/modules/python_console_interpreter/window/widgets.py @@ -0,0 +1,660 @@ +import os +import re +import sys +import collections +from code import InteractiveInterpreter + +import appdirs +from qtpy import QtCore, QtWidgets, QtGui + +from ayon_core import resources +from ayon_core.style import load_stylesheet +from ayon_core.lib import JSONSettingRegistry + + +ayon_art = r""" + + โ–„โ–ˆโ–ˆโ–„ + โ–„โ–ˆโ–ˆโ–ˆโ–„ โ–€โ–ˆโ–ˆโ–„ โ–€โ–ˆโ–ˆโ–€ โ–„โ–ˆโ–ˆโ–€ โ–„โ–ˆโ–ˆโ–€โ–€โ–€โ–ˆโ–ˆโ–„ โ–€โ–ˆโ–ˆโ–ˆโ–„ โ–ˆโ–„ + โ–„โ–„ โ–€โ–ˆโ–ˆโ–„ โ–€โ–ˆโ–ˆโ–„ โ–„โ–ˆโ–ˆโ–€ โ–ˆโ–ˆโ–€ โ–€โ–ˆโ–ˆโ–„ โ–„ โ–€โ–ˆโ–ˆโ–„ โ–ˆโ–ˆโ–ˆ + โ–„โ–ˆโ–ˆโ–€ โ–ˆโ–ˆโ–„ โ–€ โ–„โ–„ โ–€ โ–ˆโ–ˆ โ–„โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–€โ–ˆโ–ˆโ–„ โ–ˆโ–ˆโ–ˆ + โ–„โ–ˆโ–ˆโ–€ โ–€โ–ˆโ–ˆโ–„ โ–ˆโ–ˆ โ–€โ–ˆโ–ˆโ–„ โ–„โ–ˆโ–ˆโ–€ โ–ˆโ–ˆโ–ˆ โ–€โ–ˆโ–ˆ โ–€โ–ˆโ–€ + โ–„โ–ˆโ–ˆโ–€ โ–€โ–ˆโ–ˆโ–„ โ–€โ–ˆ โ–€โ–ˆโ–ˆโ–„โ–„โ–„โ–„โ–ˆโ–ˆโ–€ โ–ˆโ–€ โ–€โ–ˆโ–ˆโ–„ + + ยท ยท - =[ by YNPUT ]:[ http://ayon.ynput.io ]= - ยท ยท + +""" + + +class PythonInterpreterRegistry(JSONSettingRegistry): + """Class handling AYON general settings registry. + + Attributes: + vendor (str): Name used for path construction. + product (str): Additional name used for path construction.
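+ + Example (illustrative sketch, not part of the original module; assumes + the registry JSON file under the ``appdirs`` user data dir is writable): + >>> registry = PythonInterpreterRegistry() + >>> registry.set_item("width", 1000) + >>> registry.get_item("width") + 1000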
+ + """ + + def __init__(self): + self.vendor = "Ynput" + self.product = "AYON" + name = "python_interpreter_tool" + path = appdirs.user_data_dir(self.product, self.vendor) + super(PythonInterpreterRegistry, self).__init__(name, path) + + +class StdOEWrap: + def __init__(self): + self._origin_stdout_write = None + self._origin_stderr_write = None + self._listening = False + self.lines = collections.deque() + + if not sys.stdout: + sys.stdout = open(os.devnull, "w") + + if not sys.stderr: + sys.stderr = open(os.devnull, "w") + + if self._origin_stdout_write is None: + self._origin_stdout_write = sys.stdout.write + + if self._origin_stderr_write is None: + self._origin_stderr_write = sys.stderr.write + + self._listening = True + sys.stdout.write = self._stdout_listener + sys.stderr.write = self._stderr_listener + + def stop_listen(self): + self._listening = False + + def _stdout_listener(self, text): + if self._listening: + self.lines.append(text) + if self._origin_stdout_write is not None: + self._origin_stdout_write(text) + + def _stderr_listener(self, text): + if self._listening: + self.lines.append(text) + if self._origin_stderr_write is not None: + self._origin_stderr_write(text) + + +class PythonCodeEditor(QtWidgets.QPlainTextEdit): + execute_requested = QtCore.Signal() + + def __init__(self, parent): + super(PythonCodeEditor, self).__init__(parent) + + self.setObjectName("PythonCodeEditor") + + self._indent = 4 + + def _tab_shift_right(self): + cursor = self.textCursor() + selected_text = cursor.selectedText() + if not selected_text: + cursor.insertText(" " * self._indent) + return + + sel_start = cursor.selectionStart() + sel_end = cursor.selectionEnd() + cursor.setPosition(sel_end) + end_line = cursor.blockNumber() + cursor.setPosition(sel_start) + while True: + cursor.movePosition(QtGui.QTextCursor.StartOfLine) + text = cursor.block().text() + spaces = len(text) - len(text.lstrip(" ")) + new_spaces = spaces % self._indent + if not new_spaces: + new_spaces = self._indent + + cursor.insertText(" " * new_spaces) + if cursor.blockNumber() == end_line: + break + + cursor.movePosition(QtGui.QTextCursor.NextBlock) + + def _tab_shift_left(self): + tmp_cursor = self.textCursor() + sel_start = tmp_cursor.selectionStart() + sel_end = tmp_cursor.selectionEnd() + + cursor = QtGui.QTextCursor(self.document()) + cursor.setPosition(sel_end) + end_line = cursor.blockNumber() + cursor.setPosition(sel_start) + while True: + cursor.movePosition(QtGui.QTextCursor.StartOfLine) + text = cursor.block().text() + spaces = len(text) - len(text.lstrip(" ")) + if spaces: + spaces_to_remove = (spaces % self._indent) or self._indent + if spaces_to_remove > spaces: + spaces_to_remove = spaces + + cursor.setPosition( + cursor.position() + spaces_to_remove, + QtGui.QTextCursor.KeepAnchor + ) + cursor.removeSelectedText() + + if cursor.blockNumber() == end_line: + break + + cursor.movePosition(QtGui.QTextCursor.NextBlock) + + def keyPressEvent(self, event): + if event.key() == QtCore.Qt.Key_Backtab: + self._tab_shift_left() + event.accept() + return + + if event.key() == QtCore.Qt.Key_Tab: + if event.modifiers() == QtCore.Qt.NoModifier: + self._tab_shift_right() + event.accept() + return + + if ( + event.key() == QtCore.Qt.Key_Return + and event.modifiers() == QtCore.Qt.ControlModifier + ): + self.execute_requested.emit() + event.accept() + return + + super(PythonCodeEditor, self).keyPressEvent(event) + + +class PythonTabWidget(QtWidgets.QWidget): + add_tab_requested = QtCore.Signal() + before_execute = 
QtCore.Signal(str) + + def __init__(self, parent): + super(PythonTabWidget, self).__init__(parent) + + code_input = PythonCodeEditor(self) + + self.setFocusProxy(code_input) + + add_tab_btn = QtWidgets.QPushButton("Add tab...", self) + add_tab_btn.setToolTip("Add new tab") + + execute_btn = QtWidgets.QPushButton("Execute", self) + execute_btn.setToolTip("Execute command (Ctrl + Enter)") + + btns_layout = QtWidgets.QHBoxLayout() + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addWidget(add_tab_btn) + btns_layout.addStretch(1) + btns_layout.addWidget(execute_btn) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(code_input, 1) + layout.addLayout(btns_layout, 0) + + add_tab_btn.clicked.connect(self._on_add_tab_clicked) + execute_btn.clicked.connect(self._on_execute_clicked) + code_input.execute_requested.connect(self.execute) + + self._code_input = code_input + self._interpreter = InteractiveInterpreter() + + def _on_add_tab_clicked(self): + self.add_tab_requested.emit() + + def _on_execute_clicked(self): + self.execute() + + def get_code(self): + return self._code_input.toPlainText() + + def set_code(self, code_text): + self._code_input.setPlainText(code_text) + + def execute(self): + code_text = self._code_input.toPlainText() + self.before_execute.emit(code_text) + self._interpreter.runcode(code_text) + + +class TabNameDialog(QtWidgets.QDialog): + default_width = 330 + default_height = 85 + + def __init__(self, parent): + super(TabNameDialog, self).__init__(parent) + + self.setWindowTitle("Enter tab name") + + name_label = QtWidgets.QLabel("Tab name:", self) + name_input = QtWidgets.QLineEdit(self) + + inputs_layout = QtWidgets.QHBoxLayout() + inputs_layout.addWidget(name_label) + inputs_layout.addWidget(name_input) + + ok_btn = QtWidgets.QPushButton("Ok", self) + cancel_btn = QtWidgets.QPushButton("Cancel", self) + btns_layout = QtWidgets.QHBoxLayout() + btns_layout.addStretch(1) + btns_layout.addWidget(ok_btn) + btns_layout.addWidget(cancel_btn) + + layout = QtWidgets.QVBoxLayout(self) + layout.addLayout(inputs_layout) + layout.addStretch(1) + layout.addLayout(btns_layout) + + ok_btn.clicked.connect(self._on_ok_clicked) + cancel_btn.clicked.connect(self._on_cancel_clicked) + + self._name_input = name_input + self._ok_btn = ok_btn + self._cancel_btn = cancel_btn + + self._result = None + + self.resize(self.default_width, self.default_height) + + def set_tab_name(self, name): + self._name_input.setText(name) + + def result(self): + return self._result + + def showEvent(self, event): + super(TabNameDialog, self).showEvent(event) + btns_width = max( + self._ok_btn.width(), + self._cancel_btn.width() + ) + + self._ok_btn.setMinimumWidth(btns_width) + self._cancel_btn.setMinimumWidth(btns_width) + + def _on_ok_clicked(self): + self._result = self._name_input.text() + self.accept() + + def _on_cancel_clicked(self): + self._result = None + self.reject() + + +class OutputTextWidget(QtWidgets.QTextEdit): + v_max_offset = 4 + + def vertical_scroll_at_max(self): + v_scroll = self.verticalScrollBar() + return v_scroll.value() > v_scroll.maximum() - self.v_max_offset + + def scroll_to_bottom(self): + v_scroll = self.verticalScrollBar() + return v_scroll.setValue(v_scroll.maximum()) + + +class EnhancedTabBar(QtWidgets.QTabBar): + double_clicked = QtCore.Signal(QtCore.QPoint) + right_clicked = QtCore.Signal(QtCore.QPoint) + mid_clicked = QtCore.Signal(QtCore.QPoint) + + def __init__(self, parent): + super(EnhancedTabBar, self).__init__(parent) 
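+ # The tab bar only re-emits raw mouse events as signals; the owning + # widget decides which rename, duplicate or close action they trigger.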
+ + self.setDrawBase(False) + + def mouseDoubleClickEvent(self, event): + self.double_clicked.emit(event.globalPos()) + event.accept() + + def mouseReleaseEvent(self, event): + if event.button() == QtCore.Qt.RightButton: + self.right_clicked.emit(event.globalPos()) + event.accept() + return + + elif event.button() == QtCore.Qt.MidButton: + self.mid_clicked.emit(event.globalPos()) + event.accept() + + else: + super(EnhancedTabBar, self).mouseReleaseEvent(event) + + +class PythonInterpreterWidget(QtWidgets.QWidget): + default_width = 1000 + default_height = 600 + + def __init__(self, allow_save_registry=True, parent=None): + super(PythonInterpreterWidget, self).__init__(parent) + + self.setWindowTitle("AYON Console") + self.setWindowIcon(QtGui.QIcon(resources.get_ayon_icon_filepath())) + + self.ansi_escape = re.compile( + r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]" + ) + + self._tabs = [] + + self._stdout_err_wrapper = StdOEWrap() + + output_widget = OutputTextWidget(self) + output_widget.setObjectName("PythonInterpreterOutput") + output_widget.setLineWrapMode(QtWidgets.QTextEdit.NoWrap) + output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) + + tab_widget = QtWidgets.QTabWidget(self) + tab_bar = EnhancedTabBar(tab_widget) + tab_widget.setTabBar(tab_bar) + tab_widget.setTabsClosable(False) + tab_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + widgets_splitter = QtWidgets.QSplitter(self) + widgets_splitter.setOrientation(QtCore.Qt.Vertical) + widgets_splitter.addWidget(output_widget) + widgets_splitter.addWidget(tab_widget) + widgets_splitter.setStretchFactor(0, 1) + widgets_splitter.setStretchFactor(1, 1) + height = int(self.default_height / 2) + widgets_splitter.setSizes([height, self.default_height - height]) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(widgets_splitter) + + line_check_timer = QtCore.QTimer() + line_check_timer.setInterval(200) + + line_check_timer.timeout.connect(self._on_timer_timeout) + tab_bar.right_clicked.connect(self._on_tab_right_click) + tab_bar.double_clicked.connect(self._on_tab_double_click) + tab_bar.mid_clicked.connect(self._on_tab_mid_click) + tab_widget.tabCloseRequested.connect(self._on_tab_close_req) + + self._widgets_splitter = widgets_splitter + self._output_widget = output_widget + self._tab_widget = tab_widget + self._line_check_timer = line_check_timer + + self._append_lines([ayon_art]) + + self._first_show = True + self._splitter_size_ratio = None + self._allow_save_registry = allow_save_registry + self._registry_saved = True + + self._init_from_registry() + + if self._tab_widget.count() < 1: + self.add_tab("Python") + + def _init_from_registry(self): + setting_registry = PythonInterpreterRegistry() + width = None + height = None + try: + width = setting_registry.get_item("width") + height = setting_registry.get_item("height") + + except ValueError: + pass + + if width is None or width < 200: + width = self.default_width + + if height is None or height < 200: + height = self.default_height + + self.resize(width, height) + + try: + self._splitter_size_ratio = ( + setting_registry.get_item("splitter_sizes") + ) + + except ValueError: + pass + + try: + tab_defs = setting_registry.get_item("tabs") or [] + for tab_def in tab_defs: + widget = self.add_tab(tab_def["name"]) + widget.set_code(tab_def["code"]) + + except ValueError: + pass + + def save_registry(self): + # Skip saving if the window was never shown + if not self._allow_save_registry or self._registry_saved: + return + + self._registry_saved = True + setting_registry
= PythonInterpreterRegistry() + + setting_registry.set_item("width", self.width()) + setting_registry.set_item("height", self.height()) + + setting_registry.set_item( + "splitter_sizes", self._widgets_splitter.sizes() + ) + + tabs = [] + for tab_idx in range(self._tab_widget.count()): + widget = self._tab_widget.widget(tab_idx) + tab_code = widget.get_code() + tab_name = self._tab_widget.tabText(tab_idx) + tabs.append({ + "name": tab_name, + "code": tab_code + }) + + setting_registry.set_item("tabs", tabs) + + def _on_tab_right_click(self, global_point): + point = self._tab_widget.mapFromGlobal(global_point) + tab_bar = self._tab_widget.tabBar() + tab_idx = tab_bar.tabAt(point) + last_index = tab_bar.count() - 1 + if tab_idx < 0 or tab_idx > last_index: + return + + menu = QtWidgets.QMenu(self._tab_widget) + + add_tab_action = QtWidgets.QAction("Add tab...", menu) + add_tab_action.setToolTip("Add new tab") + + rename_tab_action = QtWidgets.QAction("Rename...", menu) + rename_tab_action.setToolTip("Rename tab") + + duplicate_tab_action = QtWidgets.QAction("Duplicate...", menu) + duplicate_tab_action.setToolTip("Duplicate code to new tab") + + close_tab_action = QtWidgets.QAction("Close", menu) + close_tab_action.setToolTip("Close tab and lose content") + close_tab_action.setEnabled(self._tab_widget.tabsClosable()) + + menu.addAction(add_tab_action) + menu.addAction(rename_tab_action) + menu.addAction(duplicate_tab_action) + menu.addAction(close_tab_action) + + result = menu.exec_(global_point) + if result is None: + return + + if result is rename_tab_action: + self._rename_tab_req(tab_idx) + + elif result is add_tab_action: + self._on_add_requested() + + elif result is duplicate_tab_action: + self._duplicate_requested(tab_idx) + + elif result is close_tab_action: + self._on_tab_close_req(tab_idx) + + def _rename_tab_req(self, tab_idx): + dialog = TabNameDialog(self) + dialog.set_tab_name(self._tab_widget.tabText(tab_idx)) + dialog.exec_() + tab_name = dialog.result() + if tab_name: + self._tab_widget.setTabText(tab_idx, tab_name) + + def _duplicate_requested(self, tab_idx=None): + if tab_idx is None: + tab_idx = self._tab_widget.currentIndex() + + src_widget = self._tab_widget.widget(tab_idx) + dst_widget = self._add_tab() + if dst_widget is None: + return + dst_widget.set_code(src_widget.get_code()) + + def _on_tab_mid_click(self, global_point): + point = self._tab_widget.mapFromGlobal(global_point) + tab_bar = self._tab_widget.tabBar() + tab_idx = tab_bar.tabAt(point) + last_index = tab_bar.count() - 1 + if tab_idx < 0 or tab_idx > last_index: + return + + self._on_tab_close_req(tab_idx) + + def _on_tab_double_click(self, global_point): + point = self._tab_widget.mapFromGlobal(global_point) + tab_bar = self._tab_widget.tabBar() + tab_idx = tab_bar.tabAt(point) + last_index = tab_bar.count() - 1 + if tab_idx < 0 or tab_idx > last_index: + return + + self._rename_tab_req(tab_idx) + + def _on_tab_close_req(self, tab_index): + if self._tab_widget.count() == 1: + return + + widget = self._tab_widget.widget(tab_index) + if widget in self._tabs: + self._tabs.remove(widget) + self._tab_widget.removeTab(tab_index) + + if self._tab_widget.count() == 1: + self._tab_widget.setTabsClosable(False) + + def _append_lines(self, lines): + at_max = self._output_widget.vertical_scroll_at_max() + tmp_cursor = QtGui.QTextCursor(self._output_widget.document()) + tmp_cursor.movePosition(QtGui.QTextCursor.End) + for line in lines: + tmp_cursor.insertText(line) + + if at_max: + 
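+ # Autoscroll only when the view was already at the bottom before + # appending, so manual scrollback is not interrupted by new output.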
+
+    def _on_timer_timeout(self):
+        if self._stdout_err_wrapper.lines:
+            lines = []
+            while self._stdout_err_wrapper.lines:
+                line = self._stdout_err_wrapper.lines.popleft()
+                lines.append(self.ansi_escape.sub("", line))
+            self._append_lines(lines)
+
+    def _on_add_requested(self):
+        self._add_tab()
+
+    def _add_tab(self):
+        dialog = TabNameDialog(self)
+        dialog.exec_()
+        tab_name = dialog.result()
+        if tab_name:
+            return self.add_tab(tab_name)
+
+        return None
+
+    def _on_before_execute(self, code_text):
+        at_max = self._output_widget.vertical_scroll_at_max()
+        document = self._output_widget.document()
+        tmp_cursor = QtGui.QTextCursor(document)
+        tmp_cursor.movePosition(QtGui.QTextCursor.End)
+        tmp_cursor.insertText("{}\nExecuting command:\n".format(20 * "-"))
+
+        code_block_format = QtGui.QTextFrameFormat()
+        code_block_format.setBackground(QtGui.QColor(27, 27, 27))
+        code_block_format.setPadding(4)
+
+        tmp_cursor.insertFrame(code_block_format)
+        char_format = tmp_cursor.charFormat()
+        char_format.setForeground(
+            QtGui.QBrush(QtGui.QColor(114, 224, 198))
+        )
+        tmp_cursor.setCharFormat(char_format)
+        tmp_cursor.insertText(code_text)
+
+        # Create new cursor
+        tmp_cursor = QtGui.QTextCursor(document)
+        tmp_cursor.movePosition(QtGui.QTextCursor.End)
+        tmp_cursor.insertText("{}\n".format(20 * "-"))
+
+        if at_max:
+            self._output_widget.scroll_to_bottom()
+
+    def add_tab(self, tab_name, index=None):
+        widget = PythonTabWidget(self)
+        widget.before_execute.connect(self._on_before_execute)
+        widget.add_tab_requested.connect(self._on_add_requested)
+        if index is None:
+            if self._tab_widget.count() > 0:
+                index = self._tab_widget.currentIndex() + 1
+            else:
+                index = 0
+
+        self._tabs.append(widget)
+        self._tab_widget.insertTab(index, widget, tab_name)
+        self._tab_widget.setCurrentIndex(index)
+
+        if self._tab_widget.count() > 1:
+            self._tab_widget.setTabsClosable(True)
+        widget.setFocus()
+        return widget
+
+    def showEvent(self, event):
+        self._line_check_timer.start()
+        self._registry_saved = False
+        super(PythonInterpreterWidget, self).showEvent(event)
+        # First show setup
+        if self._first_show:
+            self._first_show = False
+            self._on_first_show()
+
+        self._output_widget.scroll_to_bottom()
+
+    def _on_first_show(self):
+        # Change stylesheet
+        self.setStyleSheet(load_stylesheet())
+        # Check if splitter size ratio is set
+        # - first store value to local variable and then unset it
+        splitter_size_ratio = self._splitter_size_ratio
+        self._splitter_size_ratio = None
+        # Skip if it is not set
+        if not splitter_size_ratio:
+            return
+
+        # Apply sizes only if the item count matches the splitter
+        splitters_count = len(self._widgets_splitter.sizes())
+        if len(splitter_size_ratio) == splitters_count:
+            self._widgets_splitter.setSizes(splitter_size_ratio)
+
+    def closeEvent(self, event):
+        self.save_registry()
+        super(PythonInterpreterWidget, self).closeEvent(event)
+        self._line_check_timer.stop()
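Reviewer note: for context, the tab state that `save_registry` persists (and `_init_from_registry` restores) is a plain list of name/code mappings; an illustrative registry "tabs" item, with made-up values, would look like:

    # illustrative payload stored via PythonInterpreterRegistry
    tabs = [
        {"name": "Python", "code": "print('hello')"},
        {"name": "Scratch", "code": ""},
    ]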
diff --git a/openpype/modules/royalrender/__init__.py b/client/ayon_core/modules/royalrender/__init__.py
similarity index 100%
rename from openpype/modules/royalrender/__init__.py
rename to client/ayon_core/modules/royalrender/__init__.py
diff --git a/openpype/modules/royalrender/api.py b/client/ayon_core/modules/royalrender/api.py
similarity index 96%
rename from openpype/modules/royalrender/api.py
rename to client/ayon_core/modules/royalrender/api.py
index e610a0c8a8..cd72014a42 100644
--- a/openpype/modules/royalrender/api.py
+++ b/client/ayon_core/modules/royalrender/api.py
@@ -3,10 +3,10 @@
 import sys
 import os
 
-from openpype.lib.local_settings import OpenPypeSettingsRegistry
-from openpype.lib import Logger, run_subprocess
+from ayon_core.lib.local_settings import AYONSettingsRegistry
+from ayon_core.lib import Logger, run_subprocess
 from .rr_job import RRJob, SubmitFile, SubmitterParameter
-from openpype.lib.vendor_bin_utils import find_tool_in_custom_paths
+from ayon_core.lib.vendor_bin_utils import find_tool_in_custom_paths
 
 
 class Api:
@@ -154,7 +154,7 @@ def _submit_using_api(self, file):
         # Probably best way until we setup our own user management would be
         # to encrypt password and save it to json locally. Not bulletproof
         # but at least it is not stored in plaintext.
-        reg = OpenPypeSettingsRegistry()
+        reg = AYONSettingsRegistry("rr_settings")
         try:
             rr_user = reg.get_item("rr_username")
             rr_password = reg.get_item("rr_password")
diff --git a/client/ayon_core/modules/royalrender/lib.py b/client/ayon_core/modules/royalrender/lib.py
new file mode 100644
index 0000000000..d985a39d24
--- /dev/null
+++ b/client/ayon_core/modules/royalrender/lib.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+"""Submitting render job to RoyalRender."""
+import os
+import json
+import platform
+import re
+import tempfile
+import uuid
+from datetime import datetime
+
+import pyblish.api
+
+from ayon_core.lib import BoolDef, NumberDef, is_running_from_build
+from ayon_core.lib.execute import run_ayon_launcher_process
+from ayon_core.modules.royalrender.api import Api as rrApi
+from ayon_core.modules.royalrender.rr_job import (
+    CustomAttribute,
+    RRJob,
+    RREnvList,
+    get_rr_platform,
+)
+from ayon_core.pipeline import AYONPyblishPluginMixin
+from ayon_core.pipeline.publish import KnownPublishError
+from ayon_core.pipeline.publish.lib import get_published_workfile_instance
+from ayon_core.tests.lib import is_in_tests
+
+
+class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,
+                               AYONPyblishPluginMixin):
+    """Creates separate rendering job for Royal Render"""
+    label = "Create Nuke Render job in RR"
+    order = pyblish.api.IntegratorOrder + 0.1
+    hosts = ["nuke"]
+    families = ["render", "prerender"]
+    targets = ["local"]
+    optional = True
+
+    priority = 50
+    chunk_size = 1
+    concurrent_tasks = 1
+    use_gpu = True
+    use_published = True
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            NumberDef(
+                "priority",
+                label="Priority",
+                default=cls.priority,
+                decimals=0
+            ),
+            NumberDef(
+                "chunk",
+                label="Frames Per Task",
+                default=cls.chunk_size,
+                decimals=0,
+                minimum=1,
+                maximum=1000
+            ),
+            NumberDef(
+                "concurrency",
+                label="Concurrency",
+                default=cls.concurrent_tasks,
+                decimals=0,
+                minimum=1,
+                maximum=10
+            ),
+            BoolDef(
+                "use_gpu",
+                default=cls.use_gpu,
+                label="Use GPU"
+            ),
+            BoolDef(
+                "suspend_publish",
+                default=False,
+                label="Suspend publish"
+            ),
+            BoolDef(
+                "use_published",
+                default=cls.use_published,
+                label="Use published workfile"
+            )
+        ]
+
+    def __init__(self, *args, **kwargs):
+        self._rr_root = None
+        self.scene_path = None
+        self.job = None
+        self.submission_parameters = None
+        self.rr_api = None
+
+    def process(self, instance):
+        if not instance.data.get("farm"):
+            self.log.info("Skipping local instance.")
+            return
+
+        instance.data["attributeValues"] = self.get_attr_values_from_data(
+            instance.data)
+
+        # add suspend_publish attributeValue to instance data
+        instance.data["suspend_publish"] = instance.data["attributeValues"][
+            "suspend_publish"]
+
+        context = instance.context
+
+        self._rr_root = self._resolve_rr_path(
+            context, instance.data.get("rrPathName"))  # noqa
+        self.log.debug(self._rr_root)
+        if not self._rr_root:
+            raise KnownPublishError(
+                ("Missing RoyalRender root. "
+                 "You need to configure RoyalRender module."))
+
+        self.rr_api = rrApi(self._rr_root)
+
+        self.scene_path = context.data["currentFile"]
+        if self.use_published:
+            published_workfile = get_published_workfile_instance(context)
+
+            # fallback if nothing was set
+            if published_workfile is None:
+                self.log.warning("Falling back to workfile")
+                file_path = context.data["currentFile"]
+            else:
+                workfile_repre = published_workfile.data["representations"][0]
+                file_path = workfile_repre["published_path"]
+
+            self.scene_path = file_path
+            self.log.info(
+                "Using published scene for render {}".format(self.scene_path)
+            )
+
+        if not instance.data.get("expectedFiles"):
+            instance.data["expectedFiles"] = []
+
+        if not instance.data.get("rrJobs"):
+            instance.data["rrJobs"] = []
+
+    def get_job(self, instance, script_path, render_path, node_name):
+        """Get RR job based on current instance.
+
+        Args:
+            instance (pyblish.api.Instance): Source pyblish instance.
+            script_path (str): Path to Nuke script.
+            render_path (str): Output path.
+            node_name (str): Name of the render node.
+
+        Returns:
+            RRJob: RoyalRender Job instance.
+
+        """
+        start_frame = int(instance.data["frameStartHandle"])
+        end_frame = int(instance.data["frameEndHandle"])
+
+        batch_name = os.path.basename(script_path)
+        jobname = "%s - %s" % (batch_name, instance.name)
+        if is_in_tests():
+            batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
+
+        render_dir = os.path.normpath(os.path.dirname(render_path))
+        output_filename_0 = self.pad_file_name(render_path, str(start_frame))
+        file_name, file_ext = os.path.splitext(
+            os.path.basename(output_filename_0))
+
+        custom_attributes = []
+        if is_running_from_build():
+            custom_attributes = [
+                CustomAttribute(
+                    name="OpenPypeVersion",
+                    value=os.environ.get("OPENPYPE_VERSION"))
+            ]
+
+        # this will append expected files to instance as needed.
+        expected_files = self.expected_files(
+            instance, render_path, start_frame, end_frame)
+        instance.data["expectedFiles"].extend(expected_files)
+
+        job = RRJob(
+            Software="",
+            Renderer="",
+            SeqStart=int(start_frame),
+            SeqEnd=int(end_frame),
+            SeqStep=int(instance.data.get("byFrameStep", 1)),
+            SeqFileOffset=0,
+            Version=0,
+            SceneName=script_path,
+            IsActive=True,
+            ImageDir=render_dir.replace("\\", "/"),
+            ImageFilename=file_name,
+            ImageExtension=file_ext,
+            ImagePreNumberLetter="",
+            ImageSingleOutputFile=False,
+            SceneOS=get_rr_platform(),
+            Layer=node_name,
+            SceneDatabaseDir=script_path,
+            CustomSHotName=jobname,
+            CompanyProjectName=instance.context.data["projectName"],
+            ImageWidth=instance.data["resolutionWidth"],
+            ImageHeight=instance.data["resolutionHeight"],
+            CustomAttributes=custom_attributes
+        )
+
+        return job
+
+    def update_job_with_host_specific(self, instance, job):
+        """Host specific mapping for RRJob"""
+        raise NotImplementedError
+
+    @staticmethod
+    def _resolve_rr_path(context, rr_path_name):
+        # type: (pyblish.api.Context, str) -> str
+        rr_settings = (
+            context.data
+            ["system_settings"]
+            ["modules"]
+            ["royalrender"]
+        )
+        try:
+            default_servers = rr_settings["rr_paths"]
+            project_servers = (
+                context.data
+                ["project_settings"]
+                ["royalrender"]
+                ["rr_paths"]
+            )
+            rr_servers = {
+                k: default_servers[k]
+                for k in project_servers
+                if k in default_servers
+            }
+
+        except (AttributeError, KeyError):
+            # Handle the situation where only one url was set for RoyalRender.
+            return context.data["defaultRRPath"][platform.system().lower()]
+
+        return rr_servers[rr_path_name][platform.system().lower()]
+
+    def expected_files(self, instance, path, start_frame, end_frame):
+        """Get expected files.
+
+        This function generates expected files from the provided
+        path and start/end frames.
+
+        It was taken from the Deadline module, but this should probably
+        be handled better in a collector to support more flexible
+        scenarios.
+
+        Args:
+            instance (Instance)
+            path (str): Output path.
+            start_frame (int): Start frame.
+            end_frame (int): End frame.
+
+        Returns:
+            list: List of expected files.
+
+        """
+        dir_name = os.path.dirname(path)
+        file = os.path.basename(path)
+
+        expected_files = []
+
+        if "#" in file:
+            pparts = file.split("#")
+            padding = "%0{}d".format(len(pparts) - 1)
+            file = pparts[0] + padding + pparts[-1]
+
+        if "%" not in file:
+            expected_files.append(path)
+            return expected_files
+
+        if instance.data.get("slate"):
+            start_frame -= 1
+
+        expected_files.extend(
+            os.path.join(dir_name, (file % i)).replace("\\", "/")
+            for i in range(start_frame, (end_frame + 1))
+        )
+        return expected_files
+
+    def pad_file_name(self, path, first_frame):
+        """Return output file path with #### for padding.
+
+        RR requires the path to be formatted with # in place of numbers.
+        For example `/path/to/render.####.png`
+
+        Args:
+            path (str): path to rendered image
+            first_frame (str): from representation, to cleanly replace
+                with # padding
+
+        Returns:
+            str
+
+        """
+        self.log.debug("pad_file_name path: `{}`".format(path))
+        if "%" in path:
+            # Convert printf style padding (e.g. `%04d`) to `#` padding
+            match = re.search(r"%0(\d+)d", path)
+            self.log.debug("_ padding match: `{}`".format(match))
+            if match:
+                padding = int(match.group(1))
+                return path.replace(match.group(0), "#" * padding)
+        if "#" in path:
+            self.log.debug("already padded: `{}`".format(path))
+            return path
+
+        if first_frame:
+            padding = len(first_frame)
+            path = path.replace(first_frame, "#" * padding)
+
+        return path
+
+    def inject_environment(self, instance, job):
+        # type: (pyblish.api.Instance, RRJob) -> RRJob
+        """Inject environment variables for RR submission.
+
+        This function mimics the behaviour of the Deadline
+        integration. It is just a temporary solution until proper
+        runtime environment injection is implemented in RR.
+
+        Args:
+            instance (pyblish.api.Instance): Publishing instance
+            job (RRJob): RRJob instance to be injected.
+
+        Returns:
+            RRJob: Injected RRJob instance.
+
+        Raises:
+            RuntimeError: If any of the required env vars is missing.
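+
+        Example:
+            Illustrative launcher arguments built by this function (all
+            values are made up)::
+
+                --headless extractenvironments <tempdir>/<timestamp>_<uuid>.json
+                    --project MyProject --asset sh010 --task compositing
+                    --app nuke/15-0 --envgroup farm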
+
+        """
+        temp_file_name = "{}_{}.json".format(
+            datetime.utcnow().strftime('%Y%m%d%H%M%S%f'),
+            str(uuid.uuid1())
+        )
+
+        export_url = os.path.join(tempfile.gettempdir(), temp_file_name)
+        print(">>> Temporary path: {}".format(export_url))
+
+        args = [
+            "--headless",
+            "extractenvironments",
+            export_url
+        ]
+
+        anatomy_data = instance.context.data["anatomyData"]
+
+        add_kwargs = {
+            "project": anatomy_data["project"]["name"],
+            "asset": instance.context.data["asset"],
+            "task": anatomy_data["task"]["name"],
+            "app": instance.context.data.get("appName"),
+            "envgroup": "farm"
+        }
+
+        if os.getenv('IS_TEST'):
+            args.append("--automatic-tests")
+
+        if not all(add_kwargs.values()):
+            raise RuntimeError((
+                "Missing required env vars: AVALON_PROJECT, AVALON_ASSET,"
+                " AVALON_TASK, AVALON_APP_NAME"
+            ))
+
+        for key, value in add_kwargs.items():
+            args.extend([f"--{key}", value])
+        self.log.debug("Executing: {}".format(" ".join(args)))
+        run_ayon_launcher_process(*args, logger=self.log)
+
+        self.log.debug("Loading file ...")
+        with open(export_url) as fp:
+            contents = json.load(fp)
+
+        job.rrEnvList = RREnvList(contents).serialize()
+        return job
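Reviewer note: `inject_environment` assumes the exported file is a flat JSON mapping of environment variable names to values, which `RREnvList` then serializes onto the job; an illustrative file content (actual keys depend on the env group) would be:

    {
        "AVALON_PROJECT": "MyProject",
        "AVALON_ASSET": "sh010",
        "AVALON_TASK": "compositing"
    }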
diff --git a/openpype/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py b/client/ayon_core/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py
similarity index 100%
rename from openpype/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py
rename to client/ayon_core/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py
diff --git a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py b/client/ayon_core/modules/royalrender/plugins/publish/collect_sequences_from_job.py
similarity index 95%
rename from openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py
rename to client/ayon_core/modules/royalrender/plugins/publish/collect_sequences_from_job.py
index 1bfee19e3d..a253a1ec5b 100644
--- a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py
+++ b/client/ayon_core/modules/royalrender/plugins/publish/collect_sequences_from_job.py
@@ -8,7 +8,7 @@
 
 import pyblish.api
 
-from openpype.pipeline import legacy_io
+from ayon_core.pipeline import legacy_io
 
 
 def collect(root,
@@ -70,7 +70,7 @@ def collect(root,
 class CollectSequencesFromJob(pyblish.api.ContextPlugin):
     """Gather file sequences from job directory.
 
-    When "OPENPYPE_PUBLISH_DATA" environment variable is set these paths
+    When "AYON_PUBLISH_DATA" environment variable is set these paths
     (folders or .json files) are parsed for image sequences. Otherwise,
     the current working directory is searched for file sequences.
@@ -91,9 +91,13 @@ def process(self, context):
             ["review"]
         )
 
-        if os.environ.get("OPENPYPE_PUBLISH_DATA"):
-            self.log.debug(os.environ.get("OPENPYPE_PUBLISH_DATA"))
-            paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep)
+        publish_data_paths = (
+            os.environ.get("AYON_PUBLISH_DATA")
+            or os.environ.get("OPENPYPE_PUBLISH_DATA")
+        )
+        if publish_data_paths:
+            self.log.debug(publish_data_paths)
+            paths = publish_data_paths.split(os.pathsep)
             self.log.info("Collecting paths: {}".format(paths))
         else:
             cwd = context.get("workspaceDir", os.getcwd())
diff --git a/openpype/modules/royalrender/plugins/publish/create_maya_royalrender_job.py b/client/ayon_core/modules/royalrender/plugins/publish/create_maya_royalrender_job.py
similarity index 92%
rename from openpype/modules/royalrender/plugins/publish/create_maya_royalrender_job.py
rename to client/ayon_core/modules/royalrender/plugins/publish/create_maya_royalrender_job.py
index 775a2964fd..d205b32b7d 100644
--- a/openpype/modules/royalrender/plugins/publish/create_maya_royalrender_job.py
+++ b/client/ayon_core/modules/royalrender/plugins/publish/create_maya_royalrender_job.py
@@ -4,8 +4,8 @@
 
 from maya.OpenMaya import MGlobal  # noqa: F401
 
-from openpype.modules.royalrender import lib
-from openpype.pipeline.farm.tools import iter_expected_files
+from ayon_core.modules.royalrender import lib
+from ayon_core.pipeline.farm.tools import iter_expected_files
 
 
 class CreateMayaRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
diff --git a/openpype/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py b/client/ayon_core/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py
similarity index 97%
rename from openpype/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py
rename to client/ayon_core/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py
index 4f589e56f8..4234535b23 100644
--- a/openpype/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py
+++ b/client/ayon_core/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py
@@ -2,7 +2,7 @@
 """Submitting render job to RoyalRender."""
 import re
 
-from openpype.modules.royalrender import lib
+from ayon_core.modules.royalrender import lib
 
 
 class CreateNukeRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
diff --git a/openpype/modules/royalrender/plugins/publish/create_publish_royalrender_job.py b/client/ayon_core/modules/royalrender/plugins/publish/create_publish_royalrender_job.py
similarity index 91%
rename from openpype/modules/royalrender/plugins/publish/create_publish_royalrender_job.py
rename to client/ayon_core/modules/royalrender/plugins/publish/create_publish_royalrender_job.py
index d4af1c2aee..680795a329 100644
--- a/openpype/modules/royalrender/plugins/publish/create_publish_royalrender_job.py
+++ b/client/ayon_core/modules/royalrender/plugins/publish/create_publish_royalrender_job.py
@@ -7,23 +7,23 @@
 
 import pyblish.api
 
-from openpype.modules.royalrender.rr_job import (
+from ayon_core.modules.royalrender.rr_job import (
     RRJob,
     RREnvList,
     get_rr_platform
 )
-from openpype.pipeline.publish import KnownPublishError
-from openpype.pipeline import (
+from ayon_core.pipeline.publish import KnownPublishError
+from ayon_core.pipeline import (
     legacy_io,
 )
-from openpype.pipeline.farm.pyblish_functions import (
+from ayon_core.pipeline.farm.pyblish_functions import (
     create_skeleton_instance,
     create_instances_for_aov,
     attach_instances_to_subset,
     prepare_representations,
     create_metadata_path
 )
-from openpype.pipeline import publish
+from ayon_core.pipeline import publish
 
 
 class CreatePublishRoyalRenderJob(pyblish.api.InstancePlugin,
@@ -62,18 +62,13 @@ class CreatePublishRoyalRenderJob(pyblish.api.InstancePlugin,
     # list of family names to transfer to new family if present
     families_transfer = ["render3d", "render2d", "ftrack", "slate"]
 
-    environ_job_filter = [
-        "OPENPYPE_METADATA_FILE"
-    ]
-
     environ_keys = [
         "FTRACK_API_USER",
        "FTRACK_API_KEY",
         "FTRACK_SERVER",
         "AVALON_APP_NAME",
-        "OPENPYPE_USERNAME",
+        "AYON_USERNAME",
         "OPENPYPE_SG_USER",
-        "OPENPYPE_MONGO"
     ]
     priority = 50
 
@@ -191,7 +186,7 @@ def get_job(self, instance, instances):
             "AVALON_PROJECT": anatomy_data["project"]["name"],
             "AVALON_ASSET": instance.context.data["asset"],
             "AVALON_TASK": anatomy_data["task"]["name"],
-            "OPENPYPE_USERNAME": anatomy_data["user"]
+            "AYON_USERNAME": anatomy_data["user"]
         })
 
         # add environments from self.environ_keys
@@ -201,22 +196,10 @@ def get_job(self, instance, instances):
 
         # pass environment keys from self.environ_job_filter
         # and collect all pre_ids to wait for
-        job_environ = {}
         jobs_pre_ids = []
         for job in instance.data["rrJobs"]:  # type: RRJob
-            if job.rrEnvList:
-                if len(job.rrEnvList) > 2000:
-                    self.log.warning(("Job environment is too long "
-                                      f"{len(job.rrEnvList)} > 2000"))
-                job_environ.update(
-                    dict(RREnvList.parse(job.rrEnvList))
-                )
             jobs_pre_ids.append(job.PreID)
 
-        for env_j_key in self.environ_job_filter:
-            if job_environ.get(env_j_key):
-                environment[env_j_key] = job_environ[env_j_key]
-
         priority = self.priority or instance.data.get("priority", 50)
 
         # rr requires absolut path or all jobs won't show up in rControl
diff --git a/openpype/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py b/client/ayon_core/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py
similarity index 97%
rename from openpype/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py
rename to client/ayon_core/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py
index 8fc8604b83..a76bdfc26c 100644
--- a/openpype/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py
+++ b/client/ayon_core/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py
@@ -4,12 +4,12 @@
 import platform
 import pyblish.api
 
-from openpype.modules.royalrender.api import (
+from ayon_core.modules.royalrender.api import (
     RRJob,
     Api as rrApi,
     SubmitterParameter
 )
-from openpype.pipeline.publish import KnownPublishError
+from ayon_core.pipeline.publish import KnownPublishError
 
 
 class SubmitJobsToRoyalRender(pyblish.api.ContextPlugin):
diff --git a/openpype/modules/royalrender/royal_render_module.py b/client/ayon_core/modules/royalrender/royal_render_module.py
similarity index 89%
rename from openpype/modules/royalrender/royal_render_module.py
rename to client/ayon_core/modules/royalrender/royal_render_module.py
index 10d74d01d1..66b09832d8 100644
--- a/openpype/modules/royalrender/royal_render_module.py
+++ b/client/ayon_core/modules/royalrender/royal_render_module.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 """Module providing support for Royal Render."""
 import os
-import openpype.modules
-from openpype.modules import OpenPypeModule, IPluginPaths
+import ayon_core.modules
+from ayon_core.modules import OpenPypeModule, IPluginPaths
 
 
 class RoyalRenderModule(OpenPypeModule, IPluginPaths):
@@ -19,7 +19,7 @@ def api(self):
         return self._api
 
     def __init__(self, manager, settings):
-        # type: (openpype.modules.base.ModulesManager, dict) -> None
+        # type: (ayon_core.addon.AddonsManager, dict) -> None
         self.rr_paths = {}
         self._api = None
         self.settings = settings
diff --git a/openpype/modules/royalrender/rr_job.py b/client/ayon_core/modules/royalrender/rr_job.py
similarity index 100%
rename from openpype/modules/royalrender/rr_job.py
rename to client/ayon_core/modules/royalrender/rr_job.py
diff --git a/openpype/modules/royalrender/rr_root/README.md b/client/ayon_core/modules/royalrender/rr_root/README.md
similarity index 100%
rename from openpype/modules/royalrender/rr_root/README.md
rename to client/ayon_core/modules/royalrender/rr_root/README.md
diff --git a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py b/client/ayon_core/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
similarity index 98%
rename from openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
rename to client/ayon_core/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
index cdc37588cd..7118c5ebef 100644
--- a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
+++ b/client/ayon_core/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """This is RR control plugin that runs on the job by user interaction.
 
-It asks user for context to publish, getting it from OpenPype. In order to
+It asks user for context to publish, getting it from ayon_core. In order to
 run it needs `OPENPYPE_ROOT` to be set to know where to execute OpenPype.
 """
 
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype.png b/client/ayon_core/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype.png
similarity index 100%
rename from openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype.png
rename to client/ayon_core/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype.png
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype__PublishJob.cfg b/client/ayon_core/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype__PublishJob.cfg
similarity index 100%
rename from openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype__PublishJob.cfg
rename to client/ayon_core/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype__PublishJob.cfg
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype___global.inc b/client/ayon_core/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype___global.inc
similarity index 100%
rename from openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype___global.inc
rename to client/ayon_core/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype___global.inc
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_install_paths/OpenPype.cfg b/client/ayon_core/modules/royalrender/rr_root/render_apps/_install_paths/OpenPype.cfg
similarity index 100%
rename from openpype/modules/royalrender/rr_root/render_apps/_install_paths/OpenPype.cfg
rename to client/ayon_core/modules/royalrender/rr_root/render_apps/_install_paths/OpenPype.cfg
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/OpenPypeEnvironment.cfg b/client/ayon_core/modules/royalrender/rr_root/render_apps/_prepost_scripts/OpenPypeEnvironment.cfg
similarity index 100%
rename from openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/OpenPypeEnvironment.cfg
rename to client/ayon_core/modules/royalrender/rr_root/render_apps/_prepost_scripts/OpenPypeEnvironment.cfg
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py b/client/ayon_core/modules/royalrender/rr_root/render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py
similarity index 100%
rename from openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py
rename to client/ayon_core/modules/royalrender/rr_root/render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py
diff --git a/openpype/modules/timers_manager/__init__.py b/client/ayon_core/modules/timers_manager/__init__.py
similarity index 100%
rename from openpype/modules/timers_manager/__init__.py
rename to client/ayon_core/modules/timers_manager/__init__.py
diff --git a/openpype/modules/timers_manager/exceptions.py b/client/ayon_core/modules/timers_manager/exceptions.py
similarity index 100%
rename from openpype/modules/timers_manager/exceptions.py
rename to client/ayon_core/modules/timers_manager/exceptions.py
diff --git a/openpype/modules/timers_manager/idle_threads.py b/client/ayon_core/modules/timers_manager/idle_threads.py
similarity index 99%
rename from openpype/modules/timers_manager/idle_threads.py
rename to client/ayon_core/modules/timers_manager/idle_threads.py
index eb11bbf117..d70f7790c4 100644
--- a/openpype/modules/timers_manager/idle_threads.py
+++ b/client/ayon_core/modules/timers_manager/idle_threads.py
@@ -2,7 +2,7 @@
 from qtpy import QtCore
 from pynput import mouse, keyboard
 
-from openpype.lib import Logger
+from ayon_core.lib import Logger
 
 
 class IdleItem:
diff --git a/openpype/modules/timers_manager/launch_hooks/post_start_timer.py b/client/ayon_core/modules/timers_manager/launch_hooks/post_start_timer.py
similarity index 88%
rename from openpype/modules/timers_manager/launch_hooks/post_start_timer.py
rename to client/ayon_core/modules/timers_manager/launch_hooks/post_start_timer.py
index 76c3cca33e..d8710a9b26 100644
--- a/openpype/modules/timers_manager/launch_hooks/post_start_timer.py
+++ b/client/ayon_core/modules/timers_manager/launch_hooks/post_start_timer.py
@@ -1,4 +1,4 @@
-from openpype.lib.applications import PostLaunchHook, LaunchTypes
+from ayon_core.lib.applications import PostLaunchHook, LaunchTypes
 
 
 class PostStartTimerHook(PostLaunchHook):
@@ -31,9 +31,7 @@ def execute(self):
             ))
             return
 
-        timers_manager = self.modules_manager.modules_by_name.get(
-            "timers_manager"
-        )
+        timers_manager = self.addons_manager.get("timers_manager")
         if not timers_manager or not timers_manager.enabled:
             self.log.info((
                 "Skipping starting timer because"
diff --git a/openpype/modules/timers_manager/plugins/publish/start_timer.py b/client/ayon_core/modules/timers_manager/plugins/publish/start_timer.py
similarity index 90%
rename from openpype/modules/timers_manager/plugins/publish/start_timer.py
rename to client/ayon_core/modules/timers_manager/plugins/publish/start_timer.py
index 19a67292f5..51f707ecf6 100644
--- a/openpype/modules/timers_manager/plugins/publish/start_timer.py
+++ b/client/ayon_core/modules/timers_manager/plugins/publish/start_timer.py
@@ -1,7 +1,7 @@
 """
 Requires:
     context -> system_settings
-    context -> openPypeModules
+    context -> ayonAddonsManager
 """
 import pyblish.api
 
@@ -13,7 +13,7 @@ class StartTimer(pyblish.api.ContextPlugin):
     hosts = ["*"]
 
     def process(self, context):
-        timers_manager = context.data["openPypeModules"]["timers_manager"]
+        timers_manager = context.data["ayonAddonsManager"]["timers_manager"]
         if not timers_manager.enabled:
             self.log.debug("TimersManager is disabled")
             return
diff --git a/openpype/modules/timers_manager/plugins/publish/stop_timer.py b/client/ayon_core/modules/timers_manager/plugins/publish/stop_timer.py
similarity index 85%
rename from openpype/modules/timers_manager/plugins/publish/stop_timer.py
rename to client/ayon_core/modules/timers_manager/plugins/publish/stop_timer.py
index a8674ff2ca..9d7cb33ba9 100644
--- a/openpype/modules/timers_manager/plugins/publish/stop_timer.py
+++ b/client/ayon_core/modules/timers_manager/plugins/publish/stop_timer.py
@@ -1,7 +1,7 @@
 """
 Requires:
     context -> system_settings
-    context -> openPypeModules
+    context -> ayonAddonsManager
 """
 
 
@@ -14,7 +14,7 @@ class StopTimer(pyblish.api.ContextPlugin):
     hosts = ["*"]
 
     def process(self, context):
-        timers_manager = context.data["openPypeModules"]["timers_manager"]
+        timers_manager = context.data["ayonAddonsManager"]["timers_manager"]
         if not timers_manager.enabled:
             self.log.debug("TimersManager is disabled")
             return
diff --git a/client/ayon_core/modules/timers_manager/rest_api.py b/client/ayon_core/modules/timers_manager/rest_api.py
new file mode 100644
index 0000000000..b460719f80
--- /dev/null
+++ b/client/ayon_core/modules/timers_manager/rest_api.py
@@ -0,0 +1,85 @@
+import json
+
+from aiohttp.web_response import Response
+from ayon_core.lib import Logger
+
+
+class TimersManagerModuleRestApi:
+    """
+    REST API endpoints used for calls from hosts when a context change
+    happens in the Workfile app.
+    """
+    def __init__(self, user_module, server_manager):
+        self._log = None
+        self.module = user_module
+        self.server_manager = server_manager
+
+        self.prefix = "/timers_manager"
+
+        self.register()
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(self.__class__.__name__)
+        return self._log
+
+    def register(self):
+        self.server_manager.add_route(
+            "POST",
+            self.prefix + "/start_timer",
+            self.start_timer
+        )
+        self.server_manager.add_route(
+            "POST",
+            self.prefix + "/stop_timer",
+            self.stop_timer
+        )
+        self.server_manager.add_route(
+            "GET",
+            self.prefix + "/get_task_time",
+            self.get_task_time
+        )
+
+    async def start_timer(self, request):
+        data = await request.json()
+        try:
+            project_name = data["project_name"]
+            asset_name = data["asset_name"]
+            task_name = data["task_name"]
+        except KeyError:
+            msg = (
+                "Payload must contain fields 'project_name',"
+                " 'asset_name' and 'task_name'"
+            )
+            self.log.error(msg)
+            return Response(status=400, text=msg)
+
+        self.module.stop_timers()
+        try:
+            self.module.start_timer(project_name, asset_name, task_name)
+        except Exception as exc:
+            return Response(status=404, text=str(exc))
+
+        return Response(status=200)
+
+    async def stop_timer(self, request):
+        self.module.stop_timers()
+        return Response(status=200)
+
+    async def get_task_time(self, request):
+        data = await request.json()
+        try:
+            project_name = data["project_name"]
+            asset_name = data["asset_name"]
+            task_name = data["task_name"]
+        except KeyError:
+            message = (
+                "Payload must contain fields 'project_name', 'asset_name'"
+                " and 'task_name'"
+            )
+            self.log.warning(message)
+            return Response(text=message, status=404)
+
+        time = self.module.get_task_time(project_name, asset_name, task_name)
+        return Response(text=json.dumps(time))
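Reviewer note: a minimal sketch of how a host integration could call the new endpoints (route and payload keys come from `register` above; the project/asset/task values are made up, and `requests` plus a running tray webserver are assumed):

    import os
    import requests

    webserver_url = os.environ["AYON_WEBSERVER_URL"]
    response = requests.post(
        webserver_url + "/timers_manager/start_timer",
        json={
            "project_name": "MyProject",
            "asset_name": "sh010",
            "task_name": "compositing",
        },
    )
    # 200 on success, 400 when a payload key is missing
    print(response.status_code)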
diff --git a/openpype/modules/timers_manager/timers_manager.py b/client/ayon_core/modules/timers_manager/timers_manager.py
similarity index 97%
rename from openpype/modules/timers_manager/timers_manager.py
rename to client/ayon_core/modules/timers_manager/timers_manager.py
index 674d834a1d..daba0cead9 100644
--- a/openpype/modules/timers_manager/timers_manager.py
+++ b/client/ayon_core/modules/timers_manager/timers_manager.py
@@ -2,13 +2,13 @@
 
 import platform
 
-from openpype.client import get_asset_by_name
-from openpype.modules import (
+from ayon_core.client import get_asset_by_name
+from ayon_core.modules import (
     OpenPypeModule,
     ITrayService,
     IPluginPaths
 )
-from openpype.lib.events import register_event_callback
+from ayon_core.lib.events import register_event_callback
 
 from .exceptions import InvalidContextError
 
@@ -339,7 +339,7 @@ def stop_timers(self):
 
         self.timer_stopped(None)
 
-    def connect_with_modules(self, enabled_modules):
+    def connect_with_addons(self, enabled_modules):
         for module in enabled_modules:
             connector = getattr(module, "timers_manager_connector", None)
             if connector is None:
@@ -405,7 +405,7 @@ def start_timer_with_webserver(
             passed.
         """
 
-        webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
+        webserver_url = os.environ.get("AYON_WEBSERVER_URL")
         if not webserver_url:
             msg = "Couldn't find webserver url"
             if logger is not None:
@@ -440,7 +440,7 @@ def stop_timer_with_webserver(logger=None):
             logger (logging.Logger): Logger used for logging messages.
         """
 
-        webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
+        webserver_url = os.environ.get("AYON_WEBSERVER_URL")
         if not webserver_url:
             msg = "Couldn't find webserver url"
             if logger is not None:
diff --git a/openpype/modules/timers_manager/widget_user_idle.py b/client/ayon_core/modules/timers_manager/widget_user_idle.py
similarity index 97%
rename from openpype/modules/timers_manager/widget_user_idle.py
rename to client/ayon_core/modules/timers_manager/widget_user_idle.py
index 9df328e6b2..94d7a606ed 100644
--- a/openpype/modules/timers_manager/widget_user_idle.py
+++ b/client/ayon_core/modules/timers_manager/widget_user_idle.py
@@ -1,5 +1,5 @@
 from qtpy import QtCore, QtGui, QtWidgets
-from openpype import resources, style
+from ayon_core import resources, style
 
 
 class WidgetUserIdle(QtWidgets.QWidget):
@@ -11,12 +11,13 @@ def __init__(self, module):
 
         self.setWindowTitle("OpenPype - Stop timers")
 
-        icon = QtGui.QIcon(resources.get_openpype_icon_filepath())
+        icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
         self.setWindowIcon(icon)
 
         self.setWindowFlags(
             QtCore.Qt.WindowCloseButtonHint
             | QtCore.Qt.WindowMinimizeButtonHint
+            | QtCore.Qt.WindowStaysOnTopHint
         )
 
         self._is_showed = False
diff --git a/client/ayon_core/modules/webserver/__init__.py b/client/ayon_core/modules/webserver/__init__.py
new file mode 100644
index 0000000000..0d3f767638
--- /dev/null
+++ b/client/ayon_core/modules/webserver/__init__.py
@@ -0,0 +1,8 @@
+from .webserver_module import (
+    WebServerAddon
+)
+
+
+__all__ = (
+    "WebServerAddon",
+)
diff --git a/openpype/modules/webserver/base_routes.py b/client/ayon_core/modules/webserver/base_routes.py
similarity index 100%
rename from openpype/modules/webserver/base_routes.py
rename to client/ayon_core/modules/webserver/base_routes.py
diff --git a/openpype/modules/webserver/cors_middleware.py b/client/ayon_core/modules/webserver/cors_middleware.py
similarity index 100%
rename from openpype/modules/webserver/cors_middleware.py
rename to client/ayon_core/modules/webserver/cors_middleware.py
diff --git a/openpype/modules/webserver/host_console_listener.py b/client/ayon_core/modules/webserver/host_console_listener.py
similarity index 97%
rename from openpype/modules/webserver/host_console_listener.py
rename to client/ayon_core/modules/webserver/host_console_listener.py
index e5c11af9c2..ed8a32b9f2 100644
--- a/openpype/modules/webserver/host_console_listener.py
+++ b/client/ayon_core/modules/webserver/host_console_listener.py
@@ -1,11 +1,13 @@
-import aiohttp
-from aiohttp import web
 import json
 import logging
 from concurrent.futures import CancelledError
+
+import aiohttp
+from aiohttp import web
 from qtpy import QtWidgets
 
-from openpype.modules import ITrayService
+from ayon_core.addon import ITrayService
+from ayon_core.tools.stdout_broker.window import ConsoleDialog
 
 log = logging.getLogger(__name__)
 
@@ -34,7 +36,6 @@ def __init__(self, webserver, module):
         webserver.add_route('*', "/ws/host_listener", self.websocket_handler)
 
     def _host_is_connecting(self, host_name, label):
-        from openpype.tools.stdout_broker.window import ConsoleDialog
         """ Initialize dialog, adds to submenu. """
         services_submenu = self.module._services_submenu
         action = QtWidgets.QAction(label, services_submenu)
diff --git a/client/ayon_core/modules/webserver/server.py b/client/ayon_core/modules/webserver/server.py
new file mode 100644
index 0000000000..99d9badb6a
--- /dev/null
+++ b/client/ayon_core/modules/webserver/server.py
@@ -0,0 +1,180 @@
+import re
+import threading
+import asyncio
+
+from aiohttp import web
+
+from ayon_core.lib import Logger
+from .cors_middleware import cors_middleware
+
+
+class WebServerManager:
+    """Manager that takes care of the web server thread."""
+
+    def __init__(self, port=None, host=None):
+        self._log = None
+
+        self.port = port or 8079
+        self.host = host or "localhost"
+
+        self.client = None
+        self.handlers = {}
+        self.on_stop_callbacks = []
+
+        self.app = web.Application(
+            middlewares=[
+                cors_middleware(
+                    origins=[re.compile(r"^https?\:\/\/localhost")]
+                )
+            ]
+        )
+
+        # add route with multiple methods for single "external app"
+
+        self.webserver_thread = WebServerThread(self)
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(self.__class__.__name__)
+        return self._log
+
+    @property
+    def url(self):
+        return "http://{}:{}".format(self.host, self.port)
+
+    def add_route(self, *args, **kwargs):
+        self.app.router.add_route(*args, **kwargs)
+
+    def add_static(self, *args, **kwargs):
+        self.app.router.add_static(*args, **kwargs)
+
+    def start_server(self):
+        if self.webserver_thread and not self.webserver_thread.is_alive():
+            self.webserver_thread.start()
+
+    def stop_server(self):
+        if not self.is_running:
+            return
+        try:
+            self.log.debug("Stopping Web server")
+            self.webserver_thread.is_running = False
+            self.webserver_thread.stop()
+
+        except Exception:
+            self.log.warning(
+                "Error happened while stopping the web server",
+                exc_info=True
+            )
+
+    @property
+    def is_running(self):
+        if not self.webserver_thread:
+            return False
+        return self.webserver_thread.is_running
+
+    def thread_stopped(self):
+        for callback in self.on_stop_callbacks:
+            callback()
+
+
+class WebServerThread(threading.Thread):
+    """Listener for requests running in its own thread."""
+
+    def __init__(self, manager):
+        self._log = None
+
+        super(WebServerThread, self).__init__()
+
+        self.is_running = False
+        self.manager = manager
+        self.loop = None
+        self.runner = None
+        self.site = None
+        self.tasks = []
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(self.__class__.__name__)
+        return self._log
+
+    @property
+    def port(self):
+        return self.manager.port
+
+    @property
+    def host(self):
+        return self.manager.host
+
+    def run(self):
+        self.is_running = True
+
+        try:
+            self.log.info("Starting WebServer server")
+            self.loop = asyncio.new_event_loop()  # create new loop for thread
+            asyncio.set_event_loop(self.loop)
+
+            self.loop.run_until_complete(self.start_server())
+
+            self.log.debug(
+                "Running Web server on URL: \"localhost:{}\"".format(self.port)
+            )
+
+            asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
+            self.loop.run_forever()
+
+        except Exception:
+            self.log.warning(
+                "Web Server service has failed", exc_info=True
+            )
+        finally:
+            self.loop.close()  # optional
+
+        self.is_running = False
+        self.manager.thread_stopped()
+        self.log.info("Web server stopped")
+
+    async def start_server(self):
+        """Start the runner and TCPSite."""
+        self.runner = web.AppRunner(self.manager.app)
+        await self.runner.setup()
+        self.site = web.TCPSite(self.runner, self.host, self.port)
+        await self.site.start()
+
+    def stop(self):
+        """Set is_running flag to False; 'check_shutdown' shuts server down."""
+        self.is_running = False
+
+    async def check_shutdown(self):
+        """Future that periodically checks whether the server should
+        keep running.
+        """
+        while self.is_running:
+            while self.tasks:
+                task = self.tasks.pop(0)
+                self.log.debug("waiting for task {}".format(task))
+                await task
+                self.log.debug("returned value {}".format(task.result))
+
+            await asyncio.sleep(0.5)
+
+        self.log.debug("Starting shutdown")
+        await self.site.stop()
+        self.log.debug("Site stopped")
+        await self.runner.cleanup()
+        self.log.debug("Runner stopped")
+        tasks = [
+            task
+            for task in asyncio.all_tasks()
+            if task is not asyncio.current_task()
+        ]
+        list(map(lambda task: task.cancel(), tasks))  # cancel all the tasks
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+        self.log.debug(
+            f'Finished awaiting cancelled tasks, results: {results}...'
+        )
+        await self.loop.shutdown_asyncgens()
+        # to really make sure everything else has time to stop
+        await asyncio.sleep(0.07)
+        self.loop.stop()
diff --git a/client/ayon_core/modules/webserver/webserver_module.py b/client/ayon_core/modules/webserver/webserver_module.py
new file mode 100644
index 0000000000..ec143d0866
--- /dev/null
+++ b/client/ayon_core/modules/webserver/webserver_module.py
@@ -0,0 +1,209 @@
+"""WebServerAddon spawns an aiohttp server in an asyncio loop.
+
+The main usage of the module is in the OpenPype tray, where it makes sense
+to give other modules the ability to add their own routes. A module that
+wants to use that option must implement the method
+`webserver_initialization`, which receives a `WebServerManager` object on
+which routes or static paths with handlers can be registered.
+
+WebServerManager is by default created only in tray.
+
+It is also possible to create a server manager without using the module
+logic at all with `create_new_server_manager`. That can be handy for
+standalone scripts with a predefined host and port and separated routes
+and logic.
+
+Running multiple servers in one process is not recommended and probably
+won't work as expected, because of a few limitations connected to the
+asyncio module.
+
+When the module's `create_server_manager` is called, the environment
+variable "AYON_WEBSERVER_URL" is also set. It should lead to the root
+access point of the server.
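+
+Example (illustrative sketch; the addon class and route are made up)::
+
+    class MyAddon(AYONAddon):
+        name = "my_addon"
+
+        def webserver_initialization(self, server_manager):
+            # register a route handled by an aiohttp coroutine
+            server_manager.add_route("GET", "/my_addon/ping", self._ping)
+
+Standalone usage without the addon logic (port is made up)::
+
+    manager = WebServerAddon.create_new_server_manager(8091, "localhost")
+    manager.start_server()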
+""" + +import os +import socket + +from ayon_core import resources +from ayon_core.addon import AYONAddon, ITrayService + + +class WebServerAddon(AYONAddon, ITrayService): + name = "webserver" + label = "WebServer" + + webserver_url_env = "AYON_WEBSERVER_URL" + + def initialize(self, settings): + self._server_manager = None + self._host_listener = None + + self._port = self.find_free_port() + self._webserver_url = None + + @property + def server_manager(self): + """ + + Returns: + Union[WebServerManager, None]: Server manager instance. + + """ + return self._server_manager + + @property + def port(self): + """ + + Returns: + int: Port on which is webserver running. + + """ + return self._port + + @property + def webserver_url(self): + """ + + Returns: + str: URL to webserver. + + """ + return self._webserver_url + + def connect_with_addons(self, enabled_modules): + if not self._server_manager: + return + + for module in enabled_modules: + if not hasattr(module, "webserver_initialization"): + continue + + try: + module.webserver_initialization(self._server_manager) + except Exception: + self.log.warning( + ( + "Failed to connect module \"{}\" to webserver." + ).format(module.name), + exc_info=True + ) + + def tray_init(self): + self.create_server_manager() + self._add_resources_statics() + self._add_listeners() + + def tray_start(self): + self.start_server() + + def tray_exit(self): + self.stop_server() + + def start_server(self): + if self._server_manager is not None: + self._server_manager.start_server() + + def stop_server(self): + if self._server_manager is not None: + self._server_manager.stop_server() + + @staticmethod + def create_new_server_manager(port=None, host=None): + """Create webserver manager for passed port and host. + + Args: + port(int): Port on which wil webserver listen. + host(str): Host name or IP address. Default is 'localhost'. + + Returns: + WebServerManager: Prepared manager. + """ + from .server import WebServerManager + + return WebServerManager(port, host) + + def create_server_manager(self): + if self._server_manager is not None: + return + + self._server_manager = self.create_new_server_manager(self._port) + self._server_manager.on_stop_callbacks.append( + self.set_service_failed_icon + ) + + webserver_url = self._server_manager.url + os.environ["OPENPYPE_WEBSERVER_URL"] = str(webserver_url) + os.environ[self.webserver_url_env] = str(webserver_url) + self._webserver_url = webserver_url + + @staticmethod + def find_free_port( + port_from=None, port_to=None, exclude_ports=None, host=None + ): + """Find available socket port from entered range. + + It is also possible to only check if entered port is available. + + Args: + port_from (int): Port number which is checked as first. + port_to (int): Last port that is checked in sequence from entered + `port_from`. Only `port_from` is checked if is not entered. + Nothing is processed if is equeal to `port_from`! + exclude_ports (list, tuple, set): List of ports that won't be + checked form entered range. + host (str): Host where will check for free ports. Set to + "localhost" by default. + """ + if port_from is None: + port_from = 8079 + + if port_to is None: + port_to = 65535 + + # Excluded ports (e.g. 
+        if port_from is None:
+            port_from = 8079
+
+        if port_to is None:
+            port_to = 65535
+
+        # Excluded ports (e.g. reserved for other servers/clients)
+        if exclude_ports is None:
+            exclude_ports = []
+
+        # Default host is localhost but it is possible to look for other hosts
+        if host is None:
+            host = "localhost"
+
+        found_port = None
+        for port in range(port_from, port_to + 1):
+            if port in exclude_ports:
+                continue
+
+            sock = None
+            try:
+                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                sock.bind((host, port))
+                found_port = port
+
+            except socket.error:
+                continue
+
+            finally:
+                if sock:
+                    sock.close()
+
+            if found_port is not None:
+                break
+
+        return found_port
+
+    def _add_resources_statics(self):
+        static_prefix = "/res"
+        self._server_manager.add_static(static_prefix, resources.RESOURCES_DIR)
+        static_url = "{}{}".format(
+            self._webserver_url, static_prefix
+        )
+
+        os.environ["AYON_STATICS_SERVER"] = static_url
+        os.environ["OPENPYPE_STATICS_SERVER"] = static_url
+
+    def _add_listeners(self):
+        from . import host_console_listener
+
+        self._host_listener = host_console_listener.HostListener(
+            self._server_manager, self
+        )
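Reviewer note: `find_free_port` can also probe one specific port by passing the same number for both ends of the range (the port value here is illustrative):

    port = WebServerAddon.find_free_port(8079, 8079)
    if port is None:
        print("Port 8079 is already in use")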
"discover_legacy_creator_plugins", + "register_creator_plugin", + "deregister_creator_plugin", + "register_creator_plugin_path", + "deregister_creator_plugin_path", + + # --- Load --- + "HeroVersionType", + "IncompatibleLoaderError", + "LoaderPlugin", + "SubsetLoaderPlugin", + + "discover_loader_plugins", + "register_loader_plugin", + "deregister_loader_plugin_path", + "register_loader_plugin_path", + "deregister_loader_plugin", + + "load_container", + "remove_container", + "update_container", + "switch_container", + + "loaders_from_representation", + "get_representation_path", + "get_representation_context", + "get_repres_contexts", + + # --- Publish --- + "PublishValidationError", + "PublishXmlValidationError", + "KnownPublishError", + "AYONPyblishPluginMixin", + "OpenPypePyblishPluginMixin", + "OptionalPyblishPluginMixin", + + # --- Actions --- + "LauncherAction", + "InventoryAction", + + "discover_launcher_actions", + "register_launcher_action", + "register_launcher_action_path", + + "discover_inventory_actions", + "register_inventory_action", + "register_inventory_action_path", + "deregister_inventory_action", + "deregister_inventory_action_path", + + # --- Process context --- + "install_ayon_plugins", + "install_openpype_plugins", + "install_host", + "uninstall_host", + "is_installed", + + "register_root", + "registered_root", + + "register_host", + "registered_host", + "deregister_host", + "get_process_id", + + "get_global_context", + "get_current_context", + "get_current_host_name", + "get_current_project_name", + "get_current_asset_name", + "get_current_task_name", + + # Backwards compatible function names + "install", + "uninstall", +) diff --git a/client/ayon_core/pipeline/actions.py b/client/ayon_core/pipeline/actions.py new file mode 100644 index 0000000000..1701498d10 --- /dev/null +++ b/client/ayon_core/pipeline/actions.py @@ -0,0 +1,147 @@ +import logging +from ayon_core.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) + +from .load.utils import get_representation_path_from_context + + +class LauncherAction(object): + """A custom action available""" + name = None + label = None + icon = None + color = None + order = 0 + + log = logging.getLogger("LauncherAction") + log.propagate = True + + def is_compatible(self, session): + """Return whether the class is compatible with the Session. + + Args: + session (dict[str, Union[str, None]]): Session data with + AVALON_PROJECT, AVALON_ASSET and AVALON_TASK. + """ + + return True + + def process(self, session, **kwargs): + pass + + +class InventoryAction(object): + """A custom action for the scene inventory tool + + If registered the action will be visible in the Right Mouse Button menu + under the submenu "Actions". + + """ + + label = None + icon = None + color = None + order = 0 + + log = logging.getLogger("InventoryAction") + log.propagate = True + + @staticmethod + def is_compatible(container): + """Override function in a custom class + + This method is specifically used to ensure the action can operate on + the container. + + Args: + container(dict): the data of a loaded asset, see host.ls() + + Returns: + bool + """ + return bool(container.get("objectName")) + + def process(self, containers): + """Override function in a custom class + + This method will receive all containers even those which are + incompatible. 
diff --git a/client/ayon_core/pipeline/actions.py b/client/ayon_core/pipeline/actions.py
new file mode 100644
index 0000000000..1701498d10
--- /dev/null
+++ b/client/ayon_core/pipeline/actions.py
@@ -0,0 +1,147 @@
+import logging
+
+from ayon_core.pipeline.plugin_discover import (
+    discover,
+    register_plugin,
+    register_plugin_path,
+    deregister_plugin,
+    deregister_plugin_path
+)
+
+from .load.utils import get_representation_path_from_context
+
+
+class LauncherAction(object):
+    """A custom action available"""
+    name = None
+    label = None
+    icon = None
+    color = None
+    order = 0
+
+    log = logging.getLogger("LauncherAction")
+    log.propagate = True
+
+    def is_compatible(self, session):
+        """Return whether the class is compatible with the Session.
+
+        Args:
+            session (dict[str, Union[str, None]]): Session data with
+                AVALON_PROJECT, AVALON_ASSET and AVALON_TASK.
+        """
+        return True
+
+    def process(self, session, **kwargs):
+        pass
+
+
+class InventoryAction(object):
+    """A custom action for the scene inventory tool
+
+    If registered the action will be visible in the Right Mouse Button menu
+    under the submenu "Actions".
+
+    """
+
+    label = None
+    icon = None
+    color = None
+    order = 0
+
+    log = logging.getLogger("InventoryAction")
+    log.propagate = True
+
+    @staticmethod
+    def is_compatible(container):
+        """Override function in a custom class
+
+        This method is specifically used to ensure the action can operate on
+        the container.
+
+        Args:
+            container (dict): the data of a loaded asset, see host.ls()
+
+        Returns:
+            bool
+        """
+        return bool(container.get("objectName"))
+
+    def process(self, containers):
+        """Override function in a custom class
+
+        This method will receive all containers even those which are
+        incompatible. It is advised to create a small filter along the lines
+        of this example:
+
+            valid_containers = list(filter(self.is_compatible, containers))
+
+        The return value will need to be a True-ish value to trigger
+        the data_changed signal in order to refresh the view.
+
+        You can return a list of container names to trigger GUI to select
+        treeview items.
+
+        You can return a dict to carry extra GUI options. For example:
+            {
+                "objectNames": [container names...],
+                "options": {"mode": "toggle",
+                            "clear": False}
+            }
+        Currently workable GUI options are:
+            - clear (bool): Clear current selection before selecting by
+                action. Default `True`.
+            - mode (str): selection mode, use one of these:
+                "select", "deselect", "toggle". Default is "select".
+
+        Args:
+            containers (list): list of dictionaries
+
+        Return:
+            bool, list or dict
+
+        """
+        return True
+
+    @classmethod
+    def filepath_from_context(cls, context):
+        return get_representation_path_from_context(context)
+
+
+# Launcher action
+def discover_launcher_actions():
+    return discover(LauncherAction)
+
+
+def register_launcher_action(plugin):
+    return register_plugin(LauncherAction, plugin)
+
+
+def register_launcher_action_path(path):
+    return register_plugin_path(LauncherAction, path)
+
+
+# Inventory action
+def discover_inventory_actions():
+    actions = discover(InventoryAction)
+    filtered_actions = []
+    for action in actions:
+        if action is not InventoryAction:
+            filtered_actions.append(action)
+
+    return filtered_actions
+
+
+def register_inventory_action(plugin):
+    return register_plugin(InventoryAction, plugin)
+
+
+def deregister_inventory_action(plugin):
+    deregister_plugin(InventoryAction, plugin)
+
+
+def register_inventory_action_path(path):
+    return register_plugin_path(InventoryAction, path)
+
+
+def deregister_inventory_action_path(path):
+    return deregister_plugin_path(InventoryAction, path)
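Reviewer note: a minimal sketch of a launcher action built on the API above (the class body, name and registration target are illustrative):

    from ayon_core.pipeline.actions import (
        LauncherAction,
        register_launcher_action,
    )

    class PrintProjectAction(LauncherAction):
        name = "print_project"        # hypothetical identifier
        label = "Print Project"
        order = 100

        def is_compatible(self, session):
            # only show when a project is selected
            return bool(session.get("AVALON_PROJECT"))

        def process(self, session, **kwargs):
            print("Project:", session["AVALON_PROJECT"])

    register_launcher_action(PrintProjectAction)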
project.""" self._roots_obj.set_root_environments() def root_names(self): @@ -293,7 +289,7 @@ def replace_root_with_env_key(self, filepath, template=None): "<{}>" ## Output - "/project/asset/task/animation_v001.ma" + "/project/asset/task/animation_v001.ma" Args: filepath (str): Full file path where root should be replaced. @@ -450,9 +446,9 @@ def get_project_doc_from_cache(cls, project_name): @classmethod def get_sync_server_addon(cls): if cls._sync_server_addon_cache.is_outdated: - manager = ModulesManager() + manager = AddonsManager() cls._sync_server_addon_cache.update_data( - manager.get_enabled_module("sync_server") + manager.get_enabled_addon("sync_server") ) return cls._sync_server_addon_cache.data @@ -474,40 +470,12 @@ def _get_studio_roots_overrides(cls, project_name, local_settings=None): Returns: Union[Dict[str, str], None]): Local root overrides. """ - - if AYON_SERVER_ENABLED: - if not project_name: - return - con = get_ayon_server_api_connection() - return con.get_project_roots_for_site( - project_name, get_local_site_id() - ) - - if local_settings is None: - local_settings = get_local_settings() - - local_project_settings = local_settings.get("projects") or {} - if not local_project_settings: - return None - - # Check for roots existence in local settings first - roots_project_locals = ( - local_project_settings - .get(project_name, {}) - ) - roots_default_locals = ( - local_project_settings - .get(DEFAULT_PROJECT_KEY, {}) - ) - - # Skip rest of processing if roots are not set - if not roots_project_locals and not roots_default_locals: + if not project_name: return - - # Combine roots from local settings - roots_locals = roots_default_locals.get("studio") or {} - roots_locals.update(roots_project_locals.get("studio") or {}) - return roots_locals + con = get_ayon_server_api_connection() + return con.get_project_roots_for_site( + project_name, get_local_site_id() + ) @classmethod def _get_site_root_overrides(cls, project_name, site_name): @@ -1238,7 +1206,7 @@ class Roots: anatomy Anatomy: Anatomy object created for a specific project. """ - env_prefix = "OPENPYPE_PROJECT_ROOT" + env_prefix = "AYON_PROJECT_ROOT" roots_filename = "roots.json" def __init__(self, anatomy): @@ -1344,8 +1312,8 @@ def set_root_environments(self): def root_environments(self): """Use root keys to create unique keys for environment variables. - Concatenates prefix "OPENPYPE_ROOT" with root keys to create unique - keys. + Concatenates prefix "AYON_PROJECT_ROOT_" with root keys to create + unique keys. 
 Returns:
             dict: Result is `{(str): (str)}` dictionary where key represents
@@ -1367,14 +1335,10 @@ def root_environments(self):
 
         Result on windows platform::
             {
-                "OPENPYPE_ROOT_WORK": "P:/projects/work",
-                "OPENPYPE_ROOT_PUBLISH": "P:/projects/publish"
+                "AYON_PROJECT_ROOT_WORK": "P:/projects/work",
+                "AYON_PROJECT_ROOT_PUBLISH": "P:/projects/publish"
             }
 
-        Short example when multiroot is not used::
-            {
-                "OPENPYPE_ROOT": "P:/projects"
-            }
         """
         return self._root_environments()
diff --git a/client/ayon_core/pipeline/colorspace.py b/client/ayon_core/pipeline/colorspace.py
new file mode 100644
index 0000000000..d77f301498
--- /dev/null
+++ b/client/ayon_core/pipeline/colorspace.py
@@ -0,0 +1,1194 @@
+import re
+import os
+import json
+import contextlib
+import functools
+import platform
+import tempfile
+import warnings
+from copy import deepcopy
+
+from ayon_core import AYON_CORE_ROOT
+from ayon_core.settings import get_project_settings
+from ayon_core.lib import (
+    StringTemplate,
+    run_ayon_launcher_process,
+    Logger
+)
+from ayon_core.pipeline import Anatomy
+from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
+
+
+log = Logger.get_logger(__name__)
+
+
+class CachedData:
+    # Cache of colorspace remapping rules by host name
+    # (must be a dict so the remapping helpers can use 'setdefault')
+    remapping = {}
+    has_compatible_ocio_package = None
+    config_version_data = {}
+    ocio_config_colorspaces = {}
+    allowed_exts = {
+        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
+    }
+
+
+class DeprecatedWarning(DeprecationWarning):
+    pass
+
+
+def deprecated(new_destination):
+    """Mark functions as deprecated.
+
+    It will result in a warning being emitted when the function is used.
+    """
+
+    func = None
+    if callable(new_destination):
+        func = new_destination
+        new_destination = None
+
+    def _decorator(decorated_func):
+        if new_destination is None:
+            warning_message = (
+                " Please check content of deprecated function to figure out"
+                " possible replacement."
+            )
+        else:
+            warning_message = " Please replace your usage with '{}'.".format(
+                new_destination
+            )
+
+        @functools.wraps(decorated_func)
+        def wrapper(*args, **kwargs):
+            warnings.simplefilter("always", DeprecatedWarning)
+            warnings.warn(
+                (
+                    "Call to deprecated function '{}'"
+                    "\nFunction was moved or removed.{}"
+                ).format(decorated_func.__name__, warning_message),
+                category=DeprecatedWarning,
+                stacklevel=4
+            )
+            return decorated_func(*args, **kwargs)
+        return wrapper
+
+    if func is None:
+        return _decorator
+    return _decorator(func)
+
+
+@contextlib.contextmanager
+def _make_temp_json_file():
+    """Context manager yielding a path to a temporary json file."""
+    temporary_json_filepath = None
+    try:
+        # Store dumped json to temporary file
+        temporary_json_file = tempfile.NamedTemporaryFile(
+            mode="w", suffix=".json", delete=False
+        )
+        temporary_json_file.close()
+        temporary_json_filepath = temporary_json_file.name.replace(
+            "\\", "/"
+        )
+
+        yield temporary_json_filepath
+
+    except IOError as _error:
+        raise IOError(
+            "Unable to create temp json file: {}".format(
+                _error
+            )
+        )
+
+    finally:
+        # Remove the temporary json (it may not exist if creation failed)
+        if temporary_json_filepath and os.path.exists(temporary_json_filepath):
+            os.remove(temporary_json_filepath)
+
+
+def get_ocio_config_script_path():
+    """Get path to ocio wrapper script
+
+    Returns:
+        str: path string
+    """
+    return os.path.normpath(
+        os.path.join(
+            AYON_CORE_ROOT,
+            "scripts",
+            "ocio_wrapper.py"
+        )
+    )
+
+
+def get_colorspace_name_from_filepath(
+    filepath, host_name, project_name,
+    config_data=None, file_rules=None,
+    project_settings=None,
+    validate=True
+):
+    """Get colorspace name from filepath
+
+    Args:
+        filepath (str): path string, file rule pattern is tested on it
+        host_name (str): host name
+        project_name (str): project name
+        config_data (Optional[dict]): config path and template in dict.
+            Defaults to None.
+        file_rules (Optional[dict]): file rule data from settings.
+            Defaults to None.
+        project_settings (Optional[dict]): project settings. Defaults to None.
+        validate (Optional[bool]): should resulting colorspace be validated
+            with config file? Defaults to True.
+ + Returns: + str: name of colorspace + """ + project_settings, config_data, file_rules = _get_context_settings( + host_name, project_name, + config_data=config_data, file_rules=file_rules, + project_settings=project_settings + ) + + if not config_data: + # in case global or host color management is not enabled + return None + + # use ImageIO file rules + colorspace_name = get_imageio_file_rules_colorspace_from_filepath( + filepath, host_name, project_name, + config_data=config_data, file_rules=file_rules, + project_settings=project_settings + ) + + # try to get colorspace from OCIO v2 file rules + if ( + not colorspace_name + and compatibility_check_config_version(config_data["path"], major=2) + ): + colorspace_name = get_config_file_rules_colorspace_from_filepath( + config_data["path"], filepath) + + # use parse colorspace from filepath as fallback + colorspace_name = colorspace_name or parse_colorspace_from_filepath( + filepath, config_path=config_data["path"] + ) + + if not colorspace_name: + log.info("No imageio file rule matched input path: '{}'".format( + filepath + )) + return None + + # validate matching colorspace with config + if validate: + validate_imageio_colorspace_in_config( + config_data["path"], colorspace_name) + + return colorspace_name + + +# TODO: remove this in future - backward compatibility +@deprecated("get_imageio_file_rules_colorspace_from_filepath") +def get_imageio_colorspace_from_filepath(*args, **kwargs): + return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) + +# TODO: remove this in future - backward compatibility +@deprecated("get_imageio_file_rules_colorspace_from_filepath") +def get_colorspace_from_filepath(*args, **kwargs): + return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) + + +def _get_context_settings( + host_name, project_name, + config_data=None, file_rules=None, + project_settings=None +): + project_settings = project_settings or get_project_settings( + project_name + ) + + config_data = config_data or get_imageio_config( + project_name, host_name, project_settings) + + # in case host color management is not enabled + if not config_data: + return (None, None, None) + + file_rules = file_rules or get_imageio_file_rules( + project_name, host_name, project_settings) + + return project_settings, config_data, file_rules + + +def get_imageio_file_rules_colorspace_from_filepath( + filepath, host_name, project_name, + config_data=None, file_rules=None, + project_settings=None +): + """Get colorspace name from filepath + + ImageIO Settings file rules are tested for matching rule. + + Args: + filepath (str): path string, file rule pattern is tested on it + host_name (str): host name + project_name (str): project name + config_data (Optional[dict]): config path and template in dict. + Defaults to None. + file_rules (Optional[dict]): file rule data from settings. + Defaults to None. + project_settings (Optional[dict]): project settings. Defaults to None. 
+
+    Returns:
+        str: name of colorspace
+    """
+    project_settings, config_data, file_rules = _get_context_settings(
+        host_name, project_name,
+        config_data=config_data, file_rules=file_rules,
+        project_settings=project_settings
+    )
+
+    if not config_data:
+        # in case global or host color management is not enabled
+        return None
+
+    # match file rule from path
+    colorspace_name = None
+    for file_rule in file_rules.values():
+        pattern = file_rule["pattern"]
+        extension = file_rule["ext"]
+        ext_match = re.match(
+            r".*(?=.{})".format(extension), filepath
+        )
+        file_match = re.search(
+            pattern, filepath
+        )
+
+        if ext_match and file_match:
+            colorspace_name = file_rule["colorspace"]
+
+    return colorspace_name
+
+
+def get_config_file_rules_colorspace_from_filepath(config_path, filepath):
+    """Get colorspace from file path wrapper.
+
+    Wrapper function for getting colorspace from file path
+    with use of OCIO v2 file-rules.
+
+    Args:
+        config_path (str): path leading to config.ocio file
+        filepath (str): path leading to a file
+
+    Returns:
+        Union[str, None]: matching colorspace name
+    """
+    if not compatibility_check():
+        # python environment is not compatible with PyOpenColorIO
+        # needs to be run in subprocess
+        result_data = _get_wrapped_with_subprocess(
+            "colorspace", "get_config_file_rules_colorspace_from_filepath",
+            config_path=config_path,
+            filepath=filepath
+        )
+        if result_data:
+            return result_data[0]
+        # do not fall through to the direct import below, it would fail
+        # without a compatible PyOpenColorIO package
+        return None
+
+    # TODO: refactor this so it is not imported but part of this file
+    from ayon_core.scripts.ocio_wrapper import _get_config_file_rules_colorspace_from_filepath  # noqa: E501
+
+    result_data = _get_config_file_rules_colorspace_from_filepath(
+        config_path, filepath)
+
+    if result_data:
+        return result_data[0]
+
+
+def parse_colorspace_from_filepath(
+    filepath, colorspaces=None, config_path=None
+):
+    """Parse colorspace name from filepath
+
+    An input path can have colorspace name used as part of name
+    or as folder name.
+
+    Example:
+        >>> config_path = "path/to/config.ocio"
+        >>> colorspaces = get_ocio_config_colorspaces(config_path)
+        >>> colorspace = parse_colorspace_from_filepath(
+                "path/to/file/acescg/file.exr",
+                colorspaces=colorspaces
+            )
+        >>> print(colorspace)
+        acescg
+
+    Args:
+        filepath (str): path string
+        colorspaces (Optional[dict[str]]): list of colorspaces
+        config_path (Optional[str]): path to config.ocio file
+
+    Returns:
+        str: name of colorspace
+    """
+    def _get_colorspace_match_regex(colorspaces):
+        """Return a regex pattern
+
+        Allows to search a colorspace match in a filename
+
+        Args:
+            colorspaces (list): List of colorspace names
+
+        Returns:
+            re.Pattern: regex pattern
+        """
+        pattern = "|".join(
+            # Allow to match spaces also as underscores because the
+            # integrator replaces spaces with underscores in filenames
+            re.escape(colorspace) for colorspace in
+            # Sort by longest first so the regex matches longer matches
+            # over smaller matches, e.g. matching 'Output - sRGB' over 'sRGB'
+            sorted(colorspaces, key=len, reverse=True)
+        )
+        return re.compile(pattern)
+
+    if not colorspaces and not config_path:
+        raise ValueError(
+            "Must provide `config_path` if `colorspaces` is not provided."
+ ) + + colorspaces = ( + colorspaces + or get_ocio_config_colorspaces(config_path)["colorspaces"] + ) + underscored_colorspaces = { + key.replace(" ", "_"): key for key in colorspaces + if " " in key + } + + # match colorspace from filepath + regex_pattern = _get_colorspace_match_regex( + list(colorspaces) + list(underscored_colorspaces)) + match = regex_pattern.search(filepath) + colorspace = match.group(0) if match else None + + if colorspace in underscored_colorspaces: + return underscored_colorspaces[colorspace] + + if colorspace: + return colorspace + + log.info("No matching colorspace in config '{}' for path: '{}'".format( + config_path, filepath + )) + return None + + +def validate_imageio_colorspace_in_config(config_path, colorspace_name): + """Validator making sure colorspace name is used in config.ocio + + Args: + config_path (str): path leading to config.ocio file + colorspace_name (str): tested colorspace name + + Raises: + KeyError: missing colorspace name + + Returns: + bool: True if exists + """ + colorspaces = get_ocio_config_colorspaces(config_path)["colorspaces"] + if colorspace_name not in colorspaces: + raise KeyError( + "Missing colorspace '{}' in config file '{}'".format( + colorspace_name, config_path) + ) + return True + + +# TODO: remove this in future - backward compatibility +@deprecated("_get_wrapped_with_subprocess") +def get_data_subprocess(config_path, data_type): + """[Deprecated] Get data via subprocess + + Wrapper for Python 2 hosts. + + Args: + config_path (str): path leading to config.ocio file + """ + return _get_wrapped_with_subprocess( + "config", data_type, in_path=config_path, + ) + + +def _get_wrapped_with_subprocess(command_group, command, **kwargs): + """Get data via subprocess + + Wrapper for Python 2 hosts. 
+
+    Args:
+        command_group (str): command group name
+        command (str): command name
+        **kwargs: command arguments
+
+    Returns:
+        Union[dict, None]: data
+    """
+    with _make_temp_json_file() as tmp_json_path:
+        # Prepare subprocess arguments
+        args = [
+            "run", get_ocio_config_script_path(),
+            command_group, command
+        ]
+
+        for key_, value_ in kwargs.items():
+            args.extend(("--{}".format(key_), value_))
+
+        args.append("--out_path")
+        args.append(tmp_json_path)
+
+        log.info("Executing: {}".format(" ".join(args)))
+
+        run_ayon_launcher_process(*args, logger=log)
+
+        # return all colorspaces
+        with open(tmp_json_path, "r") as f_:
+            return json.load(f_)
+
+
+# TODO: this should be part of ocio_wrapper.py
+def compatibility_check():
+    """Making sure PyOpenColorIO is importable"""
+    if CachedData.has_compatible_ocio_package is not None:
+        return CachedData.has_compatible_ocio_package
+
+    try:
+        import PyOpenColorIO  # noqa: F401
+        CachedData.has_compatible_ocio_package = True
+    except ImportError:
+        CachedData.has_compatible_ocio_package = False
+
+    # compatible
+    return CachedData.has_compatible_ocio_package
+
+
+# TODO: this should be part of ocio_wrapper.py
+def compatibility_check_config_version(config_path, major=1, minor=None):
+    """Making sure PyOpenColorIO config version is compatible"""
+
+    if not CachedData.config_version_data.get(config_path):
+        if compatibility_check():
+            # TODO: refactor this so it is not imported but part of this file
+            from ayon_core.scripts.ocio_wrapper import _get_version_data
+
+            CachedData.config_version_data[config_path] = \
+                _get_version_data(config_path)
+
+        else:
+            # python environment is not compatible with PyOpenColorIO
+            # needs to be run in subprocess
+            CachedData.config_version_data[config_path] = \
+                _get_wrapped_with_subprocess(
+                    "config", "get_version", config_path=config_path
+                )
+
+    # check major version
+    if CachedData.config_version_data[config_path]["major"] != major:
+        return False
+
+    # check minor version ('is not None' so that minor=0 is also validated)
+    if (
+        minor is not None
+        and CachedData.config_version_data[config_path]["minor"] != minor
+    ):
+        return False
+
+    # compatible
+    return True
+
+
+def get_ocio_config_colorspaces(config_path):
+    """Get all colorspace data
+
+    Wrapper function for aggregating all names and its families.
+    Families can be used for building menu and submenus in gui.
+ + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: colorspace and family in couple + """ + if not CachedData.ocio_config_colorspaces.get(config_path): + if not compatibility_check(): + # python environment is not compatible with PyOpenColorIO + # needs to be run in subprocess + CachedData.ocio_config_colorspaces[config_path] = \ + _get_wrapped_with_subprocess( + "config", "get_colorspace", in_path=config_path + ) + else: + # TODO: refactor this so it is not imported but part of this file + from ayon_core.scripts.ocio_wrapper import _get_colorspace_data + + CachedData.ocio_config_colorspaces[config_path] = \ + _get_colorspace_data(config_path) + + return CachedData.ocio_config_colorspaces[config_path] + + +def convert_colorspace_enumerator_item( + colorspace_enum_item, + config_items +): + """Convert colorspace enumerator item to dictionary + + Args: + colorspace_item (str): colorspace and family in couple + config_items (dict[str,dict]): colorspace data + + Returns: + dict: colorspace data + """ + if "::" not in colorspace_enum_item: + return None + + # split string with `::` separator and set first as key and second as value + item_type, item_name = colorspace_enum_item.split("::") + + item_data = None + if item_type == "aliases": + # loop through all colorspaces and find matching alias + for name, _data in config_items.get("colorspaces", {}).items(): + if item_name in _data.get("aliases", []): + item_data = deepcopy(_data) + item_data.update({ + "name": name, + "type": "colorspace" + }) + break + else: + # find matching colorspace item found in labeled_colorspaces + item_data = config_items.get(item_type, {}).get(item_name) + if item_data: + item_data = deepcopy(item_data) + item_data.update({ + "name": item_name, + "type": item_type + }) + + # raise exception if item is not found + if not item_data: + message_config_keys = ", ".join( + "'{}':{}".format( + key, + set(config_items.get(key, {}).keys()) + ) for key in config_items.keys() + ) + raise KeyError( + "Missing colorspace item '{}' in config data: [{}]".format( + colorspace_enum_item, message_config_keys + ) + ) + + return item_data + + +def get_colorspaces_enumerator_items( + config_items, + include_aliases=False, + include_looks=False, + include_roles=False, + include_display_views=False +): + """Get all colorspace data with labels + + Wrapper function for aggregating all names and its families. + Families can be used for building menu and submenus in gui. 
+ + Args: + config_items (dict[str,dict]): colorspace data coming from + `get_ocio_config_colorspaces` function + include_aliases (bool): include aliases in result + include_looks (bool): include looks in result + include_roles (bool): include roles in result + + Returns: + list[tuple[str,str]]: colorspace and family in couple + """ + labeled_colorspaces = [] + aliases = set() + colorspaces = set() + looks = set() + roles = set() + display_views = set() + for items_type, colorspace_items in config_items.items(): + if items_type == "colorspaces": + for color_name, color_data in colorspace_items.items(): + if color_data.get("aliases"): + aliases.update([ + ( + "aliases::{}".format(alias_name), + "[alias] {} ({})".format(alias_name, color_name) + ) + for alias_name in color_data["aliases"] + ]) + colorspaces.add(( + "{}::{}".format(items_type, color_name), + "[colorspace] {}".format(color_name) + )) + + elif items_type == "looks": + looks.update([ + ( + "{}::{}".format(items_type, name), + "[look] {} ({})".format(name, role_data["process_space"]) + ) + for name, role_data in colorspace_items.items() + ]) + + elif items_type == "displays_views": + display_views.update([ + ( + "{}::{}".format(items_type, name), + "[view (display)] {}".format(name) + ) + for name, _ in colorspace_items.items() + ]) + + elif items_type == "roles": + roles.update([ + ( + "{}::{}".format(items_type, name), + "[role] {} ({})".format(name, role_data["colorspace"]) + ) + for name, role_data in colorspace_items.items() + ]) + + if roles and include_roles: + roles = sorted(roles, key=lambda x: x[0]) + labeled_colorspaces.extend(roles) + + # add colorspaces as second so it is not first in menu + colorspaces = sorted(colorspaces, key=lambda x: x[0]) + labeled_colorspaces.extend(colorspaces) + + if aliases and include_aliases: + aliases = sorted(aliases, key=lambda x: x[0]) + labeled_colorspaces.extend(aliases) + + if looks and include_looks: + looks = sorted(looks, key=lambda x: x[0]) + labeled_colorspaces.extend(looks) + + if display_views and include_display_views: + display_views = sorted(display_views, key=lambda x: x[0]) + labeled_colorspaces.extend(display_views) + + return labeled_colorspaces + + +# TODO: remove this in future - backward compatibility +@deprecated("_get_wrapped_with_subprocess") +def get_colorspace_data_subprocess(config_path): + """[Deprecated] Get colorspace data via subprocess + + Wrapper for Python 2 hosts. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: colorspace and family in couple + """ + return _get_wrapped_with_subprocess( + "config", "get_colorspace", in_path=config_path + ) + + +def get_ocio_config_views(config_path): + """Get all viewer data + + Wrapper function for aggregating all display and related viewers. + Key can be used for building gui menu with submenus. 
+ + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: `display/viewer` and viewer data + """ + if not compatibility_check(): + # python environment is not compatible with PyOpenColorIO + # needs to be run in subprocess + return _get_wrapped_with_subprocess( + "config", "get_views", in_path=config_path + ) + + # TODO: refactor this so it is not imported but part of this file + from ayon_core.scripts.ocio_wrapper import _get_views_data + + return _get_views_data(config_path) + + +# TODO: remove this in future - backward compatibility +@deprecated("_get_wrapped_with_subprocess") +def get_views_data_subprocess(config_path): + """[Deprecated] Get viewers data via subprocess + + Wrapper for Python 2 hosts. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: `display/viewer` and viewer data + """ + return _get_wrapped_with_subprocess( + "config", "get_views", in_path=config_path + ) + + +def get_imageio_config( + project_name, + host_name, + project_settings=None, + anatomy_data=None, + anatomy=None, + env=None +): + """Returns config data from settings + + Config path is formatted in `path` key + and original settings input is saved into `template` key. + + Args: + project_name (str): project name + host_name (str): host name + project_settings (Optional[dict]): Project settings. + anatomy_data (Optional[dict]): anatomy formatting data. + anatomy (Optional[Anatomy]): Anatomy object. + env (Optional[dict]): Environment variables. + + Returns: + dict: config path data or empty dict + """ + project_settings = project_settings or get_project_settings(project_name) + anatomy = anatomy or Anatomy(project_name) + + if not anatomy_data: + from ayon_core.pipeline.context_tools import ( + get_template_data_from_session) + anatomy_data = get_template_data_from_session() + + formatting_data = deepcopy(anatomy_data) + + # Add project roots to anatomy data + formatting_data["root"] = anatomy.roots + formatting_data["platform"] = platform.system().lower() + + # Get colorspace settings + imageio_global, imageio_host = _get_imageio_settings( + project_settings, host_name) + + # Host 'ocio_config' is optional + host_ocio_config = imageio_host.get("ocio_config") or {} + + # Global color management must be enabled to be able to use host settings + activate_color_management = imageio_global.get( + "activate_global_color_management") + # TODO: remove this in future - backward compatibility + # For already saved overrides from previous version look for 'enabled' + # on host settings. + if activate_color_management is None: + activate_color_management = host_ocio_config.get("enabled", False) + + if not activate_color_management: + # if global settings are disabled return empty dict because + # it is expected that no colorspace management is needed + log.info("Colorspace management is disabled globally.") + return {} + + # Check if host settings group is having 'activate_host_color_management' + # - if it does not have activation key then default it to True so it uses + # global settings + # This is for backward compatibility. 
+    # TODO: in future rewrite this to be more explicit
+    activate_host_color_management = imageio_host.get(
+        "activate_host_color_management")
+
+    # TODO: remove this in future - backward compatibility
+    if activate_host_color_management is None:
+        activate_host_color_management = host_ocio_config.get("enabled", False)
+
+    if not activate_host_color_management:
+        # if host settings are disabled return empty dict because
+        # it is expected that no colorspace management is needed
+        log.info(
+            "Colorspace management for host '{}' is disabled.".format(
+                host_name)
+        )
+        return {}
+
+    # get config path from either global or host settings
+    # depending on override flag
+    # TODO: in future rewrite this to be more explicit
+    override_global_config = host_ocio_config.get("override_global_config")
+    if override_global_config is None:
+        # for already saved overrides from previous version
+        # TODO: remove this in future - backward compatibility
+        override_global_config = host_ocio_config.get("enabled")
+
+    if override_global_config:
+        config_data = _get_config_data(
+            host_ocio_config["filepath"], formatting_data, env
+        )
+    else:
+        # get config path from global
+        config_global = imageio_global["ocio_config"]
+        config_data = _get_config_data(
+            config_global["filepath"], formatting_data, env
+        )
+
+    if not config_data:
+        raise FileExistsError(
+            "No OCIO config found in settings. It is "
+            "either missing or there is a typo in the path inputs"
+        )
+
+    return config_data
+
+
+def _get_config_data(path_list, anatomy_data, env=None):
+    """Return first existing path in path list.
+
+    If template is used in path inputs,
+    then it is formatted by anatomy data
+    and environment variables
+
+    Args:
+        path_list (list[str]): list of abs paths
+        anatomy_data (dict): formatting data
+        env (Optional[dict]): Environment variables.
+
+    Returns:
+        dict: config data
+    """
+    formatting_data = deepcopy(anatomy_data)
+
+    environment_vars = env or dict(**os.environ)
+
+    # format the path for potential env vars
+    formatting_data.update(environment_vars)
+
+    # first try host config paths
+    for path_ in path_list:
+        formatted_path = _format_path(path_, formatting_data)
+
+        if not os.path.exists(formatted_path):
+            continue
+
+        return {
+            "path": os.path.normpath(formatted_path),
+            "template": path_
+        }
+
+
+def _format_path(template_path, formatting_data):
+    """Single template path formatting.
+
+    Args:
+        template_path (str): template string
+        formatting_data (dict): data to be used for
+            template formatting
+
+    Returns:
+        str: absolute formatted path
+    """
+    # format path for anatomy keys
+    formatted_path = StringTemplate(template_path).format(
+        formatting_data)
+
+    return os.path.abspath(formatted_path)
+
+
+def get_imageio_file_rules(project_name, host_name, project_settings=None):
+    """Get ImageIO File rules from project settings
+
+    Args:
+        project_name (str): project name
+        host_name (str): host name
+        project_settings (dict, optional): project settings.
+            Defaults to None.
+ + Returns: + dict: file rules data + """ + project_settings = project_settings or get_project_settings(project_name) + + imageio_global, imageio_host = _get_imageio_settings( + project_settings, host_name) + + # get file rules from global and host_name + frules_global = imageio_global["file_rules"] + activate_global_rules = ( + frules_global.get("activate_global_file_rules", False) + # TODO: remove this in future - backward compatibility + or frules_global.get("enabled") + ) + global_rules = frules_global["rules"] + + if not activate_global_rules: + log.info( + "Colorspace global file rules are disabled." + ) + global_rules = {} + + # host is optional, some might not have any settings + frules_host = imageio_host.get("file_rules", {}) + + # compile file rules dictionary + activate_host_rules = frules_host.get("activate_host_rules") + if activate_host_rules is None: + # TODO: remove this in future - backward compatibility + activate_host_rules = frules_host.get("enabled", False) + + # return host rules if activated or global rules + return frules_host["rules"] if activate_host_rules else global_rules + + +def get_remapped_colorspace_to_native( + ocio_colorspace_name, host_name, imageio_host_settings +): + """Return native colorspace name. + + Args: + ocio_colorspace_name (str | None): ocio colorspace name + host_name (str): Host name. + imageio_host_settings (dict[str, Any]): ImageIO host settings. + + Returns: + Union[str, None]: native colorspace name defined in remapping or None + """ + + CachedData.remapping.setdefault(host_name, {}) + if CachedData.remapping[host_name].get("to_native") is None: + remapping_rules = imageio_host_settings["remapping"]["rules"] + CachedData.remapping[host_name]["to_native"] = { + rule["ocio_name"]: rule["host_native_name"] + for rule in remapping_rules + } + + return CachedData.remapping[host_name]["to_native"].get( + ocio_colorspace_name) + + +def get_remapped_colorspace_from_native( + host_native_colorspace_name, host_name, imageio_host_settings +): + """Return ocio colorspace name remapped from host native used name. + + Args: + host_native_colorspace_name (str): host native colorspace name + host_name (str): Host name. + imageio_host_settings (dict[str, Any]): ImageIO host settings. + + Returns: + Union[str, None]: Ocio colorspace name defined in remapping or None. + """ + + CachedData.remapping.setdefault(host_name, {}) + if CachedData.remapping[host_name].get("from_native") is None: + remapping_rules = imageio_host_settings["remapping"]["rules"] + CachedData.remapping[host_name]["from_native"] = { + rule["host_native_name"]: rule["ocio_name"] + for rule in remapping_rules + } + + return CachedData.remapping[host_name]["from_native"].get( + host_native_colorspace_name) + + +def _get_imageio_settings(project_settings, host_name): + """Get ImageIO settings for global and host + + Args: + project_settings (dict): project settings. + Defaults to None. + host_name (str): host name + + Returns: + tuple[dict, dict]: image io settings for global and host + """ + # get image io from global and host_name + imageio_global = project_settings["global"]["imageio"] + # host is optional, some might not have any settings + imageio_host = project_settings.get(host_name, {}).get("imageio", {}) + + return imageio_global, imageio_host + + +def get_colorspace_settings_from_publish_context(context_data): + """Returns solved settings for the host context. 
+ + Args: + context_data (publish.Context.data): publishing context data + + Returns: + tuple | bool: config, file rules or None + """ + if "imageioSettings" in context_data and context_data["imageioSettings"]: + return context_data["imageioSettings"] + + project_name = context_data["projectName"] + host_name = context_data["hostName"] + anatomy_data = context_data["anatomyData"] + project_settings_ = context_data["project_settings"] + + config_data = get_imageio_config( + project_name, host_name, + project_settings=project_settings_, + anatomy_data=anatomy_data + ) + + # caching invalid state, so it's not recalculated all the time + file_rules = None + if config_data: + file_rules = get_imageio_file_rules( + project_name, host_name, + project_settings=project_settings_ + ) + + # caching settings for future instance processing + context_data["imageioSettings"] = (config_data, file_rules) + + return config_data, file_rules + + +def set_colorspace_data_to_representation( + representation, context_data, + colorspace=None, + log=None +): + """Sets colorspace data to representation. + + Args: + representation (dict): publishing representation + context_data (publish.Context.data): publishing context data + colorspace (str, optional): colorspace name. Defaults to None. + log (logging.Logger, optional): logger instance. Defaults to None. + + Example: + ``` + { + # for other publish plugins and loaders + "colorspace": "linear", + "config": { + # for future references in case need + "path": "/abs/path/to/config.ocio", + # for other plugins within remote publish cases + "template": "{project[root]}/path/to/config.ocio" + } + } + ``` + + """ + log = log or Logger.get_logger(__name__) + + file_ext = representation["ext"] + + # check if `file_ext` in lower case is in CachedData.allowed_exts + if file_ext.lstrip(".").lower() not in CachedData.allowed_exts: + log.debug( + "Extension '{}' is not in allowed extensions.".format(file_ext) + ) + return + + # get colorspace settings + config_data, file_rules = get_colorspace_settings_from_publish_context( + context_data) + + # in case host color management is not enabled + if not config_data: + log.warning("Host's colorspace management is disabled.") + return + + log.debug("Config data is: `{}`".format(config_data)) + + project_name = context_data["projectName"] + host_name = context_data["hostName"] + project_settings = context_data["project_settings"] + + # get one filename + filename = representation["files"] + if isinstance(filename, list): + filename = filename[0] + + # get matching colorspace from rules + colorspace = colorspace or get_imageio_colorspace_from_filepath( + filename, host_name, project_name, + config_data=config_data, + file_rules=file_rules, + project_settings=project_settings + ) + + # infuse data to representation + if colorspace: + colorspace_data = { + "colorspace": colorspace, + "config": config_data + } + + # update data key + representation["colorspaceData"] = colorspace_data + + +def get_display_view_colorspace_name(config_path, display, view): + """Returns the colorspace attribute of the (display, view) pair. + + Args: + config_path (str): path string leading to config.ocio + display (str): display name e.g. "ACES" + view (str): view name e.g. "sRGB" + + Returns: + view color space name (str) e.g. 
"Output - sRGB" + """ + + if not compatibility_check(): + # python environment is not compatible with PyOpenColorIO + # needs to be run in subprocess + return get_display_view_colorspace_subprocess(config_path, + display, view) + + from ayon_core.scripts.ocio_wrapper import _get_display_view_colorspace_name # noqa + + return _get_display_view_colorspace_name(config_path, display, view) + + +def get_display_view_colorspace_subprocess(config_path, display, view): + """Returns the colorspace attribute of the (display, view) pair + via subprocess. + + Args: + config_path (str): path string leading to config.ocio + display (str): display name e.g. "ACES" + view (str): view name e.g. "sRGB" + + Returns: + view color space name (str) e.g. "Output - sRGB" + """ + + with _make_temp_json_file() as tmp_json_path: + # Prepare subprocess arguments + args = [ + "run", get_ocio_config_script_path(), + "config", "get_display_view_colorspace_name", + "--in_path", config_path, + "--out_path", tmp_json_path, + "--display", display, + "--view", view + ] + log.debug("Executing: {}".format(" ".join(args))) + + run_ayon_launcher_process(*args, logger=log) + + # return default view colorspace name + with open(tmp_json_path, "r") as f: + return json.load(f) diff --git a/openpype/pipeline/constants.py b/client/ayon_core/pipeline/constants.py similarity index 100% rename from openpype/pipeline/constants.py rename to client/ayon_core/pipeline/constants.py diff --git a/openpype/pipeline/context_tools.py b/client/ayon_core/pipeline/context_tools.py similarity index 93% rename from openpype/pipeline/context_tools.py rename to client/ayon_core/pipeline/context_tools.py index a607c90912..197b1eb6e6 100644 --- a/openpype/pipeline/context_tools.py +++ b/client/ayon_core/pipeline/context_tools.py @@ -10,10 +10,9 @@ import pyblish.api from pyblish.lib import MessageHandler -import openpype -from openpype import AYON_SERVER_ENABLED -from openpype.host import HostBase -from openpype.client import ( +from ayon_core import AYON_CORE_ROOT +from ayon_core.host import HostBase +from ayon_core.client import ( get_project, get_asset_by_id, get_asset_by_name, @@ -21,10 +20,10 @@ get_asset_name_identifier, get_ayon_server_api_connection, ) -from openpype.lib.events import emit_event -from openpype.modules import load_modules, ModulesManager -from openpype.settings import get_project_settings -from openpype.tests.lib import is_in_tests +from ayon_core.lib.events import emit_event +from ayon_core.addon import load_addons, AddonsManager +from ayon_core.settings import get_project_settings +from ayon_core.tests.lib import is_in_tests from .publish.lib import filter_pyblish_plugins from .anatomy import Anatomy @@ -49,12 +48,11 @@ _registered_host = {"_": None} # Keep modules manager (and it's modules) in memory # - that gives option to register modules' callbacks -_modules_manager = None +_addons_manager = None log = logging.getLogger(__name__) -PACKAGE_DIR = os.path.dirname(os.path.abspath(openpype.__file__)) -PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") +PLUGINS_DIR = os.path.join(AYON_CORE_ROOT, "plugins") # Global plugin paths PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") @@ -62,7 +60,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") -def _get_modules_manager(): +def _get_addons_manager(): """Get or create modules manager for host installation. This is not meant for public usage. Reason is to keep modules @@ -70,13 +68,13 @@ def _get_modules_manager(): need any. 
Returns: - ModulesManager: Manager wrapping discovered modules. + AddonsManager: Manager wrapping discovered modules. """ - global _modules_manager - if _modules_manager is None: - _modules_manager = ModulesManager() - return _modules_manager + global _addons_manager + if _addons_manager is None: + _addons_manager = AddonsManager() + return _addons_manager def register_root(path): @@ -116,11 +114,10 @@ def install_host(host): _is_installed = True # Make sure global AYON connection has set site id and version - if AYON_SERVER_ENABLED: - get_ayon_server_api_connection() + get_ayon_server_api_connection() legacy_io.install() - modules_manager = _get_modules_manager() + addons_manager = _get_addons_manager() missing = list() for key in ("AVALON_PROJECT", "AVALON_ASSET"): @@ -149,8 +146,8 @@ def modified_emit(obj, record): MessageHandler.emit = modified_emit - if os.environ.get("OPENPYPE_REMOTE_PUBLISH"): - # target "farm" == rendering on farm, expects OPENPYPE_PUBLISH_DATA + if os.environ.get("AYON_REMOTE_PUBLISH"): + # target "farm" == rendering on farm, expects AYON_PUBLISH_DATA # target "remote" == remote execution, installs host print("Registering pyblish target: remote") pyblish.api.register_target("remote") @@ -165,15 +162,15 @@ def modified_emit(obj, record): host_name = os.environ.get("AVALON_APP") # Give option to handle host installation - for module in modules_manager.get_enabled_modules(): - module.on_host_install(host, host_name, project_name) + for addon in addons_manager.get_enabled_addons(): + addon.on_host_install(host, host_name, project_name) install_openpype_plugins(project_name, host_name) -def install_openpype_plugins(project_name=None, host_name=None): +def install_ayon_plugins(project_name=None, host_name=None): # Make sure modules are loaded - load_modules() + load_addons() log.info("Registering global plug-ins..") pyblish.api.register_plugin_path(PUBLISH_PATH) @@ -184,23 +181,23 @@ def install_openpype_plugins(project_name=None, host_name=None): if host_name is None: host_name = os.environ.get("AVALON_APP") - modules_manager = _get_modules_manager() - publish_plugin_dirs = modules_manager.collect_publish_plugin_paths( + addons_manager = _get_addons_manager() + publish_plugin_dirs = addons_manager.collect_publish_plugin_paths( host_name) for path in publish_plugin_dirs: pyblish.api.register_plugin_path(path) - create_plugin_paths = modules_manager.collect_create_plugin_paths( + create_plugin_paths = addons_manager.collect_create_plugin_paths( host_name) for path in create_plugin_paths: register_creator_plugin_path(path) - load_plugin_paths = modules_manager.collect_load_plugin_paths( + load_plugin_paths = addons_manager.collect_load_plugin_paths( host_name) for path in load_plugin_paths: register_loader_plugin_path(path) - inventory_action_paths = modules_manager.collect_inventory_action_paths( + inventory_action_paths = addons_manager.collect_inventory_action_paths( host_name) for path in inventory_action_paths: register_inventory_action_path(path) @@ -237,6 +234,10 @@ def install_openpype_plugins(project_name=None, host_name=None): register_inventory_action_path(path) +def install_openpype_plugins(project_name=None, host_name=None): + install_ayon_plugins(project_name, host_name) + + def uninstall_host(): """Undo all of what `install()` did""" host = registered_host() diff --git a/client/ayon_core/pipeline/create/README.md b/client/ayon_core/pipeline/create/README.md new file mode 100644 index 0000000000..012572a776 --- /dev/null +++ 
b/client/ayon_core/pipeline/create/README.md
@@ -0,0 +1,78 @@
+# Create
+Creation is the process that defines what will be published and how. It may work differently based on the host implementation.
+
+## CreateContext
+Entry point of creation. All data and metadata are handled through the create context. The context holds all global data and instances, and is responsible for loading plugins (create, publish), triggering creator methods, validating the host implementation and emitting changes to creators and the host.
+
+It discovers Creator plugins to be able to create new instances and convert existing ones. Creators may define attributes that are specific to their instances. Attribute definitions can enhance the behavior of an instance during publishing.
+
+Publish plugins are loaded because they can also define attribute definitions. These are less family specific. To be able to define attributes, a publish plugin must inherit from `AYONPyblishPluginMixin` and override the `get_attribute_defs` class method, which must return a list of attribute definitions. Values of publish plugin definitions are stored per plugin name under `publish_attributes`. A plugin can also override the `convert_attribute_values` class method, which gives the ability to modify values on an instance before they are used in `CreatedInstance`. The `convert_attribute_values` method can also be used without `get_attribute_defs` to modify values when compatibility changes (e.g. to remove metadata from an instance because it became irrelevant).
+
+Possible attribute definitions can be found in `ayon_core/lib/attribute_definitions.py`.
+
+Except for creating and removing instances, changes are not automatically propagated to the host context (scene/workfile/...). To propagate changes, call `save_changes`, which triggers an update of all instances in the context using the Creator implementations.
+
+
+## CreatedInstance
+The product of creation is an "instance", which holds the basic data defining it. Core data are `creator_identifier`, `family` and `subset`. Other data can be keys used to fill the subset name or metadata modifying the publishing process of the instance (described later). All instances have `id`, which holds the constant `pyblish.avalon.instance`, and `instance_id`, which is the identifier of the instance.
+The family tells how the instance should be processed and the subset which name the published item will have.
+- There are cases when the subset is not fully filled during creation and may change during publishing. That is in most cases because the instance is related to another instance or the instance data do not represent the final product.
+
+`CreatedInstance` is the entity holding the data which are stored and used.
+
+```python
+{
+    # Immutable data after creation
+    ## Identifier saying this data represents an instance for publishing (automatically assigned)
+    "id": "pyblish.avalon.instance",
+    ## Identifier of this specific instance (automatically assigned)
+    "instance_id": <instance id>,
+    ## Instance family (used from Creator)
+    "family": <family>,
+
+    # Mutable data
+    ## Subset name based on subset name template - may change over time (on context change)
+    "subset": <subset name>,
+    ## Instance is active and will be published
+    "active": True,
+    ## Version of instance
+    "version": 1,
+    ## Identifier of creator (is unique)
+    "creator_identifier": "",
+    ## Creator specific attributes (defined by Creator)
+    "creator_attributes": {...},
+    ## Publish plugin specific attributes (defined by Publish plugin)
+    "publish_attributes": {
+        # Attribute values are stored by publish plugin name
+        # - Duplicated plugin names can cause clashes!
+        <plugin name>: {...},
+        ...
+    },
+    ## Additional data related to instance (`asset`, `task`, etc.)
+    ...
+}
+```
+
+## Creator
+To create, update, remove or collect existing instances, a creator must be defined. A creator must have a unique identifier and can represent a family. There can be multiple Creators for a single family. It is advised that the identifier of a creator contains the family.
+
+A Creator has abstract methods to handle instances (see the example sketch at the end of this document). For creation of a new instance, `create` is used; it should create metadata in the host context and add the new instance object to the `CreateContext`. To collect existing instances, `collect_instances` is used; it should find all existing instances related to the creator and add them to the `CreateContext`. To update instance data, `update_instances` is used; it is called from the `CreateContext` on `save_changes`. To remove an instance, use `remove_instances`; it should remove metadata from the host context and remove the instance from the `CreateContext`.
+
+A Creator has access to the `CreateContext` which created the creator object. All new or removed instances must be announced to the context. To do so, use the methods `_add_instance_to_context` and `_remove_instance_from_context`, to which the `CreatedInstance` is passed. They should be called from `create` when a new instance was created and from `remove_instances` when an instance was removed.
+
+How instances are handled is not strictly defined for Creators, but it is good practice to define a host specific way. It is not strict because there are cases when a host implementation just can't handle all requirements of all creators.
+
+### AutoCreator
+Auto-creators are automatically executed when the `CreateContext` is reset. They can be used to create instances that should always be available and may not require the artist's manual creation (e.g. `workfile`). An auto-creator should not create duplicated instances and should validate existence before creating a new one. The `remove_instances` method is implemented to do nothing.
+
+## Host
+A host implementation must provide global context metadata handler functions: one to get current context data and a second to update them. Currently only context publish plugin attribute values are stored in the context data.
+
+### Get global context data (`get_context_data`)
+There are data that are not specific to any instance but are specific to the whole context (e.g. context plugin values).
+
+### Update global context data (`update_context_data`)
+Update global context data.
+
+### Optional title of context
+It is recommended to implement a `get_context_title` function. The string returned from this function will be shown in the UI as the context the artist is working in.
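+
+## Example
+Below is a minimal sketch of a manual Creator following the conventions above. The class name, identifier, family and the host specific persistence steps are illustrative assumptions, not part of this codebase; a real implementation must store and read instance metadata in a host specific way.
+
+```python
+from ayon_core.pipeline.create import Creator, CreatedInstance
+
+
+class ExampleCreator(Creator):
+    """Illustrative creator of a hypothetical 'example' family."""
+
+    identifier = "io.example.create.example"
+    family = "example"
+    label = "Example"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # Create the instance object and let the context know about it
+        instance = CreatedInstance(
+            self.family, subset_name, instance_data, self
+        )
+        self._add_instance_to_context(instance)
+        # Host specific: persist 'instance.data_to_store()' to the
+        # workfile so 'collect_instances' can find it later
+
+    def collect_instances(self):
+        # Host specific: read stored metadata back from the workfile
+        for metadata in []:  # iterate collected metadata dicts here
+            instance = CreatedInstance.from_existing(metadata, self)
+            self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        # Called from 'CreateContext.save_changes'
+        for instance, _changes in update_list:
+            # Host specific: write 'instance.data_to_store()' back
+            pass
+
+    def remove_instances(self, instances):
+        for instance in instances:
+            # Host specific: remove the stored metadata, then remove
+            # the instance from the context
+            self._remove_instance_from_context(instance)
+```
+
+The host specific parts are intentionally left as comments; each host decides where instance metadata lives (scene nodes, workfile metadata, ...).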
diff --git a/openpype/pipeline/create/__init__.py b/client/ayon_core/pipeline/create/__init__.py similarity index 100% rename from openpype/pipeline/create/__init__.py rename to client/ayon_core/pipeline/create/__init__.py diff --git a/openpype/pipeline/create/constants.py b/client/ayon_core/pipeline/create/constants.py similarity index 100% rename from openpype/pipeline/create/constants.py rename to client/ayon_core/pipeline/create/constants.py diff --git a/openpype/pipeline/create/context.py b/client/ayon_core/pipeline/create/context.py similarity index 97% rename from openpype/pipeline/create/context.py rename to client/ayon_core/pipeline/create/context.py index 683699a0d1..8990d50324 100644 --- a/openpype/pipeline/create/context.py +++ b/client/ayon_core/pipeline/create/context.py @@ -11,25 +11,24 @@ import pyblish.logic import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.client import ( +from ayon_core.client import ( get_assets, get_asset_by_name, get_asset_name_identifier, ) -from openpype.settings import ( +from ayon_core.settings import ( get_system_settings, get_project_settings ) -from openpype.lib.attribute_definitions import ( +from ayon_core.lib.attribute_definitions import ( UnknownDef, serialize_attr_defs, deserialize_attr_defs, get_default_values, ) -from openpype.host import IPublishHost, IWorkfileHost -from openpype.pipeline import legacy_io, Anatomy -from openpype.pipeline.plugin_discover import DiscoverResult +from ayon_core.host import IPublishHost, IWorkfileHost +from ayon_core.pipeline import legacy_io, Anatomy +from ayon_core.pipeline.plugin_discover import DiscoverResult from .creator_plugins import ( Creator, @@ -931,15 +930,9 @@ def __init__( data.pop("family", None) data.pop("subset", None) - if AYON_SERVER_ENABLED: - asset_name = data.pop("asset", None) - if "folderPath" not in data: - data["folderPath"] = asset_name - - elif "folderPath" in data: - asset_name = data.pop("folderPath").split("/")[-1] - if "asset" not in data: - data["asset"] = asset_name + asset_name = data.pop("asset", None) + if "folderPath" not in data: + data["folderPath"] = asset_name # QUESTION Does it make sense to have data stored as ordered dict? self._data = collections.OrderedDict() @@ -1207,7 +1200,7 @@ def set_publish_plugins(self, attr_plugins): Args: attr_plugins (List[pyblish.api.Plugin]): Pyblish plugins which - inherit from 'OpenPypePyblishPluginMixin' and may contain + inherit from 'AYONPyblishPluginMixin' and may contain attribute definitions. 
""" @@ -1283,9 +1276,7 @@ def deserialize_on_remote(cls, serialized_data): def has_set_asset(self): """Asset name is set in data.""" - if AYON_SERVER_ENABLED: - return "folderPath" in self._data - return "asset" in self._data + return "folderPath" in self._data @property def has_set_task(self): @@ -1744,8 +1735,8 @@ def reset_plugins(self, discover_publish_plugins=True): self._reset_convertor_plugins() def _reset_publish_plugins(self, discover_publish_plugins): - from openpype.pipeline import OpenPypePyblishPluginMixin - from openpype.pipeline.publish import ( + from ayon_core.pipeline import AYONPyblishPluginMixin + from ayon_core.pipeline.publish import ( publish_plugins_discover ) @@ -1768,7 +1759,7 @@ def _reset_publish_plugins(self, discover_publish_plugins): # Collect plugins that can have attribute definitions for plugin in publish_plugins: - if OpenPypePyblishPluginMixin in inspect.getmro(plugin): + if AYONPyblishPluginMixin in inspect.getmro(plugin): plugins_with_defs.append(plugin) plugins_mismatch_targets = [ @@ -2021,13 +2012,9 @@ def create( self.host_name ) asset_name = get_asset_name_identifier(asset_doc) - if AYON_SERVER_ENABLED: - asset_name_key = "folderPath" - else: - asset_name_key = "asset" instance_data = { - asset_name_key: asset_name, + "folderPath": asset_name, "task": task_name, "family": creator.family, "variant": variant @@ -2251,11 +2238,8 @@ def validate_instances_context(self, instances=None): task_names_by_asset_name = {} for instance in instances: + asset_name = instance.get("folderPath") task_name = instance.get("task") - if AYON_SERVER_ENABLED: - asset_name = instance.get("folderPath") - else: - asset_name = instance.get("asset") if asset_name: task_names_by_asset_name[asset_name] = set() if task_name: @@ -2266,13 +2250,10 @@ def validate_instances_context(self, instances=None): for asset_name in task_names_by_asset_name.keys() if asset_name is not None } - fields = {"name", "data.tasks"} - if AYON_SERVER_ENABLED: - fields |= {"data.parents"} asset_docs = list(get_assets( self.project_name, asset_names=asset_names, - fields=fields + fields={"name", "data.tasks", "data.parents"} )) task_names_by_asset_name = {} @@ -2287,15 +2268,12 @@ def validate_instances_context(self, instances=None): if not instance.has_valid_asset or not instance.has_valid_task: continue - if AYON_SERVER_ENABLED: - asset_name = instance["folderPath"] - if asset_name and "/" not in asset_name: - asset_docs = asset_docs_by_name.get(asset_name) - if len(asset_docs) == 1: - asset_name = get_asset_name_identifier(asset_docs[0]) - instance["folderPath"] = asset_name - else: - asset_name = instance["asset"] + asset_name = instance["folderPath"] + if asset_name and "/" not in asset_name: + asset_docs = asset_docs_by_name.get(asset_name) + if len(asset_docs) == 1: + asset_name = get_asset_name_identifier(asset_docs[0]) + instance["folderPath"] = asset_name if asset_name not in task_names_by_asset_name: instance.set_asset_invalid(True) diff --git a/openpype/pipeline/create/creator_plugins.py b/client/ayon_core/pipeline/create/creator_plugins.py similarity index 98% rename from openpype/pipeline/create/creator_plugins.py rename to client/ayon_core/pipeline/create/creator_plugins.py index b51f69379c..6fa0d2ffa1 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/client/ayon_core/pipeline/create/creator_plugins.py @@ -6,9 +6,9 @@ import six -from openpype.settings import get_system_settings, get_project_settings -from openpype.lib import Logger, is_func_signature_supported -from 
openpype.pipeline.plugin_discover import ( +from ayon_core.settings import get_system_settings, get_project_settings +from ayon_core.lib import Logger, is_func_signature_supported +from ayon_core.pipeline.plugin_discover import ( discover, register_plugin, register_plugin_path, @@ -185,7 +185,7 @@ class BaseCreator: # Instance attribute definitions that can be changed per instance # - returns list of attribute definitions from - # `openpype.pipeline.attribute_definitions` + # `ayon_core.pipeline.attribute_definitions` instance_attr_defs = [] # Filtering by host name - can be used to be filtered by host name @@ -832,7 +832,7 @@ def discover_convertor_plugins(*args, **kwargs): def discover_legacy_creator_plugins(): - from openpype.pipeline import get_current_project_name + from ayon_core.pipeline import get_current_project_name log = Logger.get_logger("CreatorDiscover") diff --git a/openpype/pipeline/create/legacy_create.py b/client/ayon_core/pipeline/create/legacy_create.py similarity index 98% rename from openpype/pipeline/create/legacy_create.py rename to client/ayon_core/pipeline/create/legacy_create.py index 50ef274633..08be32eed4 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/client/ayon_core/pipeline/create/legacy_create.py @@ -9,7 +9,7 @@ import logging import collections -from openpype.client import get_asset_by_id +from ayon_core.client import get_asset_by_id from .subset_name import get_subset_name @@ -191,7 +191,7 @@ def legacy_create(Creator, name, asset, options=None, data=None): Name of instance """ - from openpype.pipeline import registered_host + from ayon_core.pipeline import registered_host host = registered_host() plugin = Creator(name, asset, options, data) diff --git a/openpype/pipeline/create/subset_name.py b/client/ayon_core/pipeline/create/subset_name.py similarity index 97% rename from openpype/pipeline/create/subset_name.py rename to client/ayon_core/pipeline/create/subset_name.py index 00025b19b8..3892971ce8 100644 --- a/openpype/pipeline/create/subset_name.py +++ b/client/ayon_core/pipeline/create/subset_name.py @@ -1,8 +1,8 @@ import os -from openpype.settings import get_project_settings -from openpype.lib import filter_profiles, prepare_template_data -from openpype.pipeline import legacy_io +from ayon_core.settings import get_project_settings +from ayon_core.lib import filter_profiles, prepare_template_data +from ayon_core.pipeline import legacy_io from .constants import DEFAULT_SUBSET_TEMPLATE diff --git a/client/ayon_core/pipeline/create/utils.py b/client/ayon_core/pipeline/create/utils.py new file mode 100644 index 0000000000..0547c20c0a --- /dev/null +++ b/client/ayon_core/pipeline/create/utils.py @@ -0,0 +1,127 @@ +import collections + +from ayon_core.client import ( + get_assets, + get_subsets, + get_last_versions, + get_asset_name_identifier, +) + + +def get_last_versions_for_instances( + project_name, instances, use_value_for_missing=False +): + """Get last versions for instances by their asset and subset name. + + Args: + project_name (str): Project name. + instances (list[CreatedInstance]): Instances to get next versions for. + use_value_for_missing (Optional[bool]): Missing values are replaced + with negative value if True. Otherwise None is used. -2 is used + for instances without filled asset or subset name. -1 is used + for missing entities. + + Returns: + dict[str, Union[int, None]]: Last versions by instance id. 
+ """ + + output = { + instance.id: -1 if use_value_for_missing else None + for instance in instances + } + subset_names_by_asset_name = collections.defaultdict(set) + instances_by_hierarchy = {} + for instance in instances: + asset_name = instance.data.get("asset") + subset_name = instance.subset_name + if not asset_name or not subset_name: + if use_value_for_missing: + output[instance.id] = -2 + continue + + ( + instances_by_hierarchy + .setdefault(asset_name, {}) + .setdefault(subset_name, []) + .append(instance) + ) + subset_names_by_asset_name[asset_name].add(subset_name) + + subset_names = set() + for names in subset_names_by_asset_name.values(): + subset_names |= names + + if not subset_names: + return output + + asset_docs = get_assets( + project_name, + asset_names=subset_names_by_asset_name.keys(), + fields=["name", "_id", "data.parents"] + ) + asset_names_by_id = { + asset_doc["_id"]: get_asset_name_identifier(asset_doc) + for asset_doc in asset_docs + } + if not asset_names_by_id: + return output + + subset_docs = get_subsets( + project_name, + asset_ids=asset_names_by_id.keys(), + subset_names=subset_names, + fields=["_id", "name", "parent"] + ) + subset_docs_by_id = {} + for subset_doc in subset_docs: + # Filter subset docs by subset names under parent + asset_id = subset_doc["parent"] + asset_name = asset_names_by_id[asset_id] + subset_name = subset_doc["name"] + if subset_name not in subset_names_by_asset_name[asset_name]: + continue + subset_docs_by_id[subset_doc["_id"]] = subset_doc + + if not subset_docs_by_id: + return output + + last_versions_by_subset_id = get_last_versions( + project_name, + subset_docs_by_id.keys(), + fields=["name", "parent"] + ) + for subset_id, version_doc in last_versions_by_subset_id.items(): + subset_doc = subset_docs_by_id[subset_id] + asset_id = subset_doc["parent"] + asset_name = asset_names_by_id[asset_id] + _instances = instances_by_hierarchy[asset_name][subset_doc["name"]] + for instance in _instances: + output[instance.id] = version_doc["name"] + + return output + + +def get_next_versions_for_instances(project_name, instances): + """Get next versions for instances by their asset and subset name. + + Args: + project_name (str): Project name. + instances (list[CreatedInstance]): Instances to get next versions for. + + Returns: + dict[str, Union[int, None]]: Next versions by instance id. Version is + 'None' if instance has no asset or subset name. + """ + + last_versions = get_last_versions_for_instances( + project_name, instances, True) + + output = {} + for instance_id, version in last_versions.items(): + if version == -2: + output[instance_id] = None + elif version == -1: + output[instance_id] = 1 + else: + output[instance_id] = version + 1 + return output diff --git a/client/ayon_core/pipeline/delivery.py b/client/ayon_core/pipeline/delivery.py new file mode 100644 index 0000000000..cb90e67090 --- /dev/null +++ b/client/ayon_core/pipeline/delivery.py @@ -0,0 +1,325 @@ +"""Functions useful for delivery of published representations.""" +import os +import copy +import shutil +import glob +import clique +import collections + +from ayon_core.lib import create_hard_link + + +def _copy_file(src_path, dst_path): + """Hardlink file if possible(to save space), copy if not. + + Because of using hardlinks should not be function used in other parts + of pipeline. 
+ """ + + if os.path.exists(dst_path): + return + try: + create_hard_link( + src_path, + dst_path + ) + except OSError: + shutil.copyfile(src_path, dst_path) + + +def get_format_dict(anatomy, location_path): + """Returns replaced root values from user provider value. + + Args: + anatomy (Anatomy): Project anatomy. + location_path (str): User provided value. + + Returns: + (dict): Prepared data for formatting of a template. + """ + + format_dict = {} + if not location_path: + return format_dict + + location_path = location_path.replace("\\", "/") + root_names = anatomy.root_names_from_templates( + anatomy.templates["delivery"] + ) + format_dict["root"] = {} + for name in root_names: + format_dict["root"][name] = location_path + return format_dict + + +def check_destination_path( + repre_id, + anatomy, + anatomy_data, + datetime_data, + template_name +): + """ Try to create destination path based on 'template_name'. + + In the case that path cannot be filled, template contains unmatched + keys, provide error message to filter out repre later. + + Args: + repre_id (str): Representation id. + anatomy (Anatomy): Project anatomy. + anatomy_data (dict): Template data to fill anatomy templates. + datetime_data (dict): Values with actual date. + template_name (str): Name of template which should be used from anatomy + templates. + Returns: + Dict[str, List[str]]: Report of happened errors. Key is message title + value is detailed information. + """ + + anatomy_data.update(datetime_data) + anatomy_filled = anatomy.format_all(anatomy_data) + dest_path = anatomy_filled["delivery"][template_name] + report_items = collections.defaultdict(list) + + if not dest_path.solved: + msg = ( + "Missing keys in Representation's context" + " for anatomy template \"{}\"." + ).format(template_name) + + sub_msg = ( + "Representation: {}
" + ).format(repre_id) + + if dest_path.missing_keys: + keys = ", ".join(dest_path.missing_keys) + sub_msg += ( + "- Missing keys: \"{}\"
" + ).format(keys) + + if dest_path.invalid_types: + items = [] + for key, value in dest_path.invalid_types.items(): + items.append("\"{}\" {}".format(key, str(value))) + + keys = ", ".join(items) + sub_msg += ( + "- Invalid value DataType: \"{}\"
" + ).format(keys) + + report_items[msg].append(sub_msg) + + return report_items + + +def deliver_single_file( + src_path, + repre, + anatomy, + template_name, + anatomy_data, + format_dict, + report_items, + log +): + """Copy single file to calculated path based on template + + Args: + src_path(str): path of source representation file + repre (dict): full repre, used only in deliver_sequence, here only + as to share same signature + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (logging.Logger): for log printing + + Returns: + (collections.defaultdict, int) + """ + + # Make sure path is valid for all platforms + src_path = os.path.normpath(src_path.replace("\\", "/")) + + if not os.path.exists(src_path): + msg = "{} doesn't exist for {}".format(src_path, repre["_id"]) + report_items["Source file was not found"].append(msg) + return report_items, 0 + + if format_dict: + anatomy_data = copy.deepcopy(anatomy_data) + anatomy_data["root"] = format_dict["root"] + template_obj = anatomy.templates_obj["delivery"][template_name] + delivery_path = template_obj.format_strict(anatomy_data) + + # Backwards compatibility when extension contained `.` + delivery_path = delivery_path.replace("..", ".") + # Make sure path is valid for all platforms + delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) + # Remove newlines from the end of the string to avoid OSError during copy + delivery_path = delivery_path.rstrip() + + delivery_folder = os.path.dirname(delivery_path) + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + log.debug("Copying single: {} -> {}".format(src_path, delivery_path)) + _copy_file(src_path, delivery_path) + + return report_items, 1 + + +def deliver_sequence( + src_path, + repre, + anatomy, + template_name, + anatomy_data, + format_dict, + report_items, + log, + has_renumbered_frame=False, + new_frame_start=0 +): + """ For Pype2(mainly - works in 3 too) where representation might not + contain files. + + Uses listing physical files (not 'files' on repre as a)might not be + present, b)might not be reliable for representation and copying them. + + TODO Should be refactored when files are sufficient to drive all + representations. 
+
+    Args:
+        src_path (str): Path of the source representation file.
+        repre (dict): Full representation.
+        anatomy (Anatomy): Project anatomy.
+        template_name (str): User selected delivery template name.
+        anatomy_data (dict): Data from the representation to fill anatomy with.
+        format_dict (dict): Root dictionary with names and values.
+        report_items (collections.defaultdict): For returning error messages.
+        log (logging.Logger): For log printing.
+
+    Returns:
+        (collections.defaultdict, int)
+    """
+
+    src_path = os.path.normpath(src_path.replace("\\", "/"))
+
+    def hash_path_exist(myPath):
+        res = myPath.replace('#', '*')
+        glob_search_results = glob.glob(res)
+        if len(glob_search_results) > 0:
+            return True
+        return False
+
+    if not hash_path_exist(src_path):
+        msg = "{} doesn't exist for {}".format(
+            src_path, repre["_id"])
+        report_items["Source file was not found"].append(msg)
+        return report_items, 0
+
+    delivery_templates = anatomy.templates.get("delivery") or {}
+    delivery_template = delivery_templates.get(template_name)
+    if delivery_template is None:
+        msg = (
+            "Delivery template \"{}\" in anatomy of project \"{}\""
+            " was not found"
+        ).format(template_name, anatomy.project_name)
+        report_items[""].append(msg)
+        return report_items, 0
+
+    # Check if 'frame' key is available in template which is required
+    # for sequence delivery
+    if "{frame" not in delivery_template:
+        msg = (
+            "Delivery template \"{}\" in anatomy of project \"{}\""
+            " does not contain '{{frame}}' key to fill. Delivery of sequence"
+            " can't be processed."
+        ).format(template_name, anatomy.project_name)
+        report_items[""].append(msg)
+        return report_items, 0
+
+    dir_path, file_name = os.path.split(str(src_path))
+
+    context = repre["context"]
+    ext = context.get("ext", context.get("representation"))
+
+    if not ext:
+        msg = "Source extension not found, cannot find collection"
+        report_items[msg].append(src_path)
+        log.warning("{} <{}>".format(msg, context))
+        return report_items, 0
+
+    ext = "." + ext
+    # context.representation could be .psd
+    ext = ext.replace("..", ".")
+
+    src_collections, remainder = clique.assemble(os.listdir(dir_path))
+    src_collection = None
+    for col in src_collections:
+        if col.tail != ext:
+            continue
+
+        src_collection = col
+        break
+
+    if src_collection is None:
+        msg = "Source collection of files was not found"
+        report_items[msg].append(src_path)
+        log.warning("{} <{}>".format(msg, src_path))
+        return report_items, 0
+
+    frame_indicator = "@####@"
+
+    anatomy_data = copy.deepcopy(anatomy_data)
+    anatomy_data["frame"] = frame_indicator
+    if format_dict:
+        anatomy_data["root"] = format_dict["root"]
+    template_obj = anatomy.templates_obj["delivery"][template_name]
+    delivery_path = template_obj.format_strict(anatomy_data)
+
+    delivery_path = os.path.normpath(delivery_path.replace("\\", "/"))
+    delivery_folder = os.path.dirname(delivery_path)
+    dst_head, dst_tail = delivery_path.split(frame_indicator)
+    dst_padding = src_collection.padding
+    dst_collection = clique.Collection(
+        head=dst_head,
+        tail=dst_tail,
+        padding=dst_padding
+    )
+
+    if not os.path.exists(delivery_folder):
+        os.makedirs(delivery_folder)
+
+    src_head = src_collection.head
+    src_tail = src_collection.tail
+    uploaded = 0
+    first_frame = min(src_collection.indexes)
+    for index in src_collection.indexes:
+        src_padding = src_collection.format("{padding}") % index
+        src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
+        src = os.path.normpath(
+            os.path.join(dir_path, src_file_name)
+        )
+        dst_index = index
+        if has_renumbered_frame:
+            # Calculate offset between first frame and current frame
+            # - '0' for first frame
+            offset = new_frame_start - first_frame
+            # Add offset to new frame start
+            dst_index = index + offset
+            if dst_index < 0:
+                msg = "Renumber frame has a smaller number than original frame"  # noqa
+                report_items[msg].append(src_file_name)
+                log.warning("{} <{}>".format(msg, context))
+                return report_items, 0
+        dst_padding = dst_collection.format("{padding}") % dst_index
+        dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
+        log.debug("Copying single: {} -> {}".format(src, dst))
+        _copy_file(src, dst)
+
+        uploaded += 1
+
+    return report_items, uploaded
diff --git a/openpype/pipeline/editorial.py b/client/ayon_core/pipeline/editorial.py
similarity index 100%
rename from openpype/pipeline/editorial.py
rename to client/ayon_core/pipeline/editorial.py
diff --git a/openpype/hosts/unreal/plugins/__init__.py b/client/ayon_core/pipeline/farm/__init__.py
similarity index 100%
rename from openpype/hosts/unreal/plugins/__init__.py
rename to client/ayon_core/pipeline/farm/__init__.py
diff --git a/openpype/pipeline/farm/patterning.py b/client/ayon_core/pipeline/farm/patterning.py
similarity index 100%
rename from openpype/pipeline/farm/patterning.py
rename to client/ayon_core/pipeline/farm/patterning.py
diff --git a/openpype/pipeline/farm/pyblish_functions.py b/client/ayon_core/pipeline/farm/pyblish_functions.py
similarity index 99%
rename from openpype/pipeline/farm/pyblish_functions.py
rename to client/ayon_core/pipeline/farm/pyblish_functions.py
index 975fdd31cc..9423d8501c 100644
--- a/openpype/pipeline/farm/pyblish_functions.py
+++ b/client/ayon_core/pipeline/farm/pyblish_functions.py
@@ -7,18 +7,18 @@
 import re
 import warnings
 
-from openpype.pipeline import (
+from ayon_core.pipeline import (
     get_current_project_name,
     get_representation_path,
     Anatomy,
 )
-from openpype.client import (
+from ayon_core.client import (
     get_last_version_by_subset_name,
     get_representations
 )
-from openpype.lib import Logger
-from openpype.pipeline.publish import KnownPublishError
-from openpype.pipeline.farm.patterning import match_aov_pattern
+from ayon_core.lib import Logger
+from ayon_core.pipeline.publish import KnownPublishError
+from ayon_core.pipeline.farm.patterning import match_aov_pattern
 
 
 @attr.s
diff --git a/openpype/pipeline/farm/pyblish_functions.pyi b/client/ayon_core/pipeline/farm/pyblish_functions.pyi
similarity index 96%
rename from openpype/pipeline/farm/pyblish_functions.pyi
rename to client/ayon_core/pipeline/farm/pyblish_functions.pyi
index 76f7c34dcd..d9d46a63be 100644
--- a/openpype/pipeline/farm/pyblish_functions.pyi
+++ b/client/ayon_core/pipeline/farm/pyblish_functions.pyi
@@ -1,5 +1,5 @@
 import pyblish.api
-from openpype.pipeline import Anatomy
+from ayon_core.pipeline import Anatomy
 from typing import Tuple, Union, List
 
 
diff --git a/openpype/pipeline/farm/tools.py b/client/ayon_core/pipeline/farm/tools.py
similarity index 100%
rename from openpype/pipeline/farm/tools.py
rename to client/ayon_core/pipeline/farm/tools.py
diff --git a/client/ayon_core/pipeline/legacy_io.py b/client/ayon_core/pipeline/legacy_io.py
new file mode 100644
index 0000000000..cd09da2917
--- /dev/null
+++ b/client/ayon_core/pipeline/legacy_io.py
@@ -0,0 +1,109 @@
+"""Wrapper around interactions with the database"""
+
+import os
+import sys
+import logging
+import functools
+
+from . import schema
+
+module = sys.modules[__name__]
+
+Session = {}
+_is_installed = False
+
+log = logging.getLogger(__name__)
+
+SESSION_CONTEXT_KEYS = (
+    # Name of current Project
+    "AVALON_PROJECT",
+    # Name of current Asset
+    "AVALON_ASSET",
+    # Name of current task
+    "AVALON_TASK",
+    # Name of current app
+    "AVALON_APP",
+    # Path to working directory
+    "AVALON_WORKDIR",
+    # Optional path to scenes directory (see Work Files API)
+    "AVALON_SCENEDIR"
+)
+
+
+def session_data_from_environment(context_keys=False):
+    session_data = {}
+    if context_keys:
+        for key in SESSION_CONTEXT_KEYS:
+            value = os.environ.get(key)
+            session_data[key] = value or ""
+    else:
+        for key in SESSION_CONTEXT_KEYS:
+            session_data[key] = None
+
+    for key, default_value in (
+        # Name of Avalon in graphical user interfaces
+        # Use this to customise the visual appearance of Avalon
+        # to better integrate with your surrounding pipeline
+        ("AVALON_LABEL", "Avalon"),
+
+        # Used during any connections to the outside world
+        ("AVALON_TIMEOUT", "1000"),
+
+        # Name of database used in MongoDB
+        ("AVALON_DB", "avalon"),
+    ):
+        value = os.environ.get(key) or default_value
+        if value is not None:
+            session_data[key] = value
+
+    return session_data
+
+
+def is_installed():
+    return module._is_installed
+
+
+def install():
+    """Establish a persistent connection to the database"""
+    if is_installed():
+        return
+
+    session = session_data_from_environment(context_keys=True)
+
+    session["schema"] = "openpype:session-4.0"
+    try:
+        schema.validate(session)
+    except schema.ValidationError as e:
+        # TODO(marcus): Make this mandatory
+        log.warning(e)
+
+    Session.update(session)
+
+    module._is_installed = True
+
+
+def uninstall():
+    """Close any connection to the database.
+
+    Deprecated:
+        This function does nothing and should be removed.
+ """ + module._is_installed = False + + +def requires_install(func): + @functools.wraps(func) + def decorated(*args, **kwargs): + if not is_installed(): + install() + return func(*args, **kwargs) + return decorated + + +@requires_install +def active_project(*args, **kwargs): + return Session["AVALON_PROJECT"] + + +def current_project(*args, **kwargs): + return Session.get("AVALON_PROJECT") diff --git a/openpype/pipeline/load/__init__.py b/client/ayon_core/pipeline/load/__init__.py similarity index 100% rename from openpype/pipeline/load/__init__.py rename to client/ayon_core/pipeline/load/__init__.py diff --git a/openpype/pipeline/load/plugins.py b/client/ayon_core/pipeline/load/plugins.py similarity index 96% rename from openpype/pipeline/load/plugins.py rename to client/ayon_core/pipeline/load/plugins.py index 8acfcfdb6c..e13260d296 100644 --- a/openpype/pipeline/load/plugins.py +++ b/client/ayon_core/pipeline/load/plugins.py @@ -1,12 +1,12 @@ import os import logging -from openpype.settings import get_system_settings, get_project_settings -from openpype.pipeline import ( +from ayon_core.settings import get_system_settings, get_project_settings +from ayon_core.pipeline import ( schema, legacy_io, ) -from openpype.pipeline.plugin_discover import ( +from ayon_core.pipeline.plugin_discover import ( discover, register_plugin, register_plugin_path, @@ -258,12 +258,13 @@ class SubsetLoaderPlugin(LoaderPlugin): def discover_loader_plugins(project_name=None): - from openpype.lib import Logger + from ayon_core.lib import Logger + from ayon_core.pipeline import get_current_project_name log = Logger.get_logger("LoaderDiscover") plugins = discover(LoaderPlugin) if not project_name: - project_name = legacy_io.active_project() + project_name = get_current_project_name() system_settings = get_system_settings() project_settings = get_project_settings(project_name) for plugin in plugins: diff --git a/client/ayon_core/pipeline/load/utils.py b/client/ayon_core/pipeline/load/utils.py new file mode 100644 index 0000000000..056836d712 --- /dev/null +++ b/client/ayon_core/pipeline/load/utils.py @@ -0,0 +1,910 @@ +import os +import platform +import copy +import getpass +import logging +import inspect +import collections +import numbers + +from ayon_core.host import ILoadHost +from ayon_core.client import ( + get_project, + get_assets, + get_subsets, + get_versions, + get_version_by_id, + get_last_version_by_subset_id, + get_hero_version_by_subset_id, + get_version_by_name, + get_last_versions, + get_representations, + get_representation_by_id, + get_representation_by_name, + get_representation_parents +) +from ayon_core.lib import ( + StringTemplate, + TemplateUnsolved, +) +from ayon_core.pipeline import ( + Anatomy, +) + +log = logging.getLogger(__name__) + +ContainersFilterResult = collections.namedtuple( + "ContainersFilterResult", + ["latest", "outdated", "not_found", "invalid"] +) + + +class HeroVersionType(object): + def __init__(self, version): + assert isinstance(version, numbers.Integral), ( + "Version is not an integer. \"{}\" {}".format( + version, str(type(version)) + ) + ) + self.version = version + + def __str__(self): + return str(self.version) + + def __int__(self): + return int(self.version) + + def __format__(self, format_spec): + return self.version.__format__(format_spec) + + +class LoadError(Exception): + """Known error that happened during loading. + + A message is shown to user (without traceback). Make sure an artist can + understand the problem. 
+ """ + + pass + + +class IncompatibleLoaderError(ValueError): + """Error when Loader is incompatible with a representation.""" + pass + + +class InvalidRepresentationContext(ValueError): + """Representation path can't be received using representation document.""" + pass + + +class LoaderSwitchNotImplementedError(NotImplementedError): + """Error when `switch` is used with Loader that has no implementation.""" + pass + + +class LoaderNotFoundError(RuntimeError): + """Error when Loader plugin for a loader name is not found.""" + pass + + +def get_repres_contexts(representation_ids, project_name=None): + """Return parenthood context for representation. + + Args: + representation_ids (list): The representation ids. + project_name (Optional[str]): Project name. + + Returns: + dict: The full representation context by representation id. + keys are repre_id, value is dictionary with full documents of + asset, subset, version and representation. + """ + from ayon_core.pipeline import get_current_project_name + + if not representation_ids: + return {} + + if not project_name: + project_name = get_current_project_name() + + repre_docs = get_representations(project_name, representation_ids) + + return get_contexts_for_repre_docs(project_name, repre_docs) + + +def get_contexts_for_repre_docs(project_name, repre_docs): + contexts = {} + if not repre_docs: + return contexts + + repre_docs_by_id = {} + version_ids = set() + for repre_doc in repre_docs: + version_ids.add(repre_doc["parent"]) + repre_docs_by_id[repre_doc["_id"]] = repre_doc + + version_docs = get_versions( + project_name, version_ids, hero=True + ) + + version_docs_by_id = {} + hero_version_docs = [] + versions_for_hero = set() + subset_ids = set() + for version_doc in version_docs: + if version_doc["type"] == "hero_version": + hero_version_docs.append(version_doc) + versions_for_hero.add(version_doc["version_id"]) + version_docs_by_id[version_doc["_id"]] = version_doc + subset_ids.add(version_doc["parent"]) + + if versions_for_hero: + _version_docs = get_versions(project_name, versions_for_hero) + _version_data_by_id = { + version_doc["_id"]: version_doc["data"] + for version_doc in _version_docs + } + + for hero_version_doc in hero_version_docs: + hero_version_id = hero_version_doc["_id"] + version_id = hero_version_doc["version_id"] + version_data = copy.deepcopy(_version_data_by_id[version_id]) + version_docs_by_id[hero_version_id]["data"] = version_data + + subset_docs = get_subsets(project_name, subset_ids) + subset_docs_by_id = {} + asset_ids = set() + for subset_doc in subset_docs: + subset_docs_by_id[subset_doc["_id"]] = subset_doc + asset_ids.add(subset_doc["parent"]) + + asset_docs = get_assets(project_name, asset_ids) + asset_docs_by_id = { + asset_doc["_id"]: asset_doc + for asset_doc in asset_docs + } + + project_doc = get_project(project_name) + + for repre_id, repre_doc in repre_docs_by_id.items(): + version_doc = version_docs_by_id[repre_doc["parent"]] + subset_doc = subset_docs_by_id[version_doc["parent"]] + asset_doc = asset_docs_by_id[subset_doc["parent"]] + context = { + "project": { + "name": project_doc["name"], + "code": project_doc["data"].get("code") + }, + "asset": asset_doc, + "subset": subset_doc, + "version": version_doc, + "representation": repre_doc, + } + contexts[repre_id] = context + + return contexts + + +def get_subset_contexts(subset_ids, project_name=None): + """Return parenthood context for subset. + + Provides context on subset granularity - less detail than + 'get_repre_contexts'. 
+ Args: + subset_ids (list): The subset ids. + project_name (Optional[str]): Project name. + Returns: + dict: The full representation context by representation id. + """ + from ayon_core.pipeline import get_current_project_name + + contexts = {} + if not subset_ids: + return contexts + + if not project_name: + project_name = get_current_project_name() + subset_docs = get_subsets(project_name, subset_ids) + subset_docs_by_id = {} + asset_ids = set() + for subset_doc in subset_docs: + subset_docs_by_id[subset_doc["_id"]] = subset_doc + asset_ids.add(subset_doc["parent"]) + + asset_docs = get_assets(project_name, asset_ids) + asset_docs_by_id = { + asset_doc["_id"]: asset_doc + for asset_doc in asset_docs + } + + project_doc = get_project(project_name) + + for subset_id, subset_doc in subset_docs_by_id.items(): + asset_doc = asset_docs_by_id[subset_doc["parent"]] + context = { + "project": { + "name": project_doc["name"], + "code": project_doc["data"].get("code") + }, + "asset": asset_doc, + "subset": subset_doc + } + contexts[subset_id] = context + + return contexts + + +def get_representation_context(representation): + """Return parenthood context for representation. + + Args: + representation (str or ObjectId or dict): The representation id + or full representation as returned by the database. + + Returns: + dict: The full representation context. + """ + from ayon_core.pipeline import get_current_project_name + + assert representation is not None, "This is a bug" + + project_name = get_current_project_name() + if not isinstance(representation, dict): + representation = get_representation_by_id( + project_name, representation + ) + + if not representation: + raise AssertionError("Representation was not found in database") + + version, subset, asset, project = get_representation_parents( + project_name, representation + ) + if not version: + raise AssertionError("Version was not found in database") + if not subset: + raise AssertionError("Subset was not found in database") + if not asset: + raise AssertionError("Asset was not found in database") + if not project: + raise AssertionError("Project was not found in database") + + context = { + "project": { + "name": project["name"], + "code": project["data"].get("code", '') + }, + "asset": asset, + "subset": subset, + "version": version, + "representation": representation, + } + + return context + + +def load_with_repre_context( + Loader, repre_context, namespace=None, name=None, options=None, **kwargs +): + + # Ensure the Loader is compatible for the representation + if not is_compatible_loader(Loader, repre_context): + raise IncompatibleLoaderError( + "Loader {} is incompatible with {}".format( + Loader.__name__, repre_context["subset"]["name"] + ) + ) + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + if name is None: + name = repre_context["subset"]["name"] + + log.info( + "Running '%s' on '%s'" % ( + Loader.__name__, repre_context["asset"]["name"] + ) + ) + + loader = Loader() + + # Backwards compatibility: Originally the loader's __init__ required the + # representation context to set `fname` attribute to the filename to load + # Deprecated - to be removed in OpenPype 3.16.6 or 3.17.0. 
+ loader._fname = get_representation_path_from_context(repre_context) + + return loader.load(repre_context, name, namespace, options) + + +def load_with_subset_context( + Loader, subset_context, namespace=None, name=None, options=None, **kwargs +): + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + if name is None: + name = subset_context["subset"]["name"] + + log.info( + "Running '%s' on '%s'" % ( + Loader.__name__, subset_context["asset"]["name"] + ) + ) + + return Loader().load(subset_context, name, namespace, options) + + +def load_with_subset_contexts( + Loader, subset_contexts, namespace=None, name=None, options=None, **kwargs +): + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + joined_subset_names = " | ".join( + context["subset"]["name"] + for context in subset_contexts + ) + if name is None: + name = joined_subset_names + + log.info( + "Running '{}' on '{}'".format(Loader.__name__, joined_subset_names) + ) + + return Loader().load(subset_contexts, name, namespace, options) + + +def load_container( + Loader, representation, namespace=None, name=None, options=None, **kwargs +): + """Use Loader to load a representation. + + Args: + Loader (Loader): The loader class to trigger. + representation (str or ObjectId or dict): The representation id + or full representation as returned by the database. + namespace (str, Optional): The namespace to assign. Defaults to None. + name (str, Optional): The name to assign. Defaults to subset name. + options (dict, Optional): Additional options to pass on to the loader. + + Returns: + The return of the `loader.load()` method. + + Raises: + IncompatibleLoaderError: When the loader is not compatible with + the representation. + + """ + + context = get_representation_context(representation) + return load_with_repre_context( + Loader, + context, + namespace=namespace, + name=name, + options=options, + **kwargs + ) + + +def get_loader_identifier(loader): + """Loader identifier from loader plugin or object. + + Identifier should be stored to container for future management. + """ + if not inspect.isclass(loader): + loader = loader.__class__ + return loader.__name__ + + +def get_loaders_by_name(): + from .plugins import discover_loader_plugins + + loaders_by_name = {} + for loader in discover_loader_plugins(): + loader_name = loader.__name__ + if loader_name in loaders_by_name: + raise KeyError( + "Duplicated loader name {} !".format(loader_name) + ) + loaders_by_name[loader_name] = loader + return loaders_by_name + + +def _get_container_loader(container): + """Return the Loader corresponding to the container""" + from .plugins import discover_loader_plugins + + loader = container["loader"] + for Plugin in discover_loader_plugins(): + # TODO: Ensure the loader is valid + if get_loader_identifier(Plugin) == loader: + return Plugin + return None + + +def remove_container(container): + """Remove a container""" + + Loader = _get_container_loader(container) + if not Loader: + raise LoaderNotFoundError( + "Can't remove container because loader '{}' was not found." 
+ .format(container.get("loader")) + ) + + return Loader().remove(container) + + +def update_container(container, version=-1): + """Update a container""" + from ayon_core.pipeline import get_current_project_name + + # Compute the different version from 'representation' + project_name = get_current_project_name() + current_representation = get_representation_by_id( + project_name, container["representation"] + ) + + assert current_representation is not None, "This is a bug" + + current_version = get_version_by_id( + project_name, current_representation["parent"], fields=["parent"] + ) + if version == -1: + new_version = get_last_version_by_subset_id( + project_name, current_version["parent"], fields=["_id"] + ) + + elif isinstance(version, HeroVersionType): + new_version = get_hero_version_by_subset_id( + project_name, current_version["parent"], fields=["_id"] + ) + + else: + new_version = get_version_by_name( + project_name, version, current_version["parent"], fields=["_id"] + ) + + assert new_version is not None, "This is a bug" + + new_representation = get_representation_by_name( + project_name, current_representation["name"], new_version["_id"] + ) + assert new_representation is not None, "Representation wasn't found" + + path = get_representation_path(new_representation) + assert os.path.exists(path), "Path {} doesn't exist".format(path) + + # Run update on the Loader for this container + Loader = _get_container_loader(container) + if not Loader: + raise LoaderNotFoundError( + "Can't update container because loader '{}' was not found." + .format(container.get("loader")) + ) + + return Loader().update(container, new_representation) + + +def switch_container(container, representation, loader_plugin=None): + """Switch a container to representation + + Args: + container (dict): container information + representation (dict): representation data from document + + Returns: + function call + """ + from ayon_core.pipeline import get_current_project_name + + # Get the Loader for this container + if loader_plugin is None: + loader_plugin = _get_container_loader(container) + + if not loader_plugin: + raise LoaderNotFoundError( + "Can't switch container because loader '{}' was not found." 
+ .format(container.get("loader")) + ) + + if not hasattr(loader_plugin, "switch"): + # Backwards compatibility (classes without switch support + # might be better to just have "switch" raise NotImplementedError + # on the base class of Loader\ + raise LoaderSwitchNotImplementedError( + "Loader {} does not support 'switch'".format(loader_plugin.label) + ) + + # Get the new representation to switch to + project_name = get_current_project_name() + new_representation = get_representation_by_id( + project_name, representation["_id"] + ) + + new_context = get_representation_context(new_representation) + if not is_compatible_loader(loader_plugin, new_context): + raise IncompatibleLoaderError( + "Loader {} is incompatible with {}".format( + loader_plugin.__name__, new_context["subset"]["name"] + ) + ) + + loader = loader_plugin(new_context) + + return loader.switch(container, new_representation) + + +def get_representation_path_from_context(context): + """Preparation wrapper using only context as a argument""" + from ayon_core.pipeline import get_current_project_name + + representation = context["representation"] + project_doc = context.get("project") + root = None + if project_doc and project_doc["name"] != get_current_project_name(): + anatomy = Anatomy(project_doc["name"]) + root = anatomy.roots + + return get_representation_path(representation, root) + + +def get_representation_path_with_anatomy(repre_doc, anatomy): + """Receive representation path using representation document and anatomy. + + Anatomy is used to replace 'root' key in representation file. Ideally + should be used instead of 'get_representation_path' which is based on + "current context". + + Future notes: + We want also be able store resources into representation and I can + imagine the result should also contain paths to possible resources. + + Args: + repre_doc (Dict[str, Any]): Representation document. + anatomy (Anatomy): Project anatomy object. + + Returns: + Union[None, TemplateResult]: None if path can't be received + + Raises: + InvalidRepresentationContext: When representation data are probably + invalid or not available. + """ + + try: + template = repre_doc["data"]["template"] + + except KeyError: + raise InvalidRepresentationContext(( + "Representation document does not" + " contain template in data ('data.template')" + )) + + try: + context = repre_doc["context"] + context["root"] = anatomy.roots + path = StringTemplate.format_strict_template(template, context) + + except TemplateUnsolved as exc: + raise InvalidRepresentationContext(( + "Couldn't resolve representation template with available data." + " Reason: {}".format(str(exc)) + )) + + return path.normalized() + + +def get_representation_path(representation, root=None): + """Get filename from representation document + + There are three ways of getting the path from representation which are + tried in following sequence until successful. + 1. Get template from representation['data']['template'] and data from + representation['context']. Then format template with the data. + 2. Get template from project['config'] and format it with default data set + 3. 
Get representation['data']['path'] and use it directly + + Args: + representation(dict): representation document from the database + + Returns: + str: fullpath of the representation + + """ + + if root is None: + from ayon_core.pipeline import registered_root + + root = registered_root() + + def path_from_representation(): + try: + template = representation["data"]["template"] + except KeyError: + return None + + try: + context = representation["context"] + context["root"] = root + path = StringTemplate.format_strict_template( + template, context + ) + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + except (TemplateUnsolved, KeyError): + # Template references unavailable data + return None + + if not path: + return path + + normalized_path = os.path.normpath(path) + if os.path.exists(normalized_path): + return normalized_path + return path + + def path_from_data(): + if "path" not in representation["data"]: + return None + + path = representation["data"]["path"] + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + + if os.path.exists(path): + return os.path.normpath(path) + + dir_path, file_name = os.path.split(path) + if not os.path.exists(dir_path): + return + + base_name, ext = os.path.splitext(file_name) + file_name_items = None + if "#" in base_name: + file_name_items = [part for part in base_name.split("#") if part] + elif "%" in base_name: + file_name_items = base_name.split("%") + + if not file_name_items: + return + + filename_start = file_name_items[0] + + for _file in os.listdir(dir_path): + if _file.startswith(filename_start) and _file.endswith(ext): + return os.path.normpath(path) + + return ( + path_from_representation() or path_from_data() + ) + + +def is_compatible_loader(Loader, context): + """Return whether a loader is compatible with a context. + + This checks the version's families and the representation for the given + Loader. + + Returns: + bool + """ + + return Loader.is_compatible_loader(context) + + +def loaders_from_repre_context(loaders, repre_context): + """Return compatible loaders for by representaiton's context.""" + + return [ + loader + for loader in loaders + if is_compatible_loader(loader, repre_context) + ] + + +def filter_repre_contexts_by_loader(repre_contexts, loader): + """Filter representation contexts for loader. + + Args: + repre_contexts (list[dict[str, Ant]]): Representation context. + loader (LoaderPlugin): Loader plugin to filter contexts for. + + Returns: + list[dict[str, Any]]: Filtered representation contexts. + """ + + return [ + repre_context + for repre_context in repre_contexts + if is_compatible_loader(loader, repre_context) + ] + + +def loaders_from_representation(loaders, representation): + """Return all compatible loaders for a representation.""" + + context = get_representation_context(representation) + return loaders_from_repre_context(loaders, context) + + +def any_outdated_containers(host=None, project_name=None): + """Check if there are any outdated containers in scene.""" + + if get_outdated_containers(host, project_name): + return True + return False + + +def get_outdated_containers(host=None, project_name=None): + """Collect outdated containers from host scene. + + Currently registered host and project in global session are used if + arguments are not passed. 
+
+    Args:
+        host (ModuleType): Host implementation with 'ls' function available.
+        project_name (str): Name of project in which context we are.
+    """
+    from ayon_core.pipeline import registered_host, get_current_project_name
+
+    if host is None:
+        host = registered_host()
+
+    if project_name is None:
+        project_name = get_current_project_name()
+
+    if isinstance(host, ILoadHost):
+        containers = host.get_containers()
+    else:
+        containers = host.ls()
+    return filter_containers(containers, project_name).outdated
+
+
+def filter_containers(containers, project_name):
+    """Filter containers and split them into 4 categories.
+
+    Categories are 'latest', 'outdated', 'invalid' and 'not_found'.
+    The 'latest' containers are from the last version, 'outdated' are not,
+    'invalid' are invalid containers (invalid content) and 'not_found' have
+    some missing entity in the database.
+
+    Args:
+        containers (Iterable[dict]): List of containers referenced into scene.
+        project_name (str): Name of project in which context should look for
+            versions.
+
+    Returns:
+        ContainersFilterResult: Named tuple with 'latest', 'outdated',
+            'invalid' and 'not_found' containers.
+    """
+
+    # Make sure containers is list that won't change
+    containers = list(containers)
+
+    outdated_containers = []
+    uptodate_containers = []
+    not_found_containers = []
+    invalid_containers = []
+    output = ContainersFilterResult(
+        uptodate_containers,
+        outdated_containers,
+        not_found_containers,
+        invalid_containers
+    )
+    # Query representation docs to get their version ids
+    repre_ids = {
+        container["representation"]
+        for container in containers
+        if container["representation"]
+    }
+    if not repre_ids:
+        if containers:
+            invalid_containers.extend(containers)
+        return output
+
+    repre_docs = get_representations(
+        project_name,
+        representation_ids=repre_ids,
+        fields=["_id", "parent"]
+    )
+    # Store representations by stringified representation id
+    repre_docs_by_str_id = {}
+    repre_docs_by_version_id = collections.defaultdict(list)
+    for repre_doc in repre_docs:
+        repre_id = str(repre_doc["_id"])
+        version_id = repre_doc["parent"]
+        repre_docs_by_str_id[repre_id] = repre_doc
+        repre_docs_by_version_id[version_id].append(repre_doc)
+
+    # Query version docs to get their subset ids
+    # - also query hero versions to be able to identify if a representation
+    #   belongs to an existing version
+    version_docs = get_versions(
+        project_name,
+        version_ids=repre_docs_by_version_id.keys(),
+        hero=True,
+        fields=["_id", "parent", "type"]
+    )
+    versions_by_id = {}
+    versions_by_subset_id = collections.defaultdict(list)
+    hero_version_ids = set()
+    for version_doc in version_docs:
+        version_id = version_doc["_id"]
+        # Store versions by their ids
+        versions_by_id[version_id] = version_doc
+        # There's no need to query subsets for hero versions
+        # - they are considered as latest
+ if version_doc["type"] == "hero_version": + hero_version_ids.add(version_id) + continue + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + last_versions = get_last_versions( + project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id"] + ) + # Figure out which versions are outdated + outdated_version_ids = set() + for subset_id, last_version_doc in last_versions.items(): + for version_doc in versions_by_subset_id[subset_id]: + version_id = version_doc["_id"] + if version_id != last_version_doc["_id"]: + outdated_version_ids.add(version_id) + + # Based on all collected data figure out which containers are outdated + # - log out if there are missing representation or version documents + for container in containers: + container_name = container["objectName"] + repre_id = container["representation"] + if not repre_id: + invalid_containers.append(container) + continue + + repre_doc = repre_docs_by_str_id.get(repre_id) + if not repre_doc: + log.debug(( + "Container '{}' has an invalid representation." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + continue + + version_id = repre_doc["parent"] + if version_id in outdated_version_ids: + outdated_containers.append(container) + + elif version_id not in verisons_by_id: + log.debug(( + "Representation on container '{}' has an invalid version." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + + else: + uptodate_containers.append(container) + + return output diff --git a/openpype/pipeline/plugin_discover.py b/client/ayon_core/pipeline/plugin_discover.py similarity index 99% rename from openpype/pipeline/plugin_discover.py rename to client/ayon_core/pipeline/plugin_discover.py index e5257b801a..f531600276 100644 --- a/openpype/pipeline/plugin_discover.py +++ b/client/ayon_core/pipeline/plugin_discover.py @@ -2,8 +2,8 @@ import inspect import traceback -from openpype.lib import Logger -from openpype.lib.python_module_tools import ( +from ayon_core.lib import Logger +from ayon_core.lib.python_module_tools import ( modules_from_path, classes_from_module, ) diff --git a/openpype/pipeline/project_folders.py b/client/ayon_core/pipeline/project_folders.py similarity index 97% rename from openpype/pipeline/project_folders.py rename to client/ayon_core/pipeline/project_folders.py index 608344ce03..ad205522a6 100644 --- a/openpype/pipeline/project_folders.py +++ b/client/ayon_core/pipeline/project_folders.py @@ -4,8 +4,8 @@ import six -from openpype.settings import get_project_settings -from openpype.lib import Logger +from ayon_core.settings import get_project_settings +from ayon_core.lib import Logger from .anatomy import Anatomy from .template_data import get_project_template_data diff --git a/client/ayon_core/pipeline/publish/README.md b/client/ayon_core/pipeline/publish/README.md new file mode 100644 index 0000000000..2a0f45d093 --- /dev/null +++ b/client/ayon_core/pipeline/publish/README.md @@ -0,0 +1,38 @@ +# Publish +OpenPype is using `pyblish` for publishing process which is a little bit extented and modified mainly for UI purposes. OpenPype's (new) publish UI does not allow to enable/disable instances or plugins that can be done during creation part. Also does support actions only for validators after validation exception. + +## Exceptions +OpenPype define few specific exceptions that should be used in publish plugins. 
+
+
+### Known errors
+When a known error occurs that can't be fixed by the user (e.g. can't connect to a deadline service, etc.), `KnownPublishError` should be raised. The only difference is that its message is shown in the UI to the artist; for other exceptions a neutral message without context is shown.
+
+## Plugin extension
+Publish plugins can be extended with additional logic by inheriting from `AYONPyblishPluginMixin`, which can be used as a mixin (additional inheritance of a class).
+
+```python
+import pyblish.api
+from ayon_core.pipeline import AYONPyblishPluginMixin
+
+
+# Example context plugin
+class MyExtendedPlugin(
+    pyblish.api.ContextPlugin, AYONPyblishPluginMixin
+):
+    pass
+
+```
+
+### Extensions
+Currently the only extension is the ability to define attributes for instances during creation. The method `get_attribute_defs` returns attribute definitions for the families defined in the plugin's `families` attribute if it is an instance plugin, or for the whole context if it is a context plugin. To convert existing values (or to remove legacy values), `convert_attribute_values` can be implemented. Values of publish attributes from a created instance are never removed automatically, so implementing this method is the best way to remove legacy data or convert it to a new data structure. A hedged sketch follows at the end of this document.
+
+Possible attribute definitions can be found in `client/ayon_core/lib/attribute_definitions.py`.
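+
+As a hedged sketch of the attribute definitions extension described above (the plugin name, family and attribute are hypothetical examples):
+
+```python
+import pyblish.api
+
+from ayon_core.lib import BoolDef
+from ayon_core.pipeline import AYONPyblishPluginMixin
+
+
+class CollectExampleAttributes(
+    pyblish.api.InstancePlugin, AYONPyblishPluginMixin
+):
+    order = pyblish.api.CollectorOrder
+    families = ["render"]
+
+    @classmethod
+    def get_attribute_defs(cls):
+        # Attribute shown in the publisher UI for instances of 'families'
+        return [
+            BoolDef("use_example", default=True, label="Use example")
+        ]
+```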
diff --git a/client/ayon_core/pipeline/publish/__init__.py b/client/ayon_core/pipeline/publish/__init__.py new file mode 100644 index 0000000000..d507972664 --- /dev/null +++ b/client/ayon_core/pipeline/publish/__init__.py @@ -0,0 +1,99 @@ +from .constants import ( + ValidatePipelineOrder, + ValidateContentsOrder, + ValidateSceneOrder, + ValidateMeshOrder, +) + +from .publish_plugins import ( + AbstractMetaInstancePlugin, + AbstractMetaContextPlugin, + + PublishValidationError, + PublishXmlValidationError, + KnownPublishError, + AYONPyblishPluginMixin, + OpenPypePyblishPluginMixin, + OptionalPyblishPluginMixin, + + RepairAction, + RepairContextAction, + + Extractor, + ColormanagedPyblishPluginMixin +) + +from .lib import ( + get_publish_template_name, + + publish_plugins_discover, + load_help_content_from_plugin, + load_help_content_from_filepath, + + get_errored_instances_from_context, + get_errored_plugins_from_context, + + filter_instances_for_context_plugin, + context_plugin_should_run, + get_instance_staging_dir, + get_publish_repre_path, + + apply_plugin_settings_automatically, + get_plugin_settings, + get_publish_instance_label, + get_publish_instance_families, +) + +from .abstract_expected_files import ExpectedFiles +from .abstract_collect_render import ( + RenderInstance, + AbstractCollectRender, +) + + +__all__ = ( + "ValidatePipelineOrder", + "ValidateContentsOrder", + "ValidateSceneOrder", + "ValidateMeshOrder", + + "AbstractMetaInstancePlugin", + "AbstractMetaContextPlugin", + + "PublishValidationError", + "PublishXmlValidationError", + "KnownPublishError", + "AYONPyblishPluginMixin", + "OpenPypePyblishPluginMixin", + "OptionalPyblishPluginMixin", + + "RepairAction", + "RepairContextAction", + + "Extractor", + "ColormanagedPyblishPluginMixin", + + "get_publish_template_name", + + "publish_plugins_discover", + "load_help_content_from_plugin", + "load_help_content_from_filepath", + + "get_errored_instances_from_context", + "get_errored_plugins_from_context", + + "filter_instances_for_context_plugin", + "context_plugin_should_run", + "get_instance_staging_dir", + "get_publish_repre_path", + + "apply_plugin_settings_automatically", + "get_plugin_settings", + "get_publish_instance_label", + "get_publish_instance_families", + + "ExpectedFiles", + + "RenderInstance", + "AbstractCollectRender", +) diff --git a/openpype/pipeline/publish/abstract_collect_render.py b/client/ayon_core/pipeline/publish/abstract_collect_render.py similarity index 100% rename from openpype/pipeline/publish/abstract_collect_render.py rename to client/ayon_core/pipeline/publish/abstract_collect_render.py diff --git a/openpype/pipeline/publish/abstract_expected_files.py b/client/ayon_core/pipeline/publish/abstract_expected_files.py similarity index 100% rename from openpype/pipeline/publish/abstract_expected_files.py rename to client/ayon_core/pipeline/publish/abstract_expected_files.py diff --git a/openpype/pipeline/publish/constants.py b/client/ayon_core/pipeline/publish/constants.py similarity index 100% rename from openpype/pipeline/publish/constants.py rename to client/ayon_core/pipeline/publish/constants.py diff --git a/client/ayon_core/pipeline/publish/lib.py b/client/ayon_core/pipeline/publish/lib.py new file mode 100644 index 0000000000..47f4be9e69 --- /dev/null +++ b/client/ayon_core/pipeline/publish/lib.py @@ -0,0 +1,977 @@ +import os +import sys +import inspect +import copy +import tempfile +import xml.etree.ElementTree + +import pyblish.util +import pyblish.plugin +import pyblish.api + +from 
ayon_core.lib import (
+    Logger,
+    import_filepath,
+    filter_profiles,
+    is_func_signature_supported,
+)
+from ayon_core.settings import (
+    get_project_settings,
+    get_system_settings,
+)
+from ayon_core.pipeline import (
+    tempdir,
+    Anatomy
+)
+from ayon_core.pipeline.plugin_discover import DiscoverResult
+
+from .constants import (
+    DEFAULT_PUBLISH_TEMPLATE,
+    DEFAULT_HERO_PUBLISH_TEMPLATE,
+    TRANSIENT_DIR_TEMPLATE
+)
+
+
+def get_template_name_profiles(
+    project_name, project_settings=None, logger=None
+):
+    """Receive profiles for publish template keys.
+
+    At least one of the arguments must be passed.
+
+    Args:
+        project_name (str): Name of project where to look for templates.
+        project_settings (Dict[str, Any]): Prepared project settings.
+        logger (Optional[logging.Logger]): Logger object to be used instead
+            of default logger.
+
+    Returns:
+        List[Dict[str, Any]]: Publish template profiles.
+    """
+
+    if not project_name and not project_settings:
+        raise ValueError((
+            "Both project name and project settings are missing."
+            " At least one must be entered."
+        ))
+
+    if not project_settings:
+        project_settings = get_project_settings(project_name)
+
+    return copy.deepcopy(
+        project_settings
+        ["global"]
+        ["tools"]
+        ["publish"]
+        ["template_name_profiles"]
+    )
+
+
+def get_hero_template_name_profiles(
+    project_name, project_settings=None, logger=None
+):
+    """Receive profiles for hero publish template keys.
+
+    At least one of the arguments must be passed.
+
+    Args:
+        project_name (str): Name of project where to look for templates.
+        project_settings (Dict[str, Any]): Prepared project settings.
+        logger (Optional[logging.Logger]): Logger object to be used instead
+            of default logger.
+
+    Returns:
+        List[Dict[str, Any]]: Publish template profiles.
+    """
+
+    if not project_name and not project_settings:
+        raise ValueError((
+            "Both project name and project settings are missing."
+            " At least one must be entered."
+        ))
+
+    if not project_settings:
+        project_settings = get_project_settings(project_name)
+
+    return copy.deepcopy(
+        project_settings
+        ["global"]
+        ["tools"]
+        ["publish"]
+        ["hero_template_name_profiles"]
+    )
+
+
+def get_publish_template_name(
+    project_name,
+    host_name,
+    family,
+    task_name,
+    task_type,
+    project_settings=None,
+    hero=False,
+    logger=None
+):
+    """Get template name which should be used for passed context.
+
+    Publish templates are filtered by host name, family, task name and
+    task type.
+
+    The default template, used when profiles are not available or a profile
+    has an empty value, is defined by the 'DEFAULT_PUBLISH_TEMPLATE' constant.
+
+    Args:
+        project_name (str): Name of project where to look for settings.
+        host_name (str): Name of host integration.
+        family (str): Family for which the template should be found.
+        task_name (str): Task name the instance is working on.
+        task_type (str): Task type the instance is working on.
+        project_settings (Dict[str, Any]): Prepared project settings.
+        hero (bool): Template is for hero version publishing.
+        logger (logging.Logger): Custom logger used for 'filter_profiles'
+            function.
+
+    Returns:
+        str: Template name which should be used for integration.
+    """
+
+    template = None
+    filter_criteria = {
+        "hosts": host_name,
+        "families": family,
+        "task_names": task_name,
+        "task_types": task_type,
+    }
+    if hero:
+        default_template = DEFAULT_HERO_PUBLISH_TEMPLATE
+        profiles = get_hero_template_name_profiles(
+            project_name, project_settings, logger
+        )
+
+    else:
+        profiles = get_template_name_profiles(
+            project_name, project_settings, logger
+        )
+        default_template = DEFAULT_PUBLISH_TEMPLATE
+
+    profile = filter_profiles(profiles, filter_criteria, logger=logger)
+    if profile:
+        template = profile["template_name"]
+    return template or default_template
+
+
+class HelpContent:
+    def __init__(self, title, description, detail=None):
+        self.title = title
+        self.description = description
+        self.detail = detail
+
+
+def load_help_content_from_filepath(filepath):
+    """Load help content from xml file.
+
+    Xml file may contain errors and warnings.
+    """
+    errors = {}
+    warnings = {}
+    output = {
+        "errors": errors,
+        "warnings": warnings
+    }
+    if not os.path.exists(filepath):
+        return output
+    tree = xml.etree.ElementTree.parse(filepath)
+    root = tree.getroot()
+    for child in root:
+        child_id = child.attrib.get("id")
+        if child_id is None:
+            continue
+
+        # Make sure ID is string
+        child_id = str(child_id)
+
+        title = child.find("title").text
+        description = child.find("description").text
+        detail_node = child.find("detail")
+        detail = None
+        if detail_node is not None:
+            detail = detail_node.text
+        if child.tag == "error":
+            errors[child_id] = HelpContent(title, description, detail)
+        elif child.tag == "warning":
+            warnings[child_id] = HelpContent(title, description, detail)
+    return output
+
+
+def load_help_content_from_plugin(plugin):
+    cls = plugin
+    if not inspect.isclass(plugin):
+        cls = plugin.__class__
+    plugin_filepath = inspect.getfile(cls)
+    plugin_dir = os.path.dirname(plugin_filepath)
+    basename = os.path.splitext(os.path.basename(plugin_filepath))[0]
+    filename = basename + ".xml"
+    filepath = os.path.join(plugin_dir, "help", filename)
+    return load_help_content_from_filepath(filepath)
+
+
+def publish_plugins_discover(paths=None):
+    """Find and return available pyblish plug-ins.
+
+    Overridden function from the `pyblish` module, able to collect crashed
+    files and the reason of their crash.
+
+    Arguments:
+        paths (list, optional): Paths to discover plug-ins from.
+            If no paths are provided, all paths are searched.
+    """
+
+    # The only difference with `pyblish.api.discover`
+    result = DiscoverResult(pyblish.api.Plugin)
+
+    plugins = {}
+    plugin_names = []
+
+    allow_duplicates = pyblish.plugin.ALLOW_DUPLICATES
+    log = pyblish.plugin.log
+
+    # Include plug-ins from registered paths
+    if not paths:
+        paths = pyblish.plugin.plugin_paths()
+
+    for path in paths:
+        path = os.path.normpath(path)
+        if not os.path.isdir(path):
+            continue
+
+        for fname in os.listdir(path):
+            if fname.startswith("_"):
+                continue
+
+            abspath = os.path.join(path, fname)
+
+            if not os.path.isfile(abspath):
+                continue
+
+            mod_name, mod_ext = os.path.splitext(fname)
+
+            if mod_ext != ".py":
+                continue
+
+            try:
+                module = import_filepath(abspath, mod_name)
+
+                # Store reference to original module, to avoid
+                # garbage collection from collecting its global
+                # imports, such as `import os`.
+                sys.modules[abspath] = module
+
+            except Exception as err:
+                result.crashed_file_paths[abspath] = sys.exc_info()
+
+                log.debug("Skipped: \"%s\" (%s)", mod_name, err)
+                continue
+
+            for plugin in pyblish.plugin.plugins_from_module(module):
+                # Ignore base plugin classes
+                # NOTE 'pyblish.api.discover' does not ignore them!
+                if (
+                    plugin is pyblish.api.Plugin
+                    or plugin is pyblish.api.ContextPlugin
+                    or plugin is pyblish.api.InstancePlugin
+                ):
+                    continue
+                if not allow_duplicates and plugin.__name__ in plugin_names:
+                    result.duplicated_plugins.append(plugin)
+                    log.debug("Duplicate plug-in found: %s", plugin)
+                    continue
+
+                plugin_names.append(plugin.__name__)
+
+                plugin.__module__ = module.__file__
+                key = "{0}.{1}".format(plugin.__module__, plugin.__name__)
+                plugins[key] = plugin
+
+    # Include plug-ins from registration.
+    # Directly registered plug-ins take precedence.
+    for plugin in pyblish.plugin.registered_plugins():
+        if not allow_duplicates and plugin.__name__ in plugin_names:
+            result.duplicated_plugins.append(plugin)
+            log.debug("Duplicate plug-in found: %s", plugin)
+            continue
+
+        plugin_names.append(plugin.__name__)
+
+        plugins[plugin.__name__] = plugin
+
+    plugins = list(plugins.values())
+    pyblish.plugin.sort(plugins)  # In-place
+
+    # In-place user-defined filter
+    for filter_ in pyblish.plugin._registered_plugin_filters:
+        filter_(plugins)
+
+    result.plugins = plugins
+
+    return result
+
+
+def get_plugin_settings(plugin, project_settings, log, category=None):
+    """Get plugin settings based on host name and plugin name.
+
+    Note:
+        The default implementation of automated settings passes the host
+        name into 'category'.
+
+    Args:
+        plugin (pyblish.Plugin): Plugin where settings are applied.
+        project_settings (dict[str, Any]): Project settings.
+        log (logging.Logger): Logger to log messages.
+        category (Optional[str]): Settings category key where to look
+            for plugin settings.
+
+    Returns:
+        dict[str, Any]: Plugin settings {'attribute': 'value'}.
+    """
+
+    # Plugin can define settings category by class attribute
+    # - it's impossible to set `settings_category` via settings because
+    #   obviously settings are not applied before it.
+    # - if `settings_category` is set the fallback category method is ignored
+    settings_category = getattr(plugin, "settings_category", None)
+    if settings_category:
+        try:
+            return (
+                project_settings
+                [settings_category]
+                ["publish"]
+                [plugin.__name__]
+            )
+        except KeyError:
+            log.warning((
+                "Couldn't find plugin '{}' settings"
+                " under settings category '{}'"
+            ).format(plugin.__name__, settings_category))
+            return {}
+
+    # Use project settings based on a category name
+    if category:
+        try:
+            return (
+                project_settings
+                [category]
+                ["publish"]
+                [plugin.__name__]
+            )
+        except KeyError:
+            pass
+
+    # Settings category determined from path
+    # - usually path is './/plugins/publish/'
+    # - category can be a host name or addon name ('maya', 'deadline', ...)
+    filepath = os.path.normpath(inspect.getsourcefile(plugin))
+
+    split_path = filepath.rsplit(os.path.sep, 5)
+    if len(split_path) < 4:
+        log.debug((
+            "Plugin path is too short to automatically"
+            " extract settings category. {}"
{}" + ).format(filepath)) + return {} + + category_from_file = split_path[-4] + plugin_kind = split_path[-2] + + # TODO: change after all plugins are moved one level up + if category_from_file in ("ayon_core", "openpype"): + category_from_file = "global" + + try: + return ( + project_settings + [category_from_file] + [plugin_kind] + [plugin.__name__] + ) + except KeyError: + pass + return {} + + +def apply_plugin_settings_automatically(plugin, settings, logger=None): + """Automatically apply plugin settings to a plugin object. + + Note: + This function was created to be able to use it in custom overrides of + 'apply_settings' class method. + + Args: + plugin (type[pyblish.api.Plugin]): Class of a plugin. + settings (dict[str, Any]): Plugin specific settings. + logger (Optional[logging.Logger]): Logger to log debug messages about + applied settings values. + """ + + for option, value in settings.items(): + if logger: + logger.debug("Plugin %s - Attr: %s -> %s", + plugin.__name__, option, value) + setattr(plugin, option, value) + + +def filter_pyblish_plugins(plugins): + """Pyblish plugin filter which applies AYON settings. + + Apply OpenPype settings on discovered plugins. On plugin with implemented + class method 'def apply_settings(cls, project_settings, system_settings)' + is called the method. Default behavior looks for plugin name and current + host name to look for + + Args: + plugins (List[pyblish.plugin.Plugin]): Discovered plugins on which + are applied settings. + """ + + log = Logger.get_logger("filter_pyblish_plugins") + + # TODO: Don't use host from 'pyblish.api' but from defined host by us. + # - kept becau on farm is probably used host 'shell' which propably + # affect how settings are applied there + host_name = pyblish.api.current_host() + project_name = os.environ.get("AVALON_PROJECT") + + project_settings = get_project_settings(project_name) + system_settings = get_system_settings() + + # iterate over plugins + for plugin in plugins[:]: + # Apply settings to plugins + + apply_settings_func = getattr(plugin, "apply_settings", None) + if apply_settings_func is not None: + # Use classmethod 'apply_settings' + # - can be used to target settings from custom settings place + # - skip default behavior when successful + try: + # Support to pass only project settings + # - make sure that both settings are passed, when can be + # - that covers cases when *args are in method parameters + both_supported = is_func_signature_supported( + apply_settings_func, project_settings, system_settings + ) + project_supported = is_func_signature_supported( + apply_settings_func, project_settings + ) + if not both_supported and project_supported: + plugin.apply_settings(project_settings) + else: + plugin.apply_settings(project_settings, system_settings) + + except Exception: + log.warning( + ( + "Failed to apply settings on plugin {}" + ).format(plugin.__name__), + exc_info=True + ) + else: + # Automated + plugin_settins = get_plugin_settings( + plugin, project_settings, log, host_name + ) + apply_plugin_settings_automatically(plugin, plugin_settins, log) + + # Remove disabled plugins + if getattr(plugin, "enabled", True) is False: + plugins.remove(plugin) + + +def remote_publish(log): + """Loops through all plugins, logs to console. Used for tests. + + Args: + log (Logger) + """ + + # Error exit as soon as any error occurs. 
+    error_format = "Failed {plugin.__name__}: {error}\n{error.traceback}"
+
+    for result in pyblish.util.publish_iter():
+        if not result["error"]:
+            continue
+
+        error_message = error_format.format(**result)
+        log.error(error_message)
+        # 'Fatal Error: ' is because of Deadline
+        raise RuntimeError("Fatal Error: {}".format(error_message))
+
+
+def get_errored_instances_from_context(context, plugin=None):
+    """Collect failed instances from pyblish context.
+
+    Args:
+        context (pyblish.api.Context): Publish context where we're looking
+            for failed instances.
+        plugin (pyblish.api.Plugin): If provided then only consider errors
+            related to that plug-in.
+
+    Returns:
+        List[pyblish.lib.Instance]: Instances which failed during processing.
+    """
+
+    instances = list()
+    for result in context.data["results"]:
+        if result["instance"] is None:
+            # When instance is None we are on the "context" result
+            continue
+
+        if plugin is not None and result.get("plugin") != plugin:
+            continue
+
+        if result["error"]:
+            instances.append(result["instance"])
+
+    return instances
+
+
+def get_errored_plugins_from_context(context):
+    """Collect failed plugins from pyblish context.
+
+    Args:
+        context (pyblish.api.Context): Publish context where we're looking
+            for failed plugins.
+
+    Returns:
+        List[pyblish.api.Plugin]: Plugins which failed during processing.
+    """
+
+    plugins = list()
+    results = context.data.get("results", [])
+    for result in results:
+        if result["success"] is True:
+            continue
+        plugins.append(result["plugin"])
+
+    return plugins
+
+
+def filter_instances_for_context_plugin(plugin, context):
+    """Filter instances on context by context plugin filters.
+
+    This is for cases when a context plugin needs filtering similar to what
+    instance plugins have, but for some reason must run on the context, or
+    needs to find out if there is at least one instance with a matching
+    family.
+
+    Args:
+        plugin (pyblish.api.Plugin): Plugin with filters.
+        context (pyblish.api.Context): Pyblish context with instances.
+
+    Returns:
+        Iterator[pyblish.lib.Instance]: Iteration of valid instances.
+    """
+
+    instances = []
+    plugin_families = set()
+    all_families = False
+    if plugin.families:
+        instances = context
+        plugin_families = set(plugin.families)
+        all_families = "*" in plugin_families
+
+    for instance in instances:
+        # Ignore inactive instances
+        if (
+            not instance.data.get("publish", True)
+            or not instance.data.get("active", True)
+        ):
+            continue
+
+        family = instance.data.get("family")
+        families = instance.data.get("families") or []
+        if (
+            all_families
+            or (family and family in plugin_families)
+            or any(f in plugin_families for f in families)
+        ):
+            yield instance
+
+
+def context_plugin_should_run(plugin, context):
+    """Return whether the ContextPlugin should run on the given context.
+
+    This is a helper function to work around bug pyblish-base#250. Whenever
+    a ContextPlugin sets specific families it will still trigger even when
+    no instances are present that have those families.
+
+    This checks the families correctly and returns whether the plugin
+    should run.
+
+    Args:
+        plugin (pyblish.api.Plugin): Plugin with filters.
+        context (pyblish.api.Context): Pyblish context with instances.
+
+    Returns:
+        bool: Context plugin should run based on valid instances.
+    """
+
+    for _ in filter_instances_for_context_plugin(plugin, context):
+        return True
+    return False
+
+
+def get_instance_staging_dir(instance):
+    """Unified way how staging dir is stored and created on instances.
+
+    First check if 'stagingDir' is already set in instance data.
+    If it is already set, a new tempdir is not created.
+
+    It also supports `AYON_TMPDIR`, so a studio can define its own shared
+    temp repository per project or even per a more granular context.
+    Template formatting is supported, including optional keys. The folder
+    is created in case it doesn't exist.
+
+    Available anatomy formatting keys:
+        - root[work | ]
+        - project[name | code]
+
+    Note:
+        The staging dir is not necessarily in a tempdir, so be careful
+        about its usage.
+
+    Args:
+        instance (pyblish.lib.Instance): Instance for which we want to get
+            staging dir.
+
+    Returns:
+        str: Path to staging dir of instance.
+    """
+    staging_dir = instance.data.get('stagingDir')
+    if staging_dir:
+        return staging_dir
+
+    anatomy = instance.context.data.get("anatomy")
+
+    # get customized tempdir path from `AYON_TMPDIR` env var
+    custom_temp_dir = tempdir.create_custom_tempdir(
+        anatomy.project_name, anatomy)
+
+    if custom_temp_dir:
+        staging_dir = os.path.normpath(
+            tempfile.mkdtemp(
+                prefix="pyblish_tmp_",
+                dir=custom_temp_dir
+            )
+        )
+    else:
+        staging_dir = os.path.normpath(
+            tempfile.mkdtemp(prefix="pyblish_tmp_")
+        )
+    instance.data['stagingDir'] = staging_dir
+
+    return staging_dir
+
+
+def get_publish_repre_path(instance, repre, only_published=False):
+    """Get representation path that can be used for integration.
+
+    When 'only_published' is set to True, the validation of the path is not
+    relevant. In that case we just need what is set in 'published_path'
+    as a reference. The reference is not used to get or upload the file,
+    but points to where the file was published.
+
+    Args:
+        instance (pyblish.Instance): Processed instance object. Used
+            as source of staging dir if the representation does not
+            have it filled.
+        repre (dict): Representation on instance which may or may not
+            have been integrated by the main integrator.
+        only_published (bool): Care only about published paths and
+            ignore whether the filepath still exists.
+
+    Returns:
+        str: Path to representation file.
+        None: Path is not filled or does not exist.
+    """
+
+    published_path = repre.get("published_path")
+    if published_path:
+        published_path = os.path.normpath(published_path)
+        if os.path.exists(published_path):
+            return published_path
+
+    if only_published:
+        return published_path
+
+    comp_files = repre["files"]
+    if isinstance(comp_files, (tuple, list, set)):
+        filename = comp_files[0]
+    else:
+        filename = comp_files
+
+    staging_dir = repre.get("stagingDir")
+    if not staging_dir:
+        staging_dir = get_instance_staging_dir(instance)
+
+    # Expand the staging dir path in case it's been stored with the root
+    # template syntax
+    anatomy = instance.context.data["anatomy"]
+    staging_dir = anatomy.fill_root(staging_dir)
+
+    src_path = os.path.normpath(os.path.join(staging_dir, filename))
+    if os.path.exists(src_path):
+        return src_path
+    return None
+
+
+def get_custom_staging_dir_info(project_name, host_name, family, task_name,
+                                task_type, subset_name,
+                                project_settings=None,
+                                anatomy=None, log=None):
+    """Checks profiles if context should use special custom dir as staging.
+
+    Args:
+        project_name (str)
+        host_name (str)
+        family (str)
+        task_name (str)
+        task_type (str)
+        subset_name (str)
+        project_settings (Dict[str, Any]): Prepared project settings.
+        anatomy (ayon_core.pipeline.Anatomy): Prepared Anatomy object.
+        log (Logger) (optional)
+
+    Returns:
+        tuple: Custom staging dir path and persistence flag.
+
+    Raises:
+        ValueError: If the template that should be used is misconfigured.
+    """
+    settings = project_settings or get_project_settings(project_name)
+    custom_staging_dir_profiles = (settings["global"]
+                                           ["tools"]
+                                           ["publish"]
+                                           ["custom_staging_dir_profiles"])
+    if not custom_staging_dir_profiles:
+        return None, None
+
+    if not log:
+        log = Logger.get_logger("get_custom_staging_dir_info")
+
+    filtering_criteria = {
+        "hosts": host_name,
+        "families": family,
+        "task_names": task_name,
+        "task_types": task_type,
+        "subsets": subset_name
+    }
+    profile = filter_profiles(custom_staging_dir_profiles,
+                              filtering_criteria,
+                              logger=log)
+
+    if not profile or not profile["active"]:
+        return None, None
+
+    if not anatomy:
+        anatomy = Anatomy(project_name)
+
+    template_name = profile["template_name"] or TRANSIENT_DIR_TEMPLATE
+    _validate_transient_template(project_name, template_name, anatomy)
+
+    custom_staging_dir = anatomy.templates[template_name]["folder"]
+    is_persistent = profile["custom_staging_dir_persistent"]
+
+    return custom_staging_dir, is_persistent
+
+
+def _validate_transient_template(project_name, template_name, anatomy):
+    """Check that transient template is correctly configured.
+
+    Raises:
+        ValueError: If the template is misconfigured.
+    """
+    if template_name not in anatomy.templates:
+        raise ValueError(("Anatomy of project \"{}\" does not have set"
+                          " \"{}\" template key!"
+                          ).format(project_name, template_name))
+
+    if "folder" not in anatomy.templates[template_name]:
+        raise ValueError(("The \"folder\" template is not set in \"{}\" anatomy"  # noqa
+                          " for project \"{}\"."
+                          ).format(template_name, project_name))
+
+
+def get_published_workfile_instance(context):
+    """Find workfile instance in context"""
+    for i in context:
+        is_workfile = (
+            "workfile" in i.data.get("families", []) or
+            i.data["family"] == "workfile"
+        )
+        if not is_workfile:
+            continue
+
+        # test if there is instance of workfile waiting
+        # to be published.
+        if not i.data.get("publish", True):
+            continue
+
+        return i
+
+
+def replace_with_published_scene_path(instance, replace_in_path=True):
+    """Switch work scene path for published scene.
+
+    If rendering/exporting from published scenes is enabled, this will
+    replace paths from the working scene with the published scene.
+    This only works if the publish contains a workfile instance!
+
+    Args:
+        instance (pyblish.api.Instance): Pyblish instance.
+        replace_in_path (bool): If True, it will try to find the old
+            scene name in the paths of expected files and replace it
+            with the name of the published scene.
+
+    Returns:
+        str: Published scene path.
+        None: If no published scene is found.
+
+    Note:
+        The published scene path is actually determined from the project
+        Anatomy, as at the time this plugin is running the scene may still
+        not be published.
+    """
+    log = Logger.get_logger("published_workfile")
+    workfile_instance = get_published_workfile_instance(instance.context)
+    if workfile_instance is None:
+        return
+
+    # determine published path from Anatomy.
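+    # Reuse the workfile's own 'anatomyData' and extend it with the
+    # representation name/ext so the publish template resolves to the
+    # published workfile path.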
+    template_data = workfile_instance.data.get("anatomyData")
+    rep = workfile_instance.data["representations"][0]
+    template_data["representation"] = rep.get("name")
+    template_data["ext"] = rep.get("ext")
+    template_data["comment"] = None
+
+    anatomy = instance.context.data['anatomy']
+    anatomy_filled = anatomy.format(template_data)
+    template_filled = anatomy_filled["publish"]["path"]
+    file_path = os.path.normpath(template_filled)
+
+    log.info("Using published scene for render {}".format(file_path))
+
+    if not os.path.exists(file_path):
+        log.error("Published scene does not exist!")
+        raise FileNotFoundError(
+            "Published scene does not exist: {}".format(file_path)
+        )
+
+    if not replace_in_path:
+        return file_path
+
+    # now we need to switch the scene name in expected files
+    # because the token will now point to the published
+    # scene file and that might differ from the current one
+    def _clean_name(path):
+        return os.path.splitext(os.path.basename(path))[0]
+
+    new_scene = _clean_name(file_path)
+    orig_scene = _clean_name(instance.context.data["currentFile"])
+    expected_files = instance.data.get("expectedFiles")
+
+    if isinstance(expected_files[0], dict):
+        # we have AOVs and need to iterate over them
+        new_exp = {}
+        for aov, files in expected_files[0].items():
+            replaced_files = []
+            for f in files:
+                replaced_files.append(
+                    str(f).replace(orig_scene, new_scene)
+                )
+            new_exp[aov] = replaced_files
+        # [] might be too much here, TODO
+        instance.data["expectedFiles"] = [new_exp]
+    else:
+        new_exp = []
+        for f in expected_files:
+            new_exp.append(
+                str(f).replace(orig_scene, new_scene)
+            )
+        instance.data["expectedFiles"] = new_exp
+
+    metadata_folder = instance.data.get("publishRenderMetadataFolder")
+    if metadata_folder:
+        metadata_folder = metadata_folder.replace(orig_scene,
+                                                  new_scene)
+        instance.data["publishRenderMetadataFolder"] = metadata_folder
+
+    log.info("Scene name was switched {} -> {}".format(
+        orig_scene, new_scene
+    ))
+
+    return file_path
+
+
+def add_repre_files_for_cleanup(instance, repre):
+    """Explicitly mark representation files to be deleted.
+
+    Should be used on intermediate files (e.g. reviews, thumbnails)
+    that should be explicitly deleted.
+    """
+    files = repre["files"]
+    staging_dir = repre.get("stagingDir")
+
+    # first make sure representation level is not persistent
+    if (
+        not staging_dir
+        or repre.get("stagingDir_persistent")
+    ):
+        return
+
+    # then look into instance level if it's not persistent
+    if instance.data.get("stagingDir_persistent"):
+        return
+
+    if isinstance(files, str):
+        files = [files]
+
+    for file_name in files:
+        expected_file = os.path.join(staging_dir, file_name)
+        instance.context.data["cleanupFullPaths"].append(expected_file)
+
+
+def get_publish_instance_label(instance):
+    """Try to get label from pyblish instance.
+
+    Values from instance data under the 'label' and 'name' keys are used
+    first. Then the string conversion of the instance object is used
+    -> 'instance._name'.
+
+    Todos:
+        Maybe 'subset' key could be used too.
+
+    Args:
+        instance (pyblish.api.Instance): Pyblish instance.
+
+    Returns:
+        str: Instance label.
+    """
+
+    return (
+        instance.data.get("label")
+        or instance.data.get("name")
+        or str(instance)
+    )
+
+
+def get_publish_instance_families(instance):
+    """Get all families of the instance.
+
+    Looks for families under the 'family' and 'families' keys in instance
+    data. The value of 'family' is used as the first family, followed by
+    all other families in arbitrary order.
+
+    Args:
+        instance (pyblish.api.Instance): Instance to get families from.
+
+    Returns:
+        list[str]: List of families.
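+
+    Example:
+        A sketch of the merge behavior (values illustrative)::
+
+            instance.data["family"] = "render"
+            instance.data["families"] = ["render", "review"]
+            get_publish_instance_families(instance)
+            # -> ["render", "review"]  ('family' first, without duplicates)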
+ """ + + family = instance.data.get("family") + families = set(instance.data.get("families") or []) + output = [] + if family: + output.append(family) + families.discard(family) + output.extend(families) + return output diff --git a/openpype/pipeline/publish/publish_plugins.py b/client/ayon_core/pipeline/publish/publish_plugins.py similarity index 97% rename from openpype/pipeline/publish/publish_plugins.py rename to client/ayon_core/pipeline/publish/publish_plugins.py index ae6cbc42d1..2386558091 100644 --- a/openpype/pipeline/publish/publish_plugins.py +++ b/client/ayon_core/pipeline/publish/publish_plugins.py @@ -2,8 +2,8 @@ from abc import ABCMeta import pyblish.api from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin -from openpype.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS -from openpype.lib import BoolDef +from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS +from ayon_core.lib import BoolDef from .lib import ( load_help_content_from_plugin, @@ -12,7 +12,7 @@ get_instance_staging_dir, ) -from openpype.pipeline.colorspace import ( +from ayon_core.pipeline.colorspace import ( get_colorspace_settings_from_publish_context, set_colorspace_data_to_representation ) @@ -78,7 +78,7 @@ class KnownPublishError(Exception): pass -class OpenPypePyblishPluginMixin: +class AYONPyblishPluginMixin: # TODO # executable_in_thread = False # @@ -166,7 +166,10 @@ def get_attr_values_from_data(self, data): return self.get_attr_values_from_data_for_plugin(self.__class__, data) -class OptionalPyblishPluginMixin(OpenPypePyblishPluginMixin): +OpenPypePyblishPluginMixin = AYONPyblishPluginMixin + + +class OptionalPyblishPluginMixin(AYONPyblishPluginMixin): """Prepare mixin for optional plugins. Defined active attribute definition prepared for published and diff --git a/openpype/pipeline/schema/__init__.py b/client/ayon_core/pipeline/schema/__init__.py similarity index 100% rename from openpype/pipeline/schema/__init__.py rename to client/ayon_core/pipeline/schema/__init__.py diff --git a/openpype/pipeline/schema/application-1.0.json b/client/ayon_core/pipeline/schema/application-1.0.json similarity index 100% rename from openpype/pipeline/schema/application-1.0.json rename to client/ayon_core/pipeline/schema/application-1.0.json diff --git a/openpype/pipeline/schema/asset-1.0.json b/client/ayon_core/pipeline/schema/asset-1.0.json similarity index 100% rename from openpype/pipeline/schema/asset-1.0.json rename to client/ayon_core/pipeline/schema/asset-1.0.json diff --git a/openpype/pipeline/schema/asset-2.0.json b/client/ayon_core/pipeline/schema/asset-2.0.json similarity index 100% rename from openpype/pipeline/schema/asset-2.0.json rename to client/ayon_core/pipeline/schema/asset-2.0.json diff --git a/openpype/pipeline/schema/asset-3.0.json b/client/ayon_core/pipeline/schema/asset-3.0.json similarity index 100% rename from openpype/pipeline/schema/asset-3.0.json rename to client/ayon_core/pipeline/schema/asset-3.0.json diff --git a/openpype/pipeline/schema/config-1.0.json b/client/ayon_core/pipeline/schema/config-1.0.json similarity index 100% rename from openpype/pipeline/schema/config-1.0.json rename to client/ayon_core/pipeline/schema/config-1.0.json diff --git a/openpype/pipeline/schema/config-1.1.json b/client/ayon_core/pipeline/schema/config-1.1.json similarity index 100% rename from openpype/pipeline/schema/config-1.1.json rename to client/ayon_core/pipeline/schema/config-1.1.json diff --git a/openpype/pipeline/schema/config-2.0.json 
b/client/ayon_core/pipeline/schema/config-2.0.json similarity index 100% rename from openpype/pipeline/schema/config-2.0.json rename to client/ayon_core/pipeline/schema/config-2.0.json diff --git a/openpype/pipeline/schema/container-1.0.json b/client/ayon_core/pipeline/schema/container-1.0.json similarity index 100% rename from openpype/pipeline/schema/container-1.0.json rename to client/ayon_core/pipeline/schema/container-1.0.json diff --git a/openpype/pipeline/schema/container-2.0.json b/client/ayon_core/pipeline/schema/container-2.0.json similarity index 100% rename from openpype/pipeline/schema/container-2.0.json rename to client/ayon_core/pipeline/schema/container-2.0.json diff --git a/openpype/pipeline/schema/hero_version-1.0.json b/client/ayon_core/pipeline/schema/hero_version-1.0.json similarity index 100% rename from openpype/pipeline/schema/hero_version-1.0.json rename to client/ayon_core/pipeline/schema/hero_version-1.0.json diff --git a/openpype/pipeline/schema/inventory-1.0.json b/client/ayon_core/pipeline/schema/inventory-1.0.json similarity index 100% rename from openpype/pipeline/schema/inventory-1.0.json rename to client/ayon_core/pipeline/schema/inventory-1.0.json diff --git a/openpype/pipeline/schema/inventory-1.1.json b/client/ayon_core/pipeline/schema/inventory-1.1.json similarity index 100% rename from openpype/pipeline/schema/inventory-1.1.json rename to client/ayon_core/pipeline/schema/inventory-1.1.json diff --git a/openpype/pipeline/schema/project-2.0.json b/client/ayon_core/pipeline/schema/project-2.0.json similarity index 100% rename from openpype/pipeline/schema/project-2.0.json rename to client/ayon_core/pipeline/schema/project-2.0.json diff --git a/openpype/pipeline/schema/project-2.1.json b/client/ayon_core/pipeline/schema/project-2.1.json similarity index 100% rename from openpype/pipeline/schema/project-2.1.json rename to client/ayon_core/pipeline/schema/project-2.1.json diff --git a/openpype/pipeline/schema/project-3.0.json b/client/ayon_core/pipeline/schema/project-3.0.json similarity index 100% rename from openpype/pipeline/schema/project-3.0.json rename to client/ayon_core/pipeline/schema/project-3.0.json diff --git a/openpype/pipeline/schema/representation-1.0.json b/client/ayon_core/pipeline/schema/representation-1.0.json similarity index 100% rename from openpype/pipeline/schema/representation-1.0.json rename to client/ayon_core/pipeline/schema/representation-1.0.json diff --git a/openpype/pipeline/schema/representation-2.0.json b/client/ayon_core/pipeline/schema/representation-2.0.json similarity index 100% rename from openpype/pipeline/schema/representation-2.0.json rename to client/ayon_core/pipeline/schema/representation-2.0.json diff --git a/openpype/pipeline/schema/session-1.0.json b/client/ayon_core/pipeline/schema/session-1.0.json similarity index 100% rename from openpype/pipeline/schema/session-1.0.json rename to client/ayon_core/pipeline/schema/session-1.0.json diff --git a/openpype/pipeline/schema/session-2.0.json b/client/ayon_core/pipeline/schema/session-2.0.json similarity index 100% rename from openpype/pipeline/schema/session-2.0.json rename to client/ayon_core/pipeline/schema/session-2.0.json diff --git a/openpype/pipeline/schema/session-3.0.json b/client/ayon_core/pipeline/schema/session-3.0.json similarity index 100% rename from openpype/pipeline/schema/session-3.0.json rename to client/ayon_core/pipeline/schema/session-3.0.json diff --git a/openpype/pipeline/schema/session-4.0.json 
b/client/ayon_core/pipeline/schema/session-4.0.json similarity index 100% rename from openpype/pipeline/schema/session-4.0.json rename to client/ayon_core/pipeline/schema/session-4.0.json diff --git a/openpype/pipeline/schema/shaders-1.0.json b/client/ayon_core/pipeline/schema/shaders-1.0.json similarity index 100% rename from openpype/pipeline/schema/shaders-1.0.json rename to client/ayon_core/pipeline/schema/shaders-1.0.json diff --git a/openpype/pipeline/schema/subset-1.0.json b/client/ayon_core/pipeline/schema/subset-1.0.json similarity index 100% rename from openpype/pipeline/schema/subset-1.0.json rename to client/ayon_core/pipeline/schema/subset-1.0.json diff --git a/openpype/pipeline/schema/subset-2.0.json b/client/ayon_core/pipeline/schema/subset-2.0.json similarity index 100% rename from openpype/pipeline/schema/subset-2.0.json rename to client/ayon_core/pipeline/schema/subset-2.0.json diff --git a/openpype/pipeline/schema/subset-3.0.json b/client/ayon_core/pipeline/schema/subset-3.0.json similarity index 100% rename from openpype/pipeline/schema/subset-3.0.json rename to client/ayon_core/pipeline/schema/subset-3.0.json diff --git a/openpype/pipeline/schema/thumbnail-1.0.json b/client/ayon_core/pipeline/schema/thumbnail-1.0.json similarity index 100% rename from openpype/pipeline/schema/thumbnail-1.0.json rename to client/ayon_core/pipeline/schema/thumbnail-1.0.json diff --git a/openpype/pipeline/schema/version-1.0.json b/client/ayon_core/pipeline/schema/version-1.0.json similarity index 100% rename from openpype/pipeline/schema/version-1.0.json rename to client/ayon_core/pipeline/schema/version-1.0.json diff --git a/openpype/pipeline/schema/version-2.0.json b/client/ayon_core/pipeline/schema/version-2.0.json similarity index 100% rename from openpype/pipeline/schema/version-2.0.json rename to client/ayon_core/pipeline/schema/version-2.0.json diff --git a/openpype/pipeline/schema/version-3.0.json b/client/ayon_core/pipeline/schema/version-3.0.json similarity index 100% rename from openpype/pipeline/schema/version-3.0.json rename to client/ayon_core/pipeline/schema/version-3.0.json diff --git a/openpype/pipeline/schema/workfile-1.0.json b/client/ayon_core/pipeline/schema/workfile-1.0.json similarity index 100% rename from openpype/pipeline/schema/workfile-1.0.json rename to client/ayon_core/pipeline/schema/workfile-1.0.json diff --git a/client/ayon_core/pipeline/tempdir.py b/client/ayon_core/pipeline/tempdir.py new file mode 100644 index 0000000000..29d4659393 --- /dev/null +++ b/client/ayon_core/pipeline/tempdir.py @@ -0,0 +1,65 @@ +""" +Temporary folder operations +""" + +import os +from ayon_core.lib import StringTemplate +from ayon_core.pipeline import Anatomy + + +def create_custom_tempdir(project_name, anatomy=None): + """ Create custom tempdir + + Template path formatting is supporting: + - optional key formatting + - available keys: + - root[work | ] + - project[name | code] + + Args: + project_name (str): project name + anatomy (ayon_core.pipeline.Anatomy)[optional]: Anatomy object + + Returns: + str | None: formatted path or None + """ + env_tmpdir = os.getenv("AYON_TMPDIR") + if not env_tmpdir: + env_tmpdir = os.getenv("OPENPYPE_TMPDIR") + if not env_tmpdir: + return + print( + "DEPRECATION WARNING: Used 'OPENPYPE_TMPDIR' environment" + " variable. Please use 'AYON_TMPDIR' instead." 
+ ) + + custom_tempdir = None + if "{" in env_tmpdir: + if anatomy is None: + anatomy = Anatomy(project_name) + # create base formate data + data = { + "root": anatomy.roots, + "project": { + "name": anatomy.project_name, + "code": anatomy.project_code, + } + } + # path is anatomy template + custom_tempdir = StringTemplate.format_template( + env_tmpdir, data).normalized() + + else: + # path is absolute + custom_tempdir = env_tmpdir + + # create the dir path if it doesn't exists + if not os.path.exists(custom_tempdir): + try: + # create it if it doesn't exists + os.makedirs(custom_tempdir) + except IOError as error: + raise IOError( + "Path couldn't be created: {}".format(error)) + + return custom_tempdir diff --git a/openpype/pipeline/template_data.py b/client/ayon_core/pipeline/template_data.py similarity index 96% rename from openpype/pipeline/template_data.py rename to client/ayon_core/pipeline/template_data.py index a48f0721b6..a1b944a431 100644 --- a/openpype/pipeline/template_data.py +++ b/client/ayon_core/pipeline/template_data.py @@ -1,6 +1,6 @@ -from openpype.client import get_project, get_asset_by_name -from openpype.settings import get_system_settings -from openpype.lib.local_settings import get_openpype_username +from ayon_core.client import get_project, get_asset_by_name +from ayon_core.settings import get_system_settings +from ayon_core.lib.local_settings import get_ayon_username def get_general_template_data(system_settings=None): @@ -9,7 +9,7 @@ def get_general_template_data(system_settings=None): Output contains formatting keys: - 'studio[name]' - Studio name filled from system settings - 'studio[code]' - Studio code filled from system settings - - 'user' - User's name using 'get_openpype_username' + - 'user' - User's name using 'get_ayon_username' Args: system_settings (Dict[str, Any]): System settings. @@ -24,7 +24,7 @@ def get_general_template_data(system_settings=None): "name": studio_name, "code": studio_code }, - "user": get_openpype_username() + "user": get_ayon_username() } diff --git a/openpype/pipeline/version_start.py b/client/ayon_core/pipeline/version_start.py similarity index 87% rename from openpype/pipeline/version_start.py rename to client/ayon_core/pipeline/version_start.py index 0240ab0c7a..bd7d800335 100644 --- a/openpype/pipeline/version_start.py +++ b/client/ayon_core/pipeline/version_start.py @@ -1,5 +1,5 @@ -from openpype.lib.profiles_filtering import filter_profiles -from openpype.settings import get_project_settings +from ayon_core.lib.profiles_filtering import filter_profiles +from ayon_core.settings import get_project_settings def get_versioning_start( diff --git a/openpype/pipeline/workfile/__init__.py b/client/ayon_core/pipeline/workfile/__init__.py similarity index 100% rename from openpype/pipeline/workfile/__init__.py rename to client/ayon_core/pipeline/workfile/__init__.py diff --git a/openpype/pipeline/workfile/build_workfile.py b/client/ayon_core/pipeline/workfile/build_workfile.py similarity index 98% rename from openpype/pipeline/workfile/build_workfile.py rename to client/ayon_core/pipeline/workfile/build_workfile.py index 7b153d37b9..c62facaaa9 100644 --- a/openpype/pipeline/workfile/build_workfile.py +++ b/client/ayon_core/pipeline/workfile/build_workfile.py @@ -5,7 +5,7 @@ build per context and being explicit about loaded content. For more explicit workfile build is recommended 'AbstractTemplateBuilder' -from '~/openpype/pipeline/workfile/workfile_template_builder'. 
Which gives +from '~/ayon_core/pipeline/workfile/workfile_template_builder'. Which gives more abilities to define how build happens but require more code to achive it. """ @@ -13,19 +13,19 @@ import collections import json -from openpype.client import ( +from ayon_core.client import ( get_asset_by_name, get_subsets, get_last_versions, get_representations, get_linked_assets, ) -from openpype.settings import get_project_settings -from openpype.lib import ( +from ayon_core.settings import get_project_settings +from ayon_core.lib import ( filter_profiles, Logger, ) -from openpype.pipeline.load import ( +from ayon_core.pipeline.load import ( discover_loader_plugins, IncompatibleLoaderError, load_container, @@ -100,7 +100,7 @@ def build_workfile(self): List[Dict[str, Any]]: Loaded containers during build. """ - from openpype.pipeline.context_tools import ( + from ayon_core.pipeline.context_tools import ( get_current_project_name, get_current_asset_name, get_current_task_name, @@ -240,7 +240,7 @@ def get_build_presets(self, task_name, asset_doc): Dict[str, Any]: preset per entered task name """ - from openpype.pipeline.context_tools import ( + from ayon_core.pipeline.context_tools import ( get_current_host_name, get_current_project_name, ) @@ -660,7 +660,7 @@ def _collect_last_version_repres(self, asset_docs): ``` """ - from openpype.pipeline.context_tools import get_current_project_name + from ayon_core.pipeline.context_tools import get_current_project_name output = {} if not asset_docs: diff --git a/openpype/pipeline/workfile/lock_workfile.py b/client/ayon_core/pipeline/workfile/lock_workfile.py similarity index 90% rename from openpype/pipeline/workfile/lock_workfile.py rename to client/ayon_core/pipeline/workfile/lock_workfile.py index 579840c07d..a6d4348966 100644 --- a/openpype/pipeline/workfile/lock_workfile.py +++ b/client/ayon_core/pipeline/workfile/lock_workfile.py @@ -1,9 +1,9 @@ import os import json -from openpype.lib import Logger, filter_profiles -from openpype.lib.pype_info import get_workstation_info -from openpype.settings import get_project_settings -from openpype.pipeline import get_process_id +from ayon_core.lib import Logger, filter_profiles +from ayon_core.lib.ayon_info import get_workstation_info +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import get_process_id def _read_lock_file(lock_filepath): diff --git a/openpype/pipeline/workfile/path_resolving.py b/client/ayon_core/pipeline/workfile/path_resolving.py similarity index 98% rename from openpype/pipeline/workfile/path_resolving.py rename to client/ayon_core/pipeline/workfile/path_resolving.py index 78acee20da..95a0a03c60 100644 --- a/openpype/pipeline/workfile/path_resolving.py +++ b/client/ayon_core/pipeline/workfile/path_resolving.py @@ -3,15 +3,15 @@ import copy import platform -from openpype.client import get_project, get_asset_by_name -from openpype.settings import get_project_settings -from openpype.lib import ( +from ayon_core.client import get_project, get_asset_by_name +from ayon_core.settings import get_project_settings +from ayon_core.lib import ( filter_profiles, Logger, StringTemplate, ) -from openpype.pipeline import version_start, Anatomy -from openpype.pipeline.template_data import get_template_data +from ayon_core.pipeline import version_start, Anatomy +from ayon_core.pipeline.template_data import get_template_data def get_workfile_template_key_from_context( diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py 
b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
new file mode 100644
index 0000000000..1afe26813f
--- /dev/null
+++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py
@@ -0,0 +1,1938 @@
+"""Workfile build mechanism using workfile templates.
+
+Build templates are manually prepared using plugin definitions which create
+placeholders inside the template that are populated on import.
+
+This approach is very explicit, to achieve very specific build logic that can
+be targeted by task types and names.
+
+Placeholders are created using placeholder plugins which should care about
+logic and data of placeholder items. 'PlaceholderItem' is used to keep track
+of their progress.
+"""
+
+import os
+import re
+import collections
+import copy
+from abc import ABCMeta, abstractmethod
+
+import six
+from ayon_api import get_products, get_last_versions
+from ayon_api.graphql_queries import folders_graphql_query
+
+from ayon_core.client import (
+    get_asset_by_name,
+    get_linked_assets,
+    get_representations,
+    get_ayon_server_api_connection,
+)
+from ayon_core.settings import (
+    get_project_settings,
+    get_system_settings,
+)
+from ayon_core.host import IWorkfileHost, HostBase
+from ayon_core.lib import (
+    Logger,
+    StringTemplate,
+    filter_profiles,
+    attribute_definitions,
+)
+from ayon_core.lib.attribute_definitions import get_attributes_keys
+from ayon_core.pipeline import Anatomy
+from ayon_core.pipeline.load import (
+    get_loaders_by_name,
+    get_contexts_for_repre_docs,
+    load_with_repre_context,
+)
+
+from ayon_core.pipeline.create import (
+    discover_legacy_creator_plugins,
+    CreateContext,
+)
+
+
+class TemplateNotFound(Exception):
+    """Exception raised when template does not exist."""
+    pass
+
+
+class TemplateProfileNotFound(Exception):
+    """Exception raised when the current context does not match any
+    template profile."""
+    pass
+
+
+class TemplateAlreadyImported(Exception):
+    """Error raised when template was already imported by host for
+    this session."""
+    pass
+
+
+class TemplateLoadFailed(Exception):
+    """Error raised when the template loader was unable to load the
+    template."""
+    pass
+
+
+@six.add_metaclass(ABCMeta)
+class AbstractTemplateBuilder(object):
+    """Abstraction of Template Builder.
+
+    The builder cares about context, shared data, cache, discovery of plugins
+    and trigger logic. It provides a public API for the host workfile build
+    system.
+
+    The rest of the logic is based on plugins that care about collection and
+    creation of placeholder items.
+
+    Population of placeholders happens in loops. Each loop will collect all
+    available placeholders, skip the already populated ones, and populate
+    the rest.
+
+    The builder holds two types of shared data: refresh-lifetime data, which
+    is cleared on refresh, and populate-lifetime data, which is cleared after
+    each loop of placeholder population.
+
+    Args:
+        host (Union[HostBase, ModuleType]): Implementation of host.
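+
+    Example:
+        A minimal host-side sketch; 'MyHostTemplateBuilder' is a hypothetical
+        host implementation of this abstract class::
+
+            builder = MyHostTemplateBuilder(host)
+            builder.build_template()  # path resolved from studio settings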
+ """ + + _log = None + use_legacy_creators = False + + def __init__(self, host): + # Get host name + if isinstance(host, HostBase): + host_name = host.name + else: + host_name = os.environ.get("AVALON_APP") + + self._host = host + self._host_name = host_name + + # Shared data across placeholder plugins + self._shared_data = {} + self._shared_populate_data = {} + + # Where created objects of placeholder plugins will be stored + self._placeholder_plugins = None + self._loaders_by_name = None + self._creators_by_name = None + self._create_context = None + + self._system_settings = None + self._project_settings = None + + self._current_asset_doc = None + self._linked_asset_docs = None + self._task_type = None + + @property + def project_name(self): + if isinstance(self._host, HostBase): + return self._host.get_current_project_name() + return os.getenv("AVALON_PROJECT") + + @property + def current_asset_name(self): + if isinstance(self._host, HostBase): + return self._host.get_current_asset_name() + return os.getenv("AVALON_ASSET") + + @property + def current_task_name(self): + if isinstance(self._host, HostBase): + return self._host.get_current_task_name() + return os.getenv("AVALON_TASK") + + def get_current_context(self): + if isinstance(self._host, HostBase): + return self._host.get_current_context() + return { + "project_name": self.project_name, + "asset_name": self.current_asset_name, + "task_name": self.current_task_name + } + + @property + def system_settings(self): + if self._system_settings is None: + self._system_settings = get_system_settings() + return self._system_settings + + @property + def project_settings(self): + if self._project_settings is None: + self._project_settings = get_project_settings(self.project_name) + return self._project_settings + + @property + def current_asset_doc(self): + if self._current_asset_doc is None: + self._current_asset_doc = get_asset_by_name( + self.project_name, self.current_asset_name + ) + return self._current_asset_doc + + @property + def linked_asset_docs(self): + if self._linked_asset_docs is None: + self._linked_asset_docs = get_linked_assets( + self.project_name, self.current_asset_doc + ) + return self._linked_asset_docs + + @property + def current_task_type(self): + asset_doc = self.current_asset_doc + if not asset_doc: + return None + return ( + asset_doc + .get("data", {}) + .get("tasks", {}) + .get(self.current_task_name, {}) + .get("type") + ) + + @property + def create_context(self): + if self._create_context is None: + self._create_context = CreateContext( + self.host, + discover_publish_plugins=False, + headless=True + ) + return self._create_context + + def get_placeholder_plugin_classes(self): + """Get placeholder plugin classes that can be used to build template. + + Default implementation looks for method + 'get_workfile_build_placeholder_plugins' on host. + + Returns: + List[PlaceholderPlugin]: Plugin classes available for host. + """ + + if hasattr(self._host, "get_workfile_build_placeholder_plugins"): + return self._host.get_workfile_build_placeholder_plugins() + return [] + + @property + def host(self): + """Access to host implementation. + + Returns: + Union[HostBase, ModuleType]: Implementation of host. + """ + + return self._host + + @property + def host_name(self): + """Name of 'host' implementation. + + Returns: + str: Host's name. 
+ """ + + return self._host_name + + @property + def log(self): + """Dynamically created logger for the plugin.""" + + if self._log is None: + self._log = Logger.get_logger(repr(self)) + return self._log + + def refresh(self): + """Reset cached data.""" + + self._placeholder_plugins = None + self._loaders_by_name = None + self._creators_by_name = None + + self._current_asset_doc = None + self._linked_asset_docs = None + self._task_type = None + + self._system_settings = None + self._project_settings = None + + self.clear_shared_data() + self.clear_shared_populate_data() + + def get_loaders_by_name(self): + if self._loaders_by_name is None: + self._loaders_by_name = get_loaders_by_name() + return self._loaders_by_name + + def _collect_legacy_creators(self): + creators_by_name = {} + for creator in discover_legacy_creator_plugins(): + if not creator.enabled: + continue + creator_name = creator.__name__ + if creator_name in creators_by_name: + raise KeyError( + "Duplicated creator name {} !".format(creator_name) + ) + creators_by_name[creator_name] = creator + self._creators_by_name = creators_by_name + + def _collect_creators(self): + self._creators_by_name = dict(self.create_context.creators) + + def get_creators_by_name(self): + if self._creators_by_name is None: + if self.use_legacy_creators: + self._collect_legacy_creators() + else: + self._collect_creators() + + return self._creators_by_name + + def get_shared_data(self, key): + """Receive shared data across plugins and placeholders. + + This can be used to scroll scene only once to look for placeholder + items if the storing is unified but each placeholder plugin would have + to call it again. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + return self._shared_data.get(key) + + def set_shared_data(self, key, value): + """Store share data across plugins and placeholders. + + Store data that can be afterwards accessed from any future call. It + is good practice to check if the same value is not already stored under + different key or if the key is not already used for something else. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + self._shared_data[key] = value + + def clear_shared_data(self): + """Clear shared data. + + Method only clear shared data to default state. + """ + + self._shared_data = {} + + def clear_shared_populate_data(self): + """Receive shared data across plugins and placeholders. + + These data are cleared after each loop of populating of template. + + This can be used to scroll scene only once to look for placeholder + items if the storing is unified but each placeholder plugin would have + to call it again. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + self._shared_populate_data = {} + + def get_shared_populate_data(self, key): + """Store share populate data across plugins and placeholders. + + These data are cleared after each loop of populating of template. + + Store data that can be afterwards accessed from any future call. It + is good practice to check if the same value is not already stored under + different key or if the key is not already used for something else. + + Key should be self explanatory to content. 
+
+        Args:
+            key (str): Key under which the data is stored.
+
+        Returns:
+            Union[None, Any]: None if the key was not set.
+        """
+
+        return self._shared_populate_data.get(key)
+
+    def set_shared_populate_data(self, key, value):
+        """Store shared populate data across plugins and placeholders.
+
+        These data are cleared after each loop of template population.
+
+        Store data that can be accessed from any future call. It is good
+        practice to check that the same value is not already stored under
+        a different key, and that the key is not already used for
+        something else.
+
+        Key should be self explanatory to content.
+        - wrong: 'asset'
+        - good: 'asset_name'
+
+        Args:
+            key (str): Key under which the value is stored.
+            value (Any): Value that should be stored under the key.
+        """
+
+        self._shared_populate_data[key] = value
+
+    @property
+    def placeholder_plugins(self):
+        """Access to initialized placeholder plugins.
+
+        Returns:
+            List[PlaceholderPlugin]: Initialized plugins available for host.
+        """
+
+        if self._placeholder_plugins is None:
+            placeholder_plugins = {}
+            for cls in self.get_placeholder_plugin_classes():
+                try:
+                    plugin = cls(self)
+                    placeholder_plugins[plugin.identifier] = plugin
+
+                except Exception:
+                    self.log.warning(
+                        "Failed to initialize placeholder plugin {}".format(
+                            cls.__name__
+                        ),
+                        exc_info=True
+                    )
+
+            self._placeholder_plugins = placeholder_plugins
+        return self._placeholder_plugins
+
+    def create_placeholder(self, plugin_identifier, placeholder_data):
+        """Create new placeholder using plugin identifier and data.
+
+        Args:
+            plugin_identifier (str): Identifier of plugin. That's how the
+                builder knows which plugin should be used.
+            placeholder_data (Dict[str, Any]): Placeholder item data. They
+                should match options required by the plugin.
+
+        Returns:
+            PlaceholderItem: Created placeholder item.
+        """
+
+        plugin = self.placeholder_plugins[plugin_identifier]
+        return plugin.create_placeholder(placeholder_data)
+
+    def get_placeholders(self):
+        """Collect placeholder items from scene.
+
+        Each placeholder plugin can collect its placeholders and return them.
+        This method does not use cached values but always goes through
+        the scene.
+
+        Returns:
+            List[PlaceholderItem]: Sorted placeholder items.
+        """
+
+        placeholders = []
+        for placeholder_plugin in self.placeholder_plugins.values():
+            result = placeholder_plugin.collect_placeholders()
+            if result:
+                placeholders.extend(result)
+
+        return list(sorted(
+            placeholders,
+            key=lambda i: i.order
+        ))
+
+    def build_template(
+        self,
+        template_path=None,
+        level_limit=None,
+        keep_placeholders=None,
+        create_first_version=None,
+        workfile_creation_enabled=False
+    ):
+        """Main callback for building workfile from template path.
+
+        Todo:
+            Handle report of populated placeholders from
+            'populate_scene_placeholders' to be shown to a user.
+
+        Args:
+            template_path (str): Path to a template file with placeholders.
+                The template resolved from settings ('get_template_preset')
+                is used when not passed.
+            level_limit (int): Limit of populate loops. Related to
+                'populate_scene_placeholders' method.
+            keep_placeholders (bool): Add flag to placeholder data for
+                hosts to decide if they want to remove
+                placeholder after it is used.
+            create_first_version (bool): Create the first version of
+                a workfile.
+            workfile_creation_enabled (bool): If True, the first version
+                may be created, but the process is skipped when a version
+                was already created.
+        """
+        template_preset = self.get_template_preset()
+
+        if template_path is None:
+            template_path = template_preset["path"]
+
+        if keep_placeholders is None:
+            keep_placeholders = template_preset["keep_placeholder"]
+        if create_first_version is None:
+            create_first_version = template_preset["create_first_version"]
+
+        # check if first version is created
+        created_version_workfile = False
+        if create_first_version:
+            created_version_workfile = self.create_first_workfile_version()
+
+        # if first version is created, import template
+        # and populate placeholders
+        if (
+            create_first_version
+            and workfile_creation_enabled
+            and created_version_workfile
+        ):
+            self.import_template(template_path)
+            self.populate_scene_placeholders(
+                level_limit, keep_placeholders)
+
+            # save workfile after template is populated
+            self.save_workfile(created_version_workfile)
+
+        # ignore process if first workfile is enabled
+        # but a version is already created
+        if workfile_creation_enabled:
+            return
+
+        self.import_template(template_path)
+        self.populate_scene_placeholders(
+            level_limit, keep_placeholders)
+
+    def rebuild_template(self):
+        """Go through existing placeholders in scene and update them.
+
+        This may not make sense for all plugin types, so this is optional
+        logic for plugins.
+
+        Note:
+            The logic is not importing the template again but using
+            placeholders that were already available. We should maybe
+            change the method name.
+
+        Question:
+            Should this also handle subloops, as it is possible that another
+            template is loaded during processing?
+        """
+
+        if not self.placeholder_plugins:
+            self.log.info("There are no placeholder plugins available.")
+            return
+
+        placeholders = self.get_placeholders()
+        if not placeholders:
+            self.log.info("No placeholders were found.")
+            return
+
+        for placeholder in placeholders:
+            plugin = placeholder.plugin
+            plugin.repopulate_placeholder(placeholder)
+
+        self.clear_shared_populate_data()
+
+    @abstractmethod
+    def import_template(self, template_path):
+        """Import template in current host.
+
+        Should load the content of the template into the scene so
+        'populate_scene_placeholders' can be started.
+
+        Args:
+            template_path (str): Fullpath for current task and
+                host's template file.
+        """
+
+        pass
+
+    def create_first_workfile_version(self):
+        """Create first version of workfile.
+
+        Creates and saves the first workfile version, so the template can
+        be imported and populated on top of it.
+
+        Returns:
+            Union[str, bool]: Path to the created workfile, or False when
+                a workfile already exists.
+ """ + last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE") + self.log.info("__ last_workfile_path: {}".format(last_workfile_path)) + if os.path.exists(last_workfile_path): + # ignore in case workfile existence + self.log.info("Workfile already exists, skipping creation.") + return False + + # Create first version + self.log.info("Creating first version of workfile.") + self.save_workfile(last_workfile_path) + + # Confirm creation of first version + return last_workfile_path + + def save_workfile(self, workfile_path): + """Save workfile in current host.""" + # Save current scene, continue to open file + if isinstance(self.host, IWorkfileHost): + self.host.save_workfile(workfile_path) + else: + self.host.save_file(workfile_path) + + def _prepare_placeholders(self, placeholders): + """Run preparation part for placeholders on plugins. + + Args: + placeholders (List[PlaceholderItem]): Placeholder items that will + be processed. + """ + + # Prepare placeholder items by plugin + plugins_by_identifier = {} + placeholders_by_plugin_id = collections.defaultdict(list) + for placeholder in placeholders: + plugin = placeholder.plugin + identifier = plugin.identifier + plugins_by_identifier[identifier] = plugin + placeholders_by_plugin_id[identifier].append(placeholder) + + # Plugin should prepare data for passed placeholders + for identifier, placeholders in placeholders_by_plugin_id.items(): + plugin = plugins_by_identifier[identifier] + plugin.prepare_placeholders(placeholders) + + def populate_scene_placeholders( + self, level_limit=None, keep_placeholders=None + ): + """Find placeholders in scene using plugins and process them. + + This should happen after 'import_template'. + + Collect available placeholders from scene. All of them are processed + after that shared data are cleared. Placeholder items are collected + again and if there are any new the loop happens again. This is possible + to change with defying 'level_limit'. + + Placeholders are marked as processed so they're not re-processed. To + identify which placeholders were already processed is used + placeholder's 'scene_identifier'. + + Args: + level_limit (int): Level of loops that can happen. Default is 1000. + keep_placeholders (bool): Add flag to placeholder data for + hosts to decide if they want to remove + placeholder after it is used. + """ + + if not self.placeholder_plugins: + self.log.warning("There are no placeholder plugins available.") + return + + placeholders = self.get_placeholders() + if not placeholders: + self.log.warning("No placeholders were found.") + return + + # Avoid infinite loop + # - 1000 iterations of placeholders processing must be enough + if not level_limit: + level_limit = 1000 + + placeholder_by_scene_id = { + placeholder.scene_identifier: placeholder + for placeholder in placeholders + } + all_processed = len(placeholders) == 0 + # Counter is checked at the ned of a loop so the loop happens at least + # once. + iter_counter = 0 + while not all_processed: + filtered_placeholders = [] + for placeholder in placeholders: + if placeholder.finished: + continue + + if placeholder.in_progress: + self.log.warning(( + "Placeholder that should be processed" + " is already in progress." 
+                    ))
+                    continue
+
+                # add flag for keeping placeholders in scene
+                # after they are processed
+                placeholder.data["keep_placeholder"] = keep_placeholders
+
+                filtered_placeholders.append(placeholder)
+
+            self._prepare_placeholders(filtered_placeholders)
+
+            for placeholder in filtered_placeholders:
+                placeholder.set_in_progress()
+                placeholder_plugin = placeholder.plugin
+                try:
+                    placeholder_plugin.populate_placeholder(placeholder)
+
+                except Exception as exc:
+                    self.log.warning(
+                        (
+                            "Failed to process placeholder {} with plugin {}"
+                        ).format(
+                            placeholder.scene_identifier,
+                            placeholder_plugin.__class__.__name__
+                        ),
+                        exc_info=True
+                    )
+                    placeholder.set_failed(exc)
+
+                placeholder.set_finished()
+
+            # Clear shared data before getting new placeholders
+            self.clear_shared_populate_data()
+
+            iter_counter += 1
+            if iter_counter >= level_limit:
+                break
+
+            all_processed = True
+            collected_placeholders = self.get_placeholders()
+            for placeholder in collected_placeholders:
+                identifier = placeholder.scene_identifier
+                if identifier in placeholder_by_scene_id:
+                    continue
+
+                all_processed = False
+                placeholder_by_scene_id[identifier] = placeholder
+                placeholders.append(placeholder)
+
+        self.refresh()
+
+    def _get_build_profiles(self):
+        """Get build profiles for workfile build template path.
+
+        Returns:
+            List[Dict[str, Any]]: Profiles for template path resolving.
+        """
+
+        return (
+            self.project_settings
+            [self.host_name]
+            ["templated_workfile_build"]
+            ["profiles"]
+        )
+
+    def get_template_preset(self):
+        """Unified way the template preset is received using settings.
+
+        The method depends on '_get_build_profiles' which should return
+        filter profiles used to resolve the path to a template. The default
+        implementation looks into host settings:
+        - 'project_settings/{host name}/templated_workfile_build/profiles'
+
+        Returns:
+            Dict[str, Any]: Template preset with 'path', 'keep_placeholder'
+                and 'create_first_version' keys.
+
+        Raises:
+            TemplateProfileNotFound: When profiles are not filled.
+            TemplateLoadFailed: Profile was found but path is not set.
+            TemplateNotFound: Path was set but the file does not exist.
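+
+        Example:
+            Shape of the returned preset (values are illustrative)::
+
+                {
+                    "path": "/studio/templates/build_template.ma",
+                    "keep_placeholder": True,
+                    "create_first_version": False,
+                }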
+ """ + + host_name = self.host_name + project_name = self.project_name + task_name = self.current_task_name + task_type = self.current_task_type + + build_profiles = self._get_build_profiles() + profile = filter_profiles( + build_profiles, + { + "task_types": task_type, + "task_names": task_name + } + ) + + if not profile: + raise TemplateProfileNotFound(( + "No matching profile found for task '{}' of type '{}' " + "with host '{}'" + ).format(task_name, task_type, host_name)) + + path = profile["path"] + + # switch to remove placeholders after they are used + keep_placeholder = profile.get("keep_placeholder") + create_first_version = profile.get("create_first_version") + + # backward compatibility, since default is True + if keep_placeholder is None: + keep_placeholder = True + + if not path: + raise TemplateLoadFailed(( + "Template path is not set.\n" + "Path need to be set in {}\\Template Workfile Build " + "Settings\\Profiles" + ).format(host_name.title())) + + # Try fill path with environments and anatomy roots + anatomy = Anatomy(project_name) + fill_data = { + key: value + for key, value in os.environ.items() + } + + fill_data["root"] = anatomy.roots + fill_data["project"] = { + "name": project_name, + "code": anatomy.project_code, + } + + result = StringTemplate.format_template(path, fill_data) + if result.solved: + path = result.normalized() + + if path and os.path.exists(path): + self.log.info("Found template at: '{}'".format(path)) + return { + "path": path, + "keep_placeholder": keep_placeholder, + "create_first_version": create_first_version + } + + solved_path = None + while True: + try: + solved_path = anatomy.path_remapper(path) + except KeyError as missing_key: + raise KeyError( + "Could not solve key '{}' in template path '{}'".format( + missing_key, path)) + + if solved_path is None: + solved_path = path + if solved_path == path: + break + path = solved_path + + solved_path = os.path.normpath(solved_path) + if not os.path.exists(solved_path): + raise TemplateNotFound( + "Template found in AYON settings for task '{}' with host " + "'{}' does not exists. (Not found : {})".format( + task_name, host_name, solved_path)) + + self.log.info("Found template at: '{}'".format(solved_path)) + + return { + "path": solved_path, + "keep_placeholder": keep_placeholder, + "create_first_version": create_first_version + } + + +@six.add_metaclass(ABCMeta) +class PlaceholderPlugin(object): + """Plugin which care about handling of placeholder items logic. + + Plugin create and update placeholders in scene and populate them on + template import. Populating means that based on placeholder data happens + a logic in the scene. Most common logic is to load representation using + loaders or to create instances in scene. + """ + + label = None + _log = None + + def __init__(self, builder): + self._builder = builder + + @property + def builder(self): + """Access to builder which initialized the plugin. + + Returns: + AbstractTemplateBuilder: Loader of template build. + """ + + return self._builder + + @property + def project_name(self): + return self._builder.project_name + + @property + def log(self): + """Dynamically created logger for the plugin.""" + + if self._log is None: + self._log = Logger.get_logger(repr(self)) + return self._log + + @property + def identifier(self): + """Identifier which will be stored to placeholder. + + Default implementation uses class name. + + Returns: + str: Unique identifier of placeholder plugin. 
+ """ + + return self.__class__.__name__ + + @abstractmethod + def create_placeholder(self, placeholder_data): + """Create new placeholder in scene and get it's item. + + It matters on the plugin implementation if placeholder will use + selection in scene or create new node. + + Args: + placeholder_data (Dict[str, Any]): Data that were created + based on attribute definitions from 'get_placeholder_options'. + + Returns: + PlaceholderItem: Created placeholder item. + """ + + pass + + @abstractmethod + def update_placeholder(self, placeholder_item, placeholder_data): + """Update placeholder item with new data. + + New data should be propagated to object of placeholder item itself + and also into the scene. + + Reason: + Some placeholder plugins may require some special way how the + updates should be propagated to object. + + Args: + placeholder_item (PlaceholderItem): Object of placeholder that + should be updated. + placeholder_data (Dict[str, Any]): Data related to placeholder. + Should match plugin options. + """ + + pass + + @abstractmethod + def collect_placeholders(self): + """Collect placeholders from scene. + + Returns: + List[PlaceholderItem]: Placeholder objects. + """ + + pass + + def get_placeholder_options(self, options=None): + """Placeholder options for data showed. + + Returns: + List[AbstractAttrDef]: Attribute definitions of + placeholder options. + """ + + return [] + + def get_placeholder_keys(self): + """Get placeholder keys that are stored in scene. + + Returns: + Set[str]: Key of placeholder keys that are stored in scene. + """ + + option_keys = get_attributes_keys(self.get_placeholder_options()) + option_keys.add("plugin_identifier") + return option_keys + + def prepare_placeholders(self, placeholders): + """Preparation part of placeholders. + + Args: + placeholders (List[PlaceholderItem]): List of placeholders that + will be processed. + """ + + pass + + @abstractmethod + def populate_placeholder(self, placeholder): + """Process single placeholder item. + + Processing of placeholders is defined by their order thus can't be + processed in batch. + + Args: + placeholder (PlaceholderItem): Placeholder that should be + processed. + """ + + pass + + def repopulate_placeholder(self, placeholder): + """Update scene with current context for passed placeholder. + + Can be used to re-run placeholder logic (if it make sense). + """ + + pass + + def get_plugin_shared_data(self, key): + """Receive shared data across plugin and placeholders. + + Using shared data from builder but stored under plugin identifier. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + plugin_data = self.builder.get_shared_data(self.identifier) + if plugin_data is None: + return None + return plugin_data.get(key) + + def set_plugin_shared_data(self, key, value): + """Store share data across plugin and placeholders. + + Using shared data from builder but stored under plugin identifier. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + plugin_data = self.builder.get_shared_data(self.identifier) + if plugin_data is None: + plugin_data = {} + plugin_data[key] = value + self.builder.set_shared_data(self.identifier, plugin_data) + + def get_plugin_shared_populate_data(self, key): + """Receive shared data across plugin and placeholders. 
+    def get_plugin_shared_populate_data(self, key):
+        """Receive shared data across plugin and placeholders.
+
+        Using shared populate data from builder but stored under plugin
+        identifier.
+
+        Shared populate data are cleaned up during the populate while loop.
+
+        Args:
+            key (str): Key under which are shared data stored.
+
+        Returns:
+            Union[None, Any]: None if key was not set.
+        """
+
+        plugin_data = self.builder.get_shared_populate_data(self.identifier)
+        if plugin_data is None:
+            return None
+        return plugin_data.get(key)
+
+    def set_plugin_shared_populate_data(self, key, value):
+        """Store shared data across plugin and placeholders.
+
+        Using shared data from builder but stored under plugin identifier.
+
+        Key should be self-explanatory to content.
+        - wrong: 'asset'
+        - good: 'asset_name'
+
+        Shared populate data are cleaned up during the populate while loop.
+
+        Args:
+            key (str): Key under which the value is stored.
+            value (Any): Value that should be stored under the key.
+        """
+
+        plugin_data = self.builder.get_shared_populate_data(self.identifier)
+        if plugin_data is None:
+            plugin_data = {}
+        plugin_data[key] = value
+        self.builder.set_shared_populate_data(self.identifier, plugin_data)
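For illustration, a minimal concrete plugin could look like the sketch below. Everything on the made-up `host_api` module is an assumption; only the overridden methods come from the `PlaceholderPlugin` contract above:

```python
class MyLoadPlaceholderPlugin(PlaceholderPlugin):
    label = "My Load Placeholder"

    def create_placeholder(self, placeholder_data):
        # Create a scene node holding the placeholder data (hypothetical).
        node_id = host_api.create_node("PLACEHOLDER")
        host_api.store_data(node_id, placeholder_data)
        return PlaceholderItem(node_id, placeholder_data, self)

    def update_placeholder(self, placeholder_item, placeholder_data):
        # Propagate new data to the item and to the scene node.
        placeholder_item.data.update(placeholder_data)
        host_api.store_data(
            placeholder_item.scene_identifier, placeholder_data
        )

    def collect_placeholders(self):
        return [
            PlaceholderItem(node_id, host_api.read_data(node_id), self)
            for node_id in host_api.find_nodes("PLACEHOLDER")
        ]

    def populate_placeholder(self, placeholder):
        # Load representations or create instances here.
        pass
```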
+ """ + + return copy.deepcopy(self.data) + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(repr(self)) + return self._log + + def __repr__(self): + return "< {} {} >".format( + self.__class__.__name__, + self._scene_identifier + ) + + @property + def order(self): + """Order of item processing.""" + + order = self._data.get("order") + if order is None: + return self.default_order + return order + + @property + def scene_identifier(self): + return self._scene_identifier + + @property + def finished(self): + """Item was already processed.""" + + return self._state == 2 + + @property + def in_progress(self): + """Processing is in progress.""" + + return self._state == 1 + + def set_in_progress(self): + """Change to in progress state.""" + + self._state = 1 + + def set_finished(self): + """Change to finished state.""" + + self._state = 2 + + def set_failed(self, exception): + self.add_error(str(exception)) + + def add_error(self, error): + """Set placeholder item as failed and mark it as finished.""" + + self._errors.append(error) + + def get_errors(self): + """Exception with which the placeholder process failed. + + Gives ability to access the exception. + """ + + return self._errors + + +class PlaceholderLoadMixin(object): + """Mixin prepared for loading placeholder plugins. + + Implementation prepares options for placeholders with + 'get_load_plugin_options'. + + For placeholder population is implemented 'populate_load_placeholder'. + + PlaceholderItem can have implemented methods: + - 'load_failed' - called when loading of one representation failed + - 'load_succeed' - called when loading of one representation succeeded + """ + + def get_load_plugin_options(self, options=None): + """Unified attribute definitions for load placeholder. + + Common function for placeholder plugins used for loading of + repsentations. Use it in 'get_placeholder_options'. + + Args: + plugin (PlaceholderPlugin): Plugin used for loading of + representations. + options (Dict[str, Any]): Already available options which are used + as defaults for attributes. + + Returns: + List[AbstractAttrDef]: Attribute definitions common for load + plugins. + """ + + loaders_by_name = self.builder.get_loaders_by_name() + loader_items = [ + {"value": loader_name, "label": loader.label or loader_name} + for loader_name, loader in loaders_by_name.items() + ] + + loader_items = list(sorted(loader_items, key=lambda i: i["label"])) + options = options or {} + + # Get families from all loaders excluding "*" + families = set() + for loader in loaders_by_name.values(): + families.update(loader.families) + families.discard("*") + + # Sort for readability + families = list(sorted(families)) + + builder_type_enum_items = [ + {"label": "Current folder", "value": "context_folder"}, + # TODO implement linked folders + # {"label": "Linked folders", "value": "linked_folders"}, + {"label": "All folders", "value": "all_folders"}, + ] + build_type_label = "Folder Builder Type" + build_type_help = ( + "Folder Builder Type\n" + "\nBuilder type describe what template loader will look" + " for." + "\nCurrent Folder: Template loader will look for products" + " of current context folder (Folder /assets/bob will" + " find asset)" + "\nAll folders: All folders matching the regex will be" + " used." 
+class PlaceholderLoadMixin(object):
+    """Mixin prepared for loading placeholder plugins.
+
+    Implementation prepares options for placeholders with
+    'get_load_plugin_options'.
+
+    Placeholder population is implemented in 'populate_load_placeholder'.
+
+    PlaceholderItem can have implemented methods:
+    - 'load_failed' - called when loading of one representation failed
+    - 'load_succeed' - called when loading of one representation succeeded
+    """
+
+    def get_load_plugin_options(self, options=None):
+        """Unified attribute definitions for load placeholder.
+
+        Common function for placeholder plugins used for loading of
+        representations. Use it in 'get_placeholder_options'.
+
+        Args:
+            options (Dict[str, Any]): Already available options which are used
+                as defaults for attributes.
+
+        Returns:
+            List[AbstractAttrDef]: Attribute definitions common for load
+                plugins.
+        """
+
+        loaders_by_name = self.builder.get_loaders_by_name()
+        loader_items = [
+            {"value": loader_name, "label": loader.label or loader_name}
+            for loader_name, loader in loaders_by_name.items()
+        ]
+
+        loader_items = list(sorted(loader_items, key=lambda i: i["label"]))
+        options = options or {}
+
+        # Get families from all loaders excluding "*"
+        families = set()
+        for loader in loaders_by_name.values():
+            families.update(loader.families)
+        families.discard("*")
+
+        # Sort for readability
+        families = list(sorted(families))
+
+        builder_type_enum_items = [
+            {"label": "Current folder", "value": "context_folder"},
+            # TODO implement linked folders
+            # {"label": "Linked folders", "value": "linked_folders"},
+            {"label": "All folders", "value": "all_folders"},
+        ]
+        build_type_label = "Folder Builder Type"
+        build_type_help = (
+            "Folder Builder Type\n"
+            "\nBuilder type describes what the template loader will look"
+            " for."
+            "\nCurrent Folder: Template loader will look for products"
+            " of current context folder (folder /assets/bob will"
+            " find its products)"
+            "\nAll folders: All folders matching the regex will be"
+            " used."
+        )
+
+        return [
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.UILabelDef("Main attributes"),
+            attribute_definitions.UISeparatorDef(),
+
+            attribute_definitions.EnumDef(
+                "builder_type",
+                label=build_type_label,
+                default=options.get("builder_type"),
+                items=builder_type_enum_items,
+                tooltip=build_type_help
+            ),
+            attribute_definitions.EnumDef(
+                "family",
+                label="Family",
+                default=options.get("family"),
+                items=families
+            ),
+            attribute_definitions.TextDef(
+                "representation",
+                label="Representation name",
+                default=options.get("representation"),
+                placeholder="ma, abc, ..."
+            ),
+            attribute_definitions.EnumDef(
+                "loader",
+                label="Loader",
+                default=options.get("loader"),
+                items=loader_items,
+                tooltip=(
+                    "Loader"
+                    "\nDefines what OpenPype loader will be used to"
+                    " load assets."
+                    "\nUsable loaders depend on current host's loader list."
+                    "\nField is case sensitive."
+                )
+            ),
+            attribute_definitions.TextDef(
+                "loader_args",
+                label="Loader Arguments",
+                default=options.get("loader_args"),
+                placeholder='{"camera":"persp", "lights":True}',
+                tooltip=(
+                    "Loader Arguments"
+                    "\nDefines a dictionary of arguments used to load assets."
+                    "\nUsable arguments depend on current placeholder Loader."
+                    "\nField should be a valid python dict."
+                    " Anything else will be ignored."
+                )
+            ),
+            attribute_definitions.NumberDef(
+                "order",
+                label="Order",
+                default=options.get("order") or 0,
+                decimals=0,
+                minimum=0,
+                maximum=999,
+                tooltip=(
+                    "Order"
+                    "\nOrder defines asset loading priority (0 to 999)"
+                    "\nPriority rule is: \"lowest is first to load\"."
+                )
+            ),
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.UILabelDef("Optional attributes"),
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.TextDef(
+                "folder_path",
+                label="Folder filter",
+                default=options.get("folder_path"),
+                placeholder="regex filtering by folder path",
+                tooltip=(
+                    "Filtering assets by matching"
+                    " field regex to folder path"
+                )
+            ),
+            attribute_definitions.TextDef(
+                "product_name",
+                label="Product filter",
+                default=options.get("product_name"),
+                placeholder="regex filtering by product name",
+                tooltip=(
+                    "Filtering assets by matching"
+                    " field regex to product name"
+                )
+            ),
+        ]
+
+    def parse_loader_args(self, loader_args):
+        """Helper function to parse a string of loader arguments.
+
+        Empty dictionary is returned if conversion fails.
+
+        Args:
+            loader_args (str): Loader args filled by user.
+
+        Returns:
+            Dict[str, Any]: Parsed arguments used as dictionary.
+        """
+
+        if not loader_args:
+            return {}
+
+        try:
+            parsed_args = eval(loader_args)
+            if isinstance(parsed_args, dict):
+                return parsed_args
+
+        except Exception as err:
+            print(
+                "Error while parsing loader arguments '{}'.\n{}: {}\n\n"
+                "Continuing with default arguments. . .".format(
+                    loader_args, err.__class__.__name__, err))
+
+        return {}
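A quick example of the parsing behavior, matching the `loader_args` placeholder text shown above:

```python
plugin.parse_loader_args('{"camera": "persp", "lights": True}')
# -> {"camera": "persp", "lights": True}

plugin.parse_loader_args("not a dict")
# -> {} (parsing fails, the default empty dict is returned)
```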
+ """ + + query = folders_graphql_query({"id"}) + + folders_field = None + for child in query._children: + if child.path != "project": + continue + + for project_child in child._children: + if project_child.path == "project/folders": + folders_field = project_child + break + if folders_field: + break + + if "folderPathRegex" not in query._variables: + folder_path_regex_var = query.add_variable( + "folderPathRegex", "String!" + ) + folders_field.set_filter("pathEx", folder_path_regex_var) + + query.set_variable_value("projectName", project_name) + if folder_regex: + query.set_variable_value("folderPathRegex", folder_regex) + + api = get_ayon_server_api_connection() + for parsed_data in query.continuous_query(api): + for folder in parsed_data["project"]["folders"]: + yield folder["id"] + + def _get_representations(self, placeholder): + """Prepared query of representations based on load options. + + This function is directly connected to options defined in + 'get_load_plugin_options'. + + Note: + This returns all representation documents from all versions of + matching subset. To filter for last version use + '_reduce_last_version_repre_docs'. + + Args: + placeholder (PlaceholderItem): Item which should be populated. + + Returns: + List[Dict[str, Any]]: Representation documents matching filters + from placeholder data. + """ + + # An OpenPype placeholder loaded in AYON + if "asset" in placeholder.data: + return [] + + representation_name = placeholder.data["representation"] + if not representation_name: + return [] + + project_name = self.builder.project_name + current_asset_doc = self.builder.current_asset_doc + + folder_path_regex = placeholder.data["folder_path"] + product_name_regex_value = placeholder.data["product_name"] + product_name_regex = None + if product_name_regex_value: + product_name_regex = re.compile(product_name_regex_value) + product_type = placeholder.data["family"] + + builder_type = placeholder.data["builder_type"] + folder_ids = [] + if builder_type == "context_folder": + folder_ids = [current_asset_doc["_id"]] + + elif builder_type == "all_folders": + folder_ids = list(self._query_by_folder_regex( + project_name, folder_path_regex + )) + + if not folder_ids: + return [] + + products = list(get_products( + project_name, + folder_ids=folder_ids, + product_types=[product_type], + fields={"id", "name"} + )) + filtered_product_ids = set() + for product in products: + if ( + product_name_regex is None + or product_name_regex.match(product["name"]) + ): + filtered_product_ids.add(product["id"]) + + if not filtered_product_ids: + return [] + + version_ids = set( + version["id"] + for version in get_last_versions( + project_name, filtered_product_ids, fields={"id"} + ).values() + ) + return list(get_representations( + project_name, + representation_names=[representation_name], + version_ids=version_ids + )) + + def _before_placeholder_load(self, placeholder): + """Can be overridden. It's called before placeholder representations + are loaded. + """ + + pass + + def _before_repre_load(self, placeholder, representation): + """Can be overridden. 
+    def _before_repre_load(self, placeholder, representation):
+        """Can be overridden. It's called before representation is loaded."""
+
+        pass
+
+    def _reduce_last_version_repre_docs(self, representations):
+        """Reduce representations to the last version."""
+
+        mapping = {}
+        for repre_doc in representations:
+            repre_context = repre_doc["context"]
+
+            asset_name = repre_context["asset"]
+            subset_name = repre_context["subset"]
+            version = repre_context.get("version", -1)
+
+            if asset_name not in mapping:
+                mapping[asset_name] = {}
+
+            subset_mapping = mapping[asset_name]
+            if subset_name not in subset_mapping:
+                subset_mapping[subset_name] = collections.defaultdict(list)
+
+            version_mapping = subset_mapping[subset_name]
+            version_mapping[version].append(repre_doc)
+
+        output = []
+        for subset_mapping in mapping.values():
+            for version_mapping in subset_mapping.values():
+                last_version = tuple(sorted(version_mapping.keys()))[-1]
+                output.extend(version_mapping[last_version])
+        return output
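A tiny worked example of the reduction above; only the 'context' keys the method reads are shown and the values are illustrative:

```python
docs = [
    {"context": {"asset": "bob", "subset": "modelMain", "version": 1}},
    {"context": {"asset": "bob", "subset": "modelMain", "version": 2}},
    {"context": {"asset": "bob", "subset": "modelMain", "version": 2}},
]
# _reduce_last_version_repre_docs(docs) keeps only the two version 2
# documents; the version 1 document is dropped.
```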
+    def populate_load_placeholder(self, placeholder, ignore_repre_ids=None):
+        """Load placeholder is going to load matching representations.
+
+        Note:
+            'ignore_repre_ids' is there to avoid loading the same
+            representation again on load. But the representation can be
+            loaded with a different loader, and a new version of the
+            matching subset could have been published for the
+            representation. We should maybe expect containers.
+
+            Also import loaders don't have containers at all...
+
+        Args:
+            placeholder (PlaceholderItem): Placeholder item with information
+                about requested representations.
+            ignore_repre_ids (Iterable[Union[str, ObjectId]]): Representation
+                ids that should be skipped.
+        """
+
+        if ignore_repre_ids is None:
+            ignore_repre_ids = set()
+
+        # TODO check loader existence
+        loader_name = placeholder.data["loader"]
+        loader_args = self.parse_loader_args(placeholder.data["loader_args"])
+
+        placeholder_representations = self._get_representations(placeholder)
+
+        filtered_representations = []
+        for representation in self._reduce_last_version_repre_docs(
+            placeholder_representations
+        ):
+            repre_id = str(representation["_id"])
+            if repre_id not in ignore_repre_ids:
+                filtered_representations.append(representation)
+
+        if not filtered_representations:
+            self.log.info((
+                "There's no representation for this placeholder: {}"
+            ).format(placeholder.scene_identifier))
+            return
+
+        repre_load_contexts = get_contexts_for_repre_docs(
+            self.project_name, filtered_representations
+        )
+        loaders_by_name = self.builder.get_loaders_by_name()
+        self._before_placeholder_load(
+            placeholder
+        )
+
+        failed = False
+        for repre_load_context in repre_load_contexts.values():
+            representation = repre_load_context["representation"]
+            repre_context = representation["context"]
+            self._before_repre_load(
+                placeholder, representation
+            )
+            self.log.info(
+                "Loading {} from {} with loader {}\n"
+                "Loader arguments used : {}".format(
+                    repre_context["subset"],
+                    repre_context["asset"],
+                    loader_name,
+                    placeholder.data["loader_args"],
+                )
+            )
+            try:
+                container = load_with_repre_context(
+                    loaders_by_name[loader_name],
+                    repre_load_context,
+                    options=loader_args
+                )
+
+            except Exception:
+                self.load_failed(placeholder, representation)
+                failed = True
+            else:
+                self.load_succeed(placeholder, container)
+
+        # Run post placeholder process after load of all representations
+        self.post_placeholder_process(placeholder, failed)
+
+        if failed:
+            self.log.debug(
+                "Placeholder cleanup skipped due to failed placeholder "
+                "population."
+            )
+            return
+        if not placeholder.data.get("keep_placeholder", True):
+            self.delete_placeholder(placeholder)
+
+    def load_failed(self, placeholder, representation):
+        if hasattr(placeholder, "load_failed"):
+            placeholder.load_failed(representation)
+
+    def load_succeed(self, placeholder, container):
+        if hasattr(placeholder, "load_succeed"):
+            placeholder.load_succeed(container)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after load of its corresponding representations.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+        """
+
+        pass
+
+    def delete_placeholder(self, placeholder):
+        """Called when all item population is done."""
+        self.log.debug("Clean up of placeholder is not implemented.")
+
+
+class PlaceholderCreateMixin(object):
+    """Mixin prepared for creating placeholder plugins.
+
+    Implementation prepares options for placeholders with
+    'get_create_plugin_options'.
+
+    Placeholder population is implemented in 'populate_create_placeholder'.
+
+    PlaceholderItem can have implemented methods:
+    - 'create_failed' - called when creating of an instance failed
+    - 'create_succeed' - called when creating of an instance succeeded
+    """
+
+    def get_create_plugin_options(self, options=None):
+        """Unified attribute definitions for create placeholder.
+
+        Common function for placeholder plugins used for creating of
+        publishable instances. Use it with 'get_placeholder_options'.
+
+        Args:
+            options (Dict[str, Any]): Already available options which are used
+                as defaults for attributes.
+
+        Returns:
+            List[AbstractAttrDef]: Attribute definitions common for create
+                plugins.
+        """
+
+        creators_by_name = self.builder.get_creators_by_name()
+
+        creator_items = [
+            (creator_name, creator.label or creator_name)
+            for creator_name, creator in creators_by_name.items()
+        ]
+
+        creator_items.sort(key=lambda i: i[1])
+        options = options or {}
+        return [
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.UILabelDef("Main attributes"),
+            attribute_definitions.UISeparatorDef(),
+
+            attribute_definitions.EnumDef(
+                "creator",
+                label="Creator",
+                default=options.get("creator"),
+                items=creator_items,
+                tooltip=(
+                    "Creator"
+                    "\nDefines what OpenPype creator will be used to"
+                    " create publishable instance."
+                    "\nUsable creators depend on current host's creator list."
+                    "\nField is case sensitive."
+                )
+            ),
+            attribute_definitions.TextDef(
+                "create_variant",
+                label="Variant",
+                default=options.get("create_variant"),
+                placeholder='Main',
+                tooltip=(
+                    "Creator"
+                    "\nDefines variant name which will be used for"
+                    " compiling of subset name."
+                )
+            ),
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.NumberDef(
+                "order",
+                label="Order",
+                default=options.get("order") or 0,
+                decimals=0,
+                minimum=0,
+                maximum=999,
+                tooltip=(
+                    "Order"
+                    "\nOrder defines creating instance priority (0 to 999)"
+                    "\nPriority rule is: \"lowest is first to be created\"."
+                )
+            )
+        ]
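For reference, this is roughly what the data of a create placeholder looks like once the options above are filled in by a user; the identifier values are assumptions for the example:

```python
placeholder_data = {
    "plugin_identifier": "MyCreatePlaceholderPlugin",  # hypothetical plugin
    "creator": "CreateRender",                         # creator identifier
    "create_variant": "Main",
    "order": 100,
}
```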
+    def populate_create_placeholder(self, placeholder, pre_create_data=None):
+        """Create placeholder is going to create matching publishable instance.
+
+        Args:
+            placeholder (PlaceholderItem): Placeholder item with information
+                about requested publishable instance.
+            pre_create_data (dict): Dictionary of configuration from Creator
+                configuration in UI.
+        """
+
+        legacy_create = self.builder.use_legacy_creators
+        creator_name = placeholder.data["creator"]
+        create_variant = placeholder.data["create_variant"]
+
+        creator_plugin = self.builder.get_creators_by_name()[creator_name]
+
+        # Create subset name
+        context = self._builder.get_current_context()
+        project_name = context["project_name"]
+        asset_name = context["asset_name"]
+        task_name = context["task_name"]
+
+        if legacy_create:
+            asset_doc = get_asset_by_name(
+                project_name, asset_name, fields=["_id"]
+            )
+            assert asset_doc, "No current asset found in Session"
+            subset_name = creator_plugin.get_subset_name(
+                create_variant,
+                task_name,
+                asset_doc["_id"],
+                project_name
+            )
+
+        else:
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            assert asset_doc, "No current asset found in Session"
+            subset_name = creator_plugin.get_subset_name(
+                create_variant,
+                task_name,
+                asset_doc,
+                project_name,
+                self.builder.host_name
+            )
+
+        creator_data = {
+            "creator_name": creator_name,
+            "create_variant": create_variant,
+            "subset_name": subset_name,
+            "creator_plugin": creator_plugin
+        }
+
+        self._before_instance_create(placeholder)
+
+        # Compile subset name from variant and create the instance
+        try:
+            if legacy_create:
+                creator_instance = creator_plugin(
+                    subset_name,
+                    asset_name
+                ).process()
+            else:
+                creator_instance = self.builder.create_context.create(
+                    creator_plugin.identifier,
+                    create_variant,
+                    asset_doc,
+                    task_name=task_name,
+                    pre_create_data=pre_create_data
+                )
+
+        except:  # noqa: E722
+            failed = True
+            self.create_failed(placeholder, creator_data)
+
+        else:
+            failed = False
+            self.create_succeed(placeholder, creator_instance)
+
+        self.post_placeholder_process(placeholder, failed)
+
+        if failed:
+            self.log.debug(
+                "Placeholder cleanup skipped due to failed placeholder "
+                "population."
+            )
+            return
+
+        if not placeholder.data.get("keep_placeholder", True):
+            self.delete_placeholder(placeholder)
+
+    def create_failed(self, placeholder, creator_data):
+        if hasattr(placeholder, "create_failed"):
+            placeholder.create_failed(creator_data)
+
+    def create_succeed(self, placeholder, creator_instance):
+        if hasattr(placeholder, "create_succeed"):
+            placeholder.create_succeed(creator_instance)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after creation of its publish instance.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to create
+                an instance.
+            failed (bool): Creation of instance failed.
+        """
+        pass
+
+    def delete_placeholder(self, placeholder):
+        """Called when all item population is done."""
+        self.log.debug("Clean up of placeholder is not implemented.")
+
+    def _before_instance_create(self, placeholder):
+        """Can be overridden. It's called before instance is created."""
+
+        pass
+
+
+class LoadPlaceholderItem(PlaceholderItem):
+    """PlaceholderItem for plugin which is loading representations.
+
+    Connected to 'PlaceholderLoadMixin'.
+ """ + + def __init__(self, *args, **kwargs): + super(LoadPlaceholderItem, self).__init__(*args, **kwargs) + self._failed_representations = [] + + def get_errors(self): + if not self._failed_representations: + return [] + message = ( + "Failed to load {} representations using Loader {}" + ).format( + len(self._failed_representations), + self.data["loader"] + ) + return [message] + + def load_failed(self, representation): + self._failed_representations.append(representation) + + +class CreatePlaceholderItem(PlaceholderItem): + """PlaceholderItem for plugin which is creating publish instance. + + Connected to 'PlaceholderCreateMixin'. + """ + + def __init__(self, *args, **kwargs): + super(CreatePlaceholderItem, self).__init__(*args, **kwargs) + self._failed_created_publish_instances = [] + + def get_errors(self): + if not self._failed_created_publish_instances: + return [] + message = ( + "Failed to create {} instance using Creator {}" + ).format( + len(self._failed_created_publish_instances), + self.data["creator"] + ) + return [message] + + def create_failed(self, creator_data): + self._failed_created_publish_instances.append(creator_data) diff --git a/openpype/plugins/actions/open_file_explorer.py b/client/ayon_core/plugins/actions/open_file_explorer.py similarity index 96% rename from openpype/plugins/actions/open_file_explorer.py rename to client/ayon_core/plugins/actions/open_file_explorer.py index 1568c41fbd..b29ed30258 100644 --- a/openpype/plugins/actions/open_file_explorer.py +++ b/client/ayon_core/plugins/actions/open_file_explorer.py @@ -3,15 +3,15 @@ import subprocess from string import Formatter -from openpype.client import ( +from ayon_core.client import ( get_project, get_asset_by_name, ) -from openpype.pipeline import ( +from ayon_core.pipeline import ( Anatomy, LauncherAction, ) -from openpype.pipeline.template_data import get_template_data +from ayon_core.pipeline.template_data import get_template_data class OpenTaskPath(LauncherAction): diff --git a/openpype/plugins/inventory/remove_and_load.py b/client/ayon_core/plugins/inventory/remove_and_load.py similarity index 84% rename from openpype/plugins/inventory/remove_and_load.py rename to client/ayon_core/plugins/inventory/remove_and_load.py index ae66b95f6e..5529090b42 100644 --- a/openpype/plugins/inventory/remove_and_load.py +++ b/client/ayon_core/plugins/inventory/remove_and_load.py @@ -1,12 +1,12 @@ -from openpype.pipeline import InventoryAction -from openpype.pipeline import get_current_project_name -from openpype.pipeline.load.plugins import discover_loader_plugins -from openpype.pipeline.load.utils import ( +from ayon_core.pipeline import InventoryAction +from ayon_core.pipeline import get_current_project_name +from ayon_core.pipeline.load.plugins import discover_loader_plugins +from ayon_core.pipeline.load.utils import ( get_loader_identifier, remove_container, load_container, ) -from openpype.client import get_representation_by_id +from ayon_core.client import get_representation_by_id class RemoveAndLoad(InventoryAction): diff --git a/openpype/plugins/load/copy_file.py b/client/ayon_core/plugins/load/copy_file.py similarity index 90% rename from openpype/plugins/load/copy_file.py rename to client/ayon_core/plugins/load/copy_file.py index 7fd56c8a6a..0da22826f0 100644 --- a/openpype/plugins/load/copy_file.py +++ b/client/ayon_core/plugins/load/copy_file.py @@ -1,5 +1,5 @@ -from openpype.style import get_default_entity_icon_color -from openpype.pipeline import load +from ayon_core.style import 
get_default_entity_icon_color +from ayon_core.pipeline import load class CopyFile(load.LoaderPlugin): diff --git a/openpype/plugins/load/copy_file_path.py b/client/ayon_core/plugins/load/copy_file_path.py similarity index 95% rename from openpype/plugins/load/copy_file_path.py rename to client/ayon_core/plugins/load/copy_file_path.py index b055494e85..c3478c32f3 100644 --- a/openpype/plugins/load/copy_file_path.py +++ b/client/ayon_core/plugins/load/copy_file_path.py @@ -1,6 +1,6 @@ import os -from openpype.pipeline import load +from ayon_core.pipeline import load class CopyFilePath(load.LoaderPlugin): diff --git a/client/ayon_core/plugins/load/delete_old_versions.py b/client/ayon_core/plugins/load/delete_old_versions.py new file mode 100644 index 0000000000..6b3263e2b6 --- /dev/null +++ b/client/ayon_core/plugins/load/delete_old_versions.py @@ -0,0 +1,497 @@ +# TODO This plugin is not converted for AYON + +# import collections +# import os +# import uuid +# +# import clique +# from pymongo import UpdateOne +# import qargparse +# from qtpy import QtWidgets, QtCore +# +# from ayon_core import style +# from ayon_core.client import get_versions, get_representations +# from ayon_core.addon import AddonsManager +# from ayon_core.lib import format_file_size +# from ayon_core.pipeline import load, Anatomy +# from ayon_core.pipeline.load import ( +# get_representation_path_with_anatomy, +# InvalidRepresentationContext, +# ) +# +# +# class DeleteOldVersions(load.SubsetLoaderPlugin): +# """Deletes specific number of old version""" +# +# is_multiple_contexts_compatible = True +# sequence_splitter = "__sequence_splitter__" +# +# representations = ["*"] +# families = ["*"] +# tool_names = ["library_loader"] +# +# label = "Delete Old Versions" +# order = 35 +# icon = "trash" +# color = "#d8d8d8" +# +# options = [ +# qargparse.Integer( +# "versions_to_keep", default=2, min=0, help="Versions to keep:" +# ), +# qargparse.Boolean( +# "remove_publish_folder", help="Remove publish folder:" +# ) +# ] +# +# def delete_whole_dir_paths(self, dir_paths, delete=True): +# size = 0 +# +# for dir_path in dir_paths: +# # Delete all files and fodlers in dir path +# for root, dirs, files in os.walk(dir_path, topdown=False): +# for name in files: +# file_path = os.path.join(root, name) +# size += os.path.getsize(file_path) +# if delete: +# os.remove(file_path) +# self.log.debug("Removed file: {}".format(file_path)) +# +# for name in dirs: +# if delete: +# os.rmdir(os.path.join(root, name)) +# +# if not delete: +# continue +# +# # Delete even the folder and it's parents folders if they are empty +# while True: +# if not os.path.exists(dir_path): +# dir_path = os.path.dirname(dir_path) +# continue +# +# if len(os.listdir(dir_path)) != 0: +# break +# +# os.rmdir(os.path.join(dir_path)) +# +# return size +# +# def path_from_representation(self, representation, anatomy): +# try: +# context = representation["context"] +# except KeyError: +# return (None, None) +# +# try: +# path = get_representation_path_with_anatomy( +# representation, anatomy +# ) +# except InvalidRepresentationContext: +# return (None, None) +# +# sequence_path = None +# if "frame" in context: +# context["frame"] = self.sequence_splitter +# sequence_path = get_representation_path_with_anatomy( +# representation, anatomy +# ) +# +# if sequence_path: +# sequence_path = sequence_path.normalized() +# +# return (path.normalized(), sequence_path) +# +# def delete_only_repre_files(self, dir_paths, file_paths, delete=True): +# size = 0 +# +# for dir_id, dir_path in 
dir_paths.items(): +# dir_files = os.listdir(dir_path) +# collections, remainders = clique.assemble(dir_files) +# for file_path, seq_path in file_paths[dir_id]: +# file_path_base = os.path.split(file_path)[1] +# # Just remove file if `frame` key was not in context or +# # filled path is in remainders (single file sequence) +# if not seq_path or file_path_base in remainders: +# if not os.path.exists(file_path): +# self.log.debug( +# "File was not found: {}".format(file_path) +# ) +# continue +# +# size += os.path.getsize(file_path) +# +# if delete: +# os.remove(file_path) +# self.log.debug("Removed file: {}".format(file_path)) +# +# if file_path_base in remainders: +# remainders.remove(file_path_base) +# continue +# +# seq_path_base = os.path.split(seq_path)[1] +# head, tail = seq_path_base.split(self.sequence_splitter) +# +# final_col = None +# for collection in collections: +# if head != collection.head or tail != collection.tail: +# continue +# final_col = collection +# break +# +# if final_col is not None: +# # Fill full path to head +# final_col.head = os.path.join(dir_path, final_col.head) +# for _file_path in final_col: +# if os.path.exists(_file_path): +# +# size += os.path.getsize(_file_path) +# +# if delete: +# os.remove(_file_path) +# self.log.debug( +# "Removed file: {}".format(_file_path) +# ) +# +# _seq_path = final_col.format("{head}{padding}{tail}") +# self.log.debug("Removed files: {}".format(_seq_path)) +# collections.remove(final_col) +# +# elif os.path.exists(file_path): +# size += os.path.getsize(file_path) +# +# if delete: +# os.remove(file_path) +# self.log.debug("Removed file: {}".format(file_path)) +# else: +# self.log.debug( +# "File was not found: {}".format(file_path) +# ) +# +# # Delete as much as possible parent folders +# if not delete: +# return size +# +# for dir_path in dir_paths.values(): +# while True: +# if not os.path.exists(dir_path): +# dir_path = os.path.dirname(dir_path) +# continue +# +# if len(os.listdir(dir_path)) != 0: +# break +# +# self.log.debug("Removed folder: {}".format(dir_path)) +# os.rmdir(dir_path) +# +# return size +# +# def message(self, text): +# msgBox = QtWidgets.QMessageBox() +# msgBox.setText(text) +# msgBox.setStyleSheet(style.load_stylesheet()) +# msgBox.setWindowFlags( +# msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint +# ) +# msgBox.exec_() +# +# def get_data(self, context, versions_count): +# subset = context["subset"] +# asset = context["asset"] +# project_name = context["project"]["name"] +# anatomy = Anatomy(project_name) +# +# versions = list(get_versions(project_name, subset_ids=[subset["_id"]])) +# +# versions_by_parent = collections.defaultdict(list) +# for ent in versions: +# versions_by_parent[ent["parent"]].append(ent) +# +# def sort_func(ent): +# return int(ent["name"]) +# +# all_last_versions = [] +# for _parent_id, _versions in versions_by_parent.items(): +# for idx, version in enumerate( +# sorted(_versions, key=sort_func, reverse=True) +# ): +# if idx >= versions_count: +# break +# all_last_versions.append(version) +# +# self.log.debug("Collected versions ({})".format(len(versions))) +# +# # Filter latest versions +# for version in all_last_versions: +# versions.remove(version) +# +# # Update versions_by_parent without filtered versions +# versions_by_parent = collections.defaultdict(list) +# for ent in versions: +# versions_by_parent[ent["parent"]].append(ent) +# +# # Filter already deleted versions +# versions_to_pop = [] +# for version in versions: +# version_tags = version["data"].get("tags") +# if 
version_tags and "deleted" in version_tags: +# versions_to_pop.append(version) +# +# for version in versions_to_pop: +# msg = "Asset: \"{}\" | Subset: \"{}\" | Version: \"{}\"".format( +# asset["name"], subset["name"], version["name"] +# ) +# self.log.debug(( +# "Skipping version. Already tagged as `deleted`. < {} >" +# ).format(msg)) +# versions.remove(version) +# +# version_ids = [ent["_id"] for ent in versions] +# +# self.log.debug( +# "Filtered versions to delete ({})".format(len(version_ids)) +# ) +# +# if not version_ids: +# msg = "Skipping processing. Nothing to delete on {}/{}".format( +# asset["name"], subset["name"] +# ) +# self.log.info(msg) +# print(msg) +# return +# +# repres = list(get_representations( +# project_name, version_ids=version_ids +# )) +# +# self.log.debug( +# "Collected representations to remove ({})".format(len(repres)) +# ) +# +# dir_paths = {} +# file_paths_by_dir = collections.defaultdict(list) +# for repre in repres: +# file_path, seq_path = self.path_from_representation(repre, anatomy) +# if file_path is None: +# self.log.debug(( +# "Could not format path for represenation \"{}\"" +# ).format(str(repre))) +# continue +# +# dir_path = os.path.dirname(file_path) +# dir_id = None +# for _dir_id, _dir_path in dir_paths.items(): +# if _dir_path == dir_path: +# dir_id = _dir_id +# break +# +# if dir_id is None: +# dir_id = uuid.uuid4() +# dir_paths[dir_id] = dir_path +# +# file_paths_by_dir[dir_id].append([file_path, seq_path]) +# +# dir_ids_to_pop = [] +# for dir_id, dir_path in dir_paths.items(): +# if os.path.exists(dir_path): +# continue +# +# dir_ids_to_pop.append(dir_id) +# +# # Pop dirs from both dictionaries +# for dir_id in dir_ids_to_pop: +# dir_paths.pop(dir_id) +# paths = file_paths_by_dir.pop(dir_id) +# # TODO report of missing directories? +# paths_msg = ", ".join([ +# "'{}'".format(path[0].replace("\\", "/")) for path in paths +# ]) +# self.log.debug(( +# "Folder does not exist. Deleting it's files skipped: {}" +# ).format(paths_msg)) +# +# data = { +# "dir_paths": dir_paths, +# "file_paths_by_dir": file_paths_by_dir, +# "versions": versions, +# "asset": asset, +# "subset": subset, +# "archive_subset": versions_count == 0 +# } +# +# return data +# +# def main(self, project_name, data, remove_publish_folder): +# # Size of files. +# size = 0 +# if not data: +# return size +# +# if remove_publish_folder: +# size = self.delete_whole_dir_paths(data["dir_paths"].values()) +# else: +# size = self.delete_only_repre_files( +# data["dir_paths"], data["file_paths_by_dir"] +# ) +# +# mongo_changes_bulk = [] +# for version in data["versions"]: +# orig_version_tags = version["data"].get("tags") or [] +# version_tags = [tag for tag in orig_version_tags] +# if "deleted" not in version_tags: +# version_tags.append("deleted") +# +# if version_tags == orig_version_tags: +# continue +# +# update_query = {"_id": version["_id"]} +# update_data = {"$set": {"data.tags": version_tags}} +# mongo_changes_bulk.append(UpdateOne(update_query, update_data)) +# +# if data["archive_subset"]: +# mongo_changes_bulk.append(UpdateOne( +# { +# "_id": data["subset"]["_id"], +# "type": "subset" +# }, +# {"$set": {"type": "archived_subset"}} +# )) +# +# if mongo_changes_bulk: +# dbcon = AvalonMongoDB() +# dbcon.Session["AVALON_PROJECT"] = project_name +# dbcon.install() +# dbcon.bulk_write(mongo_changes_bulk) +# dbcon.uninstall() +# +# self._ftrack_delete_versions(data) +# +# return size +# +# def _ftrack_delete_versions(self, data): +# """Delete version on ftrack. 
+# +# Handling of ftrack logic in this plugin is not ideal. But in OP3 it is +# almost impossible to solve the issue other way. +# +# Note: +# Asset versions on ftrack are not deleted but marked as +# "not published" which cause that they're invisible. +# +# Args: +# data (dict): Data sent to subset loader with full context. +# """ +# +# # First check for ftrack id on asset document +# # - skip if ther is none +# asset_ftrack_id = data["asset"]["data"].get("ftrackId") +# if not asset_ftrack_id: +# self.log.info(( +# "Asset does not have filled ftrack id. Skipped delete" +# " of ftrack version." +# )) +# return +# +# # Check if ftrack module is enabled +# addons_manager = AddonsManager() +# ftrack_addon = addons_manager.get("ftrack") +# if not ftrack_addon or not ftrack_addon.enabled: +# return +# +# import ftrack_api +# +# session = ftrack_api.Session() +# subset_name = data["subset"]["name"] +# versions = { +# '"{}"'.format(version_doc["name"]) +# for version_doc in data["versions"] +# } +# asset_versions = session.query( +# ( +# "select id, is_published from AssetVersion where" +# " asset.parent.id is \"{}\"" +# " and asset.name is \"{}\"" +# " and version in ({})" +# ).format( +# asset_ftrack_id, +# subset_name, +# ",".join(versions) +# ) +# ).all() +# +# # Set attribute `is_published` to `False` on ftrack AssetVersions +# for asset_version in asset_versions: +# asset_version["is_published"] = False +# +# try: +# session.commit() +# +# except Exception: +# msg = ( +# "Could not set `is_published` attribute to `False`" +# " for selected AssetVersions." +# ) +# self.log.error(msg) +# self.message(msg) +# +# def load(self, contexts, name=None, namespace=None, options=None): +# try: +# size = 0 +# for count, context in enumerate(contexts): +# versions_to_keep = 2 +# remove_publish_folder = False +# if options: +# versions_to_keep = options.get( +# "versions_to_keep", versions_to_keep +# ) +# remove_publish_folder = options.get( +# "remove_publish_folder", remove_publish_folder +# ) +# +# data = self.get_data(context, versions_to_keep) +# if not data: +# continue +# +# project_name = context["project"]["name"] +# size += self.main(project_name, data, remove_publish_folder) +# print("Progressing {}/{}".format(count + 1, len(contexts))) +# +# msg = "Total size of files: {}".format(format_file_size(size)) +# self.log.info(msg) +# self.message(msg) +# +# except Exception: +# self.log.error("Failed to delete versions.", exc_info=True) +# +# +# class CalculateOldVersions(DeleteOldVersions): +# """Calculate file size of old versions""" +# label = "Calculate Old Versions" +# order = 30 +# tool_names = ["library_loader"] +# +# options = [ +# qargparse.Integer( +# "versions_to_keep", default=2, min=0, help="Versions to keep:" +# ), +# qargparse.Boolean( +# "remove_publish_folder", help="Remove publish folder:" +# ) +# ] +# +# def main(self, project_name, data, remove_publish_folder): +# size = 0 +# +# if not data: +# return size +# +# if remove_publish_folder: +# size = self.delete_whole_dir_paths( +# data["dir_paths"].values(), delete=False +# ) +# else: +# size = self.delete_only_repre_files( +# data["dir_paths"], data["file_paths_by_dir"], delete=False +# ) +# +# return size diff --git a/client/ayon_core/plugins/load/delivery.py b/client/ayon_core/plugins/load/delivery.py new file mode 100644 index 0000000000..16f315937b --- /dev/null +++ b/client/ayon_core/plugins/load/delivery.py @@ -0,0 +1,372 @@ +import copy +import platform +from collections import defaultdict + +from qtpy import QtWidgets, 
QtCore, QtGui + +from ayon_core.client import get_representations +from ayon_core.pipeline import load, Anatomy +from ayon_core import resources, style + +from ayon_core.lib import ( + format_file_size, + collect_frames, + get_datetime_data, +) +from ayon_core.pipeline.load import get_representation_path_with_anatomy +from ayon_core.pipeline.delivery import ( + get_format_dict, + check_destination_path, + deliver_single_file, + deliver_sequence, +) + + +class Delivery(load.SubsetLoaderPlugin): + """Export selected versions to folder structure from Template""" + + is_multiple_contexts_compatible = True + sequence_splitter = "__sequence_splitter__" + + representations = ["*"] + families = ["*"] + tool_names = ["library_loader"] + + label = "Deliver Versions" + order = 35 + icon = "upload" + color = "#d8d8d8" + + def message(self, text): + msgBox = QtWidgets.QMessageBox() + msgBox.setText(text) + msgBox.setStyleSheet(style.load_stylesheet()) + msgBox.setWindowFlags( + msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint + ) + msgBox.exec_() + + def load(self, contexts, name=None, namespace=None, options=None): + try: + dialog = DeliveryOptionsDialog(contexts, self.log) + dialog.exec_() + except Exception: + self.log.error("Failed to deliver versions.", exc_info=True) + + +class DeliveryOptionsDialog(QtWidgets.QDialog): + """Dialog to select template where to deliver selected representations.""" + + def __init__(self, contexts, log=None, parent=None): + super(DeliveryOptionsDialog, self).__init__(parent=parent) + + self.setWindowTitle("AYON - Deliver versions") + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.WindowStaysOnTopHint + | QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowMinimizeButtonHint + ) + + self.setStyleSheet(style.load_stylesheet()) + + project_name = contexts[0]["project"]["name"] + self.anatomy = Anatomy(project_name) + self._representations = None + self.log = log + self.currently_uploaded = 0 + + self._set_representations(project_name, contexts) + + dropdown = QtWidgets.QComboBox() + self.templates = self._get_templates(self.anatomy) + for name, _ in self.templates.items(): + dropdown.addItem(name) + if self.templates and platform.system() == "Darwin": + # fix macos QCombobox Style + dropdown.setItemDelegate(QtWidgets.QStyledItemDelegate()) + # update combo box length to longest entry + longest_key = max(self.templates.keys(), key=len) + dropdown.setMinimumContentsLength(len(longest_key)) + + template_label = QtWidgets.QLabel() + template_label.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor)) + template_label.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse) + + renumber_frame = QtWidgets.QCheckBox() + + first_frame_start = QtWidgets.QSpinBox() + max_int = (1 << 32) // 2 + first_frame_start.setRange(0, max_int - 1) + + root_line_edit = QtWidgets.QLineEdit() + + repre_checkboxes_layout = QtWidgets.QFormLayout() + repre_checkboxes_layout.setContentsMargins(10, 5, 5, 10) + + self._representation_checkboxes = {} + for repre in self._get_representation_names(): + checkbox = QtWidgets.QCheckBox() + checkbox.setChecked(False) + self._representation_checkboxes[repre] = checkbox + + checkbox.stateChanged.connect(self._update_selected_label) + repre_checkboxes_layout.addRow(repre, checkbox) + + selected_label = QtWidgets.QLabel() + + input_widget = QtWidgets.QWidget(self) + input_layout = QtWidgets.QFormLayout(input_widget) + input_layout.setContentsMargins(10, 15, 5, 5) + + 
input_layout.addRow("Selected representations", selected_label) + input_layout.addRow("Delivery template", dropdown) + input_layout.addRow("Template value", template_label) + input_layout.addRow("Renumber Frame", renumber_frame) + input_layout.addRow("Renumber start frame", first_frame_start) + input_layout.addRow("Root", root_line_edit) + input_layout.addRow("Representations", repre_checkboxes_layout) + + btn_delivery = QtWidgets.QPushButton("Deliver") + btn_delivery.setEnabled(False) + + progress_bar = QtWidgets.QProgressBar(self) + progress_bar.setMinimum = 0 + progress_bar.setMaximum = 100 + progress_bar.setVisible(False) + + text_area = QtWidgets.QTextEdit() + text_area.setReadOnly(True) + text_area.setVisible(False) + text_area.setMinimumHeight(100) + + layout = QtWidgets.QVBoxLayout(self) + + layout.addWidget(input_widget) + layout.addStretch(1) + layout.addWidget(btn_delivery) + layout.addWidget(progress_bar) + layout.addWidget(text_area) + + self.selected_label = selected_label + self.template_label = template_label + self.dropdown = dropdown + self.first_frame_start = first_frame_start + self.renumber_frame = renumber_frame + self.root_line_edit = root_line_edit + self.progress_bar = progress_bar + self.text_area = text_area + self.btn_delivery = btn_delivery + + self.files_selected, self.size_selected = \ + self._get_counts(self._get_selected_repres()) + + self._update_selected_label() + self._update_template_value() + + btn_delivery.clicked.connect(self.deliver) + dropdown.currentIndexChanged.connect(self._update_template_value) + + if not self.dropdown.count(): + self.text_area.setVisible(True) + error_message = ( + "No Delivery Templates found!\n" + "Add Template in [project_anatomy/templates/delivery]" + ) + self.text_area.setText(error_message) + self.log.error(error_message.replace("\n", " ")) + + def deliver(self): + """Main method to loop through all selected representations""" + self.progress_bar.setVisible(True) + self.btn_delivery.setEnabled(False) + QtWidgets.QApplication.processEvents() + + report_items = defaultdict(list) + + selected_repres = self._get_selected_repres() + + datetime_data = get_datetime_data() + template_name = self.dropdown.currentText() + format_dict = get_format_dict(self.anatomy, self.root_line_edit.text()) + renumber_frame = self.renumber_frame.isChecked() + frame_offset = self.first_frame_start.value() + for repre in self._representations: + if repre["name"] not in selected_repres: + continue + + repre_path = get_representation_path_with_anatomy( + repre, self.anatomy + ) + + anatomy_data = copy.deepcopy(repre["context"]) + new_report_items = check_destination_path(str(repre["_id"]), + self.anatomy, + anatomy_data, + datetime_data, + template_name) + + report_items.update(new_report_items) + if new_report_items: + continue + + args = [ + repre_path, + repre, + self.anatomy, + template_name, + anatomy_data, + format_dict, + report_items, + self.log + ] + + if repre.get("files"): + src_paths = [] + for repre_file in repre["files"]: + src_path = self.anatomy.fill_root(repre_file["path"]) + src_paths.append(src_path) + sources_and_frames = collect_frames(src_paths) + + frames = set(sources_and_frames.values()) + frames.discard(None) + first_frame = None + if frames: + first_frame = min(frames) + + for src_path, frame in sources_and_frames.items(): + args[0] = src_path + # Renumber frames + if renumber_frame and frame is not None: + # Calculate offset between + # first frame and current frame + # - '0' for first frame + offset = frame_offset - 
+                        # Add offset to new frame start
+                        dst_frame = int(frame) + offset
+                        if dst_frame < 0:
+                            msg = "Renumber frame has a smaller number than original frame"  # noqa
+                            report_items[msg].append(src_path)
+                            self.log.warning("{} <{}>".format(
+                                msg, dst_frame))
+                            continue
+                        frame = dst_frame
+
+                    if frame is not None:
+                        anatomy_data["frame"] = frame
+                    new_report_items, uploaded = deliver_single_file(*args)
+                    report_items.update(new_report_items)
+                    self._update_progress(uploaded)
+            else:  # fallback for Pype2 and representations without files
+                frame = repre['context'].get('frame')
+                if frame:
+                    repre["context"]["frame"] = len(str(frame)) * "#"
+
+                if not frame:
+                    new_report_items, uploaded = deliver_single_file(*args)
+                else:
+                    new_report_items, uploaded = deliver_sequence(*args)
+                report_items.update(new_report_items)
+                self._update_progress(uploaded)
+
+        self.text_area.setText(self._format_report(report_items))
+        self.text_area.setVisible(True)
+
+    def _get_representation_names(self):
+        """Get set of representation names for checkbox filtering."""
+        return set([repre["name"] for repre in self._representations])
+
+    def _get_templates(self, anatomy):
+        """Get delivery templates from Anatomy usable for the dropdown."""
+        templates = {}
+        for template_name, value in anatomy.templates["delivery"].items():
+            if not isinstance(value, str) or not value.startswith('{root'):
+                continue
+
+            templates[template_name] = value
+
+        return templates
+
+    def _set_representations(self, project_name, contexts):
+        version_ids = [context["version"]["_id"] for context in contexts]
+
+        repres = list(get_representations(
+            project_name, version_ids=version_ids
+        ))
+
+        self._representations = repres
+
+    def _get_counts(self, selected_repres=None):
+        """Returns tuple of number of selected files and their size."""
+        files_selected = 0
+        size_selected = 0
+        for repre in self._representations:
+            if repre["name"] in selected_repres:
+                files = repre.get("files", [])
+                if not files:  # for repre without files, cannot divide by 0
+                    files_selected += 1
+                    size_selected += 0
+                else:
+                    for repre_file in files:
+                        files_selected += 1
+                        size_selected += repre_file["size"]
+
+        return files_selected, size_selected
+
+    def _prepare_label(self):
+        """Provides text with number of selected files and their size."""
+        label = "{} files, size {}".format(
+            self.files_selected,
+            format_file_size(self.size_selected))
+        return label
+
+    def _get_selected_repres(self):
+        """Returns list of representation names filtered from checkboxes."""
+        selected_repres = []
+        for repre_name, checkbox in self._representation_checkboxes.items():
+            if checkbox.isChecked():
+                selected_repres.append(repre_name)
+
+        return selected_repres
+
+    def _update_selected_label(self):
+        """Updates label with number and size of selected files."""
+        selected_repres = self._get_selected_repres()
+        self.files_selected, self.size_selected = \
+            self._get_counts(selected_repres)
+        self.selected_label.setText(self._prepare_label())
+        # update delivery button state if any templates found
+        if self.dropdown.count():
+            self.btn_delivery.setEnabled(bool(selected_repres))
+
+    def _update_template_value(self, _index=None):
+        """Sets template value to label after selection in dropdown."""
+        name = self.dropdown.currentText()
+        template_value = self.templates.get(name)
+        if template_value:
+            self.template_label.setText(template_value)
+            self.btn_delivery.setEnabled(bool(self._get_selected_repres()))
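For context, `_get_templates` above only offers templates whose value starts with `{root`. A delivery template entry in the project anatomy could look like this sketch; the template string and its keys are assumptions for illustration:

```python
# Illustrative 'delivery' templates section of a project anatomy.
delivery_templates = {
    "simpleDelivery": (
        "{root[delivery]}/{project[name]}/{asset}/"
        "v{version:0>3}/{originalBasename}.{ext}"
    ),
}
```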
copied.""" + self.currently_uploaded += uploaded + + ratio = self.currently_uploaded / self.files_selected + self.progress_bar.setValue(ratio * self.progress_bar.maximum()) + + def _format_report(self, report_items): + """Format final result and error details as html.""" + msg = "Delivery finished" + if not report_items: + msg += " successfully" + else: + msg += " with errors" + txt = "

{}

".format(msg) + for header, data in report_items.items(): + txt += "

{}

".format(header) + for item in data: + txt += "{}
".format(item) + + return txt diff --git a/openpype/plugins/load/open_djv.py b/client/ayon_core/plugins/load/open_djv.py similarity index 95% rename from openpype/plugins/load/open_djv.py rename to client/ayon_core/plugins/load/open_djv.py index 5c679f6a51..70352c2435 100644 --- a/openpype/plugins/load/open_djv.py +++ b/client/ayon_core/plugins/load/open_djv.py @@ -1,6 +1,6 @@ import os -from openpype.lib import ApplicationManager -from openpype.pipeline import load +from ayon_core.lib import ApplicationManager +from ayon_core.pipeline import load def existing_djv_path(): diff --git a/openpype/plugins/load/open_file.py b/client/ayon_core/plugins/load/open_file.py similarity index 95% rename from openpype/plugins/load/open_file.py rename to client/ayon_core/plugins/load/open_file.py index 5c4f4901d1..5ae5959102 100644 --- a/openpype/plugins/load/open_file.py +++ b/client/ayon_core/plugins/load/open_file.py @@ -2,7 +2,7 @@ import os import subprocess -from openpype.pipeline import load +from ayon_core.pipeline import load def open(filepath): diff --git a/client/ayon_core/plugins/load/push_to_library.py b/client/ayon_core/plugins/load/push_to_library.py new file mode 100644 index 0000000000..39f95d134c --- /dev/null +++ b/client/ayon_core/plugins/load/push_to_library.py @@ -0,0 +1,54 @@ +import os + +from ayon_core import AYON_CORE_ROOT +from ayon_core.lib import get_ayon_launcher_args, run_detached_process +from ayon_core.pipeline import load +from ayon_core.pipeline.load import LoadError + + +class PushToLibraryProject(load.SubsetLoaderPlugin): + """Export selected versions to folder structure from Template""" + + is_multiple_contexts_compatible = True + + representations = ["*"] + families = ["*"] + + label = "Push to Library project" + order = 35 + icon = "send" + color = "#d8d8d8" + + def load(self, contexts, name=None, namespace=None, options=None): + filtered_contexts = [ + context + for context in contexts + if context.get("project") and context.get("version") + ] + if not filtered_contexts: + raise LoadError("Nothing to push for your selection") + + if len(filtered_contexts) > 1: + raise LoadError("Please select only one item") + + context = tuple(filtered_contexts)[0] + + push_tool_script_path = os.path.join( + AYON_CORE_ROOT, + "tools", + "push_to_project", + "main.py" + ) + + project_doc = context["project"] + version_doc = context["version"] + project_name = project_doc["name"] + version_id = str(version_doc["_id"]) + + args = get_ayon_launcher_args( + "run", + push_tool_script_path, + "--project", project_name, + "--version", version_id + ) + run_detached_process(args) diff --git a/openpype/plugins/publish/cleanup.py b/client/ayon_core/plugins/publish/cleanup.py similarity index 98% rename from openpype/plugins/publish/cleanup.py rename to client/ayon_core/plugins/publish/cleanup.py index 6c122ddf09..7bed3269c2 100644 --- a/openpype/plugins/publish/cleanup.py +++ b/client/ayon_core/plugins/publish/cleanup.py @@ -5,7 +5,7 @@ import pyblish.api import re -from openpype.tests.lib import is_in_tests +from ayon_core.tests.lib import is_in_tests class CleanUp(pyblish.api.InstancePlugin): @@ -32,7 +32,6 @@ class CleanUp(pyblish.api.InstancePlugin): "resolve", "tvpaint", "unreal", - "standalonepublisher", "webpublisher", "shell" ] diff --git a/openpype/plugins/publish/cleanup_explicit.py b/client/ayon_core/plugins/publish/cleanup_explicit.py similarity index 100% rename from openpype/plugins/publish/cleanup_explicit.py rename to client/ayon_core/plugins/publish/cleanup_explicit.py diff 
--git a/openpype/plugins/publish/cleanup_farm.py b/client/ayon_core/plugins/publish/cleanup_farm.py similarity index 100% rename from openpype/plugins/publish/cleanup_farm.py rename to client/ayon_core/plugins/publish/cleanup_farm.py diff --git a/client/ayon_core/plugins/publish/collect_addons.py b/client/ayon_core/plugins/publish/collect_addons.py new file mode 100644 index 0000000000..9bba9978ab --- /dev/null +++ b/client/ayon_core/plugins/publish/collect_addons.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +"""Collect AYON addons.""" +import pyblish.api + +from ayon_core.addon import AddonsManager + + +class CollectAddons(pyblish.api.ContextPlugin): + """Collect AYON addons.""" + + order = pyblish.api.CollectorOrder - 0.5 + label = "AYON Addons" + + def process(self, context): + manager = AddonsManager() + context.data["ayonAddonsManager"] = manager + context.data["ayonAddons"] = manager.addons_by_name + # Backwards compatibility - remove + context.data["openPypeModules"] = manager.addons_by_name diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/client/ayon_core/plugins/publish/collect_anatomy_context_data.py similarity index 96% rename from openpype/plugins/publish/collect_anatomy_context_data.py rename to client/ayon_core/plugins/publish/collect_anatomy_context_data.py index 508b01447b..978ae5e1e1 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/client/ayon_core/plugins/publish/collect_anatomy_context_data.py @@ -15,7 +15,7 @@ import json import pyblish.api -from openpype.pipeline.template_data import get_template_data +from ayon_core.pipeline.template_data import get_template_data class CollectAnatomyContextData(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py similarity index 99% rename from openpype/plugins/publish/collect_anatomy_instance_data.py rename to client/ayon_core/plugins/publish/collect_anatomy_instance_data.py index b1b7ecd138..336ac02b8e 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py @@ -27,13 +27,13 @@ import pyblish.api -from openpype.client import ( +from ayon_core.client import ( get_assets, get_subsets, get_last_versions, get_asset_name_identifier, ) -from openpype.pipeline.version_start import get_versioning_start +from ayon_core.pipeline.version_start import get_versioning_start class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): @@ -342,7 +342,6 @@ def _fill_task_data(self, instance, project_task_types, anatomy_data): return project_name = instance.context.data["projectName"] - # OpenPype approach vs AYON approach if "/" not in asset_name: tasks_info = self._find_tasks_info_in_hierarchy( hierarchy_context, asset_name diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/client/ayon_core/plugins/publish/collect_anatomy_object.py similarity index 87% rename from openpype/plugins/publish/collect_anatomy_object.py rename to client/ayon_core/plugins/publish/collect_anatomy_object.py index f792cf3abd..1439520ccc 100644 --- a/openpype/plugins/publish/collect_anatomy_object.py +++ b/client/ayon_core/plugins/publish/collect_anatomy_object.py @@ -4,11 +4,11 @@ context -> projectName Provides: - context -> anatomy (openpype.pipeline.anatomy.Anatomy) + context -> anatomy (ayon_core.pipeline.anatomy.Anatomy) """ import pyblish.api -from openpype.pipeline import Anatomy, KnownPublishError +from 
ayon_core.pipeline import Anatomy, KnownPublishError class CollectAnatomyObject(pyblish.api.ContextPlugin): diff --git a/client/ayon_core/plugins/publish/collect_audio.py b/client/ayon_core/plugins/publish/collect_audio.py new file mode 100644 index 0000000000..94477e5578 --- /dev/null +++ b/client/ayon_core/plugins/publish/collect_audio.py @@ -0,0 +1,179 @@ +import collections +import pyblish.api + +from ayon_core.client import ( + get_assets, + get_subsets, + get_last_versions, + get_representations, + get_asset_name_identifier, +) +from ayon_core.pipeline.load import get_representation_path_with_anatomy + + +class CollectAudio(pyblish.api.ContextPlugin): + """Collect asset's last published audio. + + The audio subset name searched for is defined in: + project settings > Collect Audio + + Note: + The plugin used to be an instance plugin, but the amount of + queries slowed down the whole collection phase considerably, so + it was converted to a context plugin which requires at most + 4 queries. + """ + + label = "Collect Asset Audio" + order = pyblish.api.CollectorOrder + 0.1 + families = ["review"] + hosts = [ + "nuke", + "maya", + "shell", + "hiero", + "premiere", + "harmony", + "traypublisher", + "fusion", + "tvpaint", + "resolve", + "webpublisher", + "aftereffects", + "flame", + "unreal" + ] + + audio_subset_name = "audioMain" + + def process(self, context): + # Fake filtering by family inside context plugin + filtered_instances = [] + for instance in pyblish.api.instances_by_plugin( + context, self.__class__ + ): + # Skip instances that already have audio filled + if instance.data.get("audio"): + self.log.debug( + "Skipping Audio collection. It is already collected" + ) + continue + filtered_instances.append(instance) + + # Skip if none of instances remained + if not filtered_instances: + return + + # Add audio to instance if exists. + instances_by_asset_name = collections.defaultdict(list) + for instance in filtered_instances: + asset_name = instance.data["asset"] + instances_by_asset_name[asset_name].append(instance) + + asset_names = set(instances_by_asset_name.keys()) + self.log.debug(( + "Searching for audio subset '{subset}' in assets {assets}" + ).format( + subset=self.audio_subset_name, + assets=", ".join([ + '"{}"'.format(asset_name) + for asset_name in asset_names + ]) + )) + + # Query all required documents + project_name = context.data["projectName"] + anatomy = context.data["anatomy"] + repre_docs_by_asset_names = self.query_representations( + project_name, asset_names) + + for asset_name, instances in instances_by_asset_name.items(): + repre_docs = repre_docs_by_asset_names[asset_name] + if not repre_docs: + continue + + repre_doc = repre_docs[0] + repre_path = get_representation_path_with_anatomy( + repre_doc, anatomy + ) + for instance in instances: + instance.data["audio"] = [{ + "offset": 0, + "filename": repre_path + }] + self.log.debug("Audio Data added to instance ...") + + def query_representations(self, project_name, asset_names): + """Query representations related to audio subsets for passed assets. + + Args: + project_name (str): Project in which we're looking for all + entities. + asset_names (Iterable[str]): Asset names where to look for audio + subsets and their representations. + + Returns: + collections.defaultdict[str, List[Dict[str, Any]]]: Representations + related to audio subsets by asset name. 
+ """ + + output = collections.defaultdict(list) + # Query asset documents + asset_docs = get_assets( + project_name, + asset_names=asset_names, + fields=["_id", "name", "data.parents"] + ) + + asset_id_by_name = { + get_asset_name_identifier(asset_doc): asset_doc["_id"] + for asset_doc in asset_docs + } + asset_ids = set(asset_id_by_name.values()) + + # Query subsets with name define by 'audio_subset_name' attr + # - one or none subsets with the name should be available on an asset + subset_docs = get_subsets( + project_name, + subset_names=[self.audio_subset_name], + asset_ids=asset_ids, + fields=["_id", "parent"] + ) + subset_id_by_asset_id = {} + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subset_id_by_asset_id[asset_id] = subset_doc["_id"] + + subset_ids = set(subset_id_by_asset_id.values()) + if not subset_ids: + return output + + # Find all latest versions for the subsets + version_docs_by_subset_id = get_last_versions( + project_name, subset_ids=subset_ids, fields=["_id", "parent"] + ) + version_id_by_subset_id = { + subset_id: version_doc["_id"] + for subset_id, version_doc in version_docs_by_subset_id.items() + } + version_ids = set(version_id_by_subset_id.values()) + if not version_ids: + return output + + # Find representations under latest versions of audio subsets + repre_docs = get_representations( + project_name, version_ids=version_ids + ) + repre_docs_by_version_id = collections.defaultdict(list) + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + repre_docs_by_version_id[version_id].append(repre_doc) + + if not repre_docs_by_version_id: + return output + + for asset_name in asset_names: + asset_id = asset_id_by_name.get(asset_name) + subset_id = subset_id_by_asset_id.get(asset_id) + version_id = version_id_by_subset_id.get(subset_id) + output[asset_name] = repre_docs_by_version_id[version_id] + return output diff --git a/openpype/plugins/publish/collect_cleanup_keys.py b/client/ayon_core/plugins/publish/collect_cleanup_keys.py similarity index 100% rename from openpype/plugins/publish/collect_cleanup_keys.py rename to client/ayon_core/plugins/publish/collect_cleanup_keys.py diff --git a/openpype/plugins/publish/collect_comment.py b/client/ayon_core/plugins/publish/collect_comment.py similarity index 95% rename from openpype/plugins/publish/collect_comment.py rename to client/ayon_core/plugins/publish/collect_comment.py index 38d61a7071..dadb7b9e8d 100644 --- a/openpype/plugins/publish/collect_comment.py +++ b/client/ayon_core/plugins/publish/collect_comment.py @@ -24,13 +24,13 @@ """ import pyblish.api -from openpype.lib.attribute_definitions import TextDef -from openpype.pipeline.publish import OpenPypePyblishPluginMixin +from ayon_core.lib.attribute_definitions import TextDef +from ayon_core.pipeline.publish import AYONPyblishPluginMixin class CollectInstanceCommentDef( pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ): label = "Comment per instance" targets = ["local"] @@ -64,7 +64,7 @@ def get_attribute_defs(cls): class CollectComment( pyblish.api.ContextPlugin, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ): """Collect comment per each instance. 
diff --git a/openpype/plugins/publish/collect_context_entities.py b/client/ayon_core/plugins/publish/collect_context_entities.py similarity index 96% rename from openpype/plugins/publish/collect_context_entities.py rename to client/ayon_core/plugins/publish/collect_context_entities.py index 312f5f0eb5..8480435e21 100644 --- a/openpype/plugins/publish/collect_context_entities.py +++ b/client/ayon_core/plugins/publish/collect_context_entities.py @@ -14,8 +14,8 @@ import pyblish.api -from openpype.client import get_project, get_asset_by_name -from openpype.pipeline import KnownPublishError +from ayon_core.client import get_project, get_asset_by_name +from ayon_core.pipeline import KnownPublishError class CollectContextEntities(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_context_label.py b/client/ayon_core/plugins/publish/collect_context_label.py similarity index 100% rename from openpype/plugins/publish/collect_context_label.py rename to client/ayon_core/plugins/publish/collect_context_label.py diff --git a/client/ayon_core/plugins/publish/collect_current_ayon_user.py b/client/ayon_core/plugins/publish/collect_current_ayon_user.py new file mode 100644 index 0000000000..f8dea2349e --- /dev/null +++ b/client/ayon_core/plugins/publish/collect_current_ayon_user.py @@ -0,0 +1,16 @@ +import pyblish.api + +from ayon_core.lib import get_ayon_username + + +class CollectCurrentAYONUser(pyblish.api.ContextPlugin): + """Inject the currently logged on user into the Context""" + + # Order must be after default pyblish-base CollectCurrentUser + order = pyblish.api.CollectorOrder + 0.001 + label = "Collect AYON User" + + def process(self, context): + user = get_ayon_username() + context.data["user"] = user + self.log.debug("Collected user \"{}\"".format(user)) diff --git a/openpype/plugins/publish/collect_current_context.py b/client/ayon_core/plugins/publish/collect_current_context.py similarity index 96% rename from openpype/plugins/publish/collect_current_context.py rename to client/ayon_core/plugins/publish/collect_current_context.py index 8b12a3f77f..90b9fcdcbd 100644 --- a/openpype/plugins/publish/collect_current_context.py +++ b/client/ayon_core/plugins/publish/collect_current_context.py @@ -6,7 +6,7 @@ """ import pyblish.api -from openpype.pipeline import get_current_context +from ayon_core.pipeline import get_current_context class CollectCurrentContext(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_current_shell_file.py b/client/ayon_core/plugins/publish/collect_current_shell_file.py similarity index 100% rename from openpype/plugins/publish/collect_current_shell_file.py rename to client/ayon_core/plugins/publish/collect_current_shell_file.py diff --git a/openpype/plugins/publish/collect_custom_staging_dir.py b/client/ayon_core/plugins/publish/collect_custom_staging_dir.py similarity index 97% rename from openpype/plugins/publish/collect_custom_staging_dir.py rename to client/ayon_core/plugins/publish/collect_custom_staging_dir.py index 669c4873e0..6840c8e416 100644 --- a/openpype/plugins/publish/collect_custom_staging_dir.py +++ b/client/ayon_core/plugins/publish/collect_custom_staging_dir.py @@ -12,7 +12,7 @@ import pyblish.api -from openpype.pipeline.publish.lib import get_custom_staging_dir_info +from ayon_core.pipeline.publish.lib import get_custom_staging_dir_info class CollectCustomStagingDir(pyblish.api.InstancePlugin): diff --git a/openpype/plugins/publish/collect_datetime_data.py 
b/client/ayon_core/plugins/publish/collect_datetime_data.py similarity index 88% rename from openpype/plugins/publish/collect_datetime_data.py rename to client/ayon_core/plugins/publish/collect_datetime_data.py index b3178ca3d2..a61fb9a174 100644 --- a/openpype/plugins/publish/collect_datetime_data.py +++ b/client/ayon_core/plugins/publish/collect_datetime_data.py @@ -5,7 +5,7 @@ import pyblish.api -from openpype.lib.dateutils import get_datetime_data +from ayon_core.lib.dateutils import get_datetime_data class CollectDateTimeData(pyblish.api.ContextPlugin): diff --git a/client/ayon_core/plugins/publish/collect_farm_target.py b/client/ayon_core/plugins/publish/collect_farm_target.py new file mode 100644 index 0000000000..3bf89450ec --- /dev/null +++ b/client/ayon_core/plugins/publish/collect_farm_target.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +import pyblish.api + + +class CollectFarmTarget(pyblish.api.InstancePlugin): + """Collects the render target for the instance + """ + + order = pyblish.api.CollectorOrder + 0.499 + label = "Collect Farm Target" + targets = ["local"] + + def process(self, instance): + if not instance.data.get("farm"): + return + + context = instance.context + + farm_name = "" + addons_manager = context.data.get("ayonAddonsManager") + + for farm_renderer in ["deadline", "royalrender"]: + addon = addons_manager.get(farm_renderer, False) + + if not addon: + self.log.error("Cannot find AYON addon '{0}'.".format( + farm_renderer)) + elif addon.enabled: + farm_name = farm_renderer + + if farm_name: + self.log.debug("Collected render target: {0}".format(farm_name)) + instance.data["toBeRenderedOn"] = farm_name + else: + raise AssertionError("No AYON renderer addon found") diff --git a/openpype/plugins/publish/collect_frames_fix.py b/client/ayon_core/plugins/publish/collect_frames_fix.py similarity index 93% rename from openpype/plugins/publish/collect_frames_fix.py rename to client/ayon_core/plugins/publish/collect_frames_fix.py index 86e727b053..4903991d40 100644 --- a/openpype/plugins/publish/collect_frames_fix.py +++ b/client/ayon_core/plugins/publish/collect_frames_fix.py @@ -1,11 +1,11 @@ import pyblish.api -from openpype.lib.attribute_definitions import ( +from ayon_core.lib.attribute_definitions import ( TextDef, BoolDef ) -from openpype.pipeline.publish import OpenPypePyblishPluginMixin -from openpype.client.entities import ( +from ayon_core.pipeline.publish import AYONPyblishPluginMixin +from ayon_core.client.entities import ( get_last_version_by_subset_name, get_representations ) @@ -13,7 +13,7 @@ class CollectFramesFixDef( pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin + AYONPyblishPluginMixin ): """Provides text field to insert frame(s) to be rerendered. 
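CollectFarmTarget above resolves the render target by probing the addons manager that collect_addons stored in context.data["ayonAddonsManager"]: the last farm addon in its list that is present and enabled wins. A minimal standalone sketch of the same lookup, assuming AddonsManager can be instantiated directly (as collect_addons does) and that the addon's enabled attribute behaves as used in the plugin above:

from ayon_core.addon import AddonsManager


def find_farm_target():
    """Return name of the farm addon that would be picked, or None."""
    manager = AddonsManager()
    farm_name = None
    for name in ("deadline", "royalrender"):
        addon = manager.get(name, False)
        # Later entries overwrite earlier ones, mirroring the plugin loop.
        if addon and addon.enabled:
            farm_name = name
    return farm_name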
diff --git a/openpype/plugins/publish/collect_from_create_context.py b/client/ayon_core/plugins/publish/collect_from_create_context.py similarity index 91% rename from openpype/plugins/publish/collect_from_create_context.py rename to client/ayon_core/plugins/publish/collect_from_create_context.py index 84f6141069..d38138b2e9 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/client/ayon_core/plugins/publish/collect_from_create_context.py @@ -4,10 +4,9 @@ import os import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.host import IPublishHost -from openpype.pipeline import legacy_io, registered_host -from openpype.pipeline.create import CreateContext +from ayon_core.host import IPublishHost +from ayon_core.pipeline import legacy_io, registered_host +from ayon_core.pipeline.create import CreateContext class CollectFromCreateContext(pyblish.api.ContextPlugin): @@ -39,8 +38,7 @@ def process(self, context): for created_instance in create_context.instances: instance_data = created_instance.data_to_store() - if AYON_SERVER_ENABLED: - instance_data["asset"] = instance_data.pop("folderPath") + instance_data["asset"] = instance_data.pop("folderPath") if instance_data["active"]: thumbnail_path = thumbnail_paths_by_instance_id.get( created_instance.id diff --git a/openpype/plugins/publish/collect_hierarchy.py b/client/ayon_core/plugins/publish/collect_hierarchy.py similarity index 100% rename from openpype/plugins/publish/collect_hierarchy.py rename to client/ayon_core/plugins/publish/collect_hierarchy.py diff --git a/openpype/plugins/publish/collect_host_name.py b/client/ayon_core/plugins/publish/collect_host_name.py similarity index 96% rename from openpype/plugins/publish/collect_host_name.py rename to client/ayon_core/plugins/publish/collect_host_name.py index d64af4d049..89e4e03c1a 100644 --- a/openpype/plugins/publish/collect_host_name.py +++ b/client/ayon_core/plugins/publish/collect_host_name.py @@ -7,7 +7,7 @@ import os import pyblish.api -from openpype.lib import ApplicationManager +from ayon_core.lib import ApplicationManager class CollectHostName(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_input_representations_to_versions.py b/client/ayon_core/plugins/publish/collect_input_representations_to_versions.py similarity index 97% rename from openpype/plugins/publish/collect_input_representations_to_versions.py rename to client/ayon_core/plugins/publish/collect_input_representations_to_versions.py index 2b8c745d3d..b5c9872e74 100644 --- a/openpype/plugins/publish/collect_input_representations_to_versions.py +++ b/client/ayon_core/plugins/publish/collect_input_representations_to_versions.py @@ -2,7 +2,7 @@ from bson.objectid import ObjectId -from openpype.client import get_representations +from ayon_core.client import get_representations class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_machine_name.py b/client/ayon_core/plugins/publish/collect_machine_name.py similarity index 100% rename from openpype/plugins/publish/collect_machine_name.py rename to client/ayon_core/plugins/publish/collect_machine_name.py diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/client/ayon_core/plugins/publish/collect_otio_frame_ranges.py similarity index 98% rename from openpype/plugins/publish/collect_otio_frame_ranges.py rename to client/ayon_core/plugins/publish/collect_otio_frame_ranges.py index 4b130b0e03..d1c8d03212 100644 --- 
a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/client/ayon_core/plugins/publish/collect_otio_frame_ranges.py @@ -23,7 +23,7 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin): def process(self, instance): # Not all hosts can import these modules. import opentimelineio as otio - from openpype.pipeline.editorial import ( + from ayon_core.pipeline.editorial import ( get_media_range_with_retimes, otio_range_to_frame_range, otio_range_with_handles diff --git a/openpype/plugins/publish/collect_otio_review.py b/client/ayon_core/plugins/publish/collect_otio_review.py similarity index 100% rename from openpype/plugins/publish/collect_otio_review.py rename to client/ayon_core/plugins/publish/collect_otio_review.py diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/client/ayon_core/plugins/publish/collect_otio_subset_resources.py similarity index 98% rename from openpype/plugins/publish/collect_otio_subset_resources.py rename to client/ayon_core/plugins/publish/collect_otio_subset_resources.py index 739f5bb726..e6817a4beb 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/client/ayon_core/plugins/publish/collect_otio_subset_resources.py @@ -10,7 +10,7 @@ import clique import pyblish.api -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ( get_publish_template_name ) @@ -26,7 +26,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): def process(self, instance): # Not all hosts can import these modules. import opentimelineio as otio - from openpype.pipeline.editorial import ( + from ayon_core.pipeline.editorial import ( get_media_range_with_retimes, range_from_frames, make_sequence_collection diff --git a/openpype/plugins/publish/collect_rendered_files.py b/client/ayon_core/plugins/publish/collect_rendered_files.py similarity index 93% rename from openpype/plugins/publish/collect_rendered_files.py rename to client/ayon_core/plugins/publish/collect_rendered_files.py index baaf454a11..5ffcd669a0 100644 --- a/openpype/plugins/publish/collect_rendered_files.py +++ b/client/ayon_core/plugins/publish/collect_rendered_files.py @@ -12,14 +12,14 @@ import pyblish.api -from openpype.pipeline import legacy_io, KnownPublishError -from openpype.pipeline.publish.lib import add_repre_files_for_cleanup +from ayon_core.pipeline import legacy_io, KnownPublishError +from ayon_core.pipeline.publish.lib import add_repre_files_for_cleanup class CollectRenderedFiles(pyblish.api.ContextPlugin): """ This collector will try to find json files in provided - `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + `AYON_PUBLISH_DATA`. Those files _MUST_ share same context. Note: We should split this collector and move the part which handle reading @@ -140,13 +140,17 @@ def _process_path(self, data, anatomy): def process(self, context): self._context = context - if not os.environ.get("OPENPYPE_PUBLISH_DATA"): - raise KnownPublishError("Missing `OPENPYPE_PUBLISH_DATA`") + publish_data_paths = ( + os.environ.get("AYON_PUBLISH_DATA") + or os.environ.get("OPENPYPE_PUBLISH_DATA") + ) + if not publish_data_paths: + raise KnownPublishError("Missing `AYON_PUBLISH_DATA`") # QUESTION # Do we support (or want support) multiple files in the variable? # - what if they have different context? 
- paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep) + paths = publish_data_paths.split(os.pathsep) # Using already collected Anatomy anatomy = context.data["anatomy"] diff --git a/openpype/plugins/publish/collect_resources_path.py b/client/ayon_core/plugins/publish/collect_resources_path.py similarity index 100% rename from openpype/plugins/publish/collect_resources_path.py rename to client/ayon_core/plugins/publish/collect_resources_path.py diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py similarity index 95% rename from openpype/plugins/publish/collect_scene_loaded_versions.py rename to client/ayon_core/plugins/publish/collect_scene_loaded_versions.py index 627d451f58..397a3ce87c 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/client/ayon_core/plugins/publish/collect_scene_loaded_versions.py @@ -1,7 +1,7 @@ import pyblish.api -from openpype.client import get_representations -from openpype.pipeline import registered_host +from ayon_core.client import get_representations +from ayon_core.pipeline import registered_host class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_scene_version.py b/client/ayon_core/plugins/publish/collect_scene_version.py similarity index 93% rename from openpype/plugins/publish/collect_scene_version.py rename to client/ayon_core/plugins/publish/collect_scene_version.py index f870ae9ad7..254d3c913d 100644 --- a/openpype/plugins/publish/collect_scene_version.py +++ b/client/ayon_core/plugins/publish/collect_scene_version.py @@ -1,9 +1,9 @@ import os import pyblish.api -from openpype.lib import get_version_from_path -from openpype.tests.lib import is_in_tests -from openpype.pipeline import KnownPublishError +from ayon_core.lib import get_version_from_path +from ayon_core.tests.lib import is_in_tests +from ayon_core.pipeline import KnownPublishError class CollectSceneVersion(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_settings.py b/client/ayon_core/plugins/publish/collect_settings.py similarity index 92% rename from openpype/plugins/publish/collect_settings.py rename to client/ayon_core/plugins/publish/collect_settings.py index a418a6400c..4e3331209d 100644 --- a/openpype/plugins/publish/collect_settings.py +++ b/client/ayon_core/plugins/publish/collect_settings.py @@ -1,5 +1,5 @@ from pyblish import api -from openpype.settings import ( +from ayon_core.settings import ( get_current_project_settings, get_system_settings, ) diff --git a/openpype/plugins/publish/collect_shell_workspace.py b/client/ayon_core/plugins/publish/collect_shell_workspace.py similarity index 100% rename from openpype/plugins/publish/collect_shell_workspace.py rename to client/ayon_core/plugins/publish/collect_shell_workspace.py diff --git a/openpype/plugins/publish/collect_source_for_source.py b/client/ayon_core/plugins/publish/collect_source_for_source.py similarity index 100% rename from openpype/plugins/publish/collect_source_for_source.py rename to client/ayon_core/plugins/publish/collect_source_for_source.py diff --git a/openpype/plugins/publish/collect_time.py b/client/ayon_core/plugins/publish/collect_time.py similarity index 84% rename from openpype/plugins/publish/collect_time.py rename to client/ayon_core/plugins/publish/collect_time.py index 7a005cc9cb..175f7f4676 100644 --- a/openpype/plugins/publish/collect_time.py +++ b/client/ayon_core/plugins/publish/collect_time.py @@ 
-1,5 +1,5 @@ import pyblish.api -from openpype.lib import get_formatted_current_time +from ayon_core.lib import get_formatted_current_time class CollectTime(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/extract_burnin.py b/client/ayon_core/plugins/publish/extract_burnin.py similarity index 98% rename from openpype/plugins/publish/extract_burnin.py rename to client/ayon_core/plugins/publish/extract_burnin.py index 56d45e477b..2b76527d5f 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/client/ayon_core/plugins/publish/extract_burnin.py @@ -9,17 +9,17 @@ import six import pyblish.api -from openpype import resources, PACKAGE_DIR -from openpype.pipeline import publish -from openpype.lib import ( - run_openpype_process, +from ayon_core import resources, AYON_CORE_ROOT +from ayon_core.pipeline import publish +from ayon_core.lib import ( + run_ayon_launcher_process, get_transcode_temp_directory, convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg ) -from openpype.lib.profiles_filtering import filter_profiles -from openpype.pipeline.publish.lib import add_repre_files_for_cleanup +from ayon_core.lib.profiles_filtering import filter_profiles +from ayon_core.pipeline.publish.lib import add_repre_files_for_cleanup class ExtractBurnin(publish.Extractor): @@ -42,7 +42,6 @@ class ExtractBurnin(publish.Extractor): "hiero", "premiere", "traypublisher", - "standalonepublisher", "harmony", "fusion", "aftereffects", @@ -352,7 +351,7 @@ def main_process(self, instance): "logger": self.log } - run_openpype_process(*args, **process_kwargs) + run_ayon_launcher_process(*args, **process_kwargs) # Remove the temporary json os.remove(temporary_json_filepath) @@ -429,7 +428,7 @@ def _get_burnin_options(self): if not os.path.exists(font_filepath): font_filepath = None - # Use OpenPype default font + # Use default AYON font if not font_filepath: font_filepath = resources.get_liberation_font_path() @@ -809,7 +808,7 @@ def burnin_script_path(self): """Return path to python script for burnin processing.""" scriptpath = os.path.normpath( os.path.join( - PACKAGE_DIR, + AYON_CORE_ROOT, "scripts", "otio_burnin.py" ) diff --git a/openpype/plugins/publish/extract_color_transcode.py b/client/ayon_core/plugins/publish/extract_color_transcode.py similarity index 98% rename from openpype/plugins/publish/extract_color_transcode.py rename to client/ayon_core/plugins/publish/extract_color_transcode.py index 922df469fe..66ba8ad2be 100644 --- a/openpype/plugins/publish/extract_color_transcode.py +++ b/client/ayon_core/plugins/publish/extract_color_transcode.py @@ -3,18 +3,18 @@ import clique import pyblish.api -from openpype.pipeline import publish -from openpype.lib import ( +from ayon_core.pipeline import publish +from ayon_core.lib import ( is_oiio_supported, ) -from openpype.lib.transcoding import ( +from ayon_core.lib.transcoding import ( convert_colorspace, get_transcode_temp_directory, ) -from openpype.lib.profiles_filtering import filter_profiles +from ayon_core.lib.profiles_filtering import filter_profiles class ExtractOIIOTranscode(publish.Extractor): diff --git a/openpype/plugins/publish/extract_colorspace_data.py b/client/ayon_core/plugins/publish/extract_colorspace_data.py similarity index 97% rename from openpype/plugins/publish/extract_colorspace_data.py rename to client/ayon_core/plugins/publish/extract_colorspace_data.py index 8873dcd637..7da4890748 100644 --- a/openpype/plugins/publish/extract_colorspace_data.py +++ b/client/ayon_core/plugins/publish/extract_colorspace_data.py @@ 
-1,5 +1,5 @@ import pyblish.api -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractColorspaceData(publish.Extractor, diff --git a/openpype/plugins/publish/extract_hierarchy_to_ayon.py b/client/ayon_core/plugins/publish/extract_hierarchy_to_ayon.py similarity index 97% rename from openpype/plugins/publish/extract_hierarchy_to_ayon.py rename to client/ayon_core/plugins/publish/extract_hierarchy_to_ayon.py index 9e84daca30..0851b28134 100644 --- a/openpype/plugins/publish/extract_hierarchy_to_ayon.py +++ b/client/ayon_core/plugins/publish/extract_hierarchy_to_ayon.py @@ -7,9 +7,8 @@ from ayon_api import slugify_string from ayon_api.entity_hub import EntityHub -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_assets, get_asset_name_identifier -from openpype.pipeline.template_data import ( +from ayon_core.client import get_assets, get_asset_name_identifier +from ayon_core.pipeline.template_data import ( get_asset_template_data, get_task_template_data, ) @@ -27,9 +26,6 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin): families = ["clip", "shot"] def process(self, context): - if not AYON_SERVER_ENABLED: - return - if not context.data.get("hierarchyContext"): self.log.debug("Skipping ExtractHierarchyToAYON") return diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/client/ayon_core/plugins/publish/extract_otio_audio_tracks.py similarity index 99% rename from openpype/plugins/publish/extract_otio_audio_tracks.py rename to client/ayon_core/plugins/publish/extract_otio_audio_tracks.py index d5ab1d6032..c6bdb59f59 100644 --- a/openpype/plugins/publish/extract_otio_audio_tracks.py +++ b/client/ayon_core/plugins/publish/extract_otio_audio_tracks.py @@ -3,7 +3,7 @@ import pyblish -from openpype.lib import ( +from ayon_core.lib import ( get_ffmpeg_tool_args, run_subprocess ) diff --git a/client/ayon_core/plugins/publish/extract_otio_file.py b/client/ayon_core/plugins/publish/extract_otio_file.py new file mode 100644 index 0000000000..149f90482a --- /dev/null +++ b/client/ayon_core/plugins/publish/extract_otio_file.py @@ -0,0 +1,47 @@ +import os + +import pyblish.api + +from ayon_core.pipeline import publish + + +class ExtractOTIOFile(publish.Extractor): + """ + Extractor exporting the timeline to an OTIO file + """ + + label = "Extract OTIO file" + order = pyblish.api.ExtractorOrder - 0.45 + families = ["workfile"] + hosts = ["resolve", "hiero", "traypublisher"] + + def process(self, instance): + # Not all hosts can import this module. 
+ import opentimelineio as otio + + if not instance.context.data.get("otioTimeline"): + return + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + name = instance.data["name"] + staging_dir = self.staging_dir(instance) + + otio_timeline = instance.context.data["otioTimeline"] + # create otio timeline representation + otio_file_name = name + ".otio" + otio_file_path = os.path.join(staging_dir, otio_file_name) + otio.adapters.write_to_file(otio_timeline, otio_file_path) + + representation_otio = { + 'name': "otio", + 'ext': "otio", + 'files': otio_file_name, + "stagingDir": staging_dir, + } + + instance.data["representations"].append(representation_otio) + + self.log.info("Added OTIO file representation: {}".format( + representation_otio)) diff --git a/openpype/plugins/publish/extract_otio_review.py b/client/ayon_core/plugins/publish/extract_otio_review.py similarity index 98% rename from openpype/plugins/publish/extract_otio_review.py rename to client/ayon_core/plugins/publish/extract_otio_review.py index ad4c807091..be365520c7 100644 --- a/openpype/plugins/publish/extract_otio_review.py +++ b/client/ayon_core/plugins/publish/extract_otio_review.py @@ -19,11 +19,11 @@ import clique from pyblish import api -from openpype.lib import ( +from ayon_core.lib import ( get_ffmpeg_tool_args, run_subprocess, ) -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractOTIOReview(publish.Extractor): @@ -57,7 +57,7 @@ class ExtractOTIOReview(publish.Extractor): def process(self, instance): # Not all hosts can import these modules. import opentimelineio as otio - from openpype.pipeline.editorial import ( + from ayon_core.pipeline.editorial import ( otio_range_to_frame_range, make_sequence_collection ) @@ -282,7 +282,7 @@ def _trim_available_range(self, avl_range, start, duration, fps): otio.time.TimeRange: trimmed available range """ # Not all hosts can import these modules. - from openpype.pipeline.editorial import ( + from ayon_core.pipeline.editorial import ( trim_media_range, range_from_frames ) @@ -345,7 +345,7 @@ def _render_seqment(self, sequence=None, otio.time.TimeRange: trimmed available range """ # Not all hosts can import this module. - from openpype.pipeline.editorial import frames_to_seconds + from ayon_core.pipeline.editorial import frames_to_seconds # create path and frame start to destination output_path, out_frame_start = self._get_ffmpeg_output() diff --git a/openpype/plugins/publish/extract_otio_trimming_video.py b/client/ayon_core/plugins/publish/extract_otio_trimming_video.py similarity index 96% rename from openpype/plugins/publish/extract_otio_trimming_video.py rename to client/ayon_core/plugins/publish/extract_otio_trimming_video.py index 2020fcde93..9736c30b73 100644 --- a/openpype/plugins/publish/extract_otio_trimming_video.py +++ b/client/ayon_core/plugins/publish/extract_otio_trimming_video.py @@ -10,11 +10,11 @@ import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( get_ffmpeg_tool_args, run_subprocess, ) -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractOTIOTrimmingVideo(publish.Extractor): @@ -75,7 +75,7 @@ def _ffmpeg_trim_seqment(self, input_file_path, otio_range): """ # Not all hosts can import this module. 
- from openpype.pipeline.editorial import frames_to_seconds + from ayon_core.pipeline.editorial import frames_to_seconds # create path to destination output_path = self._get_ffmpeg_output(input_file_path) diff --git a/client/ayon_core/plugins/publish/extract_review.py b/client/ayon_core/plugins/publish/extract_review.py new file mode 100644 index 0000000000..91d39882ae --- /dev/null +++ b/client/ayon_core/plugins/publish/extract_review.py @@ -0,0 +1,1926 @@ +import os +import re +import copy +import json +import shutil +import subprocess +from abc import ABCMeta, abstractmethod + +import six +import clique +import speedcopy +import pyblish.api + +from ayon_core.lib import ( + get_ffmpeg_tool_args, + filter_profiles, + path_to_subprocess_arg, + run_subprocess, +) +from ayon_core.lib.transcoding import ( + IMAGE_EXTENSIONS, + get_ffprobe_streams, + should_convert_for_ffmpeg, + get_review_layer_name, + convert_input_paths_for_ffmpeg, + get_transcode_temp_directory, +) +from ayon_core.pipeline.publish import ( + KnownPublishError, + get_publish_instance_label, +) +from ayon_core.pipeline.publish.lib import add_repre_files_for_cleanup + + +class ExtractReview(pyblish.api.InstancePlugin): + """Extracting Review mov file for Ftrack + + Compulsory attribute of representation is tags list with "review", + otherwise the representation is ignored. + + All new representations are created and encoded by ffmpeg following + presets found in AYON Settings interface at + `project_settings/global/publish/ExtractReview/profiles:outputs`. + """ + + label = "Extract Review" + order = pyblish.api.ExtractorOrder + 0.02 + families = ["review"] + hosts = [ + "nuke", + "maya", + "blender", + "houdini", + "max", + "shell", + "hiero", + "premiere", + "harmony", + "traypublisher", + "fusion", + "tvpaint", + "resolve", + "webpublisher", + "aftereffects", + "flame", + "unreal" + ] + + # Supported extensions + image_exts = ["exr", "jpg", "jpeg", "png", "dpx", "tga"] + video_exts = ["mov", "mp4"] + supported_exts = image_exts + video_exts + + alpha_exts = ["exr", "png", "dpx"] + + # Preset attributes + profiles = None + + def process(self, instance): + self.log.debug(str(instance.data["representations"])) + # Skip review when requested. + if not instance.data.get("review", True): + return + + # Run processing + self.main_process(instance) + + # Make sure cleanup happens and pop representations with "delete" tag. + for repre in tuple(instance.data["representations"]): + tags = repre.get("tags") or [] + # Representation is not marked to be deleted + if "delete" not in tags: + continue + + # The representation can be used as thumbnail source + if "thumbnail" in tags or "need_thumbnail" in tags: + continue + + self.log.debug( + "Removing representation: {}".format(repre) + ) + instance.data["representations"].remove(repre) + + def _get_outputs_for_instance(self, instance): + host_name = instance.context.data["hostName"] + family = self.main_family_from_instance(instance) + + self.log.debug("Host: \"{}\"".format(host_name)) + self.log.debug("Family: \"{}\"".format(family)) + + profile = filter_profiles( + self.profiles, + { + "hosts": host_name, + "families": family, + }, + logger=self.log) + if not profile: + self.log.info(( + "Skipped instance. 
None of profiles in presets are for" + " Host: \"{}\" | Family: \"{}\"" + ).format(host_name, family)) + return + + self.log.debug("Matching profile: \"{}\"".format(json.dumps(profile))) + + subset_name = instance.data.get("subset") + instance_families = self.families_from_instance(instance) + filtered_outputs = self.filter_output_defs( + profile, subset_name, instance_families + ) + if not filtered_outputs: + self.log.info(( + "Skipped instance. All output definitions from selected" + " profile do not match instance families \"{}\" or" + " subset name \"{}\"." + ).format(str(instance_families), subset_name)) + + # Store `filename_suffix` to save arguments + profile_outputs = [] + for filename_suffix, definition in filtered_outputs.items(): + definition["filename_suffix"] = filename_suffix + profile_outputs.append(definition) + + return profile_outputs + + def _get_outputs_per_representations(self, instance, profile_outputs): + outputs_per_representations = [] + for repre in instance.data["representations"]: + repre_name = str(repre.get("name")) + tags = repre.get("tags") or [] + custom_tags = repre.get("custom_tags") + if "review" not in tags: + self.log.debug(( + "Repre: {} - Didn't find \"review\" in tags. Skipping" + ).format(repre_name)) + continue + + if "thumbnail" in tags: + self.log.debug(( + "Repre: {} - Found \"thumbnail\" in tags. Skipping" + ).format(repre_name)) + continue + + if "passing" in tags: + self.log.debug(( + "Repre: {} - Found \"passing\" in tags. Skipping" + ).format(repre_name)) + continue + + input_ext = repre["ext"] + if input_ext.startswith("."): + input_ext = input_ext[1:] + + if input_ext not in self.supported_exts: + self.log.info( + "Representation has unsupported extension \"{}\"".format( + input_ext + ) + ) + continue + + # Filter output definition by representation's + # custom tags (optional) + outputs = self.filter_outputs_by_custom_tags( + profile_outputs, custom_tags) + if not outputs: + self.log.info(( + "Skipped representation. All output definitions from" + " selected profile does not match to representation's" + " custom tags. 
\"{}\"" + ).format(str(custom_tags))) + continue + + outputs_per_representations.append((repre, outputs)) + return outputs_per_representations + + def _single_frame_filter(self, input_filepaths, output_defs): + single_frame_image = False + if len(input_filepaths) == 1: + ext = os.path.splitext(input_filepaths[0])[-1] + single_frame_image = ext.lower() in IMAGE_EXTENSIONS + + filtered_defs = [] + for output_def in output_defs: + output_filters = output_def.get("filter") or {} + frame_filter = output_filters.get("single_frame_filter") + if ( + (not single_frame_image and frame_filter == "single_frame") + or (single_frame_image and frame_filter == "multi_frame") + ): + continue + + filtered_defs.append(output_def) + + return filtered_defs + + def main_process(self, instance): + instance_label = get_publish_instance_label(instance) + self.log.debug("Processing instance \"{}\"".format(instance_label)) + profile_outputs = self._get_outputs_for_instance(instance) + if not profile_outputs: + return + + # Loop through representations + outputs_per_repres = self._get_outputs_per_representations( + instance, profile_outputs + ) + + for repre, output_defs in outputs_per_repres: + # Check if input should be preconverted before processing + # Store original staging dir (it's value may change) + src_repre_staging_dir = repre["stagingDir"] + # Receive filepath to first file in representation + first_input_path = None + input_filepaths = [] + if not self.input_is_sequence(repre): + first_input_path = os.path.join( + src_repre_staging_dir, repre["files"] + ) + input_filepaths.append(first_input_path) + else: + for filename in repre["files"]: + filepath = os.path.join( + src_repre_staging_dir, filename + ) + input_filepaths.append(filepath) + if first_input_path is None: + first_input_path = filepath + + filtered_output_defs = self._single_frame_filter( + input_filepaths, output_defs + ) + if not filtered_output_defs: + self.log.debug(( + "Repre: {} - All output definitions were filtered" + " out by single frame filter. Skipping" + ).format(repre["name"])) + continue + + # Skip if file is not set + if first_input_path is None: + self.log.warning(( + "Representation \"{}\" have empty files. Skipped." + ).format(repre["name"])) + continue + + # Determine if representation requires pre conversion for ffmpeg + do_convert = should_convert_for_ffmpeg(first_input_path) + # If result is None the requirement of conversion can't be + # determined + if do_convert is None: + self.log.info(( + "Can't determine if representation requires conversion." + " Skipped." 
+ )) + continue + + layer_name = get_review_layer_name(first_input_path) + + # Do conversion if needed + # - change staging dir of source representation + # - must be set back after output definitions processing + if do_convert: + new_staging_dir = get_transcode_temp_directory() + repre["stagingDir"] = new_staging_dir + + convert_input_paths_for_ffmpeg( + input_filepaths, + new_staging_dir, + self.log + ) + + try: + self._render_output_definitions( + instance, + repre, + src_repre_staging_dir, + filtered_output_defs, + layer_name + ) + + finally: + # Make sure temporary staging is cleaned up and representation + # has set origin stagingDir + if do_convert: + # Set staging dir of source representation back to previous + # value + repre["stagingDir"] = src_repre_staging_dir + if os.path.exists(new_staging_dir): + shutil.rmtree(new_staging_dir) + + def _render_output_definitions( + self, + instance, + repre, + src_repre_staging_dir, + output_definitions, + layer_name + ): + fill_data = copy.deepcopy(instance.data["anatomyData"]) + for _output_def in output_definitions: + output_def = copy.deepcopy(_output_def) + # Make sure output definition has "tags" key + if "tags" not in output_def: + output_def["tags"] = [] + + if "burnins" not in output_def: + output_def["burnins"] = [] + + # Create copy of representation + new_repre = copy.deepcopy(repre) + new_tags = new_repre.get("tags") or [] + # Make sure new representation has origin staging dir + # - this is because source representation may change + # it's staging dir because of ffmpeg conversion + new_repre["stagingDir"] = src_repre_staging_dir + + # Remove "delete" tag from new repre if there is + if "delete" in new_tags: + new_tags.remove("delete") + + if "need_thumbnail" in new_tags: + new_tags.remove("need_thumbnail") + + # Add additional tags from output definition to representation + for tag in output_def["tags"]: + if tag not in new_tags: + new_tags.append(tag) + + # Return tags to new representation + new_repre["tags"] = new_tags + + # Add burnin link from output definition to representation + for burnin in output_def["burnins"]: + if burnin not in new_repre.get("burnins", []): + if not new_repre.get("burnins"): + new_repre["burnins"] = [] + new_repre["burnins"].append(str(burnin)) + + self.log.debug( + "Linked burnins: `{}`".format(new_repre.get("burnins")) + ) + + self.log.debug( + "New representation tags: `{}`".format( + new_repre.get("tags")) + ) + + temp_data = self.prepare_temp_data(instance, repre, output_def) + files_to_clean = [] + if temp_data["input_is_sequence"]: + self.log.debug("Checking sequence to fill gaps in sequence..") + files_to_clean = self.fill_sequence_gaps( + files=temp_data["origin_repre"]["files"], + staging_dir=new_repre["stagingDir"], + start_frame=temp_data["frame_start"], + end_frame=temp_data["frame_end"] + ) + + # create or update outputName + output_name = new_repre.get("outputName", "") + output_ext = new_repre["ext"] + if output_name: + output_name += "_" + output_name += output_def["filename_suffix"] + if temp_data["without_handles"]: + output_name += "_noHandles" + + # add outputName to anatomy format fill_data + fill_data.update({ + "output": output_name, + "ext": output_ext + }) + + try: # temporary until oiiotool is supported cross platform + ffmpeg_args = self._ffmpeg_arguments( + output_def, + instance, + new_repre, + temp_data, + fill_data, + layer_name, + ) + except ZeroDivisionError: + # TODO recalculate width and height using OIIO before + # conversion + if 'exr' in 
temp_data["origin_repre"]["ext"]: + self.log.warning( + ( + "Unsupported compression on input files." + " Skipping!!!" + ), + exc_info=True + ) + return + raise NotImplementedError + + subprcs_cmd = " ".join(ffmpeg_args) + + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) + + run_subprocess(subprcs_cmd, shell=True, logger=self.log) + + # delete files added to fill gaps + if files_to_clean: + for f in files_to_clean: + os.unlink(f) + + new_repre.update({ + "fps": temp_data["fps"], + "name": "{}_{}".format(output_name, output_ext), + "outputName": output_name, + "outputDef": output_def, + "frameStartFtrack": temp_data["output_frame_start"], + "frameEndFtrack": temp_data["output_frame_end"], + "ffmpeg_cmd": subprcs_cmd + }) + + # Force to pop these key if are in new repre + new_repre.pop("thumbnail", None) + if "clean_name" in new_repre.get("tags", []): + new_repre.pop("outputName") + + # adding representation + self.log.debug( + "Adding new representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) + + add_repre_files_for_cleanup(instance, new_repre) + + def input_is_sequence(self, repre): + """Deduce from representation data if input is sequence.""" + # TODO GLOBAL ISSUE - Find better way how to find out if input + # is sequence. Issues (in theory): + # - there may be multiple files ant not be sequence + # - remainders are not checked at all + # - there can be more than one collection + return isinstance(repre["files"], (list, tuple)) + + def prepare_temp_data(self, instance, repre, output_def): + """Prepare dictionary with values used across extractor's process. + + All data are collected from instance, context, origin representation + and output definition. + + There are few required keys in Instance data: "frameStart", "frameEnd" + and "fps". + + Args: + instance (Instance): Currently processed instance. + repre (dict): Representation from which new representation was + copied. + output_def (dict): Definition of output of this plugin. + + Returns: + dict: All data which are used across methods during process. + Their values should not change during process but new keys + with values may be added. 
+ """ + + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + + # Try to get handles from instance + handle_start = instance.data.get("handleStart") + handle_end = instance.data.get("handleEnd") + # If even one of handle values is not set on instance use + # handles from context + if handle_start is None or handle_end is None: + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + + # Change output frames when output should be without handles + without_handles = bool("no-handles" in output_def["tags"]) + if without_handles: + output_frame_start = frame_start + output_frame_end = frame_end + else: + output_frame_start = frame_start_handle + output_frame_end = frame_end_handle + + handles_are_set = handle_start > 0 or handle_end > 0 + + with_audio = True + if ( + # Check if has `no-audio` tag + "no-audio" in output_def["tags"] + # Check if instance has ny audio in data + or not instance.data.get("audio") + ): + with_audio = False + + input_is_sequence = self.input_is_sequence(repre) + input_allow_bg = False + first_sequence_frame = None + if input_is_sequence and repre["files"]: + # Calculate first frame that should be used + cols, _ = clique.assemble(repre["files"]) + input_frames = list(sorted(cols[0].indexes)) + first_sequence_frame = input_frames[0] + # WARNING: This is an issue as we don't know if first frame + # is with or without handles! + # - handle start is added but how do not know if we should + output_duration = (output_frame_end - output_frame_start) + 1 + if ( + without_handles + and len(input_frames) - handle_start >= output_duration + ): + first_sequence_frame += handle_start + + ext = os.path.splitext(repre["files"][0])[1].replace(".", "") + if ext.lower() in self.alpha_exts: + input_allow_bg = True + + return { + "fps": float(instance.data["fps"]), + "frame_start": frame_start, + "frame_end": frame_end, + "handle_start": handle_start, + "handle_end": handle_end, + "frame_start_handle": frame_start_handle, + "frame_end_handle": frame_end_handle, + "output_frame_start": int(output_frame_start), + "output_frame_end": int(output_frame_end), + "pixel_aspect": instance.data.get("pixelAspect", 1), + "resolution_width": instance.data.get("resolutionWidth"), + "resolution_height": instance.data.get("resolutionHeight"), + "origin_repre": repre, + "input_is_sequence": input_is_sequence, + "first_sequence_frame": first_sequence_frame, + "input_allow_bg": input_allow_bg, + "with_audio": with_audio, + "without_handles": without_handles, + "handles_are_set": handles_are_set + } + + def _ffmpeg_arguments( + self, + output_def, + instance, + new_repre, + temp_data, + fill_data, + layer_name + ): + """Prepares ffmpeg arguments for expected extraction. + + Prepares input and output arguments based on output definition and + input files. + + Args: + output_def (dict): Currently processed output definition. + instance (Instance): Currently processed instance. + new_repre (dict): Representation representing output of this + process. + temp_data (dict): Base data for successful process. 
+ """ + + # Get FFmpeg arguments from profile presets + out_def_ffmpeg_args = output_def.get("ffmpeg_args") or {} + + _ffmpeg_input_args = out_def_ffmpeg_args.get("input") or [] + _ffmpeg_output_args = out_def_ffmpeg_args.get("output") or [] + _ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or [] + _ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or [] + + # Cleanup empty strings + ffmpeg_input_args = [ + value for value in _ffmpeg_input_args if value.strip() + ] + ffmpeg_video_filters = [ + value for value in _ffmpeg_video_filters if value.strip() + ] + ffmpeg_audio_filters = [ + value for value in _ffmpeg_audio_filters if value.strip() + ] + + ffmpeg_output_args = [] + for value in _ffmpeg_output_args: + value = value.strip() + if not value: + continue + try: + value = value.format(**fill_data) + except Exception: + self.log.warning( + "Failed to format ffmpeg argument: {}".format(value), + exc_info=True + ) + pass + ffmpeg_output_args.append(value) + + # Prepare input and output filepaths + self.input_output_paths(new_repre, output_def, temp_data) + + # Set output frames len to 1 when ouput is single image + if ( + temp_data["output_ext_is_image"] + and not temp_data["output_is_sequence"] + ): + output_frames_len = 1 + + else: + output_frames_len = ( + temp_data["output_frame_end"] + - temp_data["output_frame_start"] + + 1 + ) + + duration_seconds = float(output_frames_len / temp_data["fps"]) + + # Define which layer should be used + if layer_name: + ffmpeg_input_args.extend(["-layer", layer_name]) + + if temp_data["input_is_sequence"]: + # Set start frame of input sequence (just frame in filename) + # - definition of input filepath + # - add handle start if output should be without handles + start_number = temp_data["first_sequence_frame"] + if temp_data["without_handles"] and temp_data["handles_are_set"]: + start_number += temp_data["handle_start"] + ffmpeg_input_args.extend([ + "-start_number", str(start_number) + ]) + + # TODO add fps mapping `{fps: fraction}` ? 
+            #   - e.g.: {
+            #       "25": "25/1",
+            #       "24": "24/1",
+            #       "23.976": "24000/1001"
+            #   }
+            # Add framerate to input when input is sequence
+            ffmpeg_input_args.extend([
+                "-framerate", str(temp_data["fps"])
+            ])
+            # Add duration of an input sequence if output is video
+            if not temp_data["output_is_sequence"]:
+                ffmpeg_input_args.extend([
+                    "-to", "{:0.10f}".format(duration_seconds)
+                ])
+
+        if temp_data["output_is_sequence"]:
+            # Set start frame of output sequence (just frame in filename)
+            # - this is definition of an output
+            ffmpeg_output_args.extend([
+                "-start_number", str(temp_data["output_frame_start"])
+            ])
+
+        # Change output's duration and start point if it should not contain
+        #   handles
+        if temp_data["without_handles"] and temp_data["handles_are_set"]:
+            # Set output duration in seconds
+            ffmpeg_output_args.extend([
+                "-t", "{:0.10f}".format(duration_seconds)
+            ])
+
+            # Add -ss (start offset in seconds) if input is not sequence
+            if not temp_data["input_is_sequence"]:
+                start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
+                # Set start time without handles
+                # - Skip if start sec is 0.0
+                if start_sec > 0.0:
+                    ffmpeg_input_args.extend([
+                        "-ss", "{:0.10f}".format(start_sec)
+                    ])
+
+        # Set frame range of output when input or output is sequence
+        elif temp_data["output_is_sequence"]:
+            ffmpeg_output_args.extend([
+                "-frames:v", str(output_frames_len)
+            ])
+
+        # Add video/image input path
+        ffmpeg_input_args.extend([
+            "-i", path_to_subprocess_arg(temp_data["full_input_path"])
+        ])
+
+        # Add audio arguments if there are any. Skipped when output is
+        #   an image.
+        if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
+            audio_in_args, audio_filters, audio_out_args = self.audio_args(
+                instance, temp_data, duration_seconds
+            )
+            ffmpeg_input_args.extend(audio_in_args)
+            ffmpeg_audio_filters.extend(audio_filters)
+            ffmpeg_output_args.extend(audio_out_args)
+
+        res_filters = self.rescaling_filters(temp_data, output_def, new_repre)
+        ffmpeg_video_filters.extend(res_filters)
+
+        ffmpeg_input_args = self.split_ffmpeg_args(ffmpeg_input_args)
+
+        lut_filters = self.lut_filters(new_repre, instance, ffmpeg_input_args)
+        ffmpeg_video_filters.extend(lut_filters)
+
+        bg_alpha = 0
+        bg_color = output_def.get("bg_color")
+        if bg_color:
+            bg_red, bg_green, bg_blue, bg_alpha = bg_color
+
+        if bg_alpha > 0:
+            if not temp_data["input_allow_bg"]:
+                self.log.info((
+                    "Output definition has BG color defined, but input was"
+                    " resolved as not supporting adding BG."
+                ))
+            else:
+                bg_color_hex = "#{0:0>2X}{1:0>2X}{2:0>2X}".format(
+                    bg_red, bg_green, bg_blue
+                )
+                bg_color_alpha = float(bg_alpha) / 255
+                bg_color_str = "{}@{}".format(bg_color_hex, bg_color_alpha)
+
+                self.log.info("Applying BG color {}".format(bg_color_str))
+                color_args = [
+                    "split=2[bg][fg]",
+                    "[bg]drawbox=c={}:replace=1:t=fill[bg]".format(
+                        bg_color_str
+                    ),
+                    "[bg][fg]overlay=format=auto"
+                ]
+                # Prepend bg color change before all video filters
+                # NOTE at the time of creation it is required as video filters
+                #   from settings may affect color of BG
+                #   e.g. `eq` can remove alpha from input
+                for arg in reversed(color_args):
+                    ffmpeg_video_filters.insert(0, arg)
+
+        # Add argument to override output file
+        ffmpeg_output_args.append("-y")
+
+        # NOTE This must be the last added item to output arguments.
+        ffmpeg_output_args.append(
+            path_to_subprocess_arg(temp_data["full_output_path"])
+        )
+
+        return self.ffmpeg_full_args(
+            ffmpeg_input_args,
+            ffmpeg_video_filters,
+            ffmpeg_audio_filters,
+            ffmpeg_output_args
+        )
+
+    def split_ffmpeg_args(self, in_args):
+        """Make sure all entered arguments are separated into single items.
+
+        Split each argument string with " -" to identify if string contains
+        one or more arguments.
+        """
+        split_args = []
+        for arg in in_args:
+            sub_args = arg.split(" -")
+            if len(sub_args) == 1:
+                if arg and arg not in split_args:
+                    split_args.append(arg)
+                continue
+
+            for idx, arg in enumerate(sub_args):
+                if idx != 0:
+                    arg = "-" + arg
+
+                if arg and arg not in split_args:
+                    split_args.append(arg)
+        return split_args
+
+    def ffmpeg_full_args(
+        self, input_args, video_filters, audio_filters, output_args
+    ):
+        """Post processing of collected FFmpeg arguments.
+
+        Just verify that output arguments do not contain video or audio
+        filters which may cause issues because of duplicated argument entry.
+        Filters found in output arguments are moved to the list they belong
+        to.
+
+        Args:
+            input_args (list): All collected ffmpeg arguments with inputs.
+            video_filters (list): All collected video filters.
+            audio_filters (list): All collected audio filters.
+            output_args (list): All collected ffmpeg output arguments with
+                output filepath.
+
+        Returns:
+            list: Containing all arguments ready to run in subprocess.
+        """
+        output_args = self.split_ffmpeg_args(output_args)
+
+        video_args_identifiers = ["-vf", "-filter:v"]
+        audio_args_identifiers = ["-af", "-filter:a"]
+        for arg in tuple(output_args):
+            for identifier in video_args_identifiers:
+                if arg.startswith("{} ".format(identifier)):
+                    output_args.remove(arg)
+                    arg = arg.replace(identifier, "").strip()
+                    video_filters.append(arg)
+
+            for identifier in audio_args_identifiers:
+                if arg.startswith("{} ".format(identifier)):
+                    output_args.remove(arg)
+                    arg = arg.replace(identifier, "").strip()
+                    audio_filters.append(arg)
+
+        all_args = [
+            subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))
+        ]
+        all_args.extend(input_args)
+        if video_filters:
+            all_args.append("-filter:v")
+            all_args.append("\"{}\"".format(",".join(video_filters)))
+
+        if audio_filters:
+            all_args.append("-filter:a")
+            all_args.append("\"{}\"".format(",".join(audio_filters)))
+
+        all_args.extend(output_args)
+
+        return all_args
+
+    def fill_sequence_gaps(self, files, staging_dir, start_frame, end_frame):
+        # type: (list, str, int, int) -> list
+        """Fill missing files in sequence by duplicating existing ones.
+
+        This will take the nearest existing frame file and copy it to fill
+        gaps in the sequence. The last existing file is used to fill the
+        holes ahead of it.
+
+        Args:
+            files (list): List of representation files.
+            staging_dir (str): Path to staging directory.
+            start_frame (int): Sequence start (no matter what files are there)
+            end_frame (int): Sequence end (no matter what files are there)
+
+        Returns:
+            list: Added files. These should be cleaned up after the work
+                is done.
+
+        Raises:
+            KnownPublishError: if more than one collection is obtained.
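+
+        Illustrative example: for existing files "f.1001.exr" and
+        "f.1004.exr" with range 1001-1004, frames 1002 and 1003 are
+        created as copies of "f.1001.exr".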
+ """ + + collections = clique.assemble(files)[0] + if len(collections) != 1: + raise KnownPublishError( + "Multiple collections {} found.".format(collections)) + + col = collections[0] + + # Prepare which hole is filled with what frame + # - the frame is filled only with already existing frames + prev_frame = next(iter(col.indexes)) + hole_frame_to_nearest = {} + for frame in range(int(start_frame), int(end_frame) + 1): + if frame in col.indexes: + prev_frame = frame + else: + # Use previous frame as source for hole + hole_frame_to_nearest[frame] = prev_frame + + # Calculate paths + added_files = [] + col_format = col.format("{head}{padding}{tail}") + for hole_frame, src_frame in hole_frame_to_nearest.items(): + hole_fpath = os.path.join(staging_dir, col_format % hole_frame) + src_fpath = os.path.join(staging_dir, col_format % src_frame) + if not os.path.isfile(src_fpath): + raise KnownPublishError( + "Missing previously detected file: {}".format(src_fpath)) + + speedcopy.copyfile(src_fpath, hole_fpath) + added_files.append(hole_fpath) + + return added_files + + def input_output_paths(self, new_repre, output_def, temp_data): + """Deduce input nad output file paths based on entered data. + + Input may be sequence of images, video file or single image file and + same can be said about output, this method helps to find out what + their paths are. + + It is validated that output directory exist and creates if not. + + During process are set "files", "stagingDir", "ext" and + "sequence_file" (if output is sequence) keys to new representation. + """ + + repre = temp_data["origin_repre"] + src_staging_dir = repre["stagingDir"] + dst_staging_dir = new_repre["stagingDir"] + + if temp_data["input_is_sequence"]: + collections = clique.assemble(repre["files"])[0] + full_input_path = os.path.join( + src_staging_dir, + collections[0].format("{head}{padding}{tail}") + ) + + filename = collections[0].format("{head}") + if filename.endswith("."): + filename = filename[:-1] + + # Make sure to have full path to one input file + full_input_path_single_file = os.path.join( + src_staging_dir, repre["files"][0] + ) + + else: + full_input_path = os.path.join( + src_staging_dir, repre["files"] + ) + filename = os.path.splitext(repre["files"])[0] + + # Make sure to have full path to one input file + full_input_path_single_file = full_input_path + + filename_suffix = output_def["filename_suffix"] + + output_ext = output_def.get("ext") + # Use input extension if output definition do not specify it + if output_ext is None: + output_ext = os.path.splitext(full_input_path)[1] + + # TODO Define if extension should have dot or not + if output_ext.startswith("."): + output_ext = output_ext[1:] + + output_ext = output_ext.lower() + + # Store extension to representation + new_repre["ext"] = output_ext + + self.log.debug("New representation ext: `{}`".format(output_ext)) + + # Output is image file sequence witht frames + output_ext_is_image = bool(output_ext in self.image_exts) + output_is_sequence = bool( + output_ext_is_image + and "sequence" in output_def["tags"] + ) + if output_is_sequence: + new_repre_files = [] + frame_start = temp_data["output_frame_start"] + frame_end = temp_data["output_frame_end"] + + filename_base = "{}_{}".format(filename, filename_suffix) + # Temporary tempalte for frame filling. 
+            #   "basename.%04d.exr" when `frame_end` == 1001
+            repr_file = "{}.%{:0>2}d.{}".format(
+                filename_base, len(str(frame_end)), output_ext
+            )
+
+            for frame in range(frame_start, frame_end + 1):
+                new_repre_files.append(repr_file % frame)
+
+            new_repre["sequence_file"] = repr_file
+            full_output_path = os.path.join(
+                dst_staging_dir, filename_base, repr_file
+            )
+
+        else:
+            repr_file = "{}_{}.{}".format(
+                filename, filename_suffix, output_ext
+            )
+            full_output_path = os.path.join(dst_staging_dir, repr_file)
+            new_repre_files = repr_file
+
+        # Store files to representation
+        new_repre["files"] = new_repre_files
+
+        # Make sure stagingDir exists
+        dst_staging_dir = os.path.normpath(os.path.dirname(full_output_path))
+        if not os.path.exists(dst_staging_dir):
+            self.log.debug("Creating dir: {}".format(dst_staging_dir))
+            os.makedirs(dst_staging_dir)
+
+        # Store stagingDir to representation
+        new_repre["stagingDir"] = dst_staging_dir
+
+        # Store paths to temp data
+        temp_data["full_input_path"] = full_input_path
+        temp_data["full_input_path_single_file"] = full_input_path_single_file
+        temp_data["full_output_path"] = full_output_path
+
+        # Store information about output
+        temp_data["output_ext_is_image"] = output_ext_is_image
+        temp_data["output_is_sequence"] = output_is_sequence
+
+        self.log.debug("Input path {}".format(full_input_path))
+        self.log.debug("Output path {}".format(full_output_path))
+
+    def audio_args(self, instance, temp_data, duration_seconds):
+        """Prepares FFMpeg arguments for audio inputs."""
+        audio_in_args = []
+        audio_filters = []
+        audio_out_args = []
+        audio_inputs = instance.data.get("audio")
+        if not audio_inputs:
+            return audio_in_args, audio_filters, audio_out_args
+
+        for audio in audio_inputs:
+            # NOTE modified: "frameStartFtrack" was always expected here,
+            #   which is strange. There should probably be a different key.
+            # TODO use different frame start!
+            offset_seconds = 0
+            frame_start_ftrack = instance.data.get("frameStartFtrack")
+            if frame_start_ftrack is not None:
+                offset_frames = frame_start_ftrack - audio["offset"]
+                offset_seconds = offset_frames / temp_data["fps"]
+
+            if offset_seconds > 0:
+                audio_in_args.append(
+                    "-ss {}".format(offset_seconds)
+                )
+
+            elif offset_seconds < 0:
+                audio_in_args.append(
+                    "-itsoffset {}".format(abs(offset_seconds))
+                )
+
+            # Audio duration is offset from `-ss`
+            audio_duration = duration_seconds + offset_seconds
+
+            # Set audio duration
+            audio_in_args.append("-to {:0.10f}".format(audio_duration))
+
+            # Ignore video data from audio input
+            audio_in_args.append("-vn")
+
+            # Add audio input path
+            audio_in_args.append("-i {}".format(
+                path_to_subprocess_arg(audio["filename"])
+            ))
+
+        # NOTE: These were changed from input to output arguments.
+        # NOTE: value in "-ac" was hardcoded to 2, changed to audio inputs len.
+        # Need to merge audio if there is more than one input.
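+        # Illustrative example: three audio inputs result in
+        #   "-filter_complex amerge" and "-ac 3" in output arguments.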
+ if len(audio_inputs) > 1: + audio_out_args.append("-filter_complex amerge") + audio_out_args.append("-ac {}".format(len(audio_inputs))) + + return audio_in_args, audio_filters, audio_out_args + + def get_letterbox_filters( + self, + letter_box_def, + output_width, + output_height + ): + output = [] + + ratio = letter_box_def["ratio"] + fill_color = letter_box_def["fill_color"] + f_red, f_green, f_blue, f_alpha = fill_color + fill_color_hex = "{0:0>2X}{1:0>2X}{2:0>2X}".format( + f_red, f_green, f_blue + ) + fill_color_alpha = float(f_alpha) / 255 + + line_thickness = letter_box_def["line_thickness"] + line_color = letter_box_def["line_color"] + l_red, l_green, l_blue, l_alpha = line_color + line_color_hex = "{0:0>2X}{1:0>2X}{2:0>2X}".format( + l_red, l_green, l_blue + ) + line_color_alpha = float(l_alpha) / 255 + + # test ratios and define if pillar or letter boxes + output_ratio = float(output_width) / float(output_height) + self.log.debug("Output ratio: {} LetterBox ratio: {}".format( + output_ratio, ratio + )) + pillar = output_ratio > ratio + need_mask = format(output_ratio, ".3f") != format(ratio, ".3f") + if not need_mask: + return [] + + if not pillar: + if fill_color_alpha > 0: + top_box = ( + "drawbox=0:0:{width}" + ":round(({height}-({width}/{ratio}))/2)" + ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) + + bottom_box = ( + "drawbox=0" + ":{height}-round(({height}-({width}/{ratio}))/2)" + ":{width}" + ":round(({height}-({width}/{ratio}))/2)" + ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) + output.extend([top_box, bottom_box]) + + if line_color_alpha > 0 and line_thickness > 0: + top_line = ( + "drawbox=0" + ":round(({height}-({width}/{ratio}))/2)-{l_thick}" + ":{width}:{l_thick}:t=fill:c={l_color}@{l_alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + l_thick=line_thickness, + l_color=line_color_hex, + l_alpha=line_color_alpha + ) + bottom_line = ( + "drawbox=0" + ":{height}-round(({height}-({width}/{ratio}))/2)" + ":{width}:{l_thick}:t=fill:c={l_color}@{l_alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + l_thick=line_thickness, + l_color=line_color_hex, + l_alpha=line_color_alpha + ) + output.extend([top_line, bottom_line]) + + else: + if fill_color_alpha > 0: + left_box = ( + "drawbox=0:0" + ":round(({width}-({height}*{ratio}))/2)" + ":{height}" + ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) + + right_box = ( + "drawbox=" + "{width}-round(({width}-({height}*{ratio}))/2)" + ":0" + ":round(({width}-({height}*{ratio}))/2)" + ":{height}" + ":t=fill:c={color}@{alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + color=fill_color_hex, + alpha=fill_color_alpha + ) + output.extend([left_box, right_box]) + + if line_color_alpha > 0 and line_thickness > 0: + left_line = ( + "drawbox=round(({width}-({height}*{ratio}))/2)" + ":0:{l_thick}:{height}:t=fill:c={l_color}@{l_alpha}" + ).format( + width=output_width, + height=output_height, + ratio=ratio, + l_thick=line_thickness, + l_color=line_color_hex, + l_alpha=line_color_alpha + ) + + right_line = ( + "drawbox={width}-round(({width}-({height}*{ratio}))/2)" + ":0:{l_thick}:{height}:t=fill:c={l_color}@{l_alpha}" + ).format( + 
width=output_width,
+                    height=output_height,
+                    ratio=ratio,
+                    l_thick=line_thickness,
+                    l_color=line_color_hex,
+                    l_alpha=line_color_alpha
+                )
+                output.extend([left_line, right_line])
+
+        return output
+
+    def rescaling_filters(self, temp_data, output_def, new_repre):
+        """Prepare video filters based on tags in new representation.
+
+        It is possible to add letterboxes to output video or rescale to
+        different resolution.
+
+        During this preparation "resolutionWidth" and "resolutionHeight" are
+        set to new representation.
+        """
+        filters = []
+
+        # Input video file may be already reformatted from upstream
+        #   (marked with the "reformated" tag)
+        reformat_in_baking = bool("reformated" in new_repre["tags"])
+        self.log.debug("reformat_in_baking: `{}`".format(reformat_in_baking))
+
+        # NOTE Skipped using instance's resolution
+        full_input_path_single_file = temp_data["full_input_path_single_file"]
+        try:
+            streams = get_ffprobe_streams(
+                full_input_path_single_file, self.log
+            )
+        except Exception as exc:
+            raise AssertionError((
+                "FFprobe couldn't read information about input file: \"{}\"."
+                " Error message: {}"
+            ).format(full_input_path_single_file, str(exc)))
+
+        # Try to find first stream with defined 'width' and 'height'
+        # - this is to avoid order of streams where audio can be first
+        # - there may be a better way (checking `codec_type`?)
+        input_width = None
+        input_height = None
+        output_width = None
+        output_height = None
+        for stream in streams:
+            if "width" in stream and "height" in stream:
+                input_width = int(stream["width"])
+                input_height = int(stream["height"])
+                break
+
+        # Get instance data
+        pixel_aspect = temp_data["pixel_aspect"]
+        if reformat_in_baking:
+            self.log.debug((
+                "Using resolution from input. It is already "
+                "reformatted from upstream process"
+            ))
+            pixel_aspect = 1
+            output_width = input_width
+            output_height = input_height
+
+        # Raise exception if no stream defined input resolution
+        if input_width is None:
+            raise AssertionError((
+                "FFprobe couldn't read resolution from input file: \"{}\""
+            ).format(full_input_path_single_file))
+
+        # NOTE Setting only one of `width` or `height` is not allowed
+        # - settings value can't have None but has value of 0
+        output_width = output_def.get("width") or output_width or None
+        output_height = output_def.get("height") or output_height or None
+        # Force to use input resolution if output resolution was not defined
+        #   in settings. Resolution from instance is not used when
+        #   'use_input_res' is set to 'True'.
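+        # Illustrative example: a 2048x1080 input with pixel_aspect 2.0
+        #   is scaled to 4096x1080 below, and 'use_input_res' then keeps
+        #   that resolution instead of the instance resolution.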
+        use_input_res = False
+
+        # Overscan color
+        overscan_color_value = "black"
+        overscan_color = output_def.get("overscan_color")
+        if overscan_color:
+            bg_red, bg_green, bg_blue, _ = overscan_color
+            overscan_color_value = "#{0:0>2X}{1:0>2X}{2:0>2X}".format(
+                bg_red, bg_green, bg_blue
+            )
+        self.log.debug("Overscan color: `{}`".format(overscan_color_value))
+
+        # Scale input to have proper pixel aspect ratio
+        # - scale width by the pixel aspect ratio
+        scale_pixel_aspect = output_def.get("scale_pixel_aspect", True)
+        if scale_pixel_aspect and pixel_aspect != 1:
+            # Change input width after pixel aspect
+            input_width = int(input_width * pixel_aspect)
+            use_input_res = True
+            filters.append((
+                "scale={}x{}:flags=lanczos".format(input_width, input_height)
+            ))
+
+        # Convert overscan value to video filters
+        overscan_crop = output_def.get("overscan_crop")
+        overscan = OverscanCrop(
+            input_width, input_height, overscan_crop, overscan_color_value
+        )
+        overscan_crop_filters = overscan.video_filters()
+        # Add overscan filters if there are any and modify input
+        #   resolution by their values
+        if overscan_crop_filters:
+            filters.extend(overscan_crop_filters)
+            # Change input resolution after overscan crop
+            input_width = overscan.width()
+            input_height = overscan.height()
+            use_input_res = True
+
+        # Make sure input width and height are not odd numbers
+        input_width_is_odd = bool(input_width % 2 != 0)
+        input_height_is_odd = bool(input_height % 2 != 0)
+        if input_width_is_odd or input_height_is_odd:
+            # Add padding filter to make input dimensions even
+            filters.append("pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2")
+
+            # Change input width or height as the filter will change them
+            if input_width_is_odd:
+                self.log.info((
+                    "Converting input width from odd to even number. {} -> {}"
+                ).format(input_width, input_width + 1))
+                input_width += 1
+
+            if input_height_is_odd:
+                self.log.info((
+                    "Converting input height from odd to even number. {} -> {}"
+                ).format(input_height, input_height + 1))
+                input_height += 1
+
+        self.log.debug("pixel_aspect: `{}`".format(pixel_aspect))
+        self.log.debug("input_width: `{}`".format(input_width))
+        self.log.debug("input_height: `{}`".format(input_height))
+
+        # Use instance resolution if output definition has not set it
+        # - use instance resolution only if there were no scale changes
+        #   (tracked by 'use_input_res') that may massively affect output
+        if (
+            not use_input_res
+            and (output_width is None or output_height is None)
+        ):
+            output_width = temp_data["resolution_width"]
+            output_height = temp_data["resolution_height"]
+
+        # Use source's input resolution if instance does not have it set
+        if output_width is None or output_height is None:
+            self.log.debug("Using resolution from input.")
+            output_width = input_width
+            output_height = input_height
+
+        output_width = int(output_width)
+        output_height = int(output_height)
+
+        # Make sure output width and height are not odd numbers
+        # When this can happen:
+        # - output definition has set width and height with an odd number
+        # - `instance.data` contains width and height with an odd number
+        if output_width % 2 != 0:
+            self.log.warning((
+                "Converting output width from odd to even number. {} -> {}"
+            ).format(output_width, output_width + 1))
+            output_width += 1
+
+        if output_height % 2 != 0:
+            self.log.warning((
+                "Converting output height from odd to even number. {} -> {}"
+            ).format(output_height, output_height + 1))
+            output_height += 1
+
+        self.log.debug(
+            "Output resolution is {}x{}".format(output_width, output_height)
+        )
+
+        letter_box_def = output_def["letter_box"]
+        letter_box_enabled = letter_box_def["enabled"]
+
+        # Skip processing if resolution is same as input's and letterbox is
+        #   not set
+        if (
+            output_width == input_width
+            and output_height == input_height
+            and not letter_box_enabled
+        ):
+            self.log.debug(
+                "Output resolution is same as input's"
+                " and \"letter_box\" key is not set. Skipping reformat part."
+            )
+            new_repre["resolutionWidth"] = input_width
+            new_repre["resolutionHeight"] = input_height
+            return filters
+
+        # Scale to output resolution and pad to keep the aspect ratio
+        if input_height != output_height or input_width != output_width:
+            filters.extend([
+                (
+                    "scale={}x{}"
+                    ":flags=lanczos"
+                    ":force_original_aspect_ratio=decrease"
+                ).format(output_width, output_height),
+                "pad={}:{}:(ow-iw)/2:(oh-ih)/2:{}".format(
+                    output_width, output_height,
+                    overscan_color_value
+                ),
+                "setsar=1"
+            ])
+
+        # letter_box
+        if letter_box_enabled:
+            filters.extend(
+                self.get_letterbox_filters(
+                    letter_box_def,
+                    output_width,
+                    output_height
+                )
+            )
+
+        new_repre["resolutionWidth"] = output_width
+        new_repre["resolutionHeight"] = output_height
+
+        return filters
+
+    def lut_filters(self, new_repre, instance, input_args):
+        """Add lut file to output ffmpeg filters."""
+        filters = []
+        # baking lut file application
+        lut_path = instance.data.get("lutPath")
+        if not lut_path or "bake-lut" not in new_repre["tags"]:
+            return filters
+
+        # Prepare path for ffmpeg argument
+        lut_path = lut_path.replace("\\", "/").replace(":", "\\:")
+
+        # Remove gamma from input arguments
+        if "-gamma" in input_args:
+            input_args.remove("-gamma")
+
+        # Prepare filters
+        filters.append("lut3d=file='{}'".format(lut_path))
+        # QUESTION hardcoded colormatrix?
+        filters.append("colormatrix=bt601:bt709")
+
+        self.log.info("Added Lut to ffmpeg command.")
+
+        return filters
+
+    def main_family_from_instance(self, instance):
+        """Returns main family of entered instance."""
+        family = instance.data.get("family")
+        if not family:
+            family = instance.data["families"][0]
+        return family
+
+    def families_from_instance(self, instance):
+        """Returns all families of entered instance."""
+        families = []
+        family = instance.data.get("family")
+        if family:
+            families.append(family)
+
+        for family in (instance.data.get("families") or tuple()):
+            if family not in families:
+                families.append(family)
+        return families
+
+    def families_filter_validation(self, families, output_families_filter):
+        """Determines if entered families intersect with families filters.
+
+        All family values are lowered to avoid unexpected results.
+        """
+
+        families_filter_lower = set(
+            family.lower()
+            for family in output_families_filter
+            # Exclude empty filter values
+            if family
+        )
+        if not families_filter_lower:
+            return True
+        return any(
+            family.lower() in families_filter_lower
+            for family in families
+        )
+
+    def filter_output_defs(self, profile, subset_name, families):
+        """Return outputs matching input instance families.
+
+        Output definitions without families filter are marked as valid.
+
+        Args:
+            profile (dict): Profile from presets matching current context.
+            subset_name (str): Name of subset.
+            families (list): All families of current instance.
+
+        Returns:
+            dict: Containing all output definitions matching entered
+                families, by filename suffix.
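+
+        Illustrative example: an output definition with families filter
+        ["review"] matches an instance with families ["render", "review"],
+        but not an instance with only ["prerender"].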
+ """ + outputs = profile.get("outputs") or {} + if not outputs: + return outputs + + filtered_outputs = {} + for filename_suffix, output_def in outputs.items(): + output_filters = output_def.get("filter") + # If no filter on output preset, skip filtering and add output + # profile for farther processing + if not output_filters: + filtered_outputs[filename_suffix] = output_def + continue + + families_filters = output_filters.get("families") + if not self.families_filter_validation(families, families_filters): + continue + + # Subsets name filters + subset_filters = [ + subset_filter + for subset_filter in output_filters.get("subsets", []) + # Skip empty strings + if subset_filter + ] + if subset_name and subset_filters: + match = False + for subset_filter in subset_filters: + compiled = re.compile(subset_filter) + if compiled.search(subset_name): + match = True + break + + if not match: + continue + + filtered_outputs[filename_suffix] = output_def + + return filtered_outputs + + def filter_outputs_by_custom_tags(self, outputs, custom_tags): + """Filter output definitions by entered representation custom_tags. + + Output definitions without custom_tags filter are marked as invalid, + only in case representation is having any custom_tags defined. + + Args: + outputs (list): Contain list of output definitions from presets. + custom_tags (list): Custom Tags of processed representation. + + Returns: + list: Containg all output definitions matching entered tags. + """ + + filtered_outputs = [] + repre_c_tags_low = [tag.lower() for tag in (custom_tags or [])] + for output_def in outputs: + tag_filters = output_def.get("filter", {}).get("custom_tags") + + if not custom_tags and not tag_filters: + # Definition is valid if both tags are empty + valid = True + + elif not custom_tags or not tag_filters: + # Invalid if one is empty + valid = False + + else: + # Check if output definition tags are in representation tags + valid = False + # lower all filter tags + tag_filters_low = [tag.lower() for tag in tag_filters] + # check if any repre tag is not in filter tags + for tag in repre_c_tags_low: + if tag in tag_filters_low: + valid = True + break + + if valid: + filtered_outputs.append(output_def) + + self.log.debug("__ filtered_outputs: {}".format( + [_o["filename_suffix"] for _o in filtered_outputs] + )) + + return filtered_outputs + + def add_video_filter_args(self, args, inserting_arg): + """ + Fixing video filter arguments to be one long string + + Args: + args (list): list of string arguments + inserting_arg (str): string argument we want to add + (without flag `-vf`) + + Returns: + str: long joined argument to be added back to list of arguments + + """ + # find all video format settings + vf_settings = [p for p in args + for v in ["-filter:v", "-vf"] + if v in p] + self.log.debug("_ vf_settings: `{}`".format(vf_settings)) + + # remove them from output args list + for p in vf_settings: + self.log.debug("_ remove p: `{}`".format(p)) + args.remove(p) + self.log.debug("_ args: `{}`".format(args)) + + # strip them from all flags + vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "") + for p in vf_settings] + + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + vf_fixed.insert(0, inserting_arg) + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + # create new video filter setting + vf_back = "-vf " + ",".join(vf_fixed) + + return vf_back + + +@six.add_metaclass(ABCMeta) +class _OverscanValue: + def __repr__(self): + return "<{}> {}".format(self.__class__.__name__, str(self)) + + 
@abstractmethod + def copy(self): + """Create a copy of object.""" + pass + + @abstractmethod + def size_for(self, value): + """Calculate new value for passed value.""" + pass + + +class PixValueExplicit(_OverscanValue): + def __init__(self, value): + self._value = int(value) + + def __str__(self): + return "{}px".format(self._value) + + def copy(self): + return PixValueExplicit(self._value) + + def size_for(self, value): + if self._value == 0: + return value + return self._value + + +class PercentValueExplicit(_OverscanValue): + def __init__(self, value): + self._value = float(value) + + def __str__(self): + return "{}%".format(abs(self._value)) + + def copy(self): + return PercentValueExplicit(self._value) + + def size_for(self, value): + if self._value == 0: + return value + return int((value / 100) * self._value) + + +class PixValueRelative(_OverscanValue): + def __init__(self, value): + self._value = int(value) + + def __str__(self): + sign = "-" if self._value < 0 else "+" + return "{}{}px".format(sign, abs(self._value)) + + def copy(self): + return PixValueRelative(self._value) + + def size_for(self, value): + return value + self._value + + +class PercentValueRelative(_OverscanValue): + def __init__(self, value): + self._value = float(value) + + def __str__(self): + return "{}%".format(self._value) + + def copy(self): + return PercentValueRelative(self._value) + + def size_for(self, value): + if self._value == 0: + return value + + offset = int((value / 100) * self._value) + + return value + offset + + +class PercentValueRelativeSource(_OverscanValue): + def __init__(self, value, source_sign): + self._value = float(value) + if source_sign not in ("-", "+"): + raise ValueError( + "Invalid sign value \"{}\" expected \"-\" or \"+\"".format( + source_sign + ) + ) + self._source_sign = source_sign + + def __str__(self): + return "{}%{}".format(self._value, self._source_sign) + + def copy(self): + return PercentValueRelativeSource(self._value, self._source_sign) + + def size_for(self, value): + if self._value == 0: + return value + return int((value * 100) / (100 - self._value)) + + +class OverscanCrop: + """Helper class to read overscan string and calculate output resolution. + + It is possible to enter single value for both width and height, or + two values for width and height. Overscan string may have a few variants. + Each variant define output size for input size. + + ### Example + For input size: 2200px + + | String | Output | Description | + |----------|--------|-------------------------------------------------| + | "" | 2200px | Empty string does nothing. | + | "10%" | 220px | Explicit percent size. | + | "-10%" | 1980px | Relative percent size (decrease). | + | "+10%" | 2420px | Relative percent size (increase). | + | "-10%+" | 2000px | Relative percent size to output size. | + | "300px" | 300px | Explicit output size cropped or expanded. | + | "-300px" | 1900px | Relative pixel size (decrease). | + | "+300px" | 2500px | Relative pixel size (increase). | + | "300" | 300px | Value without "%" and "px" is used as has "px". | + + Value without sign (+/-) in is always explicit and value with sign is + relative. Output size for "200px" and "+200px" are not the same. + Values "0", "0px" or "0%" are ignored. + + All values that cause output resolution smaller than 1 pixel are invalid. + + Value "-10%+" is a special case which says that input's resolution is + bigger by 10% than expected output. 
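+
+    For instance (illustrative): for input resolution 1920x1080 the string
+    "-10%" results in width() == 1728 and height() == 972.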
+
+    It is possible to combine these variants to define different output for
+    width and height.
+
+    Resolution: 2000px 1000px
+
+    | String          | Output        |
+    |-----------------|---------------|
+    | "+100px +120px" | 2100px 1120px |
+    | "-10% -200px"   | 1800px 800px  |
+    """
+
+    item_regex = re.compile(r"([\+\-])?([0-9]+)(.+)?")
+    relative_source_regex = re.compile(r"%([\+\-])")
+
+    def __init__(
+        self, input_width, input_height, string_value, overscan_color=None
+    ):
+        # Make sure that value is not None
+        string_value = string_value or ""
+
+        self.input_width = input_width
+        self.input_height = input_height
+        self.overscan_color = overscan_color
+
+        width, height = self._convert_string_to_values(string_value)
+        self._width_value = width
+        self._height_value = height
+
+        self._string_value = string_value
+
+    def __str__(self):
+        return "{}".format(self._string_value)
+
+    def __repr__(self):
+        return "<{}>".format(self.__class__.__name__)
+
+    def width(self):
+        """Calculated width."""
+        return self._width_value.size_for(self.input_width)
+
+    def height(self):
+        """Calculated height."""
+        return self._height_value.size_for(self.input_height)
+
+    def video_filters(self):
+        """FFmpeg video filters to achieve expected result.
+
+        Filter may be empty, use "crop" filter, "pad" filter or combination
+        of "crop" and "pad".
+
+        Returns:
+            list: FFmpeg video filters.
+        """
+        # crop=width:height:x:y - explicit start x, y position
+        # crop=width:height - x, y are related to center by width/height
+        # pad=width:height:x:y - explicit start x, y position
+        # pad=width:height - x, y are set to 0 by default
+
+        width = self.width()
+        height = self.height()
+
+        output = []
+        if self.input_width == width and self.input_height == height:
+            return output
+
+        # Make sure resolution does not have odd numbers
+        if width % 2 == 1:
+            width -= 1
+
+        if height % 2 == 1:
+            height -= 1
+
+        if width <= self.input_width and height <= self.input_height:
+            output.append("crop={}:{}".format(width, height))
+
+        elif width >= self.input_width and height >= self.input_height:
+            output.append(
+                "pad={}:{}:(iw-ow)/2:(ih-oh)/2:{}".format(
+                    width, height, self.overscan_color
+                )
+            )
+
+        elif width > self.input_width and height < self.input_height:
+            output.append("crop=iw:{}".format(height))
+            output.append("pad={}:ih:(iw-ow)/2:(ih-oh)/2:{}".format(
+                width, self.overscan_color
+            ))
+
+        elif width < self.input_width and height > self.input_height:
+            output.append("crop={}:ih".format(width))
+            output.append("pad=iw:{}:(iw-ow)/2:(ih-oh)/2:{}".format(
+                height, self.overscan_color
+            ))
+
+        return output
+
+    def _convert_string_to_values(self, orig_string_value):
+        string_value = orig_string_value.strip().lower()
+        if not string_value:
+            return [PixValueRelative(0), PixValueRelative(0)]
+
+        # Replace "px" (and spaces before it) with a single space
+        string_value = re.sub(r"([ ]+)?px", " ", string_value)
+        string_value = re.sub(r"([ ]+)%", "%", string_value)
+        # Make sure +/- sign at the beginning of string is next to number
+        string_value = re.sub(r"^([\+\-])[ ]+", r"\g<1>", string_value)
+        # Make sure +/- sign in the middle has no spaces before the number
+        #   it belongs to
+        string_value = re.sub(
+            r"[ ]([\+\-])[ ]+([0-9])",
+            r" \g<1>\g<2>",
+            string_value
+        )
+        string_parts = [
+            part
+            for part in string_value.split(" ")
+            if part
+        ]
+
+        error_msg = "Invalid string for rescaling \"{}\"".format(
+            orig_string_value
+        )
+        if not 1 <= len(string_parts) <= 2:
+            raise ValueError(error_msg)
+
+        output = []
+        for item in string_parts:
+            groups = 
self.item_regex.findall(item) + if not groups: + raise ValueError(error_msg) + + relative_sign, value, ending = groups[0] + if not relative_sign: + if not ending: + output.append(PixValueExplicit(value)) + else: + output.append(PercentValueExplicit(value)) + else: + source_sign_group = self.relative_source_regex.findall(ending) + if not ending: + output.append(PixValueRelative(int(relative_sign + value))) + + elif source_sign_group: + source_sign = source_sign_group[0] + output.append(PercentValueRelativeSource( + float(relative_sign + value), source_sign + )) + else: + output.append( + PercentValueRelative(float(relative_sign + value)) + ) + + if len(output) == 1: + width = output.pop(0) + height = width.copy() + else: + width, height = output + + return width, height diff --git a/openpype/plugins/publish/extract_review_slate.py b/client/ayon_core/plugins/publish/extract_review_slate.py similarity index 99% rename from openpype/plugins/publish/extract_review_slate.py rename to client/ayon_core/plugins/publish/extract_review_slate.py index 4e3406d3f9..35f55e275c 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/client/ayon_core/plugins/publish/extract_review_slate.py @@ -5,7 +5,7 @@ import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( path_to_subprocess_arg, run_subprocess, get_ffmpeg_tool_args, @@ -14,8 +14,8 @@ get_ffmpeg_codec_args, get_ffmpeg_format_args, ) -from openpype.pipeline import publish -from openpype.pipeline.publish import KnownPublishError +from ayon_core.pipeline import publish +from ayon_core.pipeline.publish import KnownPublishError class ExtractReviewSlate(publish.Extractor): diff --git a/openpype/plugins/publish/extract_scanline_exr.py b/client/ayon_core/plugins/publish/extract_scanline_exr.py similarity index 97% rename from openpype/plugins/publish/extract_scanline_exr.py rename to client/ayon_core/plugins/publish/extract_scanline_exr.py index 747155689b..07cc0882e0 100644 --- a/openpype/plugins/publish/extract_scanline_exr.py +++ b/client/ayon_core/plugins/publish/extract_scanline_exr.py @@ -5,12 +5,12 @@ import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( run_subprocess, get_oiio_tool_args, ToolNotFoundError, ) -from openpype.pipeline import KnownPublishError +from ayon_core.pipeline import KnownPublishError class ExtractScanlineExr(pyblish.api.InstancePlugin): diff --git a/client/ayon_core/plugins/publish/extract_thumbnail.py b/client/ayon_core/plugins/publish/extract_thumbnail.py new file mode 100644 index 0000000000..90e0ef7431 --- /dev/null +++ b/client/ayon_core/plugins/publish/extract_thumbnail.py @@ -0,0 +1,501 @@ +import copy +import os +import subprocess +import tempfile + +import pyblish.api +from ayon_core.lib import ( + get_ffmpeg_tool_args, + get_ffprobe_data, + + is_oiio_supported, + get_rescaled_command_arguments, + + path_to_subprocess_arg, + run_subprocess, +) +from ayon_core.lib.transcoding import convert_colorspace + +from ayon_core.lib.transcoding import VIDEO_EXTENSIONS + + +class ExtractThumbnail(pyblish.api.InstancePlugin): + """Create jpg thumbnail from sequence using ffmpeg""" + + label = "Extract Thumbnail" + order = pyblish.api.ExtractorOrder + 0.49 + families = [ + "imagesequence", "render", "render2d", "prerender", + "source", "clip", "take", "online", "image" + ] + hosts = [ + "shell", + "fusion", + "resolve", + "traypublisher", + "substancepainter", + "nuke", + ] + enabled = False + + integrate_thumbnail = False + target_size = { + "type": "resize", + "width": 1920, + 
"height": 1080 + } + background_color = None + duration_split = 0.5 + # attribute presets from settings + oiiotool_defaults = None + ffmpeg_args = None + + def process(self, instance): + # run main process + self._main_process(instance) + + # Make sure cleanup happens to representations which are having both + # tags `delete` and `need_thumbnail` + for repre in tuple(instance.data.get("representations", [])): + tags = repre.get("tags") or [] + # skip representations which are going to be published on farm + if "publish_on_farm" in tags: + continue + if ( + "delete" in tags + and "need_thumbnail" in tags + ): + self.log.debug( + "Removing representation: {}".format(repre) + ) + instance.data["representations"].remove(repre) + + def _main_process(self, instance): + subset_name = instance.data["subset"] + instance_repres = instance.data.get("representations") + if not instance_repres: + self.log.debug(( + "Instance {} does not have representations. Skipping" + ).format(subset_name)) + return + + self.log.debug( + "Processing instance with subset name {}".format(subset_name) + ) + + # Skip if instance have 'review' key in data set to 'False' + if not self._is_review_instance(instance): + self.log.debug("Skipping - no review set on instance.") + return + + # Check if already has thumbnail created + if self._already_has_thumbnail(instance_repres): + self.log.debug("Thumbnail representation already present.") + return + + # skip crypto passes. + # TODO: This is just a quick fix and has its own side-effects - it is + # affecting every subset name with `crypto` in its name. + # This must be solved properly, maybe using tags on + # representation that can be determined much earlier and + # with better precision. + if "crypto" in subset_name.lower(): + self.log.debug("Skipping crypto passes.") + return + + # first check for any explicitly marked representations for thumbnail + explicit_repres = self._get_explicit_repres_for_thumbnail(instance) + if explicit_repres: + filtered_repres = explicit_repres + else: + filtered_repres = self._get_filtered_repres(instance) + + if not filtered_repres: + self.log.info( + "Instance doesn't have representations that can be used " + "as source for thumbnail. Skipping thumbnail extraction." 
+ ) + return + + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + instance.context.data["cleanupFullPaths"].append(dst_staging) + + thumbnail_created = False + oiio_supported = is_oiio_supported() + for repre in filtered_repres: + repre_files = repre["files"] + src_staging = os.path.normpath(repre["stagingDir"]) + if not isinstance(repre_files, (list, tuple)): + # convert any video file to frame so oiio doesn't need to + # read video file (it is slow) and also we are having control + # over which frame is used for thumbnail + # this will also work with ffmpeg fallback conversion in case + # oiio is not supported + repre_extension = os.path.splitext(repre_files)[1] + if repre_extension in VIDEO_EXTENSIONS: + video_file_path = os.path.join( + src_staging, repre_files + ) + file_path = self._create_frame_from_video( + video_file_path, + dst_staging + ) + if file_path: + src_staging, input_file = os.path.split(file_path) + else: + # if it is not video file then just use first file + input_file = repre_files + else: + repre_files_thumb = copy.deepcopy(repre_files) + # exclude first frame if slate in representation tags + if "slate-frame" in repre.get("tags", []): + repre_files_thumb = repre_files_thumb[1:] + file_index = int( + float(len(repre_files_thumb)) * self.duration_split) + input_file = repre_files[file_index] + + full_input_path = os.path.join(src_staging, input_file) + self.log.debug("input {}".format(full_input_path)) + + filename = os.path.splitext(input_file)[0] + jpeg_file = filename + "_thumb.jpg" + full_output_path = os.path.join(dst_staging, jpeg_file) + colorspace_data = repre.get("colorspaceData") + + # only use OIIO if it is supported and representation has + # colorspace data + if oiio_supported and colorspace_data: + self.log.debug( + "Trying to convert with OIIO " + "with colorspace data: {}".format(colorspace_data) + ) + # If the input can read by OIIO then use OIIO method for + # conversion otherwise use ffmpeg + thumbnail_created = self._create_thumbnail_oiio( + full_input_path, + full_output_path, + colorspace_data + ) + + # Try to use FFMPEG if OIIO is not supported or for cases when + # oiiotool isn't available or representation is not having + # colorspace data + if not thumbnail_created: + if oiio_supported: + self.log.debug( + "Converting with FFMPEG because input" + " can't be read by OIIO." 
+ ) + + thumbnail_created = self._create_thumbnail_ffmpeg( + full_input_path, full_output_path + ) + + # Skip representation and try next one if wasn't created + if not thumbnail_created: + continue + + if len(explicit_repres) > 1: + repre_name = "thumbnail_{}".format(repre["outputName"]) + else: + repre_name = "thumbnail" + + # add thumbnail path to instance data for integrator + instance_thumb_path = instance.data.get("thumbnailPath") + if ( + not instance_thumb_path + or not os.path.isfile(instance_thumb_path) + ): + self.log.debug( + "Adding thumbnail path to instance data: {}".format( + full_output_path + ) + ) + instance.data["thumbnailPath"] = full_output_path + + new_repre_tags = ["thumbnail"] + # for workflows which needs to have thumbnails published as + # separate representations `delete` tag should not be added + if not self.integrate_thumbnail: + new_repre_tags.append("delete") + + new_repre = { + "name": repre_name, + "ext": "jpg", + "files": jpeg_file, + "stagingDir": dst_staging, + "thumbnail": True, + "tags": new_repre_tags, + # If source image is jpg then there can be clash when + # integrating to making the output name explicit. + "outputName": "thumbnail" + } + + # adding representation + instance.data["representations"].append(new_repre) + + if explicit_repres: + # this key will then align assetVersion ftrack thumbnail sync + new_repre["outputName"] = ( + repre.get("outputName") or repre["name"]) + self.log.debug( + "Adding explicit thumbnail representation: {}".format( + new_repre)) + else: + self.log.debug( + "Adding thumbnail representation: {}".format(new_repre) + ) + # There is no need to create more then one thumbnail + break + + if not thumbnail_created: + self.log.warning("Thumbnail has not been created.") + + def _is_review_instance(self, instance): + # TODO: We should probably handle "not creating" of thumbnail + # other way then checking for "review" key on instance data? + if instance.data.get("review", True): + return True + return False + + def _already_has_thumbnail(self, repres): + for repre in repres: + self.log.debug("repre {}".format(repre)) + if repre["name"] == "thumbnail": + return True + return False + + def _get_explicit_repres_for_thumbnail(self, instance): + src_repres = instance.data.get("representations") or [] + # This is mainly for Nuke where we have multiple representations for + # one instance and representations are tagged for thumbnail. + # First check if any of the representations have + # `need_thumbnail` in tags and add them to filtered_repres + need_thumb_repres = [ + repre for repre in src_repres + if "need_thumbnail" in repre.get("tags", []) + if "publish_on_farm" not in repre.get("tags", []) + ] + if not need_thumb_repres: + return [] + + self.log.info( + "Instance has representation with tag `need_thumbnail`. " + "Using only this representations for thumbnail creation. " + ) + self.log.debug( + "Representations: {}".format(need_thumb_repres) + ) + return need_thumb_repres + + def _get_filtered_repres(self, instance): + filtered_repres = [] + src_repres = instance.data.get("representations") or [] + + for repre in src_repres: + self.log.debug(repre) + tags = repre.get("tags") or [] + + if "publish_on_farm" in tags: + # only process representations with are going + # to be published locally + continue + + valid = "review" in tags or "thumb-nuke" in tags + if not valid: + continue + + if not repre.get("files"): + self.log.debug(( + "Representation \"{}\" doesn't have files. 
Skipping" + ).format(repre["name"])) + continue + + filtered_repres.append(repre) + return filtered_repres + + def _create_thumbnail_oiio( + self, + src_path, + dst_path, + colorspace_data, + ): + """Create thumbnail using OIIO tool oiiotool + + Args: + src_path (str): path to source file + dst_path (str): path to destination file + colorspace_data (dict): colorspace data from representation + keys: + colorspace (str) + config (dict) + display (Optional[str]) + view (Optional[str]) + + Returns: + str: path to created thumbnail + """ + self.log.info("Extracting thumbnail {}".format(dst_path)) + resolution_arg = self._get_resolution_arg("oiiotool", src_path) + + repre_display = colorspace_data.get("display") + repre_view = colorspace_data.get("view") + oiio_default_type = None + oiio_default_display = None + oiio_default_view = None + oiio_default_colorspace = None + # first look into representation colorspaceData, perhaps it has + # display and view + if all([repre_display, repre_view]): + self.log.info( + "Using Display & View from " + "representation: '{} ({})'".format( + repre_view, + repre_display + ) + ) + # if representation doesn't have display and view then use + # oiiotool_defaults + elif self.oiiotool_defaults: + oiio_default_type = self.oiiotool_defaults["type"] + if "colorspace" in oiio_default_type: + oiio_default_colorspace = self.oiiotool_defaults["colorspace"] + else: + oiio_default_display = self.oiiotool_defaults["display"] + oiio_default_view = self.oiiotool_defaults["view"] + + try: + convert_colorspace( + src_path, + dst_path, + colorspace_data["config"]["path"], + colorspace_data["colorspace"], + display=repre_display or oiio_default_display, + view=repre_view or oiio_default_view, + target_colorspace=oiio_default_colorspace, + additional_command_args=resolution_arg, + logger=self.log, + ) + except Exception: + self.log.warning( + "Failed to create thumbnail using oiiotool", + exc_info=True + ) + return False + + return True + + def _create_thumbnail_ffmpeg(self, src_path, dst_path): + self.log.debug("Extracting thumbnail with FFMPEG: {}".format(dst_path)) + resolution_arg = self._get_resolution_arg("ffmpeg", src_path) + ffmpeg_path_args = get_ffmpeg_tool_args("ffmpeg") + ffmpeg_args = self.ffmpeg_args or {} + + jpeg_items = [ + subprocess.list2cmdline(ffmpeg_path_args) + ] + # flag for large file sizes + max_int = 2147483647 + jpeg_items.extend([ + "-y", + "-analyzeduration", str(max_int), + "-probesize", str(max_int), + ]) + # use same input args like with mov + jpeg_items.extend(ffmpeg_args.get("input") or []) + # input file + jpeg_items.extend(["-i", path_to_subprocess_arg(src_path)]) + # output arguments from presets + jpeg_items.extend(ffmpeg_args.get("output") or []) + # we just want one frame from movie files + jpeg_items.extend(["-vframes", "1"]) + + if resolution_arg: + jpeg_items.extend(resolution_arg) + + # output file + jpeg_items.append(path_to_subprocess_arg(dst_path)) + subprocess_command = " ".join(jpeg_items) + try: + run_subprocess( + subprocess_command, shell=True, logger=self.log + ) + return True + except Exception: + self.log.warning( + "Failed to create thumbnail using ffmpeg", + exc_info=True + ) + return False + + def _create_frame_from_video(self, video_file_path, output_dir): + """Convert video file to one frame image via ffmpeg""" + # create output file path + base_name = os.path.basename(video_file_path) + filename = os.path.splitext(base_name)[0] + output_thumb_file_path = os.path.join( + output_dir, "{}.png".format(filename)) + + # Set 
video input attributes + max_int = str(2147483647) + video_data = get_ffprobe_data(video_file_path, logger=self.log) + duration = float(video_data["format"]["duration"]) + + cmd_args = [ + "-y", + "-ss", str(duration * self.duration_split), + "-i", video_file_path, + "-analyzeduration", max_int, + "-probesize", max_int, + "-vframes", "1" + ] + + # add output file path + cmd_args.append(output_thumb_file_path) + + # create ffmpeg command + cmd = get_ffmpeg_tool_args( + "ffmpeg", + *cmd_args + ) + try: + # run subprocess + self.log.debug("Executing: {}".format(" ".join(cmd))) + run_subprocess(cmd, logger=self.log) + self.log.debug( + "Thumbnail created: {}".format(output_thumb_file_path)) + return output_thumb_file_path + except RuntimeError as error: + self.log.warning( + "Failed intermediate thumb source using ffmpeg: {}".format( + error) + ) + return None + + def _get_resolution_arg( + self, + application, + input_path, + ): + # get settings + if self.target_size.get("type") == "source": + return [] + + target_width = self.target_size["width"] + target_height = self.target_size["height"] + + # form arg string per application + return get_rescaled_command_arguments( + application, + input_path, + target_width, + target_height, + bg_color=self.background_color, + log=self.log + ) diff --git a/openpype/plugins/publish/extract_thumbnail_from_source.py b/client/ayon_core/plugins/publish/extract_thumbnail_from_source.py similarity index 99% rename from openpype/plugins/publish/extract_thumbnail_from_source.py rename to client/ayon_core/plugins/publish/extract_thumbnail_from_source.py index 33cbf6d9bf..8d043d700d 100644 --- a/openpype/plugins/publish/extract_thumbnail_from_source.py +++ b/client/ayon_core/plugins/publish/extract_thumbnail_from_source.py @@ -16,7 +16,7 @@ import tempfile import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( get_ffmpeg_tool_args, get_oiio_tool_args, is_oiio_supported, diff --git a/openpype/plugins/publish/extract_trim_video_audio.py b/client/ayon_core/plugins/publish/extract_trim_video_audio.py similarity index 97% rename from openpype/plugins/publish/extract_trim_video_audio.py rename to client/ayon_core/plugins/publish/extract_trim_video_audio.py index 5e00cfc96f..78e2aec972 100644 --- a/openpype/plugins/publish/extract_trim_video_audio.py +++ b/client/ayon_core/plugins/publish/extract_trim_video_audio.py @@ -3,11 +3,11 @@ import pyblish.api -from openpype.lib import ( +from ayon_core.lib import ( get_ffmpeg_tool_args, run_subprocess, ) -from openpype.pipeline import publish +from ayon_core.pipeline import publish class ExtractTrimVideoAudio(publish.Extractor): @@ -16,7 +16,7 @@ class ExtractTrimVideoAudio(publish.Extractor): # must be before `ExtractThumbnailSP` order = pyblish.api.ExtractorOrder - 0.01 label = "Extract Trim Video/Audio" - hosts = ["standalonepublisher", "traypublisher"] + hosts = ["traypublisher"] families = ["clip", "trimming"] # make sure it is enabled only if at least both families are available diff --git a/openpype/plugins/publish/help/validate_containers.xml b/client/ayon_core/plugins/publish/help/validate_containers.xml similarity index 100% rename from openpype/plugins/publish/help/validate_containers.xml rename to client/ayon_core/plugins/publish/help/validate_containers.xml diff --git a/openpype/plugins/publish/help/validate_publish_dir.xml b/client/ayon_core/plugins/publish/help/validate_publish_dir.xml similarity index 100% rename from openpype/plugins/publish/help/validate_publish_dir.xml rename to 
client/ayon_core/plugins/publish/help/validate_publish_dir.xml diff --git a/openpype/plugins/publish/help/validate_unique_subsets.xml b/client/ayon_core/plugins/publish/help/validate_unique_subsets.xml similarity index 100% rename from openpype/plugins/publish/help/validate_unique_subsets.xml rename to client/ayon_core/plugins/publish/help/validate_unique_subsets.xml diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py new file mode 100644 index 0000000000..a67c837daf --- /dev/null +++ b/client/ayon_core/plugins/publish/integrate.py @@ -0,0 +1,1014 @@ +import os +import logging +import sys +import copy +import datetime + +import clique +import six +from bson.objectid import ObjectId +import pyblish.api + +from ayon_core.client.operations import ( + OperationsSession, + new_subset_document, + new_version_doc, + new_representation_doc, + prepare_subset_update_data, + prepare_version_update_data, + prepare_representation_update_data, +) + +from ayon_core.client import ( + get_representations, + get_subset_by_name, + get_version_by_name, +) +from ayon_core.lib import source_hash +from ayon_core.lib.file_transaction import ( + FileTransaction, + DuplicateDestinationError +) +from ayon_core.pipeline.publish import ( + KnownPublishError, + get_publish_template_name, +) + +log = logging.getLogger(__name__) + + +def get_instance_families(instance): + """Get all families of the instance""" + # todo: move this to lib? + family = instance.data.get("family") + families = [] + if family: + families.append(family) + + for _family in (instance.data.get("families") or []): + if _family not in families: + families.append(_family) + + return families + + +def get_frame_padded(frame, padding): + """Return frame number as string with `padding` amount of padded zeros""" + return "{frame:0{padding}d}".format(padding=padding, frame=frame) + + +class IntegrateAsset(pyblish.api.InstancePlugin): + """Register publish in the database and transfer files to destinations. + + Steps: + 1) Register the subset and version + 2) Transfer the representation files to the destination + 3) Register the representation + + Requires: + instance.data['representations'] - must be a list and each member + must be a dictionary with following data: + 'files': list of filenames for sequence, string for single file. + Only the filename is allowed, without the folder path. + 'stagingDir': "path/to/folder/with/files" + 'name': representation name (usually the same as extension) + 'ext': file extension + optional data + "frameStart" + "frameEnd" + 'fps' + "data": additional metadata for each representation. 
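+
+        Example of a single representation entry (illustrative values
+        only, not an exhaustive set of keys):
+            {
+                "name": "exr",
+                "ext": "exr",
+                "files": ["render.1001.exr", "render.1002.exr"],
+                "stagingDir": "/path/to/staging",
+                "frameStart": 1001,
+                "frameEnd": 1002,
+                "fps": 25,
+                "data": {}
+            }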
+ """ + + label = "Integrate Asset" + order = pyblish.api.IntegratorOrder + families = ["workfile", + "pointcache", + "pointcloud", + "proxyAbc", + "camera", + "animation", + "model", + "maxScene", + "mayaAscii", + "mayaScene", + "setdress", + "layout", + "ass", + "vdbcache", + "scene", + "vrayproxy", + "vrayscene_layer", + "render", + "prerender", + "imagesequence", + "review", + "rendersetup", + "rig", + "plate", + "look", + "ociolook", + "audio", + "yetiRig", + "yeticache", + "nukenodes", + "gizmo", + "source", + "matchmove", + "image", + "assembly", + "fbx", + "gltf", + "textures", + "action", + "harmony.template", + "harmony.palette", + "editorial", + "background", + "camerarig", + "redshiftproxy", + "effect", + "xgen", + "hda", + "usd", + "staticMesh", + "skeletalMesh", + "mvLook", + "mvUsd", + "mvUsdComposition", + "mvUsdOverride", + "online", + "uasset", + "blendScene", + "yeticacheUE", + "tycache" + ] + + default_template_name = "publish" + + # Representation context keys that should always be written to + # the database even if not used by the destination template + db_representation_context_keys = [ + "project", "asset", "task", "subset", "version", "representation", + "family", "hierarchy", "username", "user", "output" + ] + + def process(self, instance): + + # Instance should be integrated on a farm + if instance.data.get("farm"): + self.log.debug( + "Instance is marked to be processed on farm. Skipping") + return + + # Instance is marked to not get integrated + if not instance.data.get("integrate", True): + self.log.debug("Instance is marked to skip integrating. Skipping") + return + + filtered_repres = self.filter_representations(instance) + # Skip instance if there are not representations to integrate + # all representations should not be integrated + if not filtered_repres: + self.log.warning(( + "Skipping, there are no representations" + " to integrate for instance {}" + ).format(instance.data["family"])) + return + + file_transactions = FileTransaction(log=self.log, + # Enforce unique transfers + allow_queue_replacements=False) + try: + self.register(instance, file_transactions, filtered_repres) + except DuplicateDestinationError as exc: + # Raise DuplicateDestinationError as KnownPublishError + # and rollback the transactions + file_transactions.rollback() + six.reraise(KnownPublishError, + KnownPublishError(exc), + sys.exc_info()[2]) + except Exception: + # clean destination + # todo: preferably we'd also rollback *any* changes to the database + file_transactions.rollback() + self.log.critical("Error when registering", exc_info=True) + six.reraise(*sys.exc_info()) + + # Finalizing can't rollback safely so no use for moving it to + # the try, except. 
+        file_transactions.finalize()
+
+    def filter_representations(self, instance):
+        # Prepare representations that should be integrated
+        repres = instance.data.get("representations")
+        # Raise error if the instance doesn't have any representations
+        if not repres:
+            raise KnownPublishError(
+                "Instance {} has no representations to integrate".format(
+                    instance.data["family"]
+                )
+            )
+
+        # Validate type of stored representations
+        if not isinstance(repres, (list, tuple)):
+            raise TypeError(
+                "Instance 'representations' must be a list, got: {0} {1}".format(
+                    str(type(repres)), str(repres)
+                )
+            )
+
+        # Filter representations
+        filtered_repres = []
+        for repre in repres:
+            if "delete" in repre.get("tags", []):
+                continue
+            filtered_repres.append(repre)
+
+        return filtered_repres
+
+    def register(self, instance, file_transactions, filtered_repres):
+        project_name = instance.context.data["projectName"]
+
+        instance_stagingdir = instance.data.get("stagingDir")
+        if not instance_stagingdir:
+            self.log.debug((
+                "{0} is missing reference to staging directory."
+                " Will try to get it from representation."
+            ).format(instance))
+
+        else:
+            self.log.debug(
+                "Establishing staging directory "
+                "@ {0}".format(instance_stagingdir)
+            )
+
+        template_name = self.get_template_name(instance)
+
+        op_session = OperationsSession()
+        subset = self.prepare_subset(
+            instance, op_session, project_name
+        )
+        version = self.prepare_version(
+            instance, op_session, subset, project_name
+        )
+        instance.data["versionEntity"] = version
+
+        anatomy = instance.context.data["anatomy"]
+
+        # Get existing representations (if any)
+        existing_repres_by_name = {
+            repre_doc["name"].lower(): repre_doc
+            for repre_doc in get_representations(
+                project_name,
+                version_ids=[version["_id"]],
+                fields=["_id", "name"]
+            )
+        }
+
+        # Prepare all representations
+        prepared_representations = []
+        for repre in filtered_repres:
+            # todo: reduce/simplify what is returned from this function
+            prepared = self.prepare_representation(
+                repre,
+                template_name,
+                existing_repres_by_name,
+                version,
+                instance_stagingdir,
+                instance)
+
+            for src, dst in prepared["transfers"]:
+                # todo: add support for hardlink transfers
+                file_transactions.add(src, dst)
+
+            prepared_representations.append(prepared)
+
+        # Each instance can also have pre-defined transfers not explicitly
+        # part of a representation - like texture resources used by a
+        # .ma representation. Those destination paths are pre-defined, etc.
+        # todo: should we move or simplify this logic?
+        resource_destinations = set()
+
+        file_copy_modes = [
+            ("transfers", FileTransaction.MODE_COPY),
+            ("hardlinks", FileTransaction.MODE_HARDLINK)
+        ]
+        for files_type, copy_mode in file_copy_modes:
+            for src, dst in instance.data.get(files_type, []):
+                self._validate_path_in_project_roots(anatomy, dst)
+
+                file_transactions.add(src, dst, mode=copy_mode)
+                resource_destinations.add(os.path.abspath(dst))
+
+        # Bulk write to the database
+        # We write the subset and version to the database before the File
+        # Transaction to reduce the chances of another publish trying to
+        # publish to the same version number since that chance can greatly
+        # increase if the file transaction takes a long time.
+ op_session.commit() + + self.log.info("Subset '{subset[name]}' version {version[name]} " + "written to database..".format(subset=subset, + version=version)) + + # Process all file transfers of all integrations now + self.log.debug("Integrating source files to destination ...") + file_transactions.process() + self.log.debug( + "Backed up existing files: {}".format(file_transactions.backups)) + self.log.debug( + "Transferred files: {}".format(file_transactions.transferred)) + self.log.debug("Retrieving Representation Site Sync information ...") + + # Get the accessible sites for Site Sync + addons_manager = instance.context.data["ayonAddonsManager"] + sync_server_addon = addons_manager.get("sync_server") + if sync_server_addon is None: + sites = [{ + "name": "studio", + "created_dt": datetime.datetime.now() + }] + else: + sites = sync_server_addon.compute_resource_sync_sites( + project_name=instance.data["projectEntity"]["name"] + ) + self.log.debug("Sync Server Sites: {}".format(sites)) + + # Compute the resource file infos once (files belonging to the + # version instance instead of an individual representation) so + # we can re-use those file infos per representation + resource_file_infos = self.get_files_info(resource_destinations, + sites=sites, + anatomy=anatomy) + + # Finalize the representations now the published files are integrated + # Get 'files' info for representations and its attached resources + new_repre_names_low = set() + for prepared in prepared_representations: + repre_doc = prepared["representation"] + repre_update_data = prepared["repre_doc_update_data"] + transfers = prepared["transfers"] + destinations = [dst for src, dst in transfers] + repre_doc["files"] = self.get_files_info( + destinations, sites=sites, anatomy=anatomy + ) + + # Add the version resource file infos to each representation + repre_doc["files"] += resource_file_infos + + # Set up representation for writing to the database. Since + # we *might* be overwriting an existing entry if the version + # already existed we'll use ReplaceOnce with `upsert=True` + if repre_update_data is None: + op_session.create_entity( + project_name, repre_doc["type"], repre_doc + ) + else: + op_session.update_entity( + project_name, + repre_doc["type"], + repre_doc["_id"], + repre_update_data + ) + + new_repre_names_low.add(repre_doc["name"].lower()) + + # Delete any existing representations that didn't get any new data + # if the instance is not set to append mode + if not instance.data.get("append", False): + for name, existing_repres in existing_repres_by_name.items(): + if name not in new_repre_names_low: + # We add the exact representation name because `name` is + # lowercase for name matching only and not in the database + op_session.delete_entity( + project_name, "representation", existing_repres["_id"] + ) + + self.log.debug("{}".format(op_session.to_data())) + op_session.commit() + + # Backwards compatibility used in hero integration. + # todo: can we avoid the need to store this? 
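+        # Shape of the stored mapping, built from the dicts returned by
+        # 'prepare_representation' above (sketch):
+        #     {
+        #         <representation id>: {
+        #             "representation": <representation doc>,
+        #             "repre_doc_update_data": <dict or None>,
+        #             "anatomy_data": <template data>,
+        #             "transfers": [(src, dst), ...],
+        #             "published_files": [dst, ...],
+        #         },
+        #         ...
+        #     }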
+ instance.data["published_representations"] = { + p["representation"]["_id"]: p for p in prepared_representations + } + + self.log.info( + "Registered {} representations: {}".format( + len(prepared_representations), + ", ".join(p["representation"]["name"] + for p in prepared_representations) + ) + ) + + def prepare_subset(self, instance, op_session, project_name): + asset_doc = instance.data["assetEntity"] + subset_name = instance.data["subset"] + family = instance.data["family"] + self.log.debug("Subset: {}".format(subset_name)) + + # Get existing subset if it exists + existing_subset_doc = get_subset_by_name( + project_name, subset_name, asset_doc["_id"] + ) + + # Define subset data + data = { + "families": get_instance_families(instance) + } + + subset_group = instance.data.get("subsetGroup") + if subset_group: + data["subsetGroup"] = subset_group + elif existing_subset_doc: + # Preserve previous subset group if new version does not set it + if "subsetGroup" in existing_subset_doc.get("data", {}): + subset_group = existing_subset_doc["data"]["subsetGroup"] + data["subsetGroup"] = subset_group + + subset_id = None + if existing_subset_doc: + subset_id = existing_subset_doc["_id"] + subset_doc = new_subset_document( + subset_name, family, asset_doc["_id"], data, subset_id + ) + + if existing_subset_doc is None: + # Create a new subset + self.log.info("Subset '%s' not found, creating ..." % subset_name) + op_session.create_entity( + project_name, subset_doc["type"], subset_doc + ) + + else: + # Update existing subset data with new data and set in database. + # We also change the found subset in-place so we don't need to + # re-query the subset afterwards + subset_doc["data"].update(data) + update_data = prepare_subset_update_data( + existing_subset_doc, subset_doc + ) + op_session.update_entity( + project_name, + subset_doc["type"], + subset_doc["_id"], + update_data + ) + + self.log.debug("Prepared subset: {}".format(subset_name)) + return subset_doc + + def prepare_version(self, instance, op_session, subset_doc, project_name): + version_number = instance.data["version"] + + existing_version = get_version_by_name( + project_name, + version_number, + subset_doc["_id"], + fields=["_id"] + ) + version_id = None + if existing_version: + version_id = existing_version["_id"] + + version_data = self.create_version_data(instance) + version_doc = new_version_doc( + version_number, + subset_doc["_id"], + version_data, + version_id + ) + + if existing_version: + self.log.debug("Updating existing version ...") + update_data = prepare_version_update_data( + existing_version, version_doc + ) + op_session.update_entity( + project_name, + version_doc["type"], + version_doc["_id"], + update_data + ) + else: + self.log.debug("Creating new version ...") + op_session.create_entity( + project_name, version_doc["type"], version_doc + ) + + self.log.debug( + "Prepared version: v{0:03d}".format(version_doc["name"]) + ) + + return version_doc + + def _validate_repre_files(self, files, is_sequence_representation): + """Validate representation files before transfer preparation. + + Check if files contain only filenames instead of full paths and check + if sequence don't contain more than one sequence or has remainders. + + Args: + files (Union[str, List[str]]): Files from representation. + is_sequence_representation (bool): Files are for sequence. + + Raises: + KnownPublishError: If validations don't pass. 
+ """ + + if not files: + return + + if not is_sequence_representation: + files = [files] + + if any(os.path.isabs(fname) for fname in files): + raise KnownPublishError("Given file names contain full paths") + + if not is_sequence_representation: + return + + src_collections, remainders = clique.assemble(files) + if len(files) < 2 or len(src_collections) != 1 or remainders: + raise KnownPublishError(( + "Files of representation does not contain proper" + " sequence files.\nCollected collections: {}" + "\nCollected remainders: {}" + ).format( + ", ".join([str(col) for col in src_collections]), + ", ".join([str(rem) for rem in remainders]) + )) + + def prepare_representation(self, repre, + template_name, + existing_repres_by_name, + version, + instance_stagingdir, + instance): + + # pre-flight validations + if repre["ext"].startswith("."): + raise KnownPublishError(( + "Extension must not start with a dot '.': {}" + ).format(repre["ext"])) + + if repre.get("transfers"): + raise KnownPublishError(( + "Representation is not allowed to have transfers" + "data before integration. They are computed in " + "the integrator. Got: {}" + ).format(repre["transfers"])) + + # create template data for Anatomy + template_data = copy.deepcopy(instance.data["anatomyData"]) + + # required representation keys + files = repre["files"] + template_data["representation"] = repre["name"] + template_data["ext"] = repre["ext"] + + # allow overwriting existing version + template_data["version"] = version["name"] + + # add template data for colorspaceData + if repre.get("colorspaceData"): + colorspace = repre["colorspaceData"]["colorspace"] + # replace spaces with underscores + # pipeline.colorspace.parse_colorspace_from_filepath + # is checking it with underscores too + colorspace = colorspace.replace(" ", "_") + template_data["colorspace"] = colorspace + + stagingdir = repre.get("stagingDir") + if not stagingdir: + # Fall back to instance staging dir if not explicitly + # set for representation in the instance + self.log.debug(( + "Representation uses instance staging dir: {}" + ).format(instance_stagingdir)) + stagingdir = instance_stagingdir + + if not stagingdir: + raise KnownPublishError( + "No staging directory set for representation: {}".format(repre) + ) + + # optionals + # retrieve additional anatomy data from representation if exists + for key, anatomy_key in { + # Representation Key: Anatomy data key + "resolutionWidth": "resolution_width", + "resolutionHeight": "resolution_height", + "fps": "fps", + "outputName": "output", + "originalBasename": "originalBasename" + }.items(): + # Allow to take value from representation + # if not found also consider instance.data + value = repre.get(key) + if value is None: + value = instance.data.get(key) + + if value is not None: + template_data[anatomy_key] = value + + self.log.debug("Anatomy template name: {}".format(template_name)) + anatomy = instance.context.data["anatomy"] + publish_template_category = anatomy.templates[template_name] + template = os.path.normpath(publish_template_category["path"]) + + is_udim = bool(repre.get("udim")) + + # handle publish in place + if "{originalDirname}" in template: + # store as originalDirname only original value without project root + # if instance collected originalDirname is present, it should be + # used for all represe + # from temp to final + original_directory = ( + instance.data.get("originalDirname") or instance_stagingdir) + + _rootless = self.get_rootless_path(anatomy, original_directory) + if _rootless == 
+                raise KnownPublishError((
+                    "Destination path '{}' ".format(original_directory) +
+                    "must be in project dir"
+                ))
+            relative_path_start = _rootless.rfind('}') + 2
+            without_root = _rootless[relative_path_start:]
+            template_data["originalDirname"] = without_root
+
+        is_sequence_representation = isinstance(files, (list, tuple))
+        self._validate_repre_files(files, is_sequence_representation)
+
+        # Output variables of conditions below:
+        # - transfers (List[Tuple[str, str]]): src -> dst filepaths to copy
+        # - repre_context (Dict[str, Any]): context data used to fill template
+        # - template_data (Dict[str, Any]): source data used to fill template
+        #     - to add required data to 'repre_context' not used for
+        #         formatting
+        path_template_obj = anatomy.templates_obj[template_name]["path"]
+
+        # Treat template with 'originalBasename' in a special way
+        if "{originalBasename}" in template:
+            # Remove 'frame' from template data
+            template_data.pop("frame", None)
+
+            # Find out first frame string value
+            first_index_padded = None
+            if not is_udim and is_sequence_representation:
+                col = clique.assemble(files)[0][0]
+                sorted_frames = tuple(sorted(col.indexes))
+                # First frame used for end value
+                first_frame = sorted_frames[0]
+                # Get last frame for padding
+                last_frame = sorted_frames[-1]
+                # Use the larger of collection padding and the string
+                #   length of the last frame
+                padding = max(col.padding, len(str(last_frame)))
+                first_index_padded = get_frame_padded(
+                    frame=first_frame,
+                    padding=padding
+                )
+
+            # Wrap a single file into a list as the remaining part only
+            #   creates transfers (iteration over files)
+            if not is_sequence_representation:
+                files = [files]
+
+            repre_context = None
+            transfers = []
+            for src_file_name in files:
+                template_data["originalBasename"], _ = os.path.splitext(
+                    src_file_name)
+
+                dst = path_template_obj.format_strict(template_data)
+                src = os.path.join(stagingdir, src_file_name)
+                transfers.append((src, dst))
+                if repre_context is None:
+                    repre_context = dst.used_values
+
+            if not is_udim and first_index_padded is not None:
+                repre_context["frame"] = first_index_padded
+
+        elif is_sequence_representation:
+            # Collection of files (sequence)
+            src_collections, remainders = clique.assemble(files)
+
+            src_collection = src_collections[0]
+            destination_indexes = list(src_collection.indexes)
+            # Use last frame for minimum padding
+            # - that should cover both 'udim' and 'frame' minimum padding
+            destination_padding = len(str(destination_indexes[-1]))
+            if not is_udim:
+                # Change padding for frames if template has defined higher
+                # padding.
+                template_padding = int(
+                    publish_template_category["frame_padding"]
+                )
+                if template_padding > destination_padding:
+                    destination_padding = template_padding
+
+            # If the representation has `frameStart` set it renumbers the
+            # frame indices of the published collection. It will start from
+            # that `frameStart` index instead. Thus if that frame start
+            # differs from the collection we want to shift the destination
+            # frame indices from the source collection.
+            # In case sources are published in place we need to
+            # skip renumbering
+            repre_frame_start = repre.get("frameStart")
+            if repre_frame_start is not None:
+                index_frame_start = int(repre_frame_start)
+                # Shift destination sequence to the start frame
+                destination_indexes = [
+                    index_frame_start + idx
+                    for idx in range(len(destination_indexes))
+                ]
+
+            # To construct the destination template with anatomy we require
+            # a Frame or UDIM tile set for the template data.
We use the first + # index of the destination for that because that could've shifted + # from the source indexes, etc. + first_index_padded = get_frame_padded( + frame=destination_indexes[0], + padding=destination_padding + ) + + # Construct destination collection from template + repre_context = None + dst_filepaths = [] + for index in destination_indexes: + if is_udim: + template_data["udim"] = index + else: + template_data["frame"] = index + template_filled = path_template_obj.format_strict( + template_data + ) + dst_filepaths.append(template_filled) + if repre_context is None: + self.log.debug( + "Template filled: {}".format(str(template_filled)) + ) + repre_context = template_filled.used_values + + # Make sure context contains frame + # NOTE: Frame would not be available only if template does not + # contain '{frame}' in template -> Do we want support it? + if not is_udim: + repre_context["frame"] = first_index_padded + + # Update the destination indexes and padding + dst_collection = clique.assemble(dst_filepaths)[0][0] + dst_collection.padding = destination_padding + if len(src_collection.indexes) != len(dst_collection.indexes): + raise KnownPublishError(( + "This is a bug. Source sequence frames length" + " does not match integration frames length" + )) + + # Multiple file transfers + transfers = [] + for src_file_name, dst in zip(src_collection, dst_collection): + src = os.path.join(stagingdir, src_file_name) + transfers.append((src, dst)) + + else: + # Single file + # Manage anatomy template data + template_data.pop("frame", None) + if is_udim: + template_data["udim"] = repre["udim"][0] + # Construct destination filepath from template + template_filled = path_template_obj.format_strict(template_data) + repre_context = template_filled.used_values + dst = os.path.normpath(template_filled) + + # Single file transfer + src = os.path.join(stagingdir, files) + transfers = [(src, dst)] + + # todo: Are we sure the assumption each representation + # ends up in the same folder is valid? + if not instance.data.get("publishDir"): + template_obj = anatomy.templates_obj[template_name]["folder"] + template_filled = template_obj.format_strict(template_data) + instance.data["publishDir"] = template_filled + + for key in self.db_representation_context_keys: + # Also add these values to the context even if not used by the + # destination template + value = template_data.get(key) + if value is not None: + repre_context[key] = value + + # Explicitly store the full list even though template data might + # have a different value because it uses just a single udim tile + if repre.get("udim"): + repre_context["udim"] = repre.get("udim") # store list + + # Use previous representation's id if there is a name match + existing = existing_repres_by_name.get(repre["name"].lower()) + repre_id = None + if existing: + repre_id = existing["_id"] + + # Store first transferred destination as published path data + # - used primarily for reviews that are integrated to custom modules + # TODO we should probably store all integrated files + # related to the representation? 
+ published_path = transfers[0][1] + repre["published_path"] = published_path + + # todo: `repre` is not the actual `representation` entity + # we should simplify/clarify difference between data above + # and the actual representation entity for the database + data = repre.get("data", {}) + data.update({"path": published_path, "template": template}) + + # add colorspace data if any exists on representation + if repre.get("colorspaceData"): + data["colorspaceData"] = repre["colorspaceData"] + + repre_doc = new_representation_doc( + repre["name"], version["_id"], repre_context, data, repre_id + ) + update_data = None + if repre_id is not None: + update_data = prepare_representation_update_data( + existing, repre_doc + ) + + return { + "representation": repre_doc, + "repre_doc_update_data": update_data, + "anatomy_data": template_data, + "transfers": transfers, + # todo: avoid the need for 'published_files' used by Integrate Hero + # backwards compatibility + "published_files": [transfer[1] for transfer in transfers] + } + + def create_version_data(self, instance): + """Create the data dictionary for the version + + Args: + instance: the current instance being published + + Returns: + dict: the required information for version["data"] + """ + + context = instance.context + + # create relative source path for DB + if "source" in instance.data: + source = instance.data["source"] + else: + source = context.data["currentFile"] + anatomy = instance.context.data["anatomy"] + source = self.get_rootless_path(anatomy, source) + self.log.debug("Source: {}".format(source)) + + version_data = { + "families": get_instance_families(instance), + "time": context.data["time"], + "author": context.data["user"], + "source": source, + "comment": instance.data["comment"], + "machine": context.data.get("machine"), + "fps": instance.data.get("fps", context.data.get("fps")) + } + + # todo: preferably we wouldn't need this "if dict" etc. logic and + # instead be able to rely what the input value is if it's set. + intent_value = context.data.get("intent") + if intent_value and isinstance(intent_value, dict): + intent_value = intent_value.get("value") + + if intent_value: + version_data["intent"] = intent_value + + # Include optional data if present in + optionals = [ + "frameStart", "frameEnd", "step", + "handleEnd", "handleStart", "sourceHashes" + ] + for key in optionals: + if key in instance.data: + version_data[key] = instance.data[key] + + # Include instance.data[versionData] directly + version_data_instance = instance.data.get("versionData") + if version_data_instance: + version_data.update(version_data_instance) + + return version_data + + def get_template_name(self, instance): + """Return anatomy template name to use for integration""" + + # Anatomy data is pre-filled by Collectors + context = instance.context + project_name = context.data["projectName"] + + # Task can be optional in anatomy data + host_name = context.data["hostName"] + anatomy_data = instance.data["anatomyData"] + family = anatomy_data["family"] + task_info = anatomy_data.get("task") or {} + + return get_publish_template_name( + project_name, + host_name, + family, + task_name=task_info.get("name"), + task_type=task_info.get("type"), + project_settings=context.data["project_settings"], + logger=self.log + ) + + def get_rootless_path(self, anatomy, path): + """Returns, if possible, path without absolute portion from root + (eg. 'c:\' or '/opt/..') + + This information is platform dependent and shouldn't be captured. 
+ Example: + 'c:/projects/MyProject1/Assets/publish...' > + '{root}/MyProject1/Assets...' + + Args: + anatomy: anatomy part from instance + path: path (absolute) + Returns: + path: modified path if possible, or unmodified path + + warning logged + """ + + success, rootless_path = anatomy.find_root_template_from_path(path) + if success: + path = rootless_path + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(path)) + return path + + def get_files_info(self, destinations, sites, anatomy): + """Prepare 'files' info portion for representations. + + Arguments: + destinations (list): List of transferred file destinations + sites (list): array of published locations + anatomy: anatomy part from instance + Returns: + output_resources: array of dictionaries to be added to 'files' key + in representation + """ + + file_infos = [] + for file_path in destinations: + file_info = self.prepare_file_info(file_path, anatomy, sites=sites) + file_infos.append(file_info) + return file_infos + + def prepare_file_info(self, path, anatomy, sites): + """ Prepare information for one file (asset or resource) + + Arguments: + path: destination url of published file + anatomy: anatomy part from instance + sites: array of published locations, + [ {'name':'studio', 'created_dt':date} by default + keys expected ['studio', 'site1', 'gdrive1'] + + Returns: + dict: file info dictionary + """ + + return { + "_id": ObjectId(), + "path": self.get_rootless_path(anatomy, path), + "size": os.path.getsize(path), + "hash": source_hash(path), + "sites": sites + } + + def _validate_path_in_project_roots(self, anatomy, file_path): + """Checks if 'file_path' starts with any of the roots. + + Used to check that published path belongs to project, eg. we are not + trying to publish to local only folder. 
+ Args: + anatomy (Anatomy) + file_path (str) + Raises + (KnownPublishError) + """ + path = self.get_rootless_path(anatomy, file_path) + if not path: + raise KnownPublishError(( + "Destination path '{}' ".format(file_path) + + "must be in project dir" + )) diff --git a/openpype/plugins/publish/integrate_hero_version.py b/client/ayon_core/plugins/publish/integrate_hero_version.py similarity index 96% rename from openpype/plugins/publish/integrate_hero_version.py rename to client/ayon_core/plugins/publish/integrate_hero_version.py index 59dc6b5c64..6dec41b7b0 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/client/ayon_core/plugins/publish/integrate_hero_version.py @@ -6,24 +6,23 @@ import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.client import ( +from ayon_core.client import ( get_version_by_id, get_hero_version_by_subset_id, get_archived_representations, get_representations, ) -from openpype.client.operations import ( +from ayon_core.client.operations import ( OperationsSession, new_hero_version_doc, prepare_hero_version_update_data, prepare_representation_update_data, ) -from openpype.lib import create_hard_link -from openpype.pipeline import ( +from ayon_core.lib import create_hard_link +from ayon_core.pipeline import ( schema ) -from openpype.pipeline.publish import get_publish_template_name +from ayon_core.pipeline.publish import get_publish_template_name class IntegrateHeroVersion(pyblish.api.InstancePlugin): @@ -141,7 +140,7 @@ def integrate_instance( )) return - if AYON_SERVER_ENABLED and src_version_entity["name"] == 0: + if src_version_entity["name"] == 0: self.log.debug( "Version 0 cannot have hero version. Skipping." ) @@ -202,19 +201,12 @@ def integrate_instance( if old_version: entity_id = old_version["_id"] - if AYON_SERVER_ENABLED: - new_hero_version = new_hero_version_doc( - src_version_entity["parent"], - copy.deepcopy(src_version_entity["data"]), - src_version_entity["name"], - entity_id=entity_id - ) - else: - new_hero_version = new_hero_version_doc( - src_version_entity["_id"], - src_version_entity["parent"], - entity_id=entity_id - ) + new_hero_version = new_hero_version_doc( + src_version_entity["parent"], + copy.deepcopy(src_version_entity["data"]), + src_version_entity["name"], + entity_id=entity_id + ) if old_version: self.log.debug("Replacing old hero version.") diff --git a/client/ayon_core/plugins/publish/integrate_inputlinks.py b/client/ayon_core/plugins/publish/integrate_inputlinks.py new file mode 100644 index 0000000000..da8df53170 --- /dev/null +++ b/client/ayon_core/plugins/publish/integrate_inputlinks.py @@ -0,0 +1,201 @@ +import collections + +import pyblish.api +from ayon_api import ( + create_link, + make_sure_link_type_exists, + get_versions_links, +) + + +class IntegrateInputLinksAYON(pyblish.api.ContextPlugin): + """Connecting version level dependency links""" + + order = pyblish.api.IntegratorOrder + 0.2 + label = "Connect Dependency InputLinks AYON" + + def process(self, context): + """Connect dependency links for all instances, globally + + Code steps: + - filter instances that integrated version + - have "versionEntity" entry in data + - separate workfile instance within filtered instances + - when workfile instance is available: + - link all `loadedVersions` as input of the workfile + - link workfile as input of all other integrated versions + - link version's inputs if it's instance have "inputVersions" entry + - + + inputVersions: + The "inputVersions" in instance.data should be a list of + 
version ids (str), which are the dependencies of the publishing + instance that should be extracted from working scene by the DCC + specific publish plugin. + """ + + workfile_instance, other_instances = self.split_instances(context) + + # Variable where links are stored in submethods + new_links_by_type = collections.defaultdict(list) + + self.create_workfile_links( + workfile_instance, other_instances, new_links_by_type) + + self.create_generative_links(other_instances, new_links_by_type) + + self.create_links_on_server(context, new_links_by_type) + + def split_instances(self, context): + workfile_instance = None + other_instances = [] + + for instance in context: + # Skip inactive instances + if not instance.data.get("publish", True): + continue + + version_doc = instance.data.get("versionEntity") + if not version_doc: + self.log.debug( + "Instance {} doesn't have version.".format(instance)) + continue + + family = instance.data.get("family") + if family == "workfile": + workfile_instance = instance + else: + other_instances.append(instance) + return workfile_instance, other_instances + + def add_link(self, new_links_by_type, link_type, input_id, output_id): + """Add dependency link data into temporary variable. + + Args: + new_links_by_type (dict[str, list[dict[str, Any]]]): Object where + output is stored. + link_type (str): Type of link, one of 'reference' or 'generative' + input_id (str): Input version id. + output_id (str): Output version id. + """ + + new_links_by_type[link_type].append((input_id, output_id)) + + def create_workfile_links( + self, workfile_instance, other_instances, new_links_by_type + ): + if workfile_instance is None: + self.log.warn("No workfile in this publish session.") + return + + workfile_version_id = workfile_instance.data["versionEntity"]["_id"] + # link workfile to all publishing versions + for instance in other_instances: + self.add_link( + new_links_by_type, + "generative", + workfile_version_id, + instance.data["versionEntity"]["_id"], + ) + + loaded_versions = workfile_instance.context.get("loadedVersions") + if not loaded_versions: + return + + # link all loaded versions in scene into workfile + for version in loaded_versions: + self.add_link( + new_links_by_type, + "reference", + version["version"], + workfile_version_id, + ) + + def create_generative_links(self, other_instances, new_links_by_type): + for instance in other_instances: + input_versions = instance.data.get("inputVersions") + if not input_versions: + continue + + version_entity = instance.data["versionEntity"] + for input_version in input_versions: + self.add_link( + new_links_by_type, + "generative", + input_version, + version_entity["_id"], + ) + + def _get_existing_links(self, project_name, link_type, entity_ids): + """Find all existing links for given version ids. + + Args: + project_name (str): Name of project. + link_type (str): Type of link. + entity_ids (set[str]): Set of version ids. + + Returns: + dict[str, set[str]]: Existing links by version id. + """ + + output = collections.defaultdict(set) + if not entity_ids: + return output + + existing_in_links = get_versions_links( + project_name, entity_ids, [link_type], "output" + ) + + for entity_id, links in existing_in_links.items(): + if not links: + continue + for link in links: + output[entity_id].add(link["entityId"]) + return output + + def create_links_on_server(self, context, new_links): + """Create new links on server. + + Args: + dict[str, list[tuple[str, str]]]: Version links by link type. 
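+
+        Example of the expected mapping (illustrative ids):
+            {"reference": [("<input version id>", "<output version id>")]}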
+ """ + + if not new_links: + return + + project_name = context.data["projectName"] + + # Make sure link types are available on server + for link_type in new_links.keys(): + make_sure_link_type_exists( + project_name, link_type, "version", "version" + ) + + # Create link themselves + for link_type, items in new_links.items(): + mapping = collections.defaultdict(set) + # Make sure there are no duplicates of src > dst ids + for item in items: + _input_id, _output_id = item + mapping[_input_id].add(_output_id) + + existing_links_by_in_id = self._get_existing_links( + project_name, link_type, set(mapping.keys()) + ) + + for input_id, output_ids in mapping.items(): + existing_links = existing_links_by_in_id[input_id] + for output_id in output_ids: + # Skip creation of link if already exists + # NOTE: AYON server does not support + # to have same links + if output_id in existing_links: + continue + create_link( + project_name, + link_type, + input_id, + "version", + output_id, + "version" + ) diff --git a/openpype/plugins/publish/integrate_resources_path.py b/client/ayon_core/plugins/publish/integrate_resources_path.py similarity index 100% rename from openpype/plugins/publish/integrate_resources_path.py rename to client/ayon_core/plugins/publish/integrate_resources_path.py diff --git a/openpype/plugins/publish/integrate_subset_group.py b/client/ayon_core/plugins/publish/integrate_subset_group.py similarity index 94% rename from openpype/plugins/publish/integrate_subset_group.py rename to client/ayon_core/plugins/publish/integrate_subset_group.py index a24ebba3a5..c2f1eac9e3 100644 --- a/openpype/plugins/publish/integrate_subset_group.py +++ b/client/ayon_core/plugins/publish/integrate_subset_group.py @@ -9,8 +9,8 @@ """ import pyblish.api -from openpype.lib.profiles_filtering import filter_profiles -from openpype.lib import ( +from ayon_core.lib.profiles_filtering import filter_profiles +from ayon_core.lib import ( prepare_template_data, StringTemplate, TemplateUnsolved @@ -30,7 +30,7 @@ class IntegrateSubsetGroup(pyblish.api.InstancePlugin): def process(self, instance): """Look into subset group profiles set by settings. - Attribute 'subset_grouping_profiles' is defined by OpenPype settings. + Attribute 'subset_grouping_profiles' is defined by settings. """ # Skip if 'subset_grouping_profiles' is empty diff --git a/client/ayon_core/plugins/publish/integrate_thumbnail.py b/client/ayon_core/plugins/publish/integrate_thumbnail.py new file mode 100644 index 0000000000..dd3fdd5073 --- /dev/null +++ b/client/ayon_core/plugins/publish/integrate_thumbnail.py @@ -0,0 +1,223 @@ +""" Integrate Thumbnails for use in Loaders. + + This thumbnail is different from 'thumbnail' representation which could + be uploaded to Ftrack, or used as any other representation in Loaders to + pull into a scene. + + This one is used only as image describing content of published item and + shows up only in Loader or WebUI. + + Instance must have 'published_representations' to + be able to integrate thumbnail. + Possible sources of thumbnail paths: + - instance.data["thumbnailPath"] + - representation with 'thumbnail' name in 'published_representations' + - context.data["thumbnailPath"] + + Notes: + Issue with 'thumbnail' representation is that we most likely don't + want to integrate it as representation. Integrated representation + is polluting Loader and database without real usage. That's why + they usually have 'delete' tag to skip the integration. 
+ +""" + +import os +import collections + +import pyblish.api + +from ayon_core.client import get_versions +from ayon_core.client.operations import OperationsSession + +InstanceFilterResult = collections.namedtuple( + "InstanceFilterResult", + ["instance", "thumbnail_path", "version_id"] +) + + +class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin): + """Integrate Thumbnails for use in Loaders.""" + + label = "Integrate Thumbnails to AYON" + order = pyblish.api.IntegratorOrder + 0.01 + + required_context_keys = [ + "project", "asset", "task", "subset", "version" + ] + + def process(self, context): + # Filter instances which can be used for integration + filtered_instance_items = self._prepare_instances(context) + if not filtered_instance_items: + self.log.debug( + "All instances were filtered. Thumbnail integration skipped." + ) + return + + project_name = context.data["projectName"] + + # Collect version ids from all filtered instance + version_ids = { + instance_items.version_id + for instance_items in filtered_instance_items + } + # Query versions + version_docs = get_versions( + project_name, + version_ids=version_ids, + hero=True, + fields=["_id", "type", "name"] + ) + # Store version by their id (converted to string) + version_docs_by_str_id = { + str(version_doc["_id"]): version_doc + for version_doc in version_docs + } + self._integrate_thumbnails( + filtered_instance_items, + version_docs_by_str_id, + project_name + ) + + def _prepare_instances(self, context): + context_thumbnail_path = context.data.get("thumbnailPath") + valid_context_thumbnail = bool( + context_thumbnail_path + and os.path.exists(context_thumbnail_path) + ) + + filtered_instances = [] + for instance in context: + instance_label = self._get_instance_label(instance) + # Skip instances without published representations + # - there is no place where to put the thumbnail + published_repres = instance.data.get("published_representations") + if not published_repres: + self.log.debug(( + "There are no published representations" + " on the instance {}." + ).format(instance_label)) + continue + + # Find thumbnail path on instance + thumbnail_path = ( + instance.data.get("thumbnailPath") + or self._get_instance_thumbnail_path(published_repres) + ) + if thumbnail_path: + self.log.debug(( + "Found thumbnail path for instance \"{}\"." + " Thumbnail path: {}" + ).format(instance_label, thumbnail_path)) + + elif valid_context_thumbnail: + # Use context thumbnail path if is available + thumbnail_path = context_thumbnail_path + self.log.debug(( + "Using context thumbnail path for instance \"{}\"." + " Thumbnail path: {}" + ).format(instance_label, thumbnail_path)) + + # Skip instance if thumbnail path is not available for it + if not thumbnail_path: + self.log.debug(( + "Skipping thumbnail integration for instance \"{}\"." + " Instance and context" + " thumbnail paths are not available." 
+ ).format(instance_label)) + continue + + version_id = str(self._get_version_id(published_repres)) + filtered_instances.append( + InstanceFilterResult(instance, thumbnail_path, version_id) + ) + return filtered_instances + + def _get_version_id(self, published_representations): + for repre_info in published_representations.values(): + return repre_info["representation"]["parent"] + + def _get_instance_thumbnail_path(self, published_representations): + thumb_repre_doc = None + for repre_info in published_representations.values(): + repre_doc = repre_info["representation"] + if "thumbnail" in repre_doc["name"].lower(): + thumb_repre_doc = repre_doc + break + + if thumb_repre_doc is None: + self.log.debug( + "There is no representation with name \"thumbnail\"" + ) + return None + + path = thumb_repre_doc["data"]["path"] + if not os.path.exists(path): + self.log.warning( + "Thumbnail file cannot be found. Path: {}".format(path) + ) + return None + return os.path.normpath(path) + + def _integrate_thumbnails( + self, + filtered_instance_items, + version_docs_by_str_id, + project_name + ): + from ayon_core.client.operations import create_thumbnail + + # Make sure each entity id has defined only one thumbnail id + thumbnail_info_by_entity_id = {} + for instance_item in filtered_instance_items: + instance, thumbnail_path, version_id = instance_item + instance_label = self._get_instance_label(instance) + version_doc = version_docs_by_str_id.get(version_id) + if not version_doc: + self.log.warning(( + "Version entity for instance \"{}\" was not found." + ).format(instance_label)) + continue + + thumbnail_id = create_thumbnail(project_name, thumbnail_path) + + # Set thumbnail id for version + thumbnail_info_by_entity_id[version_id] = { + "thumbnail_id": thumbnail_id, + "entity_type": version_doc["type"], + } + if version_doc["type"] == "hero_version": + version_name = "Hero" + else: + version_name = version_doc["name"] + self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( + version_name, version_id + )) + + asset_entity = instance.data["assetEntity"] + thumbnail_info_by_entity_id[asset_entity["_id"]] = { + "thumbnail_id": thumbnail_id, + "entity_type": "asset", + } + self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format( + asset_entity["name"], version_id + )) + + op_session = OperationsSession() + for entity_id, thumbnail_info in thumbnail_info_by_entity_id.items(): + thumbnail_id = thumbnail_info["thumbnail_id"] + op_session.update_entity( + project_name, + thumbnail_info["entity_type"], + entity_id, + {"data.thumbnail_id": thumbnail_id} + ) + op_session.commit() + + def _get_instance_label(self, instance): + return ( + instance.data.get("label") + or instance.data.get("name") + or "N/A" + ) diff --git a/openpype/plugins/publish/integrate_version_attrs.py b/client/ayon_core/plugins/publish/integrate_version_attrs.py similarity index 93% rename from openpype/plugins/publish/integrate_version_attrs.py rename to client/ayon_core/plugins/publish/integrate_version_attrs.py index ed179ae319..5b5ec9cf5b 100644 --- a/openpype/plugins/publish/integrate_version_attrs.py +++ b/client/ayon_core/plugins/publish/integrate_version_attrs.py @@ -1,8 +1,7 @@ import pyblish.api import ayon_api -from openpype import AYON_SERVER_ENABLED -from openpype.client.operations import OperationsSession +from ayon_core.client.operations import OperationsSession class IntegrateVersionAttributes(pyblish.api.ContextPlugin): @@ -86,8 +85,3 @@ def get_instance_label(instance): or instance.data.get("subset") 
or str(instance) ) - - -# Discover the plugin only in AYON mode -if not AYON_SERVER_ENABLED: - del IntegrateVersionAttributes diff --git a/openpype/plugins/publish/preintegrate_thumbnail_representation.py b/client/ayon_core/plugins/publish/preintegrate_thumbnail_representation.py similarity index 97% rename from openpype/plugins/publish/preintegrate_thumbnail_representation.py rename to client/ayon_core/plugins/publish/preintegrate_thumbnail_representation.py index 77bf2edba5..fc60948139 100644 --- a/openpype/plugins/publish/preintegrate_thumbnail_representation.py +++ b/client/ayon_core/plugins/publish/preintegrate_thumbnail_representation.py @@ -13,7 +13,7 @@ """ import pyblish.api -from openpype.lib.profiles_filtering import filter_profiles +from ayon_core.lib.profiles_filtering import filter_profiles class PreIntegrateThumbnails(pyblish.api.InstancePlugin): diff --git a/openpype/plugins/publish/repair_unicode_strings.py b/client/ayon_core/plugins/publish/repair_unicode_strings.py similarity index 100% rename from openpype/plugins/publish/repair_unicode_strings.py rename to client/ayon_core/plugins/publish/repair_unicode_strings.py diff --git a/openpype/plugins/publish/validate_asset_docs.py b/client/ayon_core/plugins/publish/validate_asset_docs.py similarity index 95% rename from openpype/plugins/publish/validate_asset_docs.py rename to client/ayon_core/plugins/publish/validate_asset_docs.py index 8dfd783c39..22d957f6e2 100644 --- a/openpype/plugins/publish/validate_asset_docs.py +++ b/client/ayon_core/plugins/publish/validate_asset_docs.py @@ -1,5 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishValidationError +from ayon_core.pipeline import PublishValidationError class ValidateAssetDocs(pyblish.api.InstancePlugin): diff --git a/openpype/plugins/publish/validate_containers.py b/client/ayon_core/plugins/publish/validate_containers.py similarity index 86% rename from openpype/plugins/publish/validate_containers.py rename to client/ayon_core/plugins/publish/validate_containers.py index 8dc0c61cab..bd21ec9693 100644 --- a/openpype/plugins/publish/validate_containers.py +++ b/client/ayon_core/plugins/publish/validate_containers.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline.load import any_outdated_containers -from openpype.pipeline import ( +from ayon_core.pipeline.load import any_outdated_containers +from ayon_core.pipeline import ( PublishXmlValidationError, OptionalPyblishPluginMixin ) @@ -13,7 +13,7 @@ class ShowInventory(pyblish.api.Action): on = "failed" def process(self, context, plugin): - from openpype.tools.utils import host_tools + from ayon_core.tools.utils import host_tools host_tools.show_scene_inventory() diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/client/ayon_core/plugins/publish/validate_editorial_asset_name.py similarity index 97% rename from openpype/plugins/publish/validate_editorial_asset_name.py rename to client/ayon_core/plugins/publish/validate_editorial_asset_name.py index b5afc49f2e..d40263d7f3 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/client/ayon_core/plugins/publish/validate_editorial_asset_name.py @@ -2,7 +2,7 @@ import pyblish.api -from openpype.client import get_assets, get_asset_name_identifier +from ayon_core.client import get_assets, get_asset_name_identifier class ValidateEditorialAssetName(pyblish.api.ContextPlugin): @@ -16,7 +16,6 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin): label = "Validate Editorial Asset Name" hosts = [ "hiero", - 
"standalonepublisher", "resolve", "flame", "traypublisher" diff --git a/client/ayon_core/plugins/publish/validate_file_saved.py b/client/ayon_core/plugins/publish/validate_file_saved.py new file mode 100644 index 0000000000..d459ba7ed4 --- /dev/null +++ b/client/ayon_core/plugins/publish/validate_file_saved.py @@ -0,0 +1,17 @@ +import pyblish.api + +from ayon_core.pipeline.publish import PublishValidationError + + +class ValidateCurrentSaveFile(pyblish.api.ContextPlugin): + """File must be saved before publishing""" + + label = "Validate File Saved" + order = pyblish.api.ValidatorOrder - 0.1 + hosts = ["maya", "houdini", "nuke"] + + def process(self, context): + + current_file = context.data["currentFile"] + if not current_file: + raise PublishValidationError("File not saved") diff --git a/openpype/plugins/publish/validate_filesequences.py b/client/ayon_core/plugins/publish/validate_filesequences.py similarity index 87% rename from openpype/plugins/publish/validate_filesequences.py rename to client/ayon_core/plugins/publish/validate_filesequences.py index 0ac281022d..2e44c98167 100644 --- a/openpype/plugins/publish/validate_filesequences.py +++ b/client/ayon_core/plugins/publish/validate_filesequences.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError class ValidateFileSequences(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/validate_intent.py b/client/ayon_core/plugins/publish/validate_intent.py similarity index 94% rename from openpype/plugins/publish/validate_intent.py rename to client/ayon_core/plugins/publish/validate_intent.py index 832c7cc0a1..71df652e92 100644 --- a/openpype/plugins/publish/validate_intent.py +++ b/client/ayon_core/plugins/publish/validate_intent.py @@ -1,7 +1,7 @@ import pyblish.api -from openpype.lib import filter_profiles -from openpype.pipeline.publish import PublishValidationError +from ayon_core.lib import filter_profiles +from ayon_core.pipeline.publish import PublishValidationError class ValidateIntent(pyblish.api.ContextPlugin): diff --git a/openpype/plugins/publish/validate_publish_dir.py b/client/ayon_core/plugins/publish/validate_publish_dir.py similarity index 96% rename from openpype/plugins/publish/validate_publish_dir.py rename to client/ayon_core/plugins/publish/validate_publish_dir.py index 0eb93da583..5827774cca 100644 --- a/openpype/plugins/publish/validate_publish_dir.py +++ b/client/ayon_core/plugins/publish/validate_publish_dir.py @@ -1,6 +1,6 @@ import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder -from openpype.pipeline.publish import ( +from ayon_core.pipeline.publish import ValidateContentsOrder +from ayon_core.pipeline.publish import ( PublishXmlValidationError, get_publish_template_name, ) diff --git a/client/ayon_core/plugins/publish/validate_resources.py b/client/ayon_core/plugins/publish/validate_resources.py new file mode 100644 index 0000000000..1b12d8bb05 --- /dev/null +++ b/client/ayon_core/plugins/publish/validate_resources.py @@ -0,0 +1,28 @@ +import os +import pyblish.api +from ayon_core.pipeline.publish import ValidateContentsOrder + + +class ValidateResources(pyblish.api.InstancePlugin): + """Validates mapped resources. + + These are external files to the current application, for example + these could be textures, image planes, cache files or other linked + media. + + This validates: + - The resources are existing files. 
+ - The resources have correctly collected the data. + + """ + + order = ValidateContentsOrder + label = "Validate Resources" + + def process(self, instance): + + for resource in instance.data.get('resources', []): + # Required data + assert "source" in resource, "No source found" + assert "files" in resource, "No files from source" + assert all(os.path.exists(f) for f in resource['files']) diff --git a/client/ayon_core/plugins/publish/validate_unique_subsets.py b/client/ayon_core/plugins/publish/validate_unique_subsets.py new file mode 100644 index 0000000000..75d12f8e01 --- /dev/null +++ b/client/ayon_core/plugins/publish/validate_unique_subsets.py @@ -0,0 +1,76 @@ +from collections import defaultdict +import pyblish.api +from ayon_core.pipeline.publish import ( + PublishXmlValidationError, +) + + +class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): + """Validate all subset names are unique. + + This only validates whether the instances currently set to publish from + the workfile overlap one another for the asset + subset they are publishing + to. + + This does not perform any check against existing publishes in the database + since it is allowed to publish into existing subsets resulting in + versioning. + + A subset may appear twice to publish from the workfile if one + of them is set to publish to another asset than the other. + + """ + + label = "Validate Subset Uniqueness" + order = pyblish.api.ValidatorOrder + families = ["*"] + + def process(self, context): + + # Find instance per (asset,subset) + instance_per_asset_subset = defaultdict(list) + for instance in context: + + # Ignore disabled instances + if not instance.data.get('publish', True): + continue + + # Ignore instance without asset data + asset = instance.data.get("asset") + if asset is None: + self.log.warning("Instance found without `asset` data: " + "{}".format(instance.name)) + continue + + # Ignore instance without subset data + subset = instance.data.get("subset") + if subset is None: + self.log.warning("Instance found without `subset` data: " + "{}".format(instance.name)) + continue + + instance_per_asset_subset[(asset, subset)].append(instance) + + non_unique = [] + for (asset, subset), instances in instance_per_asset_subset.items(): + + # A single instance per asset, subset is fine + if len(instances) < 2: + continue + + non_unique.append("{asset} > {subset}".format(asset=asset, + subset=subset)) + + if not non_unique: + # All is ok + return + + msg = ("Instance subset names {} are not unique. ".format(non_unique) + + "Please remove or rename duplicates.") + formatting_data = { + "non_unique": ",".join(non_unique) + } + + if non_unique: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/plugins/publish/validate_version.py b/client/ayon_core/plugins/publish/validate_version.py similarity index 88% rename from openpype/plugins/publish/validate_version.py rename to client/ayon_core/plugins/publish/validate_version.py index 84d52fab73..9031194e8c 100644 --- a/openpype/plugins/publish/validate_version.py +++ b/client/ayon_core/plugins/publish/validate_version.py @@ -1,17 +1,17 @@ import pyblish.api -from openpype.pipeline.publish import PublishValidationError +from ayon_core.pipeline.publish import PublishValidationError class ValidateVersion(pyblish.api.InstancePlugin): """Validate instance version. - OpenPype does not allow overwriting previously published versions. + AYON does not allow overwriting previously published versions. 
""" order = pyblish.api.ValidatorOrder label = "Validate Version" - hosts = ["nuke", "maya", "houdini", "blender", "standalonepublisher", + hosts = ["nuke", "maya", "houdini", "blender", "photoshop", "aftereffects"] optional = False diff --git a/client/ayon_core/resources/__init__.py b/client/ayon_core/resources/__init__.py new file mode 100644 index 0000000000..2a98cc1968 --- /dev/null +++ b/client/ayon_core/resources/__init__.py @@ -0,0 +1,88 @@ +import os +from ayon_core.lib import is_staging_enabled + +RESOURCES_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def get_resource(*args): + """ Serves to simple resources access + + :param *args: should contain *subfolder* names and *filename* of + resource from resources folder + :type *args: list + """ + return os.path.normpath(os.path.join(RESOURCES_DIR, *args)) + + +def get_image_path(*args): + """Helper function to get images. + + Args: + *: Filepath part items. + """ + return get_resource("images", *args) + + +def get_liberation_font_path(bold=False, italic=False): + font_name = "LiberationSans" + suffix = "" + if bold: + suffix += "Bold" + if italic: + suffix += "Italic" + + if not suffix: + suffix = "Regular" + + filename = "{}-{}.ttf".format(font_name, suffix) + font_path = get_resource("fonts", font_name, filename) + return font_path + + +def get_ayon_production_icon_filepath(): + return get_resource("icons", "AYON_icon.png") + + +def get_ayon_staging_icon_filepath(): + return get_resource("icons", "AYON_icon_staging.png") + + +def get_ayon_icon_filepath(staging=None): + if os.getenv("AYON_USE_DEV") == "1": + return get_resource("icons", "AYON_icon_dev.png") + + if staging is None: + staging = is_staging_enabled() + + if staging: + return get_ayon_staging_icon_filepath() + return get_ayon_production_icon_filepath() + + +def get_ayon_splash_filepath(staging=None): + if staging is None: + staging = is_staging_enabled() + + if os.getenv("AYON_USE_DEV") == "1": + splash_file_name = "AYON_splash_dev.png" + elif staging: + splash_file_name = "AYON_splash_staging.png" + else: + splash_file_name = "AYON_splash.png" + return get_resource("icons", splash_file_name) + + +def get_openpype_production_icon_filepath(): + return get_ayon_production_icon_filepath() + + +def get_openpype_staging_icon_filepath(): + return get_ayon_staging_icon_filepath() + + +def get_openpype_icon_filepath(staging=None): + return get_ayon_icon_filepath(staging) + + +def get_openpype_splash_filepath(staging=None): + return get_ayon_splash_filepath(staging) diff --git a/openpype/resources/app_icons/3dsmax.png b/client/ayon_core/resources/app_icons/3dsmax.png similarity index 100% rename from openpype/resources/app_icons/3dsmax.png rename to client/ayon_core/resources/app_icons/3dsmax.png diff --git a/openpype/resources/app_icons/Aport.png b/client/ayon_core/resources/app_icons/Aport.png similarity index 100% rename from openpype/resources/app_icons/Aport.png rename to client/ayon_core/resources/app_icons/Aport.png diff --git a/openpype/resources/app_icons/aftereffects.png b/client/ayon_core/resources/app_icons/aftereffects.png similarity index 100% rename from openpype/resources/app_icons/aftereffects.png rename to client/ayon_core/resources/app_icons/aftereffects.png diff --git a/openpype/resources/app_icons/blender.png b/client/ayon_core/resources/app_icons/blender.png similarity index 100% rename from openpype/resources/app_icons/blender.png rename to client/ayon_core/resources/app_icons/blender.png diff --git a/openpype/resources/app_icons/celaction.png 
b/client/ayon_core/resources/app_icons/celaction.png similarity index 100% rename from openpype/resources/app_icons/celaction.png rename to client/ayon_core/resources/app_icons/celaction.png diff --git a/openpype/resources/app_icons/clockify-white.png b/client/ayon_core/resources/app_icons/clockify-white.png similarity index 100% rename from openpype/resources/app_icons/clockify-white.png rename to client/ayon_core/resources/app_icons/clockify-white.png diff --git a/openpype/resources/app_icons/clockify.png b/client/ayon_core/resources/app_icons/clockify.png similarity index 100% rename from openpype/resources/app_icons/clockify.png rename to client/ayon_core/resources/app_icons/clockify.png diff --git a/openpype/resources/app_icons/djvView.png b/client/ayon_core/resources/app_icons/djvView.png similarity index 100% rename from openpype/resources/app_icons/djvView.png rename to client/ayon_core/resources/app_icons/djvView.png diff --git a/openpype/resources/app_icons/flame.png b/client/ayon_core/resources/app_icons/flame.png similarity index 100% rename from openpype/resources/app_icons/flame.png rename to client/ayon_core/resources/app_icons/flame.png diff --git a/openpype/resources/app_icons/fusion.png b/client/ayon_core/resources/app_icons/fusion.png similarity index 100% rename from openpype/resources/app_icons/fusion.png rename to client/ayon_core/resources/app_icons/fusion.png diff --git a/openpype/resources/app_icons/harmony.png b/client/ayon_core/resources/app_icons/harmony.png similarity index 100% rename from openpype/resources/app_icons/harmony.png rename to client/ayon_core/resources/app_icons/harmony.png diff --git a/openpype/resources/app_icons/hiero.png b/client/ayon_core/resources/app_icons/hiero.png similarity index 100% rename from openpype/resources/app_icons/hiero.png rename to client/ayon_core/resources/app_icons/hiero.png diff --git a/openpype/resources/app_icons/houdini.png b/client/ayon_core/resources/app_icons/houdini.png similarity index 100% rename from openpype/resources/app_icons/houdini.png rename to client/ayon_core/resources/app_icons/houdini.png diff --git a/openpype/resources/app_icons/maya.png b/client/ayon_core/resources/app_icons/maya.png similarity index 100% rename from openpype/resources/app_icons/maya.png rename to client/ayon_core/resources/app_icons/maya.png diff --git a/openpype/resources/app_icons/nuke.png b/client/ayon_core/resources/app_icons/nuke.png similarity index 100% rename from openpype/resources/app_icons/nuke.png rename to client/ayon_core/resources/app_icons/nuke.png diff --git a/openpype/resources/app_icons/nukestudio.png b/client/ayon_core/resources/app_icons/nukestudio.png similarity index 100% rename from openpype/resources/app_icons/nukestudio.png rename to client/ayon_core/resources/app_icons/nukestudio.png diff --git a/openpype/resources/app_icons/nukex.png b/client/ayon_core/resources/app_icons/nukex.png similarity index 100% rename from openpype/resources/app_icons/nukex.png rename to client/ayon_core/resources/app_icons/nukex.png diff --git a/openpype/resources/app_icons/photoshop.png b/client/ayon_core/resources/app_icons/photoshop.png similarity index 100% rename from openpype/resources/app_icons/photoshop.png rename to client/ayon_core/resources/app_icons/photoshop.png diff --git a/openpype/resources/app_icons/premiere.png b/client/ayon_core/resources/app_icons/premiere.png similarity index 100% rename from openpype/resources/app_icons/premiere.png rename to client/ayon_core/resources/app_icons/premiere.png diff --git 
a/openpype/resources/app_icons/python.png b/client/ayon_core/resources/app_icons/python.png similarity index 100% rename from openpype/resources/app_icons/python.png rename to client/ayon_core/resources/app_icons/python.png diff --git a/openpype/resources/app_icons/resolve.png b/client/ayon_core/resources/app_icons/resolve.png similarity index 100% rename from openpype/resources/app_icons/resolve.png rename to client/ayon_core/resources/app_icons/resolve.png diff --git a/openpype/resources/app_icons/shotgrid.png b/client/ayon_core/resources/app_icons/shotgrid.png similarity index 100% rename from openpype/resources/app_icons/shotgrid.png rename to client/ayon_core/resources/app_icons/shotgrid.png diff --git a/openpype/resources/app_icons/storyboardpro.png b/client/ayon_core/resources/app_icons/storyboardpro.png similarity index 100% rename from openpype/resources/app_icons/storyboardpro.png rename to client/ayon_core/resources/app_icons/storyboardpro.png diff --git a/openpype/resources/app_icons/substancepainter.png b/client/ayon_core/resources/app_icons/substancepainter.png similarity index 100% rename from openpype/resources/app_icons/substancepainter.png rename to client/ayon_core/resources/app_icons/substancepainter.png diff --git a/openpype/resources/app_icons/tvpaint.png b/client/ayon_core/resources/app_icons/tvpaint.png similarity index 100% rename from openpype/resources/app_icons/tvpaint.png rename to client/ayon_core/resources/app_icons/tvpaint.png diff --git a/openpype/resources/app_icons/ue4.png b/client/ayon_core/resources/app_icons/ue4.png similarity index 100% rename from openpype/resources/app_icons/ue4.png rename to client/ayon_core/resources/app_icons/ue4.png diff --git a/openpype/resources/app_icons/wrap.png b/client/ayon_core/resources/app_icons/wrap.png similarity index 100% rename from openpype/resources/app_icons/wrap.png rename to client/ayon_core/resources/app_icons/wrap.png diff --git a/openpype/resources/fonts/LiberationSans/LiberationSans-Bold.ttf b/client/ayon_core/resources/fonts/LiberationSans/LiberationSans-Bold.ttf similarity index 100% rename from openpype/resources/fonts/LiberationSans/LiberationSans-Bold.ttf rename to client/ayon_core/resources/fonts/LiberationSans/LiberationSans-Bold.ttf diff --git a/openpype/resources/fonts/LiberationSans/LiberationSans-BoldItalic.ttf b/client/ayon_core/resources/fonts/LiberationSans/LiberationSans-BoldItalic.ttf similarity index 100% rename from openpype/resources/fonts/LiberationSans/LiberationSans-BoldItalic.ttf rename to client/ayon_core/resources/fonts/LiberationSans/LiberationSans-BoldItalic.ttf diff --git a/openpype/resources/fonts/LiberationSans/LiberationSans-Italic.ttf b/client/ayon_core/resources/fonts/LiberationSans/LiberationSans-Italic.ttf similarity index 100% rename from openpype/resources/fonts/LiberationSans/LiberationSans-Italic.ttf rename to client/ayon_core/resources/fonts/LiberationSans/LiberationSans-Italic.ttf diff --git a/openpype/resources/fonts/LiberationSans/LiberationSans-Regular.ttf b/client/ayon_core/resources/fonts/LiberationSans/LiberationSans-Regular.ttf similarity index 100% rename from openpype/resources/fonts/LiberationSans/LiberationSans-Regular.ttf rename to client/ayon_core/resources/fonts/LiberationSans/LiberationSans-Regular.ttf diff --git a/openpype/resources/fonts/LiberationSans/License.txt b/client/ayon_core/resources/fonts/LiberationSans/License.txt similarity index 100% rename from openpype/resources/fonts/LiberationSans/License.txt rename to 
client/ayon_core/resources/fonts/LiberationSans/License.txt diff --git a/openpype/resources/ftrack/action_icons/ActionAskWhereIRun.svg b/client/ayon_core/resources/ftrack/action_icons/ActionAskWhereIRun.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/ActionAskWhereIRun.svg rename to client/ayon_core/resources/ftrack/action_icons/ActionAskWhereIRun.svg diff --git a/openpype/resources/ftrack/action_icons/AssetsRemover.svg b/client/ayon_core/resources/ftrack/action_icons/AssetsRemover.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/AssetsRemover.svg rename to client/ayon_core/resources/ftrack/action_icons/AssetsRemover.svg diff --git a/openpype/resources/ftrack/action_icons/BatchTasks.svg b/client/ayon_core/resources/ftrack/action_icons/BatchTasks.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/BatchTasks.svg rename to client/ayon_core/resources/ftrack/action_icons/BatchTasks.svg diff --git a/openpype/resources/ftrack/action_icons/ComponentOpen.svg b/client/ayon_core/resources/ftrack/action_icons/ComponentOpen.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/ComponentOpen.svg rename to client/ayon_core/resources/ftrack/action_icons/ComponentOpen.svg diff --git a/openpype/resources/ftrack/action_icons/CreateFolders.svg b/client/ayon_core/resources/ftrack/action_icons/CreateFolders.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/CreateFolders.svg rename to client/ayon_core/resources/ftrack/action_icons/CreateFolders.svg diff --git a/openpype/resources/ftrack/action_icons/CreateProjectFolders.svg b/client/ayon_core/resources/ftrack/action_icons/CreateProjectFolders.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/CreateProjectFolders.svg rename to client/ayon_core/resources/ftrack/action_icons/CreateProjectFolders.svg diff --git a/openpype/resources/ftrack/action_icons/DeleteAsset.svg b/client/ayon_core/resources/ftrack/action_icons/DeleteAsset.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/DeleteAsset.svg rename to client/ayon_core/resources/ftrack/action_icons/DeleteAsset.svg diff --git a/openpype/resources/ftrack/action_icons/Delivery.svg b/client/ayon_core/resources/ftrack/action_icons/Delivery.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/Delivery.svg rename to client/ayon_core/resources/ftrack/action_icons/Delivery.svg diff --git a/openpype/resources/ftrack/action_icons/MultipleNotes.svg b/client/ayon_core/resources/ftrack/action_icons/MultipleNotes.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/MultipleNotes.svg rename to client/ayon_core/resources/ftrack/action_icons/MultipleNotes.svg diff --git a/openpype/resources/ftrack/action_icons/OpenPypeAdmin.svg b/client/ayon_core/resources/ftrack/action_icons/OpenPypeAdmin.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/OpenPypeAdmin.svg rename to client/ayon_core/resources/ftrack/action_icons/OpenPypeAdmin.svg diff --git a/openpype/resources/ftrack/action_icons/PrepareProject.svg b/client/ayon_core/resources/ftrack/action_icons/PrepareProject.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/PrepareProject.svg rename to client/ayon_core/resources/ftrack/action_icons/PrepareProject.svg diff --git a/openpype/resources/ftrack/action_icons/RV.png b/client/ayon_core/resources/ftrack/action_icons/RV.png similarity index 100% rename from 
openpype/resources/ftrack/action_icons/RV.png rename to client/ayon_core/resources/ftrack/action_icons/RV.png diff --git a/openpype/resources/ftrack/action_icons/SeedProject.svg b/client/ayon_core/resources/ftrack/action_icons/SeedProject.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/SeedProject.svg rename to client/ayon_core/resources/ftrack/action_icons/SeedProject.svg diff --git a/openpype/resources/ftrack/action_icons/SortReview.svg b/client/ayon_core/resources/ftrack/action_icons/SortReview.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/SortReview.svg rename to client/ayon_core/resources/ftrack/action_icons/SortReview.svg diff --git a/openpype/resources/ftrack/action_icons/TestAction.svg b/client/ayon_core/resources/ftrack/action_icons/TestAction.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/TestAction.svg rename to client/ayon_core/resources/ftrack/action_icons/TestAction.svg diff --git a/openpype/resources/ftrack/action_icons/Thumbnail.svg b/client/ayon_core/resources/ftrack/action_icons/Thumbnail.svg similarity index 100% rename from openpype/resources/ftrack/action_icons/Thumbnail.svg rename to client/ayon_core/resources/ftrack/action_icons/Thumbnail.svg diff --git a/openpype/resources/ftrack/sign_in_message.html b/client/ayon_core/resources/ftrack/sign_in_message.html similarity index 100% rename from openpype/resources/ftrack/sign_in_message.html rename to client/ayon_core/resources/ftrack/sign_in_message.html diff --git a/openpype/resources/icons/AYON_icon.png b/client/ayon_core/resources/icons/AYON_icon.png similarity index 100% rename from openpype/resources/icons/AYON_icon.png rename to client/ayon_core/resources/icons/AYON_icon.png diff --git a/openpype/resources/icons/AYON_icon_dev.png b/client/ayon_core/resources/icons/AYON_icon_dev.png similarity index 100% rename from openpype/resources/icons/AYON_icon_dev.png rename to client/ayon_core/resources/icons/AYON_icon_dev.png diff --git a/openpype/resources/icons/AYON_icon_staging.png b/client/ayon_core/resources/icons/AYON_icon_staging.png similarity index 100% rename from openpype/resources/icons/AYON_icon_staging.png rename to client/ayon_core/resources/icons/AYON_icon_staging.png diff --git a/openpype/resources/icons/AYON_splash.png b/client/ayon_core/resources/icons/AYON_splash.png similarity index 100% rename from openpype/resources/icons/AYON_splash.png rename to client/ayon_core/resources/icons/AYON_splash.png diff --git a/openpype/resources/icons/AYON_splash_dev.png b/client/ayon_core/resources/icons/AYON_splash_dev.png similarity index 100% rename from openpype/resources/icons/AYON_splash_dev.png rename to client/ayon_core/resources/icons/AYON_splash_dev.png diff --git a/openpype/resources/icons/AYON_splash_staging.png b/client/ayon_core/resources/icons/AYON_splash_staging.png similarity index 100% rename from openpype/resources/icons/AYON_splash_staging.png rename to client/ayon_core/resources/icons/AYON_splash_staging.png diff --git a/openpype/resources/icons/circle_green.png b/client/ayon_core/resources/icons/circle_green.png similarity index 100% rename from openpype/resources/icons/circle_green.png rename to client/ayon_core/resources/icons/circle_green.png diff --git a/openpype/resources/icons/circle_orange.png b/client/ayon_core/resources/icons/circle_orange.png similarity index 100% rename from openpype/resources/icons/circle_orange.png rename to client/ayon_core/resources/icons/circle_orange.png diff --git 
a/openpype/resources/icons/circle_red.png b/client/ayon_core/resources/icons/circle_red.png similarity index 100% rename from openpype/resources/icons/circle_red.png rename to client/ayon_core/resources/icons/circle_red.png diff --git a/openpype/resources/icons/circle_yellow.png b/client/ayon_core/resources/icons/circle_yellow.png similarity index 100% rename from openpype/resources/icons/circle_yellow.png rename to client/ayon_core/resources/icons/circle_yellow.png diff --git a/openpype/resources/icons/eye.png b/client/ayon_core/resources/icons/eye.png similarity index 100% rename from openpype/resources/icons/eye.png rename to client/ayon_core/resources/icons/eye.png diff --git a/openpype/resources/icons/folder-favorite.png b/client/ayon_core/resources/icons/folder-favorite.png similarity index 100% rename from openpype/resources/icons/folder-favorite.png rename to client/ayon_core/resources/icons/folder-favorite.png diff --git a/openpype/resources/icons/inventory.png b/client/ayon_core/resources/icons/inventory.png similarity index 100% rename from openpype/resources/icons/inventory.png rename to client/ayon_core/resources/icons/inventory.png diff --git a/openpype/resources/icons/loader.png b/client/ayon_core/resources/icons/loader.png similarity index 100% rename from openpype/resources/icons/loader.png rename to client/ayon_core/resources/icons/loader.png diff --git a/openpype/resources/icons/lookmanager.png b/client/ayon_core/resources/icons/lookmanager.png similarity index 100% rename from openpype/resources/icons/lookmanager.png rename to client/ayon_core/resources/icons/lookmanager.png diff --git a/openpype/resources/icons/workfiles.png b/client/ayon_core/resources/icons/workfiles.png similarity index 100% rename from openpype/resources/icons/workfiles.png rename to client/ayon_core/resources/icons/workfiles.png diff --git a/openpype/resources/images/spinner-200.svg b/client/ayon_core/resources/images/spinner-200.svg similarity index 100% rename from openpype/resources/images/spinner-200.svg rename to client/ayon_core/resources/images/spinner-200.svg diff --git a/openpype/resources/images/warning.png b/client/ayon_core/resources/images/warning.png similarity index 100% rename from openpype/resources/images/warning.png rename to client/ayon_core/resources/images/warning.png diff --git a/openpype/modules/kitsu/utils/__init__.py b/client/ayon_core/scripts/__init__.py similarity index 100% rename from openpype/modules/kitsu/utils/__init__.py rename to client/ayon_core/scripts/__init__.py diff --git a/openpype/scripts/non_python_host_launch.py b/client/ayon_core/scripts/non_python_host_launch.py similarity index 93% rename from openpype/scripts/non_python_host_launch.py rename to client/ayon_core/scripts/non_python_host_launch.py index c95a9df314..97632e98ad 100644 --- a/openpype/scripts/non_python_host_launch.py +++ b/client/ayon_core/scripts/non_python_host_launch.py @@ -15,7 +15,7 @@ def show_error_messagebox(title, message, detail_message=None): """Function will show message and process ends after closing it.""" from qtpy import QtWidgets, QtCore - from openpype import style + from ayon_core import style app = QtWidgets.QApplication([]) app.setStyleSheet(style.load_stylesheet()) @@ -82,11 +82,11 @@ def main(argv): host_name = os.environ["AVALON_APP"].lower() if host_name == "photoshop": # TODO refactor launch logic according to AE - from openpype.hosts.photoshop.api.lib import main + from ayon_core.hosts.photoshop.api.lib import main elif host_name == "aftereffects": - from 
openpype.hosts.aftereffects.api.launch_logic import main + from ayon_core.hosts.aftereffects.api.launch_logic import main elif host_name == "harmony": - from openpype.hosts.harmony.api.lib import main + from ayon_core.hosts.harmony.api.lib import main else: title = "Unknown host name" message = ( diff --git a/openpype/scripts/ocio_wrapper.py b/client/ayon_core/scripts/ocio_wrapper.py similarity index 100% rename from openpype/scripts/ocio_wrapper.py rename to client/ayon_core/scripts/ocio_wrapper.py diff --git a/openpype/scripts/otio_burnin.py b/client/ayon_core/scripts/otio_burnin.py similarity index 99% rename from openpype/scripts/otio_burnin.py rename to client/ayon_core/scripts/otio_burnin.py index 189feaee3a..f12d298ac6 100644 --- a/openpype/scripts/otio_burnin.py +++ b/client/ayon_core/scripts/otio_burnin.py @@ -7,7 +7,7 @@ from string import Formatter import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins -from openpype.lib import ( +from ayon_core.lib import ( get_ffmpeg_tool_args, get_ffmpeg_codec_args, get_ffmpeg_format_args, diff --git a/openpype/scripts/remote_publish.py b/client/ayon_core/scripts/remote_publish.py similarity index 75% rename from openpype/scripts/remote_publish.py rename to client/ayon_core/scripts/remote_publish.py index d362f7abdc..7e7bf2493b 100644 --- a/openpype/scripts/remote_publish.py +++ b/client/ayon_core/scripts/remote_publish.py @@ -1,6 +1,6 @@ try: - from openpype.lib import Logger - from openpype.pipeline.publish.lib import remote_publish + from ayon_core.lib import Logger + from ayon_core.pipeline.publish.lib import remote_publish except ImportError as exc: # Ensure Deadline fails by output an error that contains "Fatal Error:" raise ImportError("Fatal Error: %s" % exc) diff --git a/openpype/scripts/slates/__init__.py b/client/ayon_core/scripts/slates/__init__.py similarity index 100% rename from openpype/scripts/slates/__init__.py rename to client/ayon_core/scripts/slates/__init__.py diff --git a/openpype/scripts/slates/__main__.py b/client/ayon_core/scripts/slates/__main__.py similarity index 100% rename from openpype/scripts/slates/__main__.py rename to client/ayon_core/scripts/slates/__main__.py diff --git a/openpype/modules/log_viewer/tray/__init__.py b/client/ayon_core/scripts/slates/slate_base/__init__.py similarity index 100% rename from openpype/modules/log_viewer/tray/__init__.py rename to client/ayon_core/scripts/slates/slate_base/__init__.py diff --git a/openpype/scripts/slates/slate_base/api.py b/client/ayon_core/scripts/slates/slate_base/api.py similarity index 100% rename from openpype/scripts/slates/slate_base/api.py rename to client/ayon_core/scripts/slates/slate_base/api.py diff --git a/openpype/scripts/slates/slate_base/base.py b/client/ayon_core/scripts/slates/slate_base/base.py similarity index 100% rename from openpype/scripts/slates/slate_base/base.py rename to client/ayon_core/scripts/slates/slate_base/base.py diff --git a/openpype/scripts/slates/slate_base/default_style.json b/client/ayon_core/scripts/slates/slate_base/default_style.json similarity index 100% rename from openpype/scripts/slates/slate_base/default_style.json rename to client/ayon_core/scripts/slates/slate_base/default_style.json diff --git a/openpype/scripts/slates/slate_base/example.py b/client/ayon_core/scripts/slates/slate_base/example.py similarity index 100% rename from openpype/scripts/slates/slate_base/example.py rename to client/ayon_core/scripts/slates/slate_base/example.py diff --git 
a/openpype/scripts/slates/slate_base/font_factory.py b/client/ayon_core/scripts/slates/slate_base/font_factory.py similarity index 100% rename from openpype/scripts/slates/slate_base/font_factory.py rename to client/ayon_core/scripts/slates/slate_base/font_factory.py diff --git a/openpype/scripts/slates/slate_base/items.py b/client/ayon_core/scripts/slates/slate_base/items.py similarity index 100% rename from openpype/scripts/slates/slate_base/items.py rename to client/ayon_core/scripts/slates/slate_base/items.py diff --git a/openpype/scripts/slates/slate_base/layer.py b/client/ayon_core/scripts/slates/slate_base/layer.py similarity index 100% rename from openpype/scripts/slates/slate_base/layer.py rename to client/ayon_core/scripts/slates/slate_base/layer.py diff --git a/openpype/scripts/slates/slate_base/lib.py b/client/ayon_core/scripts/slates/slate_base/lib.py similarity index 100% rename from openpype/scripts/slates/slate_base/lib.py rename to client/ayon_core/scripts/slates/slate_base/lib.py diff --git a/openpype/scripts/slates/slate_base/main_frame.py b/client/ayon_core/scripts/slates/slate_base/main_frame.py similarity index 100% rename from openpype/scripts/slates/slate_base/main_frame.py rename to client/ayon_core/scripts/slates/slate_base/main_frame.py diff --git a/client/ayon_core/settings/__init__.py b/client/ayon_core/settings/__init__.py new file mode 100644 index 0000000000..51019ca570 --- /dev/null +++ b/client/ayon_core/settings/__init__.py @@ -0,0 +1,25 @@ +from .constants import ( + SYSTEM_SETTINGS_KEY, + PROJECT_SETTINGS_KEY, +) +from .lib import ( + get_general_environments, + get_global_settings, + get_system_settings, + get_project_settings, + get_current_project_settings, + get_local_settings, +) + + +__all__ = ( + "SYSTEM_SETTINGS_KEY", + "PROJECT_SETTINGS_KEY", + + "get_general_environments", + "get_global_settings", + "get_system_settings", + "get_project_settings", + "get_current_project_settings", + "get_local_settings", +) diff --git a/openpype/settings/ayon_settings.py b/client/ayon_core/settings/ayon_settings.py similarity index 99% rename from openpype/settings/ayon_settings.py rename to client/ayon_core/settings/ayon_settings.py index 2c851c054d..ed1199d517 100644 --- a/openpype/settings/ayon_settings.py +++ b/client/ayon_core/settings/ayon_settings.py @@ -21,7 +21,7 @@ import six -from openpype.client import get_ayon_server_api_connection +from ayon_core.client import get_ayon_server_api_connection def _convert_color(color_value): @@ -269,7 +269,7 @@ def _convert_modules_system( output[key] = value # Make sure addons have access to settings in initialization - # - ModulesManager passes only modules settings into initialization + # - AddonsManager passes only modules settings into initialization if key not in modules_settings: modules_settings[key] = value @@ -1454,7 +1454,7 @@ def _use_bundles(cls): @classmethod def _get_variant(cls): if _AyonSettingsCache.variant is None: - from openpype.lib.openpype_version import is_staging_enabled + from ayon_core.lib import is_staging_enabled variant = "production" if is_dev_mode_enabled(): diff --git a/client/ayon_core/settings/constants.py b/client/ayon_core/settings/constants.py new file mode 100644 index 0000000000..0db3948b64 --- /dev/null +++ b/client/ayon_core/settings/constants.py @@ -0,0 +1,28 @@ +# Metadata keys for working with studio and project overrides +M_OVERRIDDEN_KEY = "__overriden_keys__" +# Metadata key for storing dynamically created labels +M_DYNAMIC_KEY_LABEL = "__dynamic_keys_labels__" +
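The settings package above re-exports a small public API. A minimal consumption sketch; the function names are as exported in the __init__ above, the project name is hypothetical, and the calls assume a reachable AYON server since values are resolved through it:

from ayon_core.settings import (
    get_system_settings,
    get_general_environments,
    get_project_settings,
)

system_settings = get_system_settings()
# Convenience accessor for system_settings["general"]["environment"]
envs = get_general_environments()

# Project settings are resolved per project name
# ("demo_project" is a hypothetical example).
project_settings = get_project_settings("demo_project")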
+METADATA_KEYS = frozenset([ + M_OVERRIDDEN_KEY, + M_DYNAMIC_KEY_LABEL +]) + +# Keys where studio's system overrides are stored +SYSTEM_SETTINGS_KEY = "system_settings" +PROJECT_SETTINGS_KEY = "project_settings" + +DEFAULT_PROJECT_KEY = "__default_project__" + + +__all__ = ( + "M_OVERRIDDEN_KEY", + "M_DYNAMIC_KEY_LABEL", + + "METADATA_KEYS", + + "SYSTEM_SETTINGS_KEY", + "PROJECT_SETTINGS_KEY", + + "DEFAULT_PROJECT_KEY", +) diff --git a/openpype/settings/defaults/project_anatomy/attributes.json b/client/ayon_core/settings/defaults/project_anatomy/attributes.json similarity index 100% rename from openpype/settings/defaults/project_anatomy/attributes.json rename to client/ayon_core/settings/defaults/project_anatomy/attributes.json diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/client/ayon_core/settings/defaults/project_anatomy/imageio.json similarity index 100% rename from openpype/settings/defaults/project_anatomy/imageio.json rename to client/ayon_core/settings/defaults/project_anatomy/imageio.json diff --git a/openpype/settings/defaults/project_anatomy/roots.json b/client/ayon_core/settings/defaults/project_anatomy/roots.json similarity index 100% rename from openpype/settings/defaults/project_anatomy/roots.json rename to client/ayon_core/settings/defaults/project_anatomy/roots.json diff --git a/openpype/settings/defaults/project_anatomy/tasks.json b/client/ayon_core/settings/defaults/project_anatomy/tasks.json similarity index 100% rename from openpype/settings/defaults/project_anatomy/tasks.json rename to client/ayon_core/settings/defaults/project_anatomy/tasks.json diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/client/ayon_core/settings/defaults/project_anatomy/templates.json similarity index 100% rename from openpype/settings/defaults/project_anatomy/templates.json rename to client/ayon_core/settings/defaults/project_anatomy/templates.json diff --git a/openpype/settings/defaults/project_settings/aftereffects.json b/client/ayon_core/settings/defaults/project_settings/aftereffects.json similarity index 100% rename from openpype/settings/defaults/project_settings/aftereffects.json rename to client/ayon_core/settings/defaults/project_settings/aftereffects.json diff --git a/openpype/settings/defaults/project_settings/applications.json b/client/ayon_core/settings/defaults/project_settings/applications.json similarity index 100% rename from openpype/settings/defaults/project_settings/applications.json rename to client/ayon_core/settings/defaults/project_settings/applications.json diff --git a/openpype/settings/defaults/project_settings/blender.json b/client/ayon_core/settings/defaults/project_settings/blender.json similarity index 100% rename from openpype/settings/defaults/project_settings/blender.json rename to client/ayon_core/settings/defaults/project_settings/blender.json diff --git a/openpype/settings/defaults/project_settings/celaction.json b/client/ayon_core/settings/defaults/project_settings/celaction.json similarity index 100% rename from openpype/settings/defaults/project_settings/celaction.json rename to client/ayon_core/settings/defaults/project_settings/celaction.json diff --git a/openpype/settings/defaults/project_settings/deadline.json b/client/ayon_core/settings/defaults/project_settings/deadline.json similarity index 100% rename from openpype/settings/defaults/project_settings/deadline.json rename to client/ayon_core/settings/defaults/project_settings/deadline.json diff --git 
a/openpype/settings/defaults/project_settings/flame.json b/client/ayon_core/settings/defaults/project_settings/flame.json similarity index 100% rename from openpype/settings/defaults/project_settings/flame.json rename to client/ayon_core/settings/defaults/project_settings/flame.json diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/client/ayon_core/settings/defaults/project_settings/ftrack.json similarity index 100% rename from openpype/settings/defaults/project_settings/ftrack.json rename to client/ayon_core/settings/defaults/project_settings/ftrack.json diff --git a/openpype/settings/defaults/project_settings/fusion.json b/client/ayon_core/settings/defaults/project_settings/fusion.json similarity index 100% rename from openpype/settings/defaults/project_settings/fusion.json rename to client/ayon_core/settings/defaults/project_settings/fusion.json diff --git a/openpype/settings/defaults/project_settings/global.json b/client/ayon_core/settings/defaults/project_settings/global.json similarity index 100% rename from openpype/settings/defaults/project_settings/global.json rename to client/ayon_core/settings/defaults/project_settings/global.json diff --git a/openpype/settings/defaults/project_settings/harmony.json b/client/ayon_core/settings/defaults/project_settings/harmony.json similarity index 100% rename from openpype/settings/defaults/project_settings/harmony.json rename to client/ayon_core/settings/defaults/project_settings/harmony.json diff --git a/openpype/settings/defaults/project_settings/hiero.json b/client/ayon_core/settings/defaults/project_settings/hiero.json similarity index 100% rename from openpype/settings/defaults/project_settings/hiero.json rename to client/ayon_core/settings/defaults/project_settings/hiero.json diff --git a/openpype/settings/defaults/project_settings/houdini.json b/client/ayon_core/settings/defaults/project_settings/houdini.json similarity index 100% rename from openpype/settings/defaults/project_settings/houdini.json rename to client/ayon_core/settings/defaults/project_settings/houdini.json diff --git a/openpype/settings/defaults/project_settings/kitsu.json b/client/ayon_core/settings/defaults/project_settings/kitsu.json similarity index 100% rename from openpype/settings/defaults/project_settings/kitsu.json rename to client/ayon_core/settings/defaults/project_settings/kitsu.json diff --git a/openpype/settings/defaults/project_settings/max.json b/client/ayon_core/settings/defaults/project_settings/max.json similarity index 100% rename from openpype/settings/defaults/project_settings/max.json rename to client/ayon_core/settings/defaults/project_settings/max.json diff --git a/openpype/settings/defaults/project_settings/maya.json b/client/ayon_core/settings/defaults/project_settings/maya.json similarity index 99% rename from openpype/settings/defaults/project_settings/maya.json rename to client/ayon_core/settings/defaults/project_settings/maya.json index 615000183d..b2dc0ccd65 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/client/ayon_core/settings/defaults/project_settings/maya.json @@ -462,7 +462,7 @@ "definition": [ { "type": "action", - "command": "import openpype.hosts.maya.api.commands as op_cmds; op_cmds.edit_shader_definitions()", + "command": "import ayon_core.hosts.maya.api.commands as op_cmds; op_cmds.edit_shader_definitions()", "sourcetype": "python", "title": "Edit shader name definitions", "tooltip": "Edit shader name definitions used in validation and renaming.", diff --git 
a/openpype/settings/defaults/project_settings/nuke.json b/client/ayon_core/settings/defaults/project_settings/nuke.json similarity index 98% rename from openpype/settings/defaults/project_settings/nuke.json rename to client/ayon_core/settings/defaults/project_settings/nuke.json index 15c2d262e0..11b2988c67 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/client/ayon_core/settings/defaults/project_settings/nuke.json @@ -219,14 +219,14 @@ "type": "action", "sourcetype": "python", "title": "Set Frame Start (Read Node)", - "command": "from openpype.hosts.nuke.startup.frame_setting_for_read_nodes import main;main();", + "command": "from ayon_core.hosts.nuke.startup.frame_setting_for_read_nodes import main;main();", "tooltip": "Set frame start for read node(s)" }, { "type": "action", "sourcetype": "python", "title": "Set non publish output for Write Node", - "command": "from openpype.hosts.nuke.startup.custom_write_node import main;main();", + "command": "from ayon_core.hosts.nuke.startup.custom_write_node import main;main();", "tooltip": "Open the OpenPype Nuke user doc page" } ] diff --git a/openpype/settings/defaults/project_settings/photoshop.json b/client/ayon_core/settings/defaults/project_settings/photoshop.json similarity index 100% rename from openpype/settings/defaults/project_settings/photoshop.json rename to client/ayon_core/settings/defaults/project_settings/photoshop.json diff --git a/openpype/settings/defaults/project_settings/resolve.json b/client/ayon_core/settings/defaults/project_settings/resolve.json similarity index 100% rename from openpype/settings/defaults/project_settings/resolve.json rename to client/ayon_core/settings/defaults/project_settings/resolve.json diff --git a/openpype/settings/defaults/project_settings/royalrender.json b/client/ayon_core/settings/defaults/project_settings/royalrender.json similarity index 100% rename from openpype/settings/defaults/project_settings/royalrender.json rename to client/ayon_core/settings/defaults/project_settings/royalrender.json diff --git a/openpype/settings/defaults/project_settings/shotgrid.json b/client/ayon_core/settings/defaults/project_settings/shotgrid.json similarity index 100% rename from openpype/settings/defaults/project_settings/shotgrid.json rename to client/ayon_core/settings/defaults/project_settings/shotgrid.json diff --git a/openpype/settings/defaults/project_settings/slack.json b/client/ayon_core/settings/defaults/project_settings/slack.json similarity index 100% rename from openpype/settings/defaults/project_settings/slack.json rename to client/ayon_core/settings/defaults/project_settings/slack.json diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/client/ayon_core/settings/defaults/project_settings/standalonepublisher.json similarity index 100% rename from openpype/settings/defaults/project_settings/standalonepublisher.json rename to client/ayon_core/settings/defaults/project_settings/standalonepublisher.json diff --git a/openpype/settings/defaults/project_settings/substancepainter.json b/client/ayon_core/settings/defaults/project_settings/substancepainter.json similarity index 100% rename from openpype/settings/defaults/project_settings/substancepainter.json rename to client/ayon_core/settings/defaults/project_settings/substancepainter.json diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/client/ayon_core/settings/defaults/project_settings/traypublisher.json similarity index 100% rename from 
openpype/settings/defaults/project_settings/traypublisher.json rename to client/ayon_core/settings/defaults/project_settings/traypublisher.json diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/client/ayon_core/settings/defaults/project_settings/tvpaint.json similarity index 100% rename from openpype/settings/defaults/project_settings/tvpaint.json rename to client/ayon_core/settings/defaults/project_settings/tvpaint.json diff --git a/openpype/settings/defaults/project_settings/unreal.json b/client/ayon_core/settings/defaults/project_settings/unreal.json similarity index 100% rename from openpype/settings/defaults/project_settings/unreal.json rename to client/ayon_core/settings/defaults/project_settings/unreal.json diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/client/ayon_core/settings/defaults/project_settings/webpublisher.json similarity index 100% rename from openpype/settings/defaults/project_settings/webpublisher.json rename to client/ayon_core/settings/defaults/project_settings/webpublisher.json diff --git a/openpype/settings/defaults/system_settings/applications.json b/client/ayon_core/settings/defaults/system_settings/applications.json similarity index 100% rename from openpype/settings/defaults/system_settings/applications.json rename to client/ayon_core/settings/defaults/system_settings/applications.json diff --git a/openpype/settings/defaults/system_settings/general.json b/client/ayon_core/settings/defaults/system_settings/general.json similarity index 100% rename from openpype/settings/defaults/system_settings/general.json rename to client/ayon_core/settings/defaults/system_settings/general.json diff --git a/openpype/settings/defaults/system_settings/modules.json b/client/ayon_core/settings/defaults/system_settings/modules.json similarity index 100% rename from openpype/settings/defaults/system_settings/modules.json rename to client/ayon_core/settings/defaults/system_settings/modules.json diff --git a/openpype/settings/defaults/system_settings/tools.json b/client/ayon_core/settings/defaults/system_settings/tools.json similarity index 100% rename from openpype/settings/defaults/system_settings/tools.json rename to client/ayon_core/settings/defaults/system_settings/tools.json diff --git a/client/ayon_core/settings/lib.py b/client/ayon_core/settings/lib.py new file mode 100644 index 0000000000..beae376b7c --- /dev/null +++ b/client/ayon_core/settings/lib.py @@ -0,0 +1,273 @@ +import os +import json +import logging +import copy + +from .constants import ( + M_OVERRIDDEN_KEY, + + METADATA_KEYS, + + SYSTEM_SETTINGS_KEY, + PROJECT_SETTINGS_KEY, + DEFAULT_PROJECT_KEY +) + +from .ayon_settings import ( + get_ayon_project_settings, + get_ayon_system_settings +) + +log = logging.getLogger(__name__) + +# Py2 + Py3 json decode exception +JSON_EXC = getattr(json.decoder, "JSONDecodeError", ValueError) + + +# Path to default settings +DEFAULTS_DIR = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "defaults" +) + +# Variable where the cache of default settings is stored +_DEFAULT_SETTINGS = None + + +def clear_metadata_from_settings(values): + """Remove all metadata keys from loaded settings.""" + if isinstance(values, dict): + for key in tuple(values.keys()): + if key in METADATA_KEYS: + values.pop(key) + else: + clear_metadata_from_settings(values[key]) + elif isinstance(values, list): + for item in values: + clear_metadata_from_settings(item) + + +def get_local_settings(): + # TODO: add the AYON implementation + return {} +
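To make the behaviour of clear_metadata_from_settings concrete, a small sketch with illustrative values only:

from ayon_core.settings.lib import clear_metadata_from_settings

# Hypothetical stored overrides containing both metadata keys.
values = {
    "__overriden_keys__": ["maya"],
    "maya": {
        "__dynamic_keys_labels__": {"profile_1": "Profile 1"},
        "enabled": True,
    },
}
clear_metadata_from_settings(values)
# Metadata keys are stripped in place at every nesting level:
# values == {"maya": {"enabled": True}}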
+def load_openpype_default_settings(): + """Load openpype default settings.""" + return load_jsons_from_dir(DEFAULTS_DIR) + + +def reset_default_settings(): + """Reset cache of default settings. Can't be used now.""" + global _DEFAULT_SETTINGS + _DEFAULT_SETTINGS = None + + +def _get_default_settings(): + return load_openpype_default_settings() + + +def get_default_settings(): + """Get default settings. + + Returns: + dict: Loaded default settings. + """ + global _DEFAULT_SETTINGS + if _DEFAULT_SETTINGS is None: + _DEFAULT_SETTINGS = _get_default_settings() + return copy.deepcopy(_DEFAULT_SETTINGS) + + +def load_json_file(fpath): + # Load json data + try: + with open(fpath, "r") as opened_file: + return json.load(opened_file) + + except JSON_EXC: + log.warning( + "File has invalid json format \"{}\"".format(fpath), + exc_info=True + ) + return {} + + +def load_jsons_from_dir(path, *args, **kwargs): + """Load all .json files with content from entered folder path. + + Data is loaded recursively from a directory, recreating the folder + hierarchy as a dictionary. + + Entered path hierarchy: + |_ folder1 + | |_ data1.json + |_ folder2 + |_ subfolder1 + |_ data2.json + + Will result in: + ```javascript + { + "folder1": { + "data1": "CONTENT OF FILE" + }, + "folder2": { + "subfolder1": { + "data2": "CONTENT OF FILE" + } + } + } + ``` + + Args: + path (str): Path to the root folder where the json hierarchy starts. + + Returns: + dict: Loaded data. + """ + output = {} + + path = os.path.normpath(path) + if not os.path.exists(path): + # TODO warning + return output + + sub_keys = list(kwargs.pop("subkeys", args)) + for sub_key in tuple(sub_keys): + _path = os.path.join(path, sub_key) + if not os.path.exists(_path): + break + + path = _path + sub_keys.pop(0) + + base_len = len(path) + 1 + for base, _directories, filenames in os.walk(path): + base_items_str = base[base_len:] + if not base_items_str: + base_items = [] + else: + base_items = base_items_str.split(os.path.sep) + + for filename in filenames: + basename, ext = os.path.splitext(filename) + if ext == ".json": + full_path = os.path.join(base, filename) + value = load_json_file(full_path) + dict_keys = base_items + [basename] + output = subkey_merge(output, value, dict_keys) + + for sub_key in sub_keys: + output = output[sub_key] + return output + + +def subkey_merge(_dict, value, keys): + key = keys.pop(0) + if not keys: + _dict[key] = value + return _dict + + if key not in _dict: + _dict[key] = {} + _dict[key] = subkey_merge(_dict[key], value, keys) + + return _dict + + +def merge_overrides(source_dict, override_dict): + """Merge data from override_dict to source_dict.""" + + if M_OVERRIDDEN_KEY in override_dict: + overridden_keys = set(override_dict.pop(M_OVERRIDDEN_KEY)) + else: + overridden_keys = set() + + for key, value in override_dict.items(): + if (key in overridden_keys or key not in source_dict): + source_dict[key] = value + + elif isinstance(value, dict) and isinstance(source_dict[key], dict): + source_dict[key] = merge_overrides(source_dict[key], value) + + else: + source_dict[key] = value + return source_dict + + +def get_site_local_overrides(project_name, site_name, local_settings=None): + """Site overrides from local settings for passed project and site name. + + Args: + project_name (str): For which project the overrides are. + site_name (str): For which site the overrides are needed. + local_settings (dict): Preloaded local settings. They are loaded + automatically if not passed.
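A short sketch of how merge_overrides treats the __overriden_keys__ metadata; the values are illustrative only:

from ayon_core.settings.lib import merge_overrides

defaults = {"tools": {"a": 1, "b": 2}, "color": "red"}
overrides = {"__overriden_keys__": ["tools"], "tools": {"a": 10}}

merged = merge_overrides(defaults, overrides)
# "tools" is listed in __overriden_keys__, so it replaces the default
# dict wholesale; without that marker the two dicts would be merged
# recursively (giving {"a": 10, "b": 2}).
# merged == {"tools": {"a": 10}, "color": "red"}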
+ """ + # Check if local settings were passed + if local_settings is None: + local_settings = get_local_settings() + + output = {} + + # Skip if local settings are empty + if not local_settings: + return output + + local_project_settings = local_settings.get("projects") or {} + + # Prepare overrides for entered project and for default project + project_locals = None + if project_name: + project_locals = local_project_settings.get(project_name) + default_project_locals = local_project_settings.get(DEFAULT_PROJECT_KEY) + + # First load and use local settings from default project + if default_project_locals and site_name in default_project_locals: + output.update(default_project_locals[site_name]) + + # Apply project specific local settings if there are any + if project_locals and site_name in project_locals: + output.update(project_locals[site_name]) + + return output + + +def get_current_project_settings(): + """Project settings for current context project. + + Project name should be stored in environment variable `AVALON_PROJECT`. + This function should be used only in host context where environment + variable must be set and should not happen that any part of process will + change the value of the enviornment variable. + """ + project_name = os.environ.get("AVALON_PROJECT") + if not project_name: + raise ValueError( + "Missing context project in environemt variable `AVALON_PROJECT`." + ) + return get_project_settings(project_name) + + +def get_global_settings(): + default_settings = load_openpype_default_settings() + return default_settings["system_settings"]["general"] + + +def get_general_environments(): + value = get_system_settings() + return value["general"]["environment"] + + +def get_system_settings(*args, **kwargs): + default_settings = get_default_settings()[SYSTEM_SETTINGS_KEY] + return get_ayon_system_settings(default_settings) + + +def get_project_settings(project_name, *args, **kwargs): + default_settings = get_default_settings()[PROJECT_SETTINGS_KEY] + return get_ayon_project_settings(default_settings, project_name) diff --git a/openpype/settings/local_settings.md b/client/ayon_core/settings/local_settings.md similarity index 100% rename from openpype/settings/local_settings.md rename to client/ayon_core/settings/local_settings.md diff --git a/client/ayon_core/style/__init__.py b/client/ayon_core/style/__init__.py new file mode 100644 index 0000000000..8b2dfa1bcb --- /dev/null +++ b/client/ayon_core/style/__init__.py @@ -0,0 +1,276 @@ +import os +import copy +import json +import collections +import six + +from ayon_core import resources + +from .color_defs import parse_color + +current_dir = os.path.dirname(os.path.abspath(__file__)) + + +class _Cache: + stylesheet = None + font_ids = None + + tools_icon_color = None + default_entity_icon_color = None + disabled_entity_icon_color = None + deprecated_entity_font_color = None + + colors_data = None + objected_colors = None + + +def get_style_image_path(image_name): + # All filenames are lowered + image_name = image_name.lower() + # Male sure filename has png extension + if not image_name.endswith(".png"): + image_name += ".png" + filepath = os.path.join(current_dir, "images", image_name) + if os.path.exists(filepath): + return filepath + return None + + +def _get_colors_raw_data(): + """Read data file with stylesheet fill values. + + Returns: + dict: Loaded data for stylesheet. 
+ """ + data_path = os.path.join(current_dir, "data.json") + with open(data_path, "r") as data_stream: + data = json.load(data_stream) + return data + + +def get_colors_data(): + """Only color data from stylesheet data.""" + if _Cache.colors_data is None: + data = _get_colors_raw_data() + color_data = data.get("color") or {} + _Cache.colors_data = color_data + return copy.deepcopy(_Cache.colors_data) + + +def _convert_color_values_to_objects(value): + """Parse all string values in dictionary to Color definitions. + + Recursive function calling itself if value is dictionary. + + Args: + value (dict, str): String is parsed into color definition object and + dictionary is passed into this function. + + Raises: + TypeError: If value in color data do not contain string of dictionary. + """ + if isinstance(value, dict): + output = {} + for _key, _value in value.items(): + output[_key] = _convert_color_values_to_objects(_value) + return output + + if not isinstance(value, six.string_types): + raise TypeError(( + "Unexpected type in colors data '{}'. Expected 'str' or 'dict'." + ).format(str(type(value)))) + return parse_color(value) + + +def get_objected_colors(*keys): + """Colors parsed from stylesheet data into color definitions. + + You can pass multiple arguments to get a key from the data dict's colors. + Because this functions returns a deep copy of the cached data this allows + a much smaller dataset to be copied and thus result in a faster function. + It is however a micro-optimization in the area of 0.001s and smaller. + + For example: + >>> get_colors_data() # copy of full colors dict + >>> get_colors_data("font") + >>> get_colors_data("loader", "asset-view") + + Args: + *keys: Each key argument will return a key nested deeper in the + objected colors data. + + Returns: + Any: Parsed color objects by keys in data. + """ + if _Cache.objected_colors is None: + colors_data = get_colors_data() + output = {} + for key, value in colors_data.items(): + output[key] = _convert_color_values_to_objects(value) + + _Cache.objected_colors = output + + output = _Cache.objected_colors + for key in keys: + output = output[key] + return copy.deepcopy(output) + + +def _load_stylesheet(): + """Load strylesheet and trigger all related callbacks. + + Style require more than a stylesheet string. Stylesheet string + contains paths to resources which must be registered into Qt application + and load fonts used in stylesheets. + + Also replace values from stylesheet data into stylesheet text. + """ + from . 
import qrc_resources + + qrc_resources.qInitResources() + + style_path = os.path.join(current_dir, "style.css") + with open(style_path, "r") as style_file: + stylesheet = style_file.read() + + data = _get_colors_raw_data() + + data_deque = collections.deque() + for item in data.items(): + data_deque.append(item) + + fill_data = {} + while data_deque: + key, value = data_deque.popleft() + if isinstance(value, dict): + for sub_key, sub_value in value.items(): + new_key = "{}:{}".format(key, sub_key) + data_deque.append((new_key, sub_value)) + continue + fill_data[key] = value + + for key, value in fill_data.items(): + replacement_key = "{" + key + "}" + stylesheet = stylesheet.replace(replacement_key, value) + return stylesheet + + +def _load_font(): + """Load and register fonts into Qt application.""" + from qtpy import QtGui + + # Check if font ids are still loaded + if _Cache.font_ids is not None: + for font_id in tuple(_Cache.font_ids): + font_families = QtGui.QFontDatabase.applicationFontFamilies( + font_id + ) + # Reset font if font id is not available + if not font_families: + _Cache.font_ids = None + break + + if _Cache.font_ids is None: + _Cache.font_ids = [] + fonts_dirpath = os.path.join(current_dir, "fonts") + font_dirs = [] + font_dirs.append(os.path.join(fonts_dirpath, "Noto_Sans")) + font_dirs.append(os.path.join( + fonts_dirpath, + "Noto_Sans_Mono", + "static", + "NotoSansMono" + )) + + loaded_fonts = [] + for font_dir in font_dirs: + for filename in os.listdir(font_dir): + if os.path.splitext(filename)[1] not in [".ttf"]: + continue + full_path = os.path.join(font_dir, filename) + font_id = QtGui.QFontDatabase.addApplicationFont(full_path) + _Cache.font_ids.append(font_id) + font_families = QtGui.QFontDatabase.applicationFontFamilies( + font_id + ) + loaded_fonts.extend(font_families) + print("Registered font families: {}".format(", ".join(loaded_fonts))) + + +def load_stylesheet(): + """Load and return OpenPype Qt stylesheet.""" + + if _Cache.stylesheet is None: + _Cache.stylesheet = _load_stylesheet() + _load_font() + return _Cache.stylesheet + + +def get_app_icon_path(): + """Path to OpenPype icon.""" + return resources.get_ayon_icon_filepath() + + +def app_icon_path(): + # Backwards compatibility + return get_app_icon_path() + + +def get_default_tools_icon_color(): + """Default color used in tool icons. + + Color must be possible to parse using QColor. + + Returns: + str: Color as a string. + """ + if _Cache.tools_icon_color is None: + color_data = get_colors_data() + _Cache.tools_icon_color = color_data["icon-tools"] + return _Cache.tools_icon_color + + +def get_default_entity_icon_color(): + """Default color of entities icons. + + Color must be possible to parse using QColor. + + Returns: + str: Color as a string. + """ + if _Cache.default_entity_icon_color is None: + color_data = get_colors_data() + _Cache.default_entity_icon_color = color_data["icon-entity-default"] + return _Cache.default_entity_icon_color + + +def get_disabled_entity_icon_color(): + """Default color of entities icons. + + TODO: Find more suitable function name. + + Color must be possible to parse using QColor. + + Returns: + str: Color as a string. + """ + if _Cache.disabled_entity_icon_color is None: + color_data = get_colors_data() + _Cache.disabled_entity_icon_color = color_data["icon-entity-disabled"] + return _Cache.disabled_entity_icon_color + + +def get_deprecated_entity_font_color(): + """Font color for deprecated entities. + + Color must be possible to parse using QColor. 
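A minimal sketch of consuming these style helpers in a Qt application; it assumes a Qt binding resolvable by qtpy is installed:

from qtpy import QtGui, QtWidgets

from ayon_core.style import load_stylesheet, get_default_entity_icon_color

app = QtWidgets.QApplication([])
# The first call loads fonts, registers Qt resources and caches the css.
app.setStyleSheet(load_stylesheet())

# Color helpers return strings parseable by QColor.
icon_color = QtGui.QColor(get_default_entity_icon_color())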
+ + Returns: + str: Color as a string. + """ + if _Cache.deprecated_entity_font_color is None: + color_data = get_colors_data() + _Cache.deprecated_entity_font_color = ( + color_data["font-entity-deprecated"] + ) + return _Cache.deprecated_entity_font_color diff --git a/openpype/style/color_defs.py b/client/ayon_core/style/color_defs.py similarity index 100% rename from openpype/style/color_defs.py rename to client/ayon_core/style/color_defs.py diff --git a/openpype/style/data.json b/client/ayon_core/style/data.json similarity index 100% rename from openpype/style/data.json rename to client/ayon_core/style/data.json diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-Bold.ttf b/client/ayon_core/style/fonts/Noto_Sans/NotoSans-Bold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans/NotoSans-Bold.ttf rename to client/ayon_core/style/fonts/Noto_Sans/NotoSans-Bold.ttf diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-BoldItalic.ttf b/client/ayon_core/style/fonts/Noto_Sans/NotoSans-BoldItalic.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans/NotoSans-BoldItalic.ttf rename to client/ayon_core/style/fonts/Noto_Sans/NotoSans-BoldItalic.ttf diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-Italic.ttf b/client/ayon_core/style/fonts/Noto_Sans/NotoSans-Italic.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans/NotoSans-Italic.ttf rename to client/ayon_core/style/fonts/Noto_Sans/NotoSans-Italic.ttf diff --git a/openpype/style/fonts/Noto_Sans/NotoSans-Regular.ttf b/client/ayon_core/style/fonts/Noto_Sans/NotoSans-Regular.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans/NotoSans-Regular.ttf rename to client/ayon_core/style/fonts/Noto_Sans/NotoSans-Regular.ttf diff --git a/openpype/style/fonts/Noto_Sans/OFL.txt b/client/ayon_core/style/fonts/Noto_Sans/OFL.txt similarity index 100% rename from openpype/style/fonts/Noto_Sans/OFL.txt rename to client/ayon_core/style/fonts/Noto_Sans/OFL.txt diff --git a/openpype/style/fonts/Noto_Sans_Mono/NotoSansMono-VariableFont_wdth,wght.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/NotoSansMono-VariableFont_wdth,wght.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/NotoSansMono-VariableFont_wdth,wght.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/NotoSansMono-VariableFont_wdth,wght.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/OFL.txt b/client/ayon_core/style/fonts/Noto_Sans_Mono/OFL.txt similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/OFL.txt rename to client/ayon_core/style/fonts/Noto_Sans_Mono/OFL.txt diff --git a/openpype/style/fonts/Noto_Sans_Mono/README.txt b/client/ayon_core/style/fonts/Noto_Sans_Mono/README.txt similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/README.txt rename to client/ayon_core/style/fonts/Noto_Sans_Mono/README.txt diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Black.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Black.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Black.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Black.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Bold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Bold.ttf similarity index 100% rename from 
openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Bold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Bold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraLight.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraLight.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraLight.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-ExtraLight.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Light.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Light.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Light.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Light.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Medium.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Medium.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Medium.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Medium.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Regular.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Regular.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Regular.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Regular.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-SemiBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-SemiBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-SemiBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-SemiBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Thin.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Thin.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Thin.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono/NotoSansMono-Thin.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Black.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf 
b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Bold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-ExtraLight.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Light.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Medium.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Regular.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-SemiBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf similarity index 100% rename from 
openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_Condensed/NotoSansMono_Condensed-Thin.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Black.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Bold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-ExtraLight.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Light.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Medium.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf 
b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Regular.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-SemiBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_ExtraCondensed/NotoSansMono_ExtraCondensed-Thin.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Black.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Bold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-ExtraLight.ttf diff --git 
a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Light.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Medium.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Regular.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-SemiBold.ttf diff --git a/openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf b/client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf similarity index 100% rename from openpype/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf rename to client/ayon_core/style/fonts/Noto_Sans_Mono/static/NotoSansMono_SemiCondensed/NotoSansMono_SemiCondensed-Thin.ttf diff --git a/openpype/style/images/branch_closed.png b/client/ayon_core/style/images/branch_closed.png similarity index 100% rename from openpype/style/images/branch_closed.png rename to client/ayon_core/style/images/branch_closed.png diff --git a/openpype/style/images/branch_closed_on.png b/client/ayon_core/style/images/branch_closed_on.png similarity index 100% rename from openpype/style/images/branch_closed_on.png rename to client/ayon_core/style/images/branch_closed_on.png diff --git a/openpype/style/images/branch_open.png b/client/ayon_core/style/images/branch_open.png similarity index 100% rename from openpype/style/images/branch_open.png rename to client/ayon_core/style/images/branch_open.png diff --git a/openpype/style/images/branch_open_on.png b/client/ayon_core/style/images/branch_open_on.png similarity index 100% rename from openpype/style/images/branch_open_on.png rename to client/ayon_core/style/images/branch_open_on.png diff --git a/openpype/style/images/checkbox_checked.png 
b/client/ayon_core/style/images/checkbox_checked.png similarity index 100% rename from openpype/style/images/checkbox_checked.png rename to client/ayon_core/style/images/checkbox_checked.png diff --git a/openpype/style/images/checkbox_checked_disabled.png b/client/ayon_core/style/images/checkbox_checked_disabled.png similarity index 100% rename from openpype/style/images/checkbox_checked_disabled.png rename to client/ayon_core/style/images/checkbox_checked_disabled.png diff --git a/openpype/style/images/checkbox_checked_focus.png b/client/ayon_core/style/images/checkbox_checked_focus.png similarity index 100% rename from openpype/style/images/checkbox_checked_focus.png rename to client/ayon_core/style/images/checkbox_checked_focus.png diff --git a/openpype/style/images/checkbox_checked_hover.png b/client/ayon_core/style/images/checkbox_checked_hover.png similarity index 100% rename from openpype/style/images/checkbox_checked_hover.png rename to client/ayon_core/style/images/checkbox_checked_hover.png diff --git a/openpype/style/images/checkbox_indeterminate.png b/client/ayon_core/style/images/checkbox_indeterminate.png similarity index 100% rename from openpype/style/images/checkbox_indeterminate.png rename to client/ayon_core/style/images/checkbox_indeterminate.png diff --git a/openpype/style/images/checkbox_indeterminate_disabled.png b/client/ayon_core/style/images/checkbox_indeterminate_disabled.png similarity index 100% rename from openpype/style/images/checkbox_indeterminate_disabled.png rename to client/ayon_core/style/images/checkbox_indeterminate_disabled.png diff --git a/openpype/style/images/checkbox_indeterminate_focus.png b/client/ayon_core/style/images/checkbox_indeterminate_focus.png similarity index 100% rename from openpype/style/images/checkbox_indeterminate_focus.png rename to client/ayon_core/style/images/checkbox_indeterminate_focus.png diff --git a/openpype/style/images/checkbox_indeterminate_hover.png b/client/ayon_core/style/images/checkbox_indeterminate_hover.png similarity index 100% rename from openpype/style/images/checkbox_indeterminate_hover.png rename to client/ayon_core/style/images/checkbox_indeterminate_hover.png diff --git a/openpype/style/images/checkbox_unchecked.png b/client/ayon_core/style/images/checkbox_unchecked.png similarity index 100% rename from openpype/style/images/checkbox_unchecked.png rename to client/ayon_core/style/images/checkbox_unchecked.png diff --git a/openpype/style/images/checkbox_unchecked_disabled.png b/client/ayon_core/style/images/checkbox_unchecked_disabled.png similarity index 100% rename from openpype/style/images/checkbox_unchecked_disabled.png rename to client/ayon_core/style/images/checkbox_unchecked_disabled.png diff --git a/openpype/style/images/checkbox_unchecked_focus.png b/client/ayon_core/style/images/checkbox_unchecked_focus.png similarity index 100% rename from openpype/style/images/checkbox_unchecked_focus.png rename to client/ayon_core/style/images/checkbox_unchecked_focus.png diff --git a/openpype/style/images/checkbox_unchecked_hover.png b/client/ayon_core/style/images/checkbox_unchecked_hover.png similarity index 100% rename from openpype/style/images/checkbox_unchecked_hover.png rename to client/ayon_core/style/images/checkbox_unchecked_hover.png diff --git a/openpype/style/images/combobox_arrow.png b/client/ayon_core/style/images/combobox_arrow.png similarity index 100% rename from openpype/style/images/combobox_arrow.png rename to client/ayon_core/style/images/combobox_arrow.png diff --git 
a/openpype/style/images/combobox_arrow_disabled.png b/client/ayon_core/style/images/combobox_arrow_disabled.png similarity index 100% rename from openpype/style/images/combobox_arrow_disabled.png rename to client/ayon_core/style/images/combobox_arrow_disabled.png diff --git a/openpype/style/images/combobox_arrow_on.png b/client/ayon_core/style/images/combobox_arrow_on.png similarity index 100% rename from openpype/style/images/combobox_arrow_on.png rename to client/ayon_core/style/images/combobox_arrow_on.png diff --git a/openpype/style/images/down_arrow.png b/client/ayon_core/style/images/down_arrow.png similarity index 100% rename from openpype/style/images/down_arrow.png rename to client/ayon_core/style/images/down_arrow.png diff --git a/openpype/style/images/down_arrow_disabled.png b/client/ayon_core/style/images/down_arrow_disabled.png similarity index 100% rename from openpype/style/images/down_arrow_disabled.png rename to client/ayon_core/style/images/down_arrow_disabled.png diff --git a/openpype/style/images/down_arrow_on.png b/client/ayon_core/style/images/down_arrow_on.png similarity index 100% rename from openpype/style/images/down_arrow_on.png rename to client/ayon_core/style/images/down_arrow_on.png diff --git a/openpype/style/images/left_arrow.png b/client/ayon_core/style/images/left_arrow.png similarity index 100% rename from openpype/style/images/left_arrow.png rename to client/ayon_core/style/images/left_arrow.png diff --git a/openpype/style/images/left_arrow_disabled.png b/client/ayon_core/style/images/left_arrow_disabled.png similarity index 100% rename from openpype/style/images/left_arrow_disabled.png rename to client/ayon_core/style/images/left_arrow_disabled.png diff --git a/openpype/style/images/left_arrow_on.png b/client/ayon_core/style/images/left_arrow_on.png similarity index 100% rename from openpype/style/images/left_arrow_on.png rename to client/ayon_core/style/images/left_arrow_on.png diff --git a/openpype/style/images/right_arrow.png b/client/ayon_core/style/images/right_arrow.png similarity index 100% rename from openpype/style/images/right_arrow.png rename to client/ayon_core/style/images/right_arrow.png diff --git a/openpype/style/images/right_arrow_disabled.png b/client/ayon_core/style/images/right_arrow_disabled.png similarity index 100% rename from openpype/style/images/right_arrow_disabled.png rename to client/ayon_core/style/images/right_arrow_disabled.png diff --git a/openpype/style/images/right_arrow_on.png b/client/ayon_core/style/images/right_arrow_on.png similarity index 100% rename from openpype/style/images/right_arrow_on.png rename to client/ayon_core/style/images/right_arrow_on.png diff --git a/openpype/style/images/transparent.png b/client/ayon_core/style/images/transparent.png similarity index 100% rename from openpype/style/images/transparent.png rename to client/ayon_core/style/images/transparent.png diff --git a/openpype/style/images/up_arrow.png b/client/ayon_core/style/images/up_arrow.png similarity index 100% rename from openpype/style/images/up_arrow.png rename to client/ayon_core/style/images/up_arrow.png diff --git a/openpype/style/images/up_arrow_disabled.png b/client/ayon_core/style/images/up_arrow_disabled.png similarity index 100% rename from openpype/style/images/up_arrow_disabled.png rename to client/ayon_core/style/images/up_arrow_disabled.png diff --git a/openpype/style/images/up_arrow_on.png b/client/ayon_core/style/images/up_arrow_on.png similarity index 100% rename from openpype/style/images/up_arrow_on.png rename to 
client/ayon_core/style/images/up_arrow_on.png diff --git a/openpype/style/pyqt5_resources.py b/client/ayon_core/style/pyqt5_resources.py similarity index 100% rename from openpype/style/pyqt5_resources.py rename to client/ayon_core/style/pyqt5_resources.py diff --git a/openpype/style/pyside2_resources.py b/client/ayon_core/style/pyside2_resources.py similarity index 100% rename from openpype/style/pyside2_resources.py rename to client/ayon_core/style/pyside2_resources.py diff --git a/openpype/style/pyside6_resources.py b/client/ayon_core/style/pyside6_resources.py similarity index 100% rename from openpype/style/pyside6_resources.py rename to client/ayon_core/style/pyside6_resources.py diff --git a/openpype/style/qrc_resources.py b/client/ayon_core/style/qrc_resources.py similarity index 100% rename from openpype/style/qrc_resources.py rename to client/ayon_core/style/qrc_resources.py diff --git a/openpype/style/resources.qrc b/client/ayon_core/style/resources.qrc similarity index 100% rename from openpype/style/resources.qrc rename to client/ayon_core/style/resources.qrc diff --git a/openpype/style/style.css b/client/ayon_core/style/style.css similarity index 100% rename from openpype/style/style.css rename to client/ayon_core/style/style.css diff --git a/openpype/tests/README.md b/client/ayon_core/tests/README.md similarity index 100% rename from openpype/tests/README.md rename to client/ayon_core/tests/README.md diff --git a/openpype/modules/shotgrid/lib/__init__.py b/client/ayon_core/tests/__init__.py similarity index 100% rename from openpype/modules/shotgrid/lib/__init__.py rename to client/ayon_core/tests/__init__.py diff --git a/openpype/tests/lib.py b/client/ayon_core/tests/lib.py similarity index 100% rename from openpype/tests/lib.py rename to client/ayon_core/tests/lib.py diff --git a/openpype/tests/mongo_performance.py b/client/ayon_core/tests/mongo_performance.py similarity index 100% rename from openpype/tests/mongo_performance.py rename to client/ayon_core/tests/mongo_performance.py diff --git a/openpype/tests/test_avalon_plugin_presets.py b/client/ayon_core/tests/test_avalon_plugin_presets.py similarity index 97% rename from openpype/tests/test_avalon_plugin_presets.py rename to client/ayon_core/tests/test_avalon_plugin_presets.py index 464c216d6f..4926286ca3 100644 --- a/openpype/tests/test_avalon_plugin_presets.py +++ b/client/ayon_core/tests/test_avalon_plugin_presets.py @@ -1,4 +1,4 @@ -from openpype.pipeline import ( +from ayon_core.pipeline import ( install_host, LegacyCreator, register_creator_plugin, diff --git a/client/ayon_core/tests/test_lib_restructuralization.py b/client/ayon_core/tests/test_lib_restructuralization.py new file mode 100644 index 0000000000..ffbd62b045 --- /dev/null +++ b/client/ayon_core/tests/test_lib_restructuralization.py @@ -0,0 +1,25 @@ +# Test backward compatibility of the restructuring of lib.py into the lib library. +# Contains simple imports that should still work. + + +def test_backward_compatibility(printer): + printer("Test if imports still work") + try: + from ayon_core.lib import execute_hook + from ayon_core.lib import PypeHook + + from ayon_core.lib import ApplicationLaunchFailed + + from ayon_core.lib import get_ffmpeg_tool_path + from ayon_core.lib import get_last_version_from_path + from ayon_core.lib import get_paths_from_environ + from ayon_core.lib import get_version_from_path + from ayon_core.lib import version_up + + from ayon_core.lib import get_ffprobe_streams + + from ayon_core.lib import source_hash + from ayon_core.lib
import run_subprocess + + except ImportError: + raise diff --git a/openpype/tests/test_pyblish_filter.py b/client/ayon_core/tests/test_pyblish_filter.py similarity index 96% rename from openpype/tests/test_pyblish_filter.py rename to client/ayon_core/tests/test_pyblish_filter.py index b74784145f..bc20f863c9 100644 --- a/openpype/tests/test_pyblish_filter.py +++ b/client/ayon_core/tests/test_pyblish_filter.py @@ -2,7 +2,7 @@ import pyblish.api import pyblish.util import pyblish.plugin -from openpype.pipeline.publish.lib import filter_pyblish_plugins +from ayon_core.pipeline.publish.lib import filter_pyblish_plugins from . import lib diff --git a/openpype/modules/sync_server/providers/__init__.py b/client/ayon_core/tools/__init__.py similarity index 100% rename from openpype/modules/sync_server/providers/__init__.py rename to client/ayon_core/tools/__init__.py diff --git a/client/ayon_core/tools/adobe_webserver/app.py b/client/ayon_core/tools/adobe_webserver/app.py new file mode 100644 index 0000000000..893c076020 --- /dev/null +++ b/client/ayon_core/tools/adobe_webserver/app.py @@ -0,0 +1,238 @@ +"""This webserver tool is Python 3 specific. + +Don't import it directly into avalon.tools, or the implementation of +Python 2 hosts would break. +""" +import os +import logging +import urllib +import threading +import asyncio +import socket + +from aiohttp import web + +from wsrpc_aiohttp import ( + WSRPCClient +) + +from ayon_core.pipeline import get_global_context + +log = logging.getLogger(__name__) + + +class WebServerTool: + """ + Basic POC implementation of an asynchronous websocket RPC server. + Uses a class in external_app_1.py to mimic the implementation for a + single external application. + The 'test_client' folder contains two test implementations of the client. + """ + _instance = None + + def __init__(self): + WebServerTool._instance = self + + self.client = None + self.handlers = {} + self.on_stop_callbacks = [] + + port = None + host_name = "localhost" + websocket_url = os.getenv("WEBSOCKET_URL") + if websocket_url: + parsed = urllib.parse.urlparse(websocket_url) + port = parsed.port + host_name = parsed.netloc.split(":")[0] + if not port: + port = 8098 # fallback + + self.port = port + self.host_name = host_name + + self.app = web.Application() + + # add route with multiple methods for single "external app" + self.webserver_thread = WebServerThread(self, self.port) + + def add_route(self, *args, **kwargs): + self.app.router.add_route(*args, **kwargs) + + def add_static(self, *args, **kwargs): + self.app.router.add_static(*args, **kwargs) + + def start_server(self): + if self.webserver_thread and not self.webserver_thread.is_alive(): + self.webserver_thread.start() + + def stop_server(self): + self.stop() + + async def send_context_change(self, host): + """ + Call the running webserver to inform it about a context change. + + Used when a new PS/AE should be triggered while one is already + running; without this, publishing would point to the old context. + """ + client = WSRPCClient(os.getenv("WEBSOCKET_URL"), + loop=asyncio.get_event_loop()) + await client.connect() + + context = get_global_context() + project = context["project_name"] + asset = context["asset_name"] + task = context["task_name"] + log.info("Sending context change to {}-{}-{}".format(project, + asset, + task)) + + await client.call('{}.set_context'.format(host), + project=project, asset=asset, task=task) + await client.close() + + def port_occupied(self, host_name, port): + """ + Check if the given host and port are already occupied.
+ + This could mean that the app is already running and we are trying + to open it again. In that case, use the existing running webserver. + Checking here is easier than capturing an exception from the thread. + """ + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + result = True + try: + sock.bind((host_name, port)) + result = False + except OSError: + print("Port is in use") + + return result + + def call(self, func): + log.debug("websocket.call {}".format(func)) + future = asyncio.run_coroutine_threadsafe( + func, + self.webserver_thread.loop + ) + result = future.result() + return result + + @staticmethod + def get_instance(): + if WebServerTool._instance is None: + WebServerTool() + return WebServerTool._instance + + @property + def is_running(self): + if not self.webserver_thread: + return False + return self.webserver_thread.is_running + + def stop(self): + if not self.is_running: + return + try: + log.debug("Stopping websocket server") + self.webserver_thread.is_running = False + self.webserver_thread.stop() + except Exception: + log.warning( + "Error happened while killing websocket server", + exc_info=True + ) + + def thread_stopped(self): + for callback in self.on_stop_callbacks: + callback() + + +class WebServerThread(threading.Thread): + """ Listener for websocket rpc requests. + + It would probably be better to "attach" this to the main thread (as, + for example, Harmony needs to run something on the main thread), but + currently it creates a separate thread and a separate asyncio event loop. + """ + def __init__(self, module, port): + super(WebServerThread, self).__init__() + + self.is_running = False + self.port = port + self.module = module + self.loop = None + self.runner = None + self.site = None + self.tasks = [] + + def run(self): + self.is_running = True + + try: + log.info("Starting web server") + self.loop = asyncio.new_event_loop() # create new loop for thread + asyncio.set_event_loop(self.loop) + + self.loop.run_until_complete(self.start_server()) + + websocket_url = "ws://localhost:{}/ws".format(self.port) + + log.debug( + "Running Websocket server on URL: \"{}\"".format(websocket_url) + ) + + asyncio.ensure_future(self.check_shutdown(), loop=self.loop) + self.loop.run_forever() + except Exception: + self.is_running = False + log.warning( + "Websocket Server service has failed", exc_info=True + ) + raise + finally: + self.loop.close() # optional + + self.is_running = False + self.module.thread_stopped() + log.info("Websocket server stopped") + + async def start_server(self): + """Start the runner and the TCP site.""" + self.runner = web.AppRunner(self.module.app) + await self.runner.setup() + self.site = web.TCPSite(self.runner, 'localhost', self.port) + await self.site.start() + + def stop(self): + """Set 'is_running' flag to False; 'check_shutdown' then shuts the server down.""" + self.is_running = False + + async def check_shutdown(self): + """ Periodically running future that checks whether the server + should keep running.
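+ + The shutdown sequence below stops the TCP site, cleans up the runner, cancels remaining asyncio tasks and finally stops the event loop.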
+ """ + while self.is_running: + while self.tasks: + task = self.tasks.pop(0) + log.debug("waiting for task {}".format(task)) + await task + log.debug("returned value {}".format(task.result)) + + await asyncio.sleep(0.5) + + log.debug("Starting shutdown") + await self.site.stop() + log.debug("Site stopped") + await self.runner.cleanup() + log.debug("Runner stopped") + tasks = [task for task in asyncio.all_tasks() if + task is not asyncio.current_task()] + list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks + results = await asyncio.gather(*tasks, return_exceptions=True) + log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + await self.loop.shutdown_asyncgens() + # to really make sure everything else has time to stop + await asyncio.sleep(0.07) + self.loop.stop() diff --git a/openpype/tools/adobe_webserver/readme.txt b/client/ayon_core/tools/adobe_webserver/readme.txt similarity index 85% rename from openpype/tools/adobe_webserver/readme.txt rename to client/ayon_core/tools/adobe_webserver/readme.txt index 06cf140fc4..d02d390277 100644 --- a/openpype/tools/adobe_webserver/readme.txt +++ b/client/ayon_core/tools/adobe_webserver/readme.txt @@ -8,5 +8,5 @@ This webserver is started in spawned Python process that opens DCC during its launch, waits for connection from DCC and handles communication going forward. Server is closed before Python process is killed. -(Different from `openpype/modules/webserver` as that one is running in Tray, +(Different from `ayon_core/modules/webserver` as that one is running in Tray, this one is running in spawn Python process.) \ No newline at end of file diff --git a/openpype/pipeline/farm/__init__.py b/client/ayon_core/tools/assetlinks/__init__.py similarity index 100% rename from openpype/pipeline/farm/__init__.py rename to client/ayon_core/tools/assetlinks/__init__.py diff --git a/client/ayon_core/tools/assetlinks/widgets.py b/client/ayon_core/tools/assetlinks/widgets.py new file mode 100644 index 0000000000..7db6243358 --- /dev/null +++ b/client/ayon_core/tools/assetlinks/widgets.py @@ -0,0 +1,155 @@ +import collections +from ayon_core.client import ( + get_versions, + get_subsets, + get_assets, + get_output_link_versions, +) + +from qtpy import QtWidgets + + +class SimpleLinkView(QtWidgets.QWidget): + def __init__(self, dbcon, parent): + super(SimpleLinkView, self).__init__(parent=parent) + self.dbcon = dbcon + + # TODO: display selected target + + in_text = QtWidgets.QLabel("Inputs") + in_view = QtWidgets.QListWidget(parent=self) + out_text = QtWidgets.QLabel("Outputs") + out_view = QtWidgets.QListWidget(parent=self) + + layout = QtWidgets.QGridLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(in_text, 0, 0) + layout.addWidget(in_view, 1, 0) + layout.addWidget(out_text, 0, 1) + layout.addWidget(out_view, 1, 1) + + self._in_view = in_view + self._out_view = out_view + self._version_doc_to_process = None + + @property + def project_name(self): + return self.dbcon.current_project() + + def clear(self): + self._in_view.clear() + self._out_view.clear() + + def set_version(self, version_doc): + self.clear() + self._version_doc_to_process = version_doc + if version_doc and self.isVisible(): + self._fill_values() + + def showEvent(self, event): + super(SimpleLinkView, self).showEvent(event) + self._fill_values() + + def _fill_values(self): + if self._version_doc_to_process is None: + return + version_doc = self._version_doc_to_process + self._version_doc_to_process = None + self._fill_inputs(version_doc) + 
self._fill_outputs(version_doc) + + def _fill_inputs(self, version_doc): + version_ids = set() + for link in version_doc["data"].get("inputLinks", []): + # Backwards compatibility for "input" key used as "id" + if "id" not in link: + link_id = link["input"] + else: + link_id = link["id"] + version_ids.add(link_id) + + version_docs = list(get_versions( + self.project_name, + version_ids=version_ids, + fields=["name", "parent"] + )) + + versions_by_subset_id = collections.defaultdict(list) + for version_doc in version_docs: + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + subset_docs = [] + if versions_by_subset_id: + subset_docs = list(get_subsets( + self.project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id", "name", "parent"] + )) + + asset_docs = [] + subsets_by_asset_id = collections.defaultdict(list) + if subset_docs: + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subsets_by_asset_id[asset_id].append(subset_doc) + + asset_docs = list(get_assets( + self.project_name, + asset_ids=subsets_by_asset_id.keys(), + fields=["_id", "name"] + )) + + for asset_doc in asset_docs: + asset_id = asset_doc["_id"] + for subset_doc in subsets_by_asset_id[asset_id]: + subset_id = subset_doc["_id"] + for version_doc in versions_by_subset_id[subset_id]: + self._in_view.addItem("{} {} v{:0>3}".format( + asset_doc["name"], + subset_doc["name"], + version_doc["name"], + )) + + def _fill_outputs(self, version_doc): + version_docs = list(get_output_link_versions( + self.project_name, + version_doc["_id"], + fields=["name", "parent"] + )) + versions_by_subset_id = collections.defaultdict(list) + for version_doc in version_docs: + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + subset_docs = [] + if versions_by_subset_id: + subset_docs = list(get_subsets( + self.project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id", "name", "parent"] + )) + + asset_docs = [] + subsets_by_asset_id = collections.defaultdict(list) + if subset_docs: + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subsets_by_asset_id[asset_id].append(subset_doc) + + asset_docs = list(get_assets( + self.project_name, + asset_ids=subsets_by_asset_id.keys(), + fields=["_id", "name"] + )) + + for asset_doc in asset_docs: + asset_id = asset_doc["_id"] + for subset_doc in subsets_by_asset_id[asset_id]: + subset_id = subset_doc["_id"] + for version_doc in versions_by_subset_id[subset_id]: + self._out_view.addItem("{} {} v{:0>3}".format( + asset_doc["name"], + subset_doc["name"], + version_doc["name"], + )) diff --git a/openpype/tools/attribute_defs/__init__.py b/client/ayon_core/tools/attribute_defs/__init__.py similarity index 100% rename from openpype/tools/attribute_defs/__init__.py rename to client/ayon_core/tools/attribute_defs/__init__.py diff --git a/openpype/tools/attribute_defs/dialog.py b/client/ayon_core/tools/attribute_defs/dialog.py similarity index 100% rename from openpype/tools/attribute_defs/dialog.py rename to client/ayon_core/tools/attribute_defs/dialog.py diff --git a/client/ayon_core/tools/attribute_defs/files_widget.py b/client/ayon_core/tools/attribute_defs/files_widget.py new file mode 100644 index 0000000000..95091bed5a --- /dev/null +++ b/client/ayon_core/tools/attribute_defs/files_widget.py @@ -0,0 +1,1015 @@ +import os +import collections +import uuid +import json + +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core.lib import FileDefItem +from 
ayon_core.tools.utils import ( + paint_image_with_color, + ClickableLabel, +) +# TODO change imports +from ayon_core.tools.resources import get_image +from ayon_core.tools.utils import ( + IconButton, + PixmapLabel +) + +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 +ITEM_LABEL_ROLE = QtCore.Qt.UserRole + 2 +ITEM_ICON_ROLE = QtCore.Qt.UserRole + 3 +FILENAMES_ROLE = QtCore.Qt.UserRole + 4 +DIRPATH_ROLE = QtCore.Qt.UserRole + 5 +IS_DIR_ROLE = QtCore.Qt.UserRole + 6 +IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7 +EXT_ROLE = QtCore.Qt.UserRole + 8 + + +def convert_bytes_to_json(bytes_value): + if isinstance(bytes_value, QtCore.QByteArray): + # Raw data are already QByteArray and we don't have to load them + encoded_data = bytes_value + else: + encoded_data = QtCore.QByteArray.fromRawData(bytes_value) + stream = QtCore.QDataStream(encoded_data, QtCore.QIODevice.ReadOnly) + text = stream.readQString() + try: + return json.loads(text) + except Exception: + return None + + +def convert_data_to_bytes(data): + bytes_value = QtCore.QByteArray() + stream = QtCore.QDataStream(bytes_value, QtCore.QIODevice.WriteOnly) + stream.writeQString(json.dumps(data)) + return bytes_value + + +class SupportLabel(QtWidgets.QLabel): + pass + + +class DropEmpty(QtWidgets.QWidget): + _empty_extensions = "Any file" + + def __init__(self, single_item, allow_sequences, extensions_label, parent): + super(DropEmpty, self).__init__(parent) + + drop_label_widget = QtWidgets.QLabel("Drag & Drop files here", self) + + items_label_widget = SupportLabel(self) + items_label_widget.setWordWrap(True) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addSpacing(20) + layout.addWidget( + drop_label_widget, 0, alignment=QtCore.Qt.AlignCenter + ) + layout.addSpacing(30) + layout.addStretch(1) + layout.addWidget( + items_label_widget, 0, alignment=QtCore.Qt.AlignCenter + ) + layout.addSpacing(10) + + for widget in ( + drop_label_widget, + items_label_widget, + ): + widget.setAlignment(QtCore.Qt.AlignCenter) + widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + update_size_timer = QtCore.QTimer() + update_size_timer.setInterval(10) + update_size_timer.setSingleShot(True) + + update_size_timer.timeout.connect(self._on_update_size_timer) + + self._update_size_timer = update_size_timer + + if extensions_label and not extensions_label.startswith(" "): + extensions_label = " " + extensions_label + + self._single_item = single_item + self._extensions_label = extensions_label + self._allow_sequences = allow_sequences + self._allowed_extensions = set() + self._allow_folders = None + + self._drop_label_widget = drop_label_widget + self._items_label_widget = items_label_widget + + self.set_allow_folders(False) + + def set_extensions(self, extensions): + if extensions: + extensions = { + ext.replace(".", "") + for ext in extensions + } + if extensions == self._allowed_extensions: + return + self._allowed_extensions = extensions + + self._update_items_label() + + def set_allow_folders(self, allowed): + if self._allow_folders == allowed: + return + + self._allow_folders = allowed + self._update_items_label() + + def _update_items_label(self): + allowed_items = [] + if self._allow_folders: + allowed_items.append("folder") + + if self._allowed_extensions: + allowed_items.append("file") + if self._allow_sequences: + allowed_items.append("sequence") + + if not self._single_item: + allowed_items = [item + "s" for item in allowed_items] + + if not allowed_items: + self._drop_label_widget.setVisible(False) + 
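# Nothing is allowed here - hide the drop hint and show an explanatory message instead. +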
self._items_label_widget.setText( + "It is not allowed to add anything here!" + ) + return + + self._drop_label_widget.setVisible(True) + items_label = "Multiple " + if self._single_item: + items_label = "Single " + + if len(allowed_items) == 1: + extensions_label = allowed_items[0] + elif len(allowed_items) == 2: + extensions_label = " or ".join(allowed_items) + else: + last_item = allowed_items.pop(-1) + new_last_item = " or ".join([last_item, allowed_items.pop(-1)]) + allowed_items.append(new_last_item) + extensions_label = ", ".join(allowed_items) + + allowed_items_label = extensions_label + + items_label += allowed_items_label + label_tooltip = None + if self._allowed_extensions: + items_label += " of\n{}".format( + ", ".join(sorted(self._allowed_extensions)) + ) + + if self._extensions_label: + label_tooltip = items_label + items_label = self._extensions_label + + if self._items_label_widget.text() == items_label: + return + + self._items_label_widget.setToolTip(label_tooltip) + self._items_label_widget.setText(items_label) + self._update_size_timer.start() + + def resizeEvent(self, event): + super(DropEmpty, self).resizeEvent(event) + self._update_size_timer.start() + + def _on_update_size_timer(self): + """Recalculate height of label with extensions. + + A dynamic QLabel with word wrap does not properly handle its sizeHint + calculation on show. This way it is recalculated. It is good practice + to trigger this method with a small offset using '_update_size_timer'. + """ + + width = self._items_label_widget.width() + height = self._items_label_widget.heightForWidth(width) + self._items_label_widget.setMinimumHeight(height) + self._items_label_widget.updateGeometry() + + def paintEvent(self, event): + super(DropEmpty, self).paintEvent(event) + + pen = QtGui.QPen() + pen.setBrush(QtCore.Qt.darkGray) + pen.setStyle(QtCore.Qt.DashLine) + pen.setWidth(1) + + content_margins = self.layout().contentsMargins() + rect = self.rect() + left_m = content_margins.left() + pen.width() + top_m = content_margins.top() + pen.width() + new_rect = QtCore.QRect( + left_m, + top_m, + ( + rect.width() + - (left_m + content_margins.right() + pen.width()) + ), + ( + rect.height() + - (top_m + content_margins.bottom() + pen.width()) + ) + ) + + painter = QtGui.QPainter(self) + painter.setRenderHint(QtGui.QPainter.Antialiasing) + painter.setPen(pen) + painter.drawRect(new_rect) + + +class FilesModel(QtGui.QStandardItemModel): + def __init__(self, single_item, allow_sequences): + super(FilesModel, self).__init__() + + self._id = str(uuid.uuid4()) + self._single_item = single_item + self._multivalue = False + self._allow_sequences = allow_sequences + + self._items_by_id = {} + self._file_items_by_id = {} + self._filenames_by_dirpath = collections.defaultdict(set) + self._items_by_dirpath = collections.defaultdict(list) + + self.rowsAboutToBeRemoved.connect(self._on_about_to_be_removed) + self.rowsInserted.connect(self._on_insert) + + @property + def id(self): + return self._id + + def _on_about_to_be_removed(self, parent_index, start, end): + """Make sure that removed items are removed from items mapping. + + Connected with '_on_insert'. When the user drags an item and drops it + to the same view, the item is actually removed and created again, but + it happens in inner calls of Qt.
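+ + Without this cleanup '_items_by_id' would keep references to items that are no longer in the model.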
+ """ + + for row in range(start, end + 1): + index = self.index(row, 0, parent_index) + item_id = index.data(ITEM_ID_ROLE) + if item_id is not None: + self._items_by_id.pop(item_id, None) + + def _on_insert(self, parent_index, start, end): + """Make sure new added items are stored in items mapping. + + Connected to '_on_about_to_be_removed'. Some items are not created + using '_create_item' but are recreated using Qt. So the item is not in + mapping and if it would it would not lead to same item pointer. + """ + + for row in range(start, end + 1): + index = self.index(start, end, parent_index) + item_id = index.data(ITEM_ID_ROLE) + if item_id not in self._items_by_id: + self._items_by_id[item_id] = self.item(row) + + def set_multivalue(self, multivalue): + """Disable filtering.""" + + if self._multivalue == multivalue: + return + self._multivalue = multivalue + + def add_filepaths(self, items): + if not items: + return + + if self._multivalue: + _items = [] + for item in items: + if isinstance(item, (tuple, list, set)): + _items.extend(item) + else: + _items.append(item) + items = _items + + file_items = FileDefItem.from_value(items, self._allow_sequences) + if not file_items: + return + + if not self._multivalue and self._single_item: + file_items = [file_items[0]] + current_ids = list(self._file_items_by_id.keys()) + if current_ids: + self.remove_item_by_ids(current_ids) + + new_model_items = [] + for file_item in file_items: + item_id, model_item = self._create_item(file_item) + new_model_items.append(model_item) + self._file_items_by_id[item_id] = file_item + self._items_by_id[item_id] = model_item + + if new_model_items: + roow_item = self.invisibleRootItem() + roow_item.appendRows(new_model_items) + + def remove_item_by_ids(self, item_ids): + if not item_ids: + return + + items = [] + for item_id in set(item_ids): + if item_id not in self._items_by_id: + continue + item = self._items_by_id.pop(item_id) + self._file_items_by_id.pop(item_id) + items.append(item) + + if items: + for item in items: + self.removeRows(item.row(), 1) + + def get_file_item_by_id(self, item_id): + return self._file_items_by_id.get(item_id) + + def _create_item(self, file_item): + if file_item.is_dir: + icon_pixmap = paint_image_with_color( + get_image(filename="folder.png"), QtCore.Qt.white + ) + else: + icon_pixmap = paint_image_with_color( + get_image(filename="file.png"), QtCore.Qt.white + ) + + item = QtGui.QStandardItem() + item_id = str(uuid.uuid4()) + item.setData(item_id, ITEM_ID_ROLE) + item.setData(file_item.label or "< empty >", ITEM_LABEL_ROLE) + item.setData(file_item.filenames, FILENAMES_ROLE) + item.setData(file_item.directory, DIRPATH_ROLE) + item.setData(icon_pixmap, ITEM_ICON_ROLE) + item.setData(file_item.lower_ext, EXT_ROLE) + item.setData(file_item.is_dir, IS_DIR_ROLE) + item.setData(file_item.is_sequence, IS_SEQUENCE_ROLE) + + return item_id, item + + def mimeData(self, indexes): + item_ids = [ + index.data(ITEM_ID_ROLE) + for index in indexes + ] + + item_ids_data = convert_data_to_bytes(item_ids) + mime_data = super(FilesModel, self).mimeData(indexes) + mime_data.setData("files_widget/internal_move", item_ids_data) + + file_items = [] + for item_id in item_ids: + file_item = self.get_file_item_by_id(item_id) + if file_item: + file_items.append(file_item.to_dict()) + + full_item_data = convert_data_to_bytes({ + "items": file_items, + "id": self._id + }) + mime_data.setData("files_widget/full_data", full_item_data) + return mime_data + + def dropMimeData(self, mime_data, action, row, 
col, index): + item_ids = convert_bytes_to_json( + mime_data.data("files_widget/internal_move") + ) + if item_ids is None: + return False + + # Find the matching item after which the items will be moved + # - store the item before the moved items are removed + root = self.invisibleRootItem() + if row >= 0: + src_item = self.item(row) + else: + src_item_id = index.data(ITEM_ID_ROLE) + src_item = self._items_by_id.get(src_item_id) + + src_row = None + if src_item: + src_row = src_item.row() + + # Take out items that should be moved + items = [] + for item_id in item_ids: + item = self._items_by_id.get(item_id) + if item: + self.takeRow(item.row()) + items.append(item) + + # Skip if there are no items that can be moved + if not items: + return False + + # Calculate row where items should be inserted + row_count = root.rowCount() + if src_row is None: + src_row = row_count + + if src_row > row_count: + src_row = row_count + + root.insertRow(src_row, items) + return True + + +class FilesProxyModel(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(FilesProxyModel, self).__init__(*args, **kwargs) + self._allow_folders = False + self._allowed_extensions = None + self._multivalue = False + + def set_multivalue(self, multivalue): + """Disable filtering.""" + + if self._multivalue == multivalue: + return + self._multivalue = multivalue + self.invalidateFilter() + + def set_allow_folders(self, allow=None): + if allow is None: + allow = not self._allow_folders + + if allow == self._allow_folders: + return + self._allow_folders = allow + self.invalidateFilter() + + def set_allowed_extensions(self, extensions=None): + if extensions is not None: + _extensions = set() + for ext in set(extensions): + if not ext.startswith("."): + ext = ".{}".format(ext) + _extensions.add(ext.lower()) + extensions = _extensions + + if self._allowed_extensions != extensions: + self._allowed_extensions = extensions + self.invalidateFilter() + + def are_valid_files(self, filepaths): + for filepath in filepaths: + if os.path.isfile(filepath): + _, ext = os.path.splitext(filepath) + if ext.lower() in self._allowed_extensions: + return True + + elif self._allow_folders: + return True + return False + + def filter_valid_files(self, filepaths): + filtered_paths = [] + for filepath in filepaths: + if os.path.isfile(filepath): + _, ext = os.path.splitext(filepath) + if ext.lower() in self._allowed_extensions: + filtered_paths.append(filepath) + + elif self._allow_folders: + filtered_paths.append(filepath) + return filtered_paths + + def filterAcceptsRow(self, row, parent_index): + # Skip filtering if multivalue is set + if self._multivalue: + return True + + model = self.sourceModel() + index = model.index(row, self.filterKeyColumn(), parent_index) + # First check if item is folder and if folders are enabled + if index.data(IS_DIR_ROLE): + if not self._allow_folders: + return False + return True + + # Check if there are any allowed extensions + if self._allowed_extensions is None: + return False + + if index.data(EXT_ROLE) not in self._allowed_extensions: + return False + return True + + def lessThan(self, left, right): + left_comparison = left.data(DIRPATH_ROLE) + right_comparison = right.data(DIRPATH_ROLE) + if left_comparison == right_comparison: + left_comparison = left.data(ITEM_LABEL_ROLE) + right_comparison = right.data(ITEM_LABEL_ROLE) + + if sorted((left_comparison, right_comparison))[0] == left_comparison: + return True + return False + + +class ItemWidget(QtWidgets.QWidget): + context_menu_requested =
QtCore.Signal(QtCore.QPoint) + + def __init__( + self, item_id, label, pixmap_icon, is_sequence, multivalue, parent=None + ): + self._item_id = item_id + + super(ItemWidget, self).__init__(parent) + + self.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + icon_widget = PixmapLabel(pixmap_icon, self) + label_widget = QtWidgets.QLabel(label, self) + + label_size_hint = label_widget.sizeHint() + height = label_size_hint.height() + actions_menu_pix = paint_image_with_color( + get_image(filename="menu.png"), QtCore.Qt.white + ) + + split_btn = ClickableLabel(self) + split_btn.setFixedSize(height, height) + split_btn.setPixmap(actions_menu_pix) + if multivalue: + split_btn.setVisible(False) + else: + split_btn.setVisible(is_sequence) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(5, 5, 5, 5) + layout.addWidget(icon_widget, 0) + layout.addWidget(label_widget, 1) + layout.addWidget(split_btn, 0) + + split_btn.clicked.connect(self._on_actions_clicked) + + self._icon_widget = icon_widget + self._label_widget = label_widget + self._split_btn = split_btn + self._actions_menu_pix = actions_menu_pix + self._last_scaled_pix_height = None + + def _update_btn_size(self): + label_size_hint = self._label_widget.sizeHint() + height = label_size_hint.height() + if height == self._last_scaled_pix_height: + return + self._last_scaled_pix_height = height + self._split_btn.setFixedSize(height, height) + pix = self._actions_menu_pix.scaled( + height, height, + QtCore.Qt.KeepAspectRatio, + QtCore.Qt.SmoothTransformation + ) + self._split_btn.setPixmap(pix) + + def showEvent(self, event): + super(ItemWidget, self).showEvent(event) + self._update_btn_size() + + def resizeEvent(self, event): + super(ItemWidget, self).resizeEvent(event) + self._update_btn_size() + + def _on_actions_clicked(self): + pos = self._split_btn.rect().bottomLeft() + point = self._split_btn.mapToGlobal(pos) + self.context_menu_requested.emit(point) + + +class InViewButton(IconButton): + pass + + +class FilesView(QtWidgets.QListView): + """View showing added file items.""" + + remove_requested = QtCore.Signal() + context_menu_requested = QtCore.Signal(QtCore.QPoint) + + def __init__(self, *args, **kwargs): + super(FilesView, self).__init__(*args, **kwargs) + + self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) + self.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection + ) + self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setAcceptDrops(True) + self.setDragEnabled(True) + self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove) + + remove_btn = InViewButton(self) + pix_enabled = paint_image_with_color( + get_image(filename="delete.png"), QtCore.Qt.white + ) + pix_disabled = paint_image_with_color( + get_image(filename="delete.png"), QtCore.Qt.gray + ) + icon = QtGui.QIcon(pix_enabled) + icon.addPixmap(pix_disabled, QtGui.QIcon.Disabled, QtGui.QIcon.Off) + remove_btn.setIcon(icon) + remove_btn.setEnabled(False) + + remove_btn.clicked.connect(self._on_remove_clicked) + self.customContextMenuRequested.connect(self._on_context_menu_request) + + self._remove_btn = remove_btn + self._multivalue = False + + def setSelectionModel(self, *args, **kwargs): + """Catch selection model being set to register a signal callback. + + The selection model is not available during initialization.
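+ + Qt creates the selection model once a model is set on the view, so this override is the first safe place to connect to its 'selectionChanged' signal.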
+ """ + + super(FilesView, self).setSelectionModel(*args, **kwargs) + selection_model = self.selectionModel() + selection_model.selectionChanged.connect(self._on_selection_change) + + def set_multivalue(self, multivalue): + """Disable remove button on multivalue.""" + + self._multivalue = multivalue + self._remove_btn.setVisible(not multivalue) + + def update_remove_btn_visibility(self): + model = self.model() + visible = False + if not self._multivalue and model: + visible = model.rowCount() > 0 + self._remove_btn.setVisible(visible) + + def has_selected_item_ids(self): + """Is any index selected.""" + for index in self.selectionModel().selectedIndexes(): + instance_id = index.data(ITEM_ID_ROLE) + if instance_id is not None: + return True + return False + + def get_selected_item_ids(self): + """Ids of selected instances.""" + + selected_item_ids = set() + for index in self.selectionModel().selectedIndexes(): + instance_id = index.data(ITEM_ID_ROLE) + if instance_id is not None: + selected_item_ids.add(instance_id) + return selected_item_ids + + def has_selected_sequence(self): + for index in self.selectionModel().selectedIndexes(): + if index.data(IS_SEQUENCE_ROLE): + return True + return False + + def event(self, event): + if event.type() == QtCore.QEvent.KeyPress: + if ( + event.key() == QtCore.Qt.Key_Delete + and self.has_selected_item_ids() + ): + self.remove_requested.emit() + return True + + return super(FilesView, self).event(event) + + def _on_context_menu_request(self, pos): + index = self.indexAt(pos) + if index.isValid(): + point = self.viewport().mapToGlobal(pos) + self.context_menu_requested.emit(point) + + def _on_selection_change(self): + self._remove_btn.setEnabled(self.has_selected_item_ids()) + + def _on_remove_clicked(self): + self.remove_requested.emit() + + def _update_remove_btn(self): + """Position remove button to bottom right.""" + + viewport = self.viewport() + height = viewport.height() + pos_x = viewport.width() - self._remove_btn.width() - 5 + pos_y = height - self._remove_btn.height() - 5 + self._remove_btn.move(max(0, pos_x), max(0, pos_y)) + + def resizeEvent(self, event): + super(FilesView, self).resizeEvent(event) + self._update_remove_btn() + + def showEvent(self, event): + super(FilesView, self).showEvent(event) + self._update_remove_btn() + self.update_remove_btn_visibility() + + +class FilesWidget(QtWidgets.QFrame): + value_changed = QtCore.Signal() + + def __init__(self, single_item, allow_sequences, extensions_label, parent): + super(FilesWidget, self).__init__(parent) + self.setAcceptDrops(True) + + empty_widget = DropEmpty( + single_item, allow_sequences, extensions_label, self + ) + + files_model = FilesModel(single_item, allow_sequences) + files_proxy_model = FilesProxyModel() + files_proxy_model.setSourceModel(files_model) + files_view = FilesView(self) + files_view.setModel(files_proxy_model) + + layout = QtWidgets.QStackedLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setStackingMode(QtWidgets.QStackedLayout.StackAll) + layout.addWidget(empty_widget) + layout.addWidget(files_view) + layout.setCurrentWidget(empty_widget) + + files_proxy_model.rowsInserted.connect(self._on_rows_inserted) + files_proxy_model.rowsRemoved.connect(self._on_rows_removed) + files_view.remove_requested.connect(self._on_remove_requested) + files_view.context_menu_requested.connect( + self._on_context_menu_requested + ) + + self._in_set_value = False + self._single_item = single_item + self._multivalue = False + + self._empty_widget = empty_widget + 
self._files_model = files_model + self._files_proxy_model = files_proxy_model + self._files_view = files_view + + self._widgets_by_id = {} + + self._layout = layout + + def _set_multivalue(self, multivalue): + if self._multivalue is multivalue: + return + self._multivalue = multivalue + self._files_view.set_multivalue(multivalue) + self._files_model.set_multivalue(multivalue) + self._files_proxy_model.set_multivalue(multivalue) + self.setEnabled(not multivalue) + + def set_value(self, value, multivalue): + self._in_set_value = True + + widget_ids = set(self._widgets_by_id.keys()) + self._remove_item_by_ids(widget_ids) + + self._set_multivalue(multivalue) + + self._add_filepaths(value) + + self._in_set_value = False + + def current_value(self): + model = self._files_proxy_model + item_ids = set() + for row in range(model.rowCount()): + index = model.index(row, 0) + item_ids.add(index.data(ITEM_ID_ROLE)) + + file_items = [] + for item_id in item_ids: + file_item = self._files_model.get_file_item_by_id(item_id) + if file_item is not None: + file_items.append(file_item.to_dict()) + + if not self._single_item: + return file_items + if file_items: + return file_items[0] + + empty_item = FileDefItem.create_empty_item() + return empty_item.to_dict() + + def set_filters(self, folders_allowed, exts_filter): + self._files_proxy_model.set_allow_folders(folders_allowed) + self._files_proxy_model.set_allowed_extensions(exts_filter) + self._empty_widget.set_extensions(exts_filter) + self._empty_widget.set_allow_folders(folders_allowed) + + def _on_rows_inserted(self, parent_index, start_row, end_row): + for row in range(start_row, end_row + 1): + index = self._files_proxy_model.index(row, 0, parent_index) + item_id = index.data(ITEM_ID_ROLE) + if item_id in self._widgets_by_id: + continue + label = index.data(ITEM_LABEL_ROLE) + pixmap_icon = index.data(ITEM_ICON_ROLE) + is_sequence = index.data(IS_SEQUENCE_ROLE) + + widget = ItemWidget( + item_id, + label, + pixmap_icon, + is_sequence, + self._multivalue + ) + widget.context_menu_requested.connect( + self._on_context_menu_requested + ) + self._files_view.setIndexWidget(index, widget) + self._files_proxy_model.setData( + index, widget.sizeHint(), QtCore.Qt.SizeHintRole + ) + self._widgets_by_id[item_id] = widget + + if not self._in_set_value: + self.value_changed.emit() + + self._update_visibility() + + def _on_rows_removed(self, parent_index, start_row, end_row): + available_item_ids = set() + for row in range(self._files_proxy_model.rowCount()): + index = self._files_proxy_model.index(row, 0) + item_id = index.data(ITEM_ID_ROLE) + available_item_ids.add(index.data(ITEM_ID_ROLE)) + + widget_ids = set(self._widgets_by_id.keys()) + for item_id in available_item_ids: + if item_id in widget_ids: + widget_ids.remove(item_id) + + for item_id in widget_ids: + widget = self._widgets_by_id.pop(item_id) + widget.setVisible(False) + widget.deleteLater() + + if not self._in_set_value: + self.value_changed.emit() + self._update_visibility() + + def _on_split_request(self): + if self._multivalue: + return + + item_ids = self._files_view.get_selected_item_ids() + if not item_ids: + return + + for item_id in item_ids: + file_item = self._files_model.get_file_item_by_id(item_id) + if not file_item: + return + + new_items = file_item.split_sequence() + self._add_filepaths(new_items) + self._remove_item_by_ids(item_ids) + + def _on_remove_requested(self): + if self._multivalue: + return + + items_to_delete = self._files_view.get_selected_item_ids() + if items_to_delete: + 
self._remove_item_by_ids(items_to_delete) + + def _on_context_menu_requested(self, pos): + if self._multivalue: + return + + menu = QtWidgets.QMenu(self._files_view) + + if self._files_view.has_selected_sequence(): + split_action = QtWidgets.QAction("Split sequence", menu) + split_action.triggered.connect(self._on_split_request) + menu.addAction(split_action) + + remove_action = QtWidgets.QAction("Remove", menu) + remove_action.triggered.connect(self._on_remove_requested) + menu.addAction(remove_action) + + menu.popup(pos) + + def dragEnterEvent(self, event): + if self._multivalue: + return + + mime_data = event.mimeData() + if mime_data.hasUrls(): + filepaths = [] + for url in mime_data.urls(): + filepath = url.toLocalFile() + if os.path.exists(filepath): + filepaths.append(filepath) + + if self._files_proxy_model.are_valid_files(filepaths): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + full_data_value = mime_data.data("files_widget/full_data") + if self._handle_full_data_drag(full_data_value): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + def dragLeaveEvent(self, event): + event.accept() + + def dropEvent(self, event): + if self._multivalue: + return + + mime_data = event.mimeData() + if mime_data.hasUrls(): + event.accept() + filepaths = [] + for url in mime_data.urls(): + filepath = url.toLocalFile() + if os.path.exists(filepath): + filepaths.append(filepath) + + # Filter filepaths before passing it to model + filepaths = self._files_proxy_model.filter_valid_files(filepaths) + if filepaths: + self._add_filepaths(filepaths) + + if self._handle_full_data_drop( + mime_data.data("files_widget/full_data") + ): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + super(FilesWidget, self).dropEvent(event) + + def _handle_full_data_drag(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + return True + + def _handle_full_data_drop(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + + for item in full_data["items"]: + filepaths = [ + os.path.join(item["directory"], filename) + for filename in item["filenames"] + ] + filepaths = self._files_proxy_model.filter_valid_files(filepaths) + if filepaths: + self._add_filepaths(filepaths) + + if self._copy_modifiers_enabled(): + return False + return True + + def _copy_modifiers_enabled(self): + if ( + QtWidgets.QApplication.keyboardModifiers() + & QtCore.Qt.ControlModifier + ): + return True + return False + + def _add_filepaths(self, filepaths): + self._files_model.add_filepaths(filepaths) + + def _remove_item_by_ids(self, item_ids): + self._files_model.remove_item_by_ids(item_ids) + + def _update_visibility(self): + files_exists = self._files_proxy_model.rowCount() > 0 + if files_exists: + current_widget = self._files_view + else: + current_widget = self._empty_widget + self._layout.setCurrentWidget(current_widget) + self._files_view.update_remove_btn_visibility() diff --git a/client/ayon_core/tools/attribute_defs/widgets.py b/client/ayon_core/tools/attribute_defs/widgets.py new file mode 100644 index 0000000000..5ead3f46a6 --- /dev/null +++ b/client/ayon_core/tools/attribute_defs/widgets.py @@ -0,0 +1,661 @@ +import copy + +from qtpy import QtWidgets, QtCore + +from ayon_core.lib.attribute_definitions import ( + 
AbstractAttrDef, + UnknownDef, + HiddenDef, + NumberDef, + TextDef, + EnumDef, + BoolDef, + FileDef, + UIDef, + UISeparatorDef, + UILabelDef +) +from ayon_core.tools.utils import ( + CustomTextComboBox, + FocusSpinBox, + FocusDoubleSpinBox, + MultiSelectionComboBox, +) +from ayon_core.tools.utils import NiceCheckbox + +from .files_widget import FilesWidget + + +def create_widget_for_attr_def(attr_def, parent=None): + widget = _create_widget_for_attr_def(attr_def, parent) + if attr_def.hidden: + widget.setVisible(False) + + if attr_def.disabled: + widget.setEnabled(False) + return widget + + +def _create_widget_for_attr_def(attr_def, parent=None): + if not isinstance(attr_def, AbstractAttrDef): + raise TypeError("Unexpected type \"{}\" expected \"{}\"".format( + str(type(attr_def)), AbstractAttrDef + )) + + if isinstance(attr_def, NumberDef): + return NumberAttrWidget(attr_def, parent) + + if isinstance(attr_def, TextDef): + return TextAttrWidget(attr_def, parent) + + if isinstance(attr_def, EnumDef): + return EnumAttrWidget(attr_def, parent) + + if isinstance(attr_def, BoolDef): + return BoolAttrWidget(attr_def, parent) + + if isinstance(attr_def, UnknownDef): + return UnknownAttrWidget(attr_def, parent) + + if isinstance(attr_def, HiddenDef): + return HiddenAttrWidget(attr_def, parent) + + if isinstance(attr_def, FileDef): + return FileAttrWidget(attr_def, parent) + + if isinstance(attr_def, UISeparatorDef): + return SeparatorAttrWidget(attr_def, parent) + + if isinstance(attr_def, UILabelDef): + return LabelAttrWidget(attr_def, parent) + + raise ValueError("Unknown attribute definition \"{}\"".format( + str(type(attr_def)) + )) + + +class AttributeDefinitionsWidget(QtWidgets.QWidget): + """Create widgets for attribute definitions in grid layout. + + Widget creates input widgets for passed attribute definitions. + + Widget can't handle multiselection values. 
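For orientation, a minimal usage sketch (the attribute definition
    constructor signatures are assumptions based on
    'ayon_core.lib.attribute_definitions', which is not shown in this diff):

        attr_defs = [
            NumberDef("count", label="Count", default=1),
            BoolDef("active", label="Active", default=True),
        ]
        widget = AttributeDefinitionsWidget(attr_defs)
        widget.set_value({"count": 5})
        print(widget.current_value())  # {"count": 5, "active": True}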
+ """ + + def __init__(self, attr_defs=None, parent=None): + super(AttributeDefinitionsWidget, self).__init__(parent) + + self._widgets = [] + self._current_keys = set() + + self.set_attr_defs(attr_defs) + + def clear_attr_defs(self): + """Remove all existing widgets and reset layout if needed.""" + self._widgets = [] + self._current_keys = set() + + layout = self.layout() + if layout is not None: + if layout.count() == 0: + return + + while layout.count(): + item = layout.takeAt(0) + widget = item.widget() + if widget: + widget.setVisible(False) + widget.deleteLater() + + layout.deleteLater() + + new_layout = QtWidgets.QGridLayout() + new_layout.setColumnStretch(0, 0) + new_layout.setColumnStretch(1, 1) + self.setLayout(new_layout) + + def set_attr_defs(self, attr_defs): + """Replace current attribute definitions with passed.""" + self.clear_attr_defs() + if attr_defs: + self.add_attr_defs(attr_defs) + + def add_attr_defs(self, attr_defs): + """Add attribute definitions to current.""" + layout = self.layout() + + row = 0 + for attr_def in attr_defs: + if attr_def.is_value_def: + if attr_def.key in self._current_keys: + raise KeyError( + "Duplicated key \"{}\"".format(attr_def.key)) + + self._current_keys.add(attr_def.key) + widget = create_widget_for_attr_def(attr_def, self) + self._widgets.append(widget) + + if attr_def.hidden: + continue + + expand_cols = 2 + if attr_def.is_value_def and attr_def.is_label_horizontal: + expand_cols = 1 + + col_num = 2 - expand_cols + + if attr_def.is_value_def and attr_def.label: + label_widget = QtWidgets.QLabel(attr_def.label, self) + tooltip = attr_def.tooltip + if tooltip: + label_widget.setToolTip(tooltip) + if attr_def.is_label_horizontal: + label_widget.setAlignment( + QtCore.Qt.AlignRight + | QtCore.Qt.AlignVCenter + ) + layout.addWidget( + label_widget, row, 0, 1, expand_cols + ) + if not attr_def.is_label_horizontal: + row += 1 + + layout.addWidget( + widget, row, col_num, 1, expand_cols + ) + row += 1 + + def set_value(self, value): + new_value = copy.deepcopy(value) + unused_keys = set(new_value.keys()) + for widget in self._widgets: + attr_def = widget.attr_def + if attr_def.key not in new_value: + continue + unused_keys.remove(attr_def.key) + + widget_value = new_value[attr_def.key] + if widget_value is None: + widget_value = copy.deepcopy(attr_def.default) + widget.set_value(widget_value) + + def current_value(self): + output = {} + for widget in self._widgets: + attr_def = widget.attr_def + if not isinstance(attr_def, UIDef): + output[attr_def.key] = widget.current_value() + + return output + + +class _BaseAttrDefWidget(QtWidgets.QWidget): + # Type 'object' may not work with older PySide versions + value_changed = QtCore.Signal(object, str) + + def __init__(self, attr_def, parent): + super(_BaseAttrDefWidget, self).__init__(parent) + + self.attr_def = attr_def + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + + self.main_layout = main_layout + + self._ui_init() + + def _ui_init(self): + raise NotImplementedError( + "Method '_ui_init' is not implemented. {}".format( + self.__class__.__name__ + ) + ) + + def current_value(self): + raise NotImplementedError( + "Method 'current_value' is not implemented. {}".format( + self.__class__.__name__ + ) + ) + + def set_value(self, value, multivalue=False): + raise NotImplementedError( + "Method 'set_value' is not implemented. 
{}".format( + self.__class__.__name__ + ) + ) + + +class SeparatorAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + input_widget = QtWidgets.QWidget(self) + input_widget.setObjectName("Separator") + input_widget.setMinimumHeight(2) + input_widget.setMaximumHeight(2) + + self._input_widget = input_widget + + self.main_layout.addWidget(input_widget, 0) + + +class LabelAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + input_widget = QtWidgets.QLabel(self) + label = self.attr_def.label + if label: + input_widget.setText(str(label)) + + self._input_widget = input_widget + + self.main_layout.addWidget(input_widget, 0) + + +class ClickableLineEdit(QtWidgets.QLineEdit): + clicked = QtCore.Signal() + + def __init__(self, text, parent): + super(ClickableLineEdit, self).__init__(parent) + self.setText(text) + self.setReadOnly(True) + + self._mouse_pressed = False + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self._mouse_pressed = True + super(ClickableLineEdit, self).mousePressEvent(event) + + def mouseReleaseEvent(self, event): + if self._mouse_pressed: + self._mouse_pressed = False + if self.rect().contains(event.pos()): + self.clicked.emit() + + super(ClickableLineEdit, self).mouseReleaseEvent(event) + + +class NumberAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + decimals = self.attr_def.decimals + if decimals > 0: + input_widget = FocusDoubleSpinBox(self) + input_widget.setDecimals(decimals) + else: + input_widget = FocusSpinBox(self) + + if self.attr_def.tooltip: + input_widget.setToolTip(self.attr_def.tooltip) + + input_widget.setMinimum(self.attr_def.minimum) + input_widget.setMaximum(self.attr_def.maximum) + input_widget.setValue(self.attr_def.default) + + input_widget.setButtonSymbols( + QtWidgets.QAbstractSpinBox.ButtonSymbols.NoButtons + ) + input_line_edit = input_widget.lineEdit() + input_widget.installEventFilter(self) + + multisel_widget = ClickableLineEdit("< Multiselection >", self) + multisel_widget.setVisible(False) + + input_widget.valueChanged.connect(self._on_value_change) + multisel_widget.clicked.connect(self._on_multi_click) + + self._input_widget = input_widget + self._input_line_edit = input_line_edit + self._multisel_widget = multisel_widget + self._last_multivalue = None + self._multivalue = False + + self.main_layout.addWidget(input_widget, 0) + self.main_layout.addWidget(multisel_widget, 0) + + def eventFilter(self, obj, event): + if ( + self._multivalue + and obj is self._input_widget + and event.type() == QtCore.QEvent.FocusOut + ): + self._set_multiselection_visible(True) + return False + + def current_value(self): + return self._input_widget.value() + + def set_value(self, value, multivalue=False): + self._last_multivalue = None + if multivalue: + set_value = set(value) + if None in set_value: + set_value.remove(None) + set_value.add(self.attr_def.default) + + if len(set_value) > 1: + self._last_multivalue = next(iter(set_value), None) + self._set_multiselection_visible(True) + self._multivalue = True + return + value = tuple(set_value)[0] + + self._multivalue = False + self._set_multiselection_visible(False) + + if self.current_value != value: + self._input_widget.setValue(value) + + def _on_value_change(self, new_value): + self._multivalue = False + self.value_changed.emit(new_value, self.attr_def.id) + + def _on_multi_click(self): + self._set_multiselection_visible(False, True) + + def _set_multiselection_visible(self, visible, change_focus=False): + self._input_widget.setVisible(not visible) + 
self._multisel_widget.setVisible(visible) + if visible: + return + + # Change value once user clicked on the input field + if self._last_multivalue is None: + value = self.attr_def.default + else: + value = self._last_multivalue + self._input_widget.blockSignals(True) + self._input_widget.setValue(value) + self._input_widget.blockSignals(False) + if not change_focus: + return + # Change focus to input field and move cursor to the end + self._input_widget.setFocus(QtCore.Qt.MouseFocusReason) + self._input_line_edit.setCursorPosition( + len(self._input_line_edit.text()) + ) + + +class TextAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + # TODO Solve how to handle regex + # self.attr_def.regex + + self.multiline = self.attr_def.multiline + if self.multiline: + input_widget = QtWidgets.QPlainTextEdit(self) + else: + input_widget = QtWidgets.QLineEdit(self) + + if ( + self.attr_def.placeholder + and hasattr(input_widget, "setPlaceholderText") + ): + input_widget.setPlaceholderText(self.attr_def.placeholder) + + if self.attr_def.tooltip: + input_widget.setToolTip(self.attr_def.tooltip) + + if self.attr_def.default: + if self.multiline: + input_widget.setPlainText(self.attr_def.default) + else: + input_widget.setText(self.attr_def.default) + + input_widget.textChanged.connect(self._on_value_change) + + self._input_widget = input_widget + + self.main_layout.addWidget(input_widget, 0) + + def _on_value_change(self): + if self.multiline: + new_value = self._input_widget.toPlainText() + else: + new_value = self._input_widget.text() + self.value_changed.emit(new_value, self.attr_def.id) + + def current_value(self): + if self.multiline: + return self._input_widget.toPlainText() + return self._input_widget.text() + + def set_value(self, value, multivalue=False): + block_signals = False + if multivalue: + set_value = set(value) + if None in set_value: + set_value.remove(None) + set_value.add(self.attr_def.default) + + if len(set_value) == 1: + value = tuple(set_value)[0] + else: + block_signals = True + value = "< Multiselection >" + + if value != self.current_value(): + if block_signals: + self._input_widget.blockSignals(True) + if self.multiline: + self._input_widget.setPlainText(value) + else: + self._input_widget.setText(value) + if block_signals: + self._input_widget.blockSignals(False) + + +class BoolAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + input_widget = NiceCheckbox(parent=self) + input_widget.setChecked(self.attr_def.default) + + if self.attr_def.tooltip: + input_widget.setToolTip(self.attr_def.tooltip) + + input_widget.stateChanged.connect(self._on_value_change) + + self._input_widget = input_widget + + self.main_layout.addWidget(input_widget, 0) + self.main_layout.addStretch(1) + + def _on_value_change(self): + new_value = self._input_widget.isChecked() + self.value_changed.emit(new_value, self.attr_def.id) + + def current_value(self): + return self._input_widget.isChecked() + + def set_value(self, value, multivalue=False): + if multivalue: + set_value = set(value) + if None in set_value: + set_value.remove(None) + set_value.add(self.attr_def.default) + + if len(set_value) > 1: + self._input_widget.blockSignals(True) + self._input_widget.setCheckState(QtCore.Qt.PartiallyChecked) + self._input_widget.blockSignals(False) + return + value = tuple(set_value)[0] + + if value != self.current_value(): + self._input_widget.setChecked(value) + + +class EnumAttrWidget(_BaseAttrDefWidget): + def __init__(self, *args, **kwargs): + self._multivalue = False + super(EnumAttrWidget, 
self).__init__(*args, **kwargs) + + @property + def multiselection(self): + return self.attr_def.multiselection + + def _ui_init(self): + if self.multiselection: + input_widget = MultiSelectionComboBox(self) + + else: + input_widget = CustomTextComboBox(self) + combo_delegate = QtWidgets.QStyledItemDelegate(input_widget) + input_widget.setItemDelegate(combo_delegate) + self._combo_delegate = combo_delegate + + if self.attr_def.tooltip: + input_widget.setToolTip(self.attr_def.tooltip) + + for item in self.attr_def.items: + input_widget.addItem(item["label"], item["value"]) + + idx = input_widget.findData(self.attr_def.default) + if idx >= 0: + input_widget.setCurrentIndex(idx) + + if self.multiselection: + input_widget.value_changed.connect(self._on_value_change) + else: + input_widget.currentIndexChanged.connect(self._on_value_change) + + self._input_widget = input_widget + + self.main_layout.addWidget(input_widget, 0) + + def _on_value_change(self): + new_value = self.current_value() + if self._multivalue: + self._multivalue = False + self._input_widget.set_custom_text(None) + self.value_changed.emit(new_value, self.attr_def.id) + + def current_value(self): + if self.multiselection: + return self._input_widget.value() + idx = self._input_widget.currentIndex() + return self._input_widget.itemData(idx) + + def _multiselection_multivalue_prep(self, values): + final = None + multivalue = False + for value in values: + value = set(value) + if final is None: + final = value + elif multivalue or final != value: + final |= value + multivalue = True + return list(final), multivalue + + def set_value(self, value, multivalue=False): + if multivalue: + if self.multiselection: + value, multivalue = self._multiselection_multivalue_prep( + value) + else: + set_value = set(value) + if len(set_value) == 1: + multivalue = False + value = tuple(set_value)[0] + + if self.multiselection: + self._input_widget.blockSignals(True) + self._input_widget.set_value(value) + self._input_widget.blockSignals(False) + + elif not multivalue: + idx = self._input_widget.findData(value) + cur_idx = self._input_widget.currentIndex() + if idx != cur_idx and idx >= 0: + self._input_widget.setCurrentIndex(idx) + + custom_text = None + if multivalue: + custom_text = "< Multiselection >" + self._input_widget.set_custom_text(custom_text) + self._multivalue = multivalue + + +class UnknownAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + input_widget = QtWidgets.QLabel(self) + self._value = self.attr_def.default + input_widget.setText(str(self._value)) + + self._input_widget = input_widget + + self.main_layout.addWidget(input_widget, 0) + + def current_value(self): + raise ValueError( + "{} can't hold real value.".format(self.__class__.__name__) + ) + + def set_value(self, value, multivalue=False): + if multivalue: + set_value = set(value) + if len(set_value) == 1: + value = tuple(set_value)[0] + else: + value = "< Multiselection >" + + str_value = str(value) + if str_value != self._value: + self._value = str_value + self._input_widget.setText(str_value) + + +class HiddenAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + self.setVisible(False) + self._value = self.attr_def.default + self._multivalue = False + + def setVisible(self, visible): + if visible: + visible = False + super(HiddenAttrWidget, self).setVisible(visible) + + def current_value(self): + if self._multivalue: + raise ValueError("{} can't output for multivalue.".format( + self.__class__.__name__ + )) + return self._value + + def set_value(self, value, 
multivalue=False): + self._value = copy.deepcopy(value) + self._multivalue = multivalue + + +class FileAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + input_widget = FilesWidget( + self.attr_def.single_item, + self.attr_def.allow_sequences, + self.attr_def.extensions_label, + self + ) + + if self.attr_def.tooltip: + input_widget.setToolTip(self.attr_def.tooltip) + + input_widget.set_filters( + self.attr_def.folders, self.attr_def.extensions + ) + + input_widget.value_changed.connect(self._on_value_change) + + self._input_widget = input_widget + + self.main_layout.addWidget(input_widget, 0) + + def _on_value_change(self): + new_value = self.current_value() + self.value_changed.emit(new_value, self.attr_def.id) + + def current_value(self): + return self._input_widget.current_value() + + def set_value(self, value, multivalue=False): + self._input_widget.set_value(value, multivalue) diff --git a/openpype/tools/ayon_utils/models/__init__.py b/client/ayon_core/tools/ayon_utils/models/__init__.py similarity index 100% rename from openpype/tools/ayon_utils/models/__init__.py rename to client/ayon_core/tools/ayon_utils/models/__init__.py diff --git a/openpype/tools/ayon_utils/models/cache.py b/client/ayon_core/tools/ayon_utils/models/cache.py similarity index 100% rename from openpype/tools/ayon_utils/models/cache.py rename to client/ayon_core/tools/ayon_utils/models/cache.py diff --git a/openpype/tools/ayon_utils/models/hierarchy.py b/client/ayon_core/tools/ayon_utils/models/hierarchy.py similarity index 99% rename from openpype/tools/ayon_utils/models/hierarchy.py rename to client/ayon_core/tools/ayon_utils/models/hierarchy.py index fc6b8e1eb7..07773dfb78 100644 --- a/openpype/tools/ayon_utils/models/hierarchy.py +++ b/client/ayon_core/tools/ayon_utils/models/hierarchy.py @@ -5,7 +5,7 @@ import ayon_api import six -from openpype.style import get_default_entity_icon_color +from ayon_core.style import get_default_entity_icon_color from .cache import NestedCacheItem diff --git a/openpype/tools/ayon_utils/models/projects.py b/client/ayon_core/tools/ayon_utils/models/projects.py similarity index 98% rename from openpype/tools/ayon_utils/models/projects.py rename to client/ayon_core/tools/ayon_utils/models/projects.py index 36d53edc24..e30561000e 100644 --- a/openpype/tools/ayon_utils/models/projects.py +++ b/client/ayon_core/tools/ayon_utils/models/projects.py @@ -4,7 +4,7 @@ import ayon_api import six -from openpype.style import get_default_entity_icon_color +from ayon_core.style import get_default_entity_icon_color from .cache import CacheItem diff --git a/openpype/tools/ayon_utils/models/selection.py b/client/ayon_core/tools/ayon_utils/models/selection.py similarity index 100% rename from openpype/tools/ayon_utils/models/selection.py rename to client/ayon_core/tools/ayon_utils/models/selection.py diff --git a/openpype/tools/ayon_utils/models/thumbnails.py b/client/ayon_core/tools/ayon_utils/models/thumbnails.py similarity index 98% rename from openpype/tools/ayon_utils/models/thumbnails.py rename to client/ayon_core/tools/ayon_utils/models/thumbnails.py index 40892338df..86d6f3cba3 100644 --- a/openpype/tools/ayon_utils/models/thumbnails.py +++ b/client/ayon_core/tools/ayon_utils/models/thumbnails.py @@ -2,7 +2,7 @@ import ayon_api -from openpype.client.server.thumbnails import AYONThumbnailCache +from ayon_core.client.thumbnails import AYONThumbnailCache from .cache import NestedCacheItem diff --git a/openpype/tools/ayon_utils/widgets/__init__.py 
b/client/ayon_core/tools/ayon_utils/widgets/__init__.py similarity index 100% rename from openpype/tools/ayon_utils/widgets/__init__.py rename to client/ayon_core/tools/ayon_utils/widgets/__init__.py diff --git a/client/ayon_core/tools/ayon_utils/widgets/folders_widget.py b/client/ayon_core/tools/ayon_utils/widgets/folders_widget.py new file mode 100644 index 0000000000..1e395b0368 --- /dev/null +++ b/client/ayon_core/tools/ayon_utils/widgets/folders_widget.py @@ -0,0 +1,516 @@ +import collections + +from qtpy import QtWidgets, QtGui, QtCore + +from ayon_core.tools.utils import ( + RecursiveSortFilterProxyModel, + TreeView, +) + +from .utils import RefreshThread, get_qt_icon + +FOLDERS_MODEL_SENDER_NAME = "qt_folders_model" +FOLDER_ID_ROLE = QtCore.Qt.UserRole + 1 +FOLDER_NAME_ROLE = QtCore.Qt.UserRole + 2 +FOLDER_PATH_ROLE = QtCore.Qt.UserRole + 3 +FOLDER_TYPE_ROLE = QtCore.Qt.UserRole + 4 + + +class FoldersQtModel(QtGui.QStandardItemModel): + """Folders model which takes care of refreshing folders. + + Args: + controller (AbstractWorkfilesFrontend): The control object. + """ + + refreshed = QtCore.Signal() + + def __init__(self, controller): + super(FoldersQtModel, self).__init__() + + self._controller = controller + self._items_by_id = {} + self._parent_id_by_id = {} + + self._refresh_threads = {} + self._current_refresh_thread = None + self._last_project_name = None + + self._has_content = False + self._is_refreshing = False + + @property + def is_refreshing(self): + """Model is refreshing. + + Returns: + bool: True if model is refreshing. + """ + return self._is_refreshing + + @property + def has_content(self): + """Has at least one folder. + + Returns: + bool: True if model has at least one folder. + """ + + return self._has_content + + def refresh(self): + """Refresh folders for last selected project. + + Force an update of the folders model from the controller. This may + or may not trigger a server query, depending on the controller's + cache. + """ + + self.set_project_name(self._last_project_name) + + def _clear_items(self): + self._items_by_id = {} + self._parent_id_by_id = {} + self._has_content = False + root_item = self.invisibleRootItem() + root_item.removeRows(0, root_item.rowCount()) + + def get_index_by_id(self, item_id): + """Get index by folder id. + + Returns: + QtCore.QModelIndex: Index of the folder. Can be invalid if folder + is not available. + """ + item = self._items_by_id.get(item_id) + if item is None: + return QtCore.QModelIndex() + return self.indexFromItem(item) + + def get_project_name(self): + """Project name which the model currently uses. + + Returns: + Union[str, None]: Currently used project name. + """ + + return self._last_project_name + + def set_project_name(self, project_name): + """Refresh folder items. + + Refresh starts a thread because the controller may trigger a + database query if folders are not cached. 
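A sketch of the intended call flow (the controller object, the
        'on_refreshed' callback and the project name are placeholders; any
        controller providing 'get_folder_items' should do):

            model = FoldersQtModel(controller)
            model.refreshed.connect(on_refreshed)
            model.set_project_name("demo_project")
            # Later, to force an update for the same project:
            model.refresh()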
+ """ + + if not project_name: + self._last_project_name = project_name + self._fill_items({}) + self._current_refresh_thread = None + return + + self._is_refreshing = True + + if self._last_project_name != project_name: + self._clear_items() + self._last_project_name = project_name + + thread = self._refresh_threads.get(project_name) + if thread is not None: + self._current_refresh_thread = thread + return + + thread = RefreshThread( + project_name, + self._controller.get_folder_items, + project_name, + FOLDERS_MODEL_SENDER_NAME + ) + self._current_refresh_thread = thread + self._refresh_threads[thread.id] = thread + thread.refresh_finished.connect(self._on_refresh_thread) + thread.start() + + def _on_refresh_thread(self, thread_id): + """Callback when refresh thread is finished. + + Technically can be running multiple refresh threads at the same time, + to avoid using values from wrong thread, we check if thread id is + current refresh thread id. + + Folders are stored by id. + + Args: + thread_id (str): Thread id. + """ + + # Make sure to remove thread from '_refresh_threads' dict + thread = self._refresh_threads.pop(thread_id) + if ( + self._current_refresh_thread is None + or thread_id != self._current_refresh_thread.id + ): + return + + self._fill_items(thread.get_result()) + self._current_refresh_thread = None + + def _fill_item_data(self, item, folder_item): + """ + + Args: + item (QtGui.QStandardItem): Item to fill data. + folder_item (FolderItem): Folder item. + """ + + icon = get_qt_icon(folder_item.icon) + item.setData(folder_item.entity_id, FOLDER_ID_ROLE) + item.setData(folder_item.name, FOLDER_NAME_ROLE) + item.setData(folder_item.path, FOLDER_PATH_ROLE) + item.setData(folder_item.folder_type, FOLDER_TYPE_ROLE) + item.setData(folder_item.label, QtCore.Qt.DisplayRole) + item.setData(icon, QtCore.Qt.DecorationRole) + + def _fill_items(self, folder_items_by_id): + if not folder_items_by_id: + if folder_items_by_id is not None: + self._clear_items() + self._is_refreshing = False + self.refreshed.emit() + return + + self._has_content = True + + folder_ids = set(folder_items_by_id) + ids_to_remove = set(self._items_by_id) - folder_ids + + folder_items_by_parent = collections.defaultdict(dict) + for folder_item in folder_items_by_id.values(): + ( + folder_items_by_parent + [folder_item.parent_id] + [folder_item.entity_id] + ) = folder_item + + hierarchy_queue = collections.deque() + hierarchy_queue.append((self.invisibleRootItem(), None)) + + # Keep pointers to removed items until the refresh finishes + # - some children of the items could be moved and reused elsewhere + removed_items = [] + while hierarchy_queue: + item = hierarchy_queue.popleft() + parent_item, parent_id = item + folder_items = folder_items_by_parent[parent_id] + + items_by_id = {} + folder_ids_to_add = set(folder_items) + for row_idx in reversed(range(parent_item.rowCount())): + child_item = parent_item.child(row_idx) + child_id = child_item.data(FOLDER_ID_ROLE) + if child_id in ids_to_remove: + removed_items.append(parent_item.takeRow(row_idx)) + else: + items_by_id[child_id] = child_item + + new_items = [] + for item_id in folder_ids_to_add: + folder_item = folder_items[item_id] + item = items_by_id.get(item_id) + if item is None: + is_new = True + item = QtGui.QStandardItem() + item.setEditable(False) + else: + is_new = self._parent_id_by_id[item_id] != parent_id + + self._fill_item_data(item, folder_item) + if is_new: + new_items.append(item) + self._items_by_id[item_id] = item + 
self._parent_id_by_id[item_id] = parent_id + + hierarchy_queue.append((item, item_id)) + + if new_items: + parent_item.appendRows(new_items) + + for item_id in ids_to_remove: + self._items_by_id.pop(item_id) + self._parent_id_by_id.pop(item_id) + + self._is_refreshing = False + self.refreshed.emit() + + +class FoldersWidget(QtWidgets.QWidget): + """Folders widget. + + Widget that handles folders view, model and selection. + + Expected selection handling is disabled by default. If enabled, the + widget will handle the expected selection in a predefined way. The widget + listens to the 'expected_selection_changed' event with the event data + below; the same data must be available when the method + 'get_expected_selection_data' is called on the controller. + + { + "folder": { + "current": bool, # Folder is what should be set now + "id": Union[str, None], # Folder id that should be selected + }, + ... + } + + Selection is confirmed by calling method 'expected_folder_selected' on + controller. + + Args: + controller (AbstractWorkfilesFrontend): The control object. + parent (QtWidgets.QWidget): The parent widget. + handle_expected_selection (bool): If True, the widget will handle + the expected selection. Defaults to False. + """ + + double_clicked = QtCore.Signal(QtGui.QMouseEvent) + selection_changed = QtCore.Signal() + refreshed = QtCore.Signal() + + def __init__(self, controller, parent, handle_expected_selection=False): + super(FoldersWidget, self).__init__(parent) + + folders_view = TreeView(self) + folders_view.setHeaderHidden(True) + + folders_model = FoldersQtModel(controller) + folders_proxy_model = RecursiveSortFilterProxyModel() + folders_proxy_model.setSourceModel(folders_model) + folders_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + folders_view.setModel(folders_proxy_model) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(folders_view, 1) + + controller.register_event_callback( + "selection.project.changed", + self._on_project_selection_change, + ) + controller.register_event_callback( + "folders.refresh.finished", + self._on_folders_refresh_finished + ) + controller.register_event_callback( + "controller.refresh.finished", + self._on_controller_refresh + ) + controller.register_event_callback( + "expected_selection_changed", + self._on_expected_selection_change + ) + + selection_model = folders_view.selectionModel() + selection_model.selectionChanged.connect(self._on_selection_change) + folders_view.double_clicked.connect(self.double_clicked) + folders_model.refreshed.connect(self._on_model_refresh) + + self._controller = controller + self._folders_view = folders_view + self._folders_model = folders_model + self._folders_proxy_model = folders_proxy_model + + self._handle_expected_selection = handle_expected_selection + self._expected_selection = None + + @property + def is_refreshing(self): + """Model is refreshing. + + Returns: + bool: True if model is refreshing. + """ + + return self._folders_model.is_refreshing + + @property + def has_content(self): + """Has at least one folder. + + Returns: + bool: True if model has at least one folder. + """ + + return self._folders_model.has_content + + def set_name_filter(self, name): + """Set filter of folder name. + + Args: + name (str): The string filter. + """ + + self._folders_proxy_model.setFilterFixedString(name) + + def refresh(self): + """Refresh folders model. + + Force an update of the folders model from the controller. 
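A minimal wiring sketch (the controller must provide the event
        callbacks and 'get_folder_items' used above; names here are
        placeholders):

            widget = FoldersWidget(controller, parent=None)
            widget.set_project_name("demo_project")
            widget.selection_changed.connect(
                lambda: print(widget.get_selected_folder_id())
            )
            widget.refresh()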
+ """ + + self._folders_model.refresh() + + def get_project_name(self): + """Project name in which folders widget currently is. + + Returns: + Union[str, None]: Currently used project name. + """ + + return self._folders_model.get_project_name() + + def set_project_name(self, project_name): + """Set project name. + + Do not use this method when controller is handling selection of + project using 'selection.project.changed' event. + + Args: + project_name (str): Project name. + """ + + self._folders_model.set_project_name(project_name) + + def get_selected_folder_id(self): + """Get selected folder id. + + Returns: + Union[str, None]: Folder id which is selected. + """ + + return self._get_selected_item_id() + + def get_selected_folder_label(self): + """Selected folder label. + + Returns: + Union[str, None]: Selected folder label. + """ + + item_id = self._get_selected_item_id() + return self.get_folder_label(item_id) + + def get_folder_label(self, folder_id): + """Folder label for a given folder id. + + Returns: + Union[str, None]: Folder label. + """ + + index = self._folders_model.get_index_by_id(folder_id) + if index.isValid(): + return index.data(QtCore.Qt.DisplayRole) + return None + + def set_selected_folder(self, folder_id): + """Change selection. + + Args: + folder_id (Union[str, None]): Folder id or None to deselect. + """ + + if folder_id is None: + self._folders_view.clearSelection() + return True + + if folder_id == self._get_selected_item_id(): + return True + index = self._folders_model.get_index_by_id(folder_id) + if not index.isValid(): + return False + + proxy_index = self._folders_proxy_model.mapFromSource(index) + if not proxy_index.isValid(): + return False + + selection_model = self._folders_view.selectionModel() + selection_model.setCurrentIndex( + proxy_index, QtCore.QItemSelectionModel.SelectCurrent + ) + return True + + def set_deselectable(self, enabled): + """Set deselectable mode. + + Items in view can be deselected. + + Args: + enabled (bool): Enable deselectable mode. 
+ """ + + self._folders_view.set_deselectable(enabled) + + def _get_selected_index(self): + return self._folders_model.get_index_by_id( + self.get_selected_folder_id() + ) + + def _on_project_selection_change(self, event): + project_name = event["project_name"] + self.set_project_name(project_name) + + def _on_folders_refresh_finished(self, event): + if event["sender"] != FOLDERS_MODEL_SENDER_NAME: + self.set_project_name(event["project_name"]) + + def _on_controller_refresh(self): + self._update_expected_selection() + + def _on_model_refresh(self): + if self._expected_selection: + self._set_expected_selection() + self._folders_proxy_model.sort(0) + self.refreshed.emit() + + def _get_selected_item_id(self): + selection_model = self._folders_view.selectionModel() + for index in selection_model.selectedIndexes(): + item_id = index.data(FOLDER_ID_ROLE) + if item_id is not None: + return item_id + return None + + def _on_selection_change(self): + item_id = self._get_selected_item_id() + self._controller.set_selected_folder(item_id) + self.selection_changed.emit() + + # Expected selection handling + def _on_expected_selection_change(self, event): + self._update_expected_selection(event.data) + + def _update_expected_selection(self, expected_data=None): + if not self._handle_expected_selection: + return + + if expected_data is None: + expected_data = self._controller.get_expected_selection_data() + + folder_data = expected_data.get("folder") + if not folder_data or not folder_data["current"]: + return + + folder_id = folder_data["id"] + self._expected_selection = folder_id + if not self._folders_model.is_refreshing: + self._set_expected_selection() + + def _set_expected_selection(self): + if not self._handle_expected_selection: + return + + folder_id = self._expected_selection + self._expected_selection = None + if folder_id is not None: + self.set_selected_folder(folder_id) + self._controller.expected_folder_selected(folder_id) diff --git a/client/ayon_core/tools/ayon_utils/widgets/projects_widget.py b/client/ayon_core/tools/ayon_utils/widgets/projects_widget.py new file mode 100644 index 0000000000..d3bebecfd6 --- /dev/null +++ b/client/ayon_core/tools/ayon_utils/widgets/projects_widget.py @@ -0,0 +1,600 @@ +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core.tools.ayon_utils.models import PROJECTS_MODEL_SENDER +from .utils import RefreshThread, get_qt_icon + +PROJECT_NAME_ROLE = QtCore.Qt.UserRole + 1 +PROJECT_IS_ACTIVE_ROLE = QtCore.Qt.UserRole + 2 +PROJECT_IS_LIBRARY_ROLE = QtCore.Qt.UserRole + 3 +PROJECT_IS_CURRENT_ROLE = QtCore.Qt.UserRole + 4 +LIBRARY_PROJECT_SEPARATOR_ROLE = QtCore.Qt.UserRole + 5 + + +class ProjectsQtModel(QtGui.QStandardItemModel): + refreshed = QtCore.Signal() + + def __init__(self, controller): + super(ProjectsQtModel, self).__init__() + self._controller = controller + + self._project_items = {} + self._has_libraries = False + + self._empty_item = None + self._empty_item_added = False + + self._select_item = None + self._select_item_added = False + self._select_item_visible = None + + self._libraries_sep_item = None + self._libraries_sep_item_added = False + self._libraries_sep_item_visible = False + + self._current_context_project = None + + self._selected_project = None + + self._refresh_thread = None + + @property + def is_refreshing(self): + return self._refresh_thread is not None + + def refresh(self): + self._refresh() + + def has_content(self): + return len(self._project_items) > 0 + + def set_select_item_visible(self, visible): + if 
self._select_item_visible is visible: + return + self._select_item_visible = visible + + if self._selected_project is None: + self._add_select_item() + + def set_libraries_separator_visible(self, visible): + if self._libraries_sep_item_visible is visible: + return + self._libraries_sep_item_visible = visible + + def set_selected_project(self, project_name): + if not self._select_item_visible: + return + + self._selected_project = project_name + if project_name is None: + self._add_select_item() + else: + self._remove_select_item() + + def set_current_context_project(self, project_name): + if project_name == self._current_context_project: + return + self._unset_current_context_project(self._current_context_project) + self._current_context_project = project_name + self._set_current_context_project(project_name) + + def _set_current_context_project(self, project_name): + item = self._project_items.get(project_name) + if item is None: + return + item.setData(True, PROJECT_IS_CURRENT_ROLE) + + def _unset_current_context_project(self, project_name): + item = self._project_items.get(project_name) + if item is None: + return + item.setData(False, PROJECT_IS_CURRENT_ROLE) + + def _add_empty_item(self): + if self._empty_item_added: + return + self._empty_item_added = True + item = self._get_empty_item() + root_item = self.invisibleRootItem() + root_item.appendRow(item) + + def _remove_empty_item(self): + if not self._empty_item_added: + return + self._empty_item_added = False + root_item = self.invisibleRootItem() + item = self._get_empty_item() + root_item.takeRow(item.row()) + + def _get_empty_item(self): + if self._empty_item is None: + item = QtGui.QStandardItem("< No projects >") + item.setFlags(QtCore.Qt.NoItemFlags) + self._empty_item = item + return self._empty_item + + def _get_library_sep_item(self): + if self._libraries_sep_item is not None: + return self._libraries_sep_item + + item = QtGui.QStandardItem() + item.setData("Libraries", QtCore.Qt.DisplayRole) + item.setData(True, LIBRARY_PROJECT_SEPARATOR_ROLE) + item.setFlags(QtCore.Qt.NoItemFlags) + self._libraries_sep_item = item + return item + + def _add_library_sep_item(self): + if ( + not self._libraries_sep_item_visible + or self._libraries_sep_item_added + ): + return + self._libraries_sep_item_added = True + item = self._get_library_sep_item() + root_item = self.invisibleRootItem() + root_item.appendRow(item) + + def _remove_library_sep_item(self): + if ( + not self._libraries_sep_item_added + ): + return + self._libraries_sep_item_added = False + item = self._get_library_sep_item() + root_item = self.invisibleRootItem() + root_item.takeRow(item.row()) + + def _add_select_item(self): + if self._select_item_added: + return + self._select_item_added = True + item = self._get_select_item() + root_item = self.invisibleRootItem() + root_item.appendRow(item) + + def _remove_select_item(self): + if not self._select_item_added: + return + self._select_item_added = False + root_item = self.invisibleRootItem() + item = self._get_select_item() + root_item.takeRow(item.row()) + + def _get_select_item(self): + if self._select_item is None: + item = QtGui.QStandardItem("< Select project >") + item.setEditable(False) + self._select_item = item + return self._select_item + + def _refresh(self): + if self._refresh_thread is not None: + return + + refresh_thread = RefreshThread( + "projects", self._query_project_items + ) + refresh_thread.refresh_finished.connect(self._refresh_finished) + + self._refresh_thread = refresh_thread + 
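# 'RefreshThread' comes from .utils and is not shown in this hunk; it is
# expected to run '_query_project_items' off the main thread and emit
# 'refresh_finished' once done, after which '_refresh_finished' picks up
# the result on the Qt side and retries the refresh if the result was None.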
refresh_thread.start() + + def _query_project_items(self): + return self._controller.get_project_items( + sender=PROJECTS_MODEL_SENDER + ) + + def _refresh_finished(self): + # TODO check if failed + result = self._refresh_thread.get_result() + if result is not None: + self._fill_items(result) + + self._refresh_thread = None + if result is None: + self._refresh() + else: + self.refreshed.emit() + + def _fill_items(self, project_items): + new_project_names = { + project_item.name + for project_item in project_items + } + + # Handle "Select item" visibility + if self._select_item_visible: + # Add the select item if previously selected project is not in + # project items + if self._selected_project not in new_project_names: + self._add_select_item() + else: + self._remove_select_item() + + root_item = self.invisibleRootItem() + + items_to_remove = set(self._project_items.keys()) - new_project_names + for project_name in items_to_remove: + item = self._project_items.pop(project_name) + root_item.takeRow(item.row()) + + has_library_project = False + new_items = [] + for project_item in project_items: + project_name = project_item.name + item = self._project_items.get(project_name) + if project_item.is_library: + has_library_project = True + if item is None: + item = QtGui.QStandardItem() + item.setEditable(False) + new_items.append(item) + icon = get_qt_icon(project_item.icon) + item.setData(project_name, QtCore.Qt.DisplayRole) + item.setData(icon, QtCore.Qt.DecorationRole) + item.setData(project_name, PROJECT_NAME_ROLE) + item.setData(project_item.active, PROJECT_IS_ACTIVE_ROLE) + item.setData(project_item.is_library, PROJECT_IS_LIBRARY_ROLE) + is_current = project_name == self._current_context_project + item.setData(is_current, PROJECT_IS_CURRENT_ROLE) + self._project_items[project_name] = item + + self._set_current_context_project(self._current_context_project) + + self._has_libraries = has_library_project + + if new_items: + root_item.appendRows(new_items) + + if self.has_content(): + # Make sure "No projects" item is removed + self._remove_empty_item() + if has_library_project: + self._add_library_sep_item() + else: + self._remove_library_sep_item() + else: + # Keep only "No projects" item + self._add_empty_item() + self._remove_select_item() + self._remove_library_sep_item() + + +class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(ProjectSortFilterProxy, self).__init__(*args, **kwargs) + self._filter_inactive = True + self._filter_standard = False + self._filter_library = False + self._sort_by_type = True + # Disable case sensitivity + self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + def _type_sort(self, l_index, r_index): + if not self._sort_by_type: + return None + + l_is_library = l_index.data(PROJECT_IS_LIBRARY_ROLE) + r_is_library = r_index.data(PROJECT_IS_LIBRARY_ROLE) + # Both are project items + if l_is_library is not None and r_is_library is not None: + if l_is_library is r_is_library: + return None + if l_is_library: + return False + return True + + if l_index.data(LIBRARY_PROJECT_SEPARATOR_ROLE): + if r_is_library is None: + return False + return r_is_library + + if r_index.data(LIBRARY_PROJECT_SEPARATOR_ROLE): + if l_is_library is None: + return True + return l_is_library + return None + + def lessThan(self, left_index, right_index): + # Current project always on top + # - make sure this is always first, before any other sorting + # e.g. 
type sort would move the item lower + if left_index.data(PROJECT_IS_CURRENT_ROLE): + return True + if right_index.data(PROJECT_IS_CURRENT_ROLE): + return False + + # Library separator should be before library projects + result = self._type_sort(left_index, right_index) + if result is not None: + return result + + if left_index.data(PROJECT_NAME_ROLE) is None: + return True + + if right_index.data(PROJECT_NAME_ROLE) is None: + return False + + left_is_active = left_index.data(PROJECT_IS_ACTIVE_ROLE) + right_is_active = right_index.data(PROJECT_IS_ACTIVE_ROLE) + if right_is_active == left_is_active: + return super(ProjectSortFilterProxy, self).lessThan( + left_index, right_index + ) + + if left_is_active: + return True + return False + + def filterAcceptsRow(self, source_row, source_parent): + index = self.sourceModel().index(source_row, 0, source_parent) + project_name = index.data(PROJECT_NAME_ROLE) + if project_name is None: + return True + + string_pattern = self.filterRegularExpression().pattern() + if string_pattern: + return string_pattern.lower() in project_name.lower() + + # Current project keep always visible + default = super(ProjectSortFilterProxy, self).filterAcceptsRow( + source_row, source_parent + ) + if not default: + return default + + # Make sure current project is visible + if index.data(PROJECT_IS_CURRENT_ROLE): + return True + + if ( + self._filter_inactive + and not index.data(PROJECT_IS_ACTIVE_ROLE) + ): + return False + + if ( + self._filter_standard + and not index.data(PROJECT_IS_LIBRARY_ROLE) + ): + return False + + if ( + self._filter_library + and index.data(PROJECT_IS_LIBRARY_ROLE) + ): + return False + return True + + def _custom_index_filter(self, index): + return bool(index.data(PROJECT_IS_ACTIVE_ROLE)) + + def is_active_filter_enabled(self): + return self._filter_inactive + + def set_active_filter_enabled(self, enabled): + if self._filter_inactive == enabled: + return + self._filter_inactive = enabled + self.invalidateFilter() + + def set_library_filter_enabled(self, enabled): + if self._filter_library == enabled: + return + self._filter_library = enabled + self.invalidateFilter() + + def set_standard_filter_enabled(self, enabled): + if self._filter_standard == enabled: + return + self._filter_standard = enabled + self.invalidateFilter() + + def set_sort_by_type(self, enabled): + if self._sort_by_type is enabled: + return + self._sort_by_type = enabled + self.invalidate() + + +class ProjectsCombobox(QtWidgets.QWidget): + refreshed = QtCore.Signal() + selection_changed = QtCore.Signal() + + def __init__(self, controller, parent, handle_expected_selection=False): + super(ProjectsCombobox, self).__init__(parent) + + projects_combobox = QtWidgets.QComboBox(self) + combobox_delegate = QtWidgets.QStyledItemDelegate(projects_combobox) + projects_combobox.setItemDelegate(combobox_delegate) + projects_model = ProjectsQtModel(controller) + projects_proxy_model = ProjectSortFilterProxy() + projects_proxy_model.setSourceModel(projects_model) + projects_combobox.setModel(projects_proxy_model) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(projects_combobox, 1) + + projects_model.refreshed.connect(self._on_model_refresh) + + controller.register_event_callback( + "projects.refresh.finished", + self._on_projects_refresh_finished + ) + controller.register_event_callback( + "controller.refresh.finished", + self._on_controller_refresh + ) + controller.register_event_callback( + "expected_selection_changed", + 
self._on_expected_selection_change + ) + + projects_combobox.currentIndexChanged.connect( + self._on_current_index_changed + ) + + self._controller = controller + self._listen_selection_change = True + self._select_item_visible = False + + self._handle_expected_selection = handle_expected_selection + self._expected_selection = None + + self._projects_combobox = projects_combobox + self._projects_model = projects_model + self._projects_proxy_model = projects_proxy_model + self._combobox_delegate = combobox_delegate + + def refresh(self): + self._projects_model.refresh() + + def set_selection(self, project_name): + """Set selection to a given project. + + Selection change is ignored if project is not found. + + Args: + project_name (str): Name of project. + + Returns: + bool: True if selection was changed, False otherwise. NOTE: + Selection may not be changed if project is not found, or if + project is already selected. + """ + + idx = self._projects_combobox.findData( + project_name, PROJECT_NAME_ROLE) + if idx < 0: + return False + if idx != self._projects_combobox.currentIndex(): + self._projects_combobox.setCurrentIndex(idx) + return True + return False + + def set_listen_to_selection_change(self, listen): + """Disable listening to changes of the selection. + + Because the combobox triggers a selection change when its model + is refreshed, it is necessary to disable listening to selection in + some cases, e.g. when the widget is on a different page of the UI + and should just be refreshed. + + Args: + listen (bool): Enable or disable listening to selection changes. + """ + + self._listen_selection_change = listen + + def get_selected_project_name(self): + """Name of selected project. + + Returns: + Union[str, None]: Name of selected project, or None if no + project is selected. + """ + + idx = self._projects_combobox.currentIndex() + if idx < 0: + return None + return self._projects_combobox.itemData(idx, PROJECT_NAME_ROLE) + + def set_current_context_project(self, project_name): + self._projects_model.set_current_context_project(project_name) + self._projects_proxy_model.invalidateFilter() + + def set_select_item_visible(self, visible): + self._select_item_visible = visible + self._projects_model.set_select_item_visible(visible) + self._update_select_item_visiblity() + + def set_libraries_separator_visible(self, visible): + self._projects_model.set_libraries_separator_visible(visible) + + def is_active_filter_enabled(self): + return self._projects_proxy_model.is_active_filter_enabled() + + def set_active_filter_enabled(self, enabled): + return self._projects_proxy_model.set_active_filter_enabled(enabled) + + def set_standard_filter_enabled(self, enabled): + return self._projects_proxy_model.set_standard_filter_enabled(enabled) + + def set_library_filter_enabled(self, enabled): + return self._projects_proxy_model.set_library_filter_enabled(enabled) + + def _update_select_item_visiblity(self, **kwargs): + if not self._select_item_visible: + return + if "project_name" not in kwargs: + project_name = self.get_selected_project_name() + else: + project_name = kwargs.get("project_name") + + # Hide the item if a project is selected + self._projects_model.set_selected_project(project_name) + + def _on_current_index_changed(self, idx): + if not self._listen_selection_change: + return + project_name = self._projects_combobox.itemData( + idx, PROJECT_NAME_ROLE) + self._update_select_item_visiblity(project_name=project_name) + self._controller.set_selected_project(project_name) + self.selection_changed.emit() + + def 
_on_model_refresh(self): + self._projects_proxy_model.sort(0) + self._projects_proxy_model.invalidateFilter() + if self._expected_selection: + self._set_expected_selection() + self._update_select_item_visiblity() + self.refreshed.emit() + + def _on_projects_refresh_finished(self, event): + if event["sender"] != PROJECTS_MODEL_SENDER: + self._projects_model.refresh() + + def _on_controller_refresh(self): + self._update_expected_selection() + + # Expected selection handling + def _on_expected_selection_change(self, event): + self._update_expected_selection(event.data) + + def _set_expected_selection(self): + if not self._handle_expected_selection: + return + project_name = self._expected_selection + if project_name is not None: + if project_name != self.get_selected_project_name(): + self.set_selection(project_name) + else: + # Fake project change + self._on_current_index_changed( + self._projects_combobox.currentIndex() + ) + + self._controller.expected_project_selected(project_name) + + def _update_expected_selection(self, expected_data=None): + if not self._handle_expected_selection: + return + if expected_data is None: + expected_data = self._controller.get_expected_selection_data() + + project_data = expected_data.get("project") + if ( + not project_data + or not project_data["current"] + or project_data["selected"] + ): + return + self._expected_selection = project_data["name"] + if not self._projects_model.is_refreshing: + self._set_expected_selection() + + +class ProjectsWidget(QtWidgets.QWidget): + # TODO implement + pass diff --git a/client/ayon_core/tools/ayon_utils/widgets/tasks_widget.py b/client/ayon_core/tools/ayon_utils/widgets/tasks_widget.py new file mode 100644 index 0000000000..3d6cc47fe3 --- /dev/null +++ b/client/ayon_core/tools/ayon_utils/widgets/tasks_widget.py @@ -0,0 +1,459 @@ +from qtpy import QtWidgets, QtGui, QtCore + +from ayon_core.style import get_disabled_entity_icon_color +from ayon_core.tools.utils import DeselectableTreeView + +from .utils import RefreshThread, get_qt_icon + +TASKS_MODEL_SENDER_NAME = "qt_tasks_model" +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 +PARENT_ID_ROLE = QtCore.Qt.UserRole + 2 +ITEM_NAME_ROLE = QtCore.Qt.UserRole + 3 +TASK_TYPE_ROLE = QtCore.Qt.UserRole + 4 + + +class TasksQtModel(QtGui.QStandardItemModel): + """Tasks model which cares about refresh of tasks by folder id. + + Args: + controller (AbstractWorkfilesFrontend): The control object. + """ + + refreshed = QtCore.Signal() + + def __init__(self, controller): + super(TasksQtModel, self).__init__() + + self._controller = controller + + self._items_by_name = {} + self._has_content = False + self._is_refreshing = False + + self._invalid_selection_item_used = False + self._invalid_selection_item = None + self._empty_tasks_item_used = False + self._empty_tasks_item = None + + self._last_project_name = None + self._last_folder_id = None + + self._refresh_threads = {} + self._current_refresh_thread = None + + # Initial state + self._add_invalid_selection_item() + + def _clear_items(self): + self._items_by_name = {} + self._has_content = False + self._remove_invalid_items() + root_item = self.invisibleRootItem() + root_item.removeRows(0, root_item.rowCount()) + + def refresh(self): + """Refresh tasks for last project and folder.""" + + self._refresh(self._last_project_name, self._last_folder_id) + + def set_context(self, project_name, folder_id): + """Set context for which should be tasks showed. + + Args: + project_name (Union[str]): Name of project. 
+ folder_id (Union[str, None]): Folder id. + """ + + self._refresh(project_name, folder_id) + + def get_index_by_name(self, task_name): + """Find item by name and return its index. + + Returns: + QtCore.QModelIndex: Index of item. Is invalid if task is not + found by name. + """ + + item = self._items_by_name.get(task_name) + if item is None: + return QtCore.QModelIndex() + return self.indexFromItem(item) + + def get_last_project_name(self): + """Get last refreshed project name. + + Returns: + Union[str, None]: Project name. + """ + + return self._last_project_name + + def get_last_folder_id(self): + """Get last refreshed folder id. + + Returns: + Union[str, None]: Folder id. + """ + + return self._last_folder_id + + def set_selected_project(self, project_name): + self._selected_project_name = project_name + + def _get_invalid_selection_item(self): + if self._invalid_selection_item is None: + item = QtGui.QStandardItem("Select a folder") + item.setFlags(QtCore.Qt.NoItemFlags) + icon = get_qt_icon({ + "type": "awesome-font", + "name": "fa.times", + "color": get_disabled_entity_icon_color(), + }) + item.setData(icon, QtCore.Qt.DecorationRole) + self._invalid_selection_item = item + return self._invalid_selection_item + + def _get_empty_task_item(self): + if self._empty_tasks_item is None: + item = QtGui.QStandardItem("No task") + icon = get_qt_icon({ + "type": "awesome-font", + "name": "fa.exclamation-circle", + "color": get_disabled_entity_icon_color(), + }) + item.setData(icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + self._empty_tasks_item = item + return self._empty_tasks_item + + def _add_invalid_item(self, item): + self._clear_items() + root_item = self.invisibleRootItem() + root_item.appendRow(item) + + def _remove_invalid_item(self, item): + root_item = self.invisibleRootItem() + root_item.takeRow(item.row()) + + def _remove_invalid_items(self): + self._remove_invalid_selection_item() + self._remove_empty_task_item() + + def _add_invalid_selection_item(self): + if not self._invalid_selection_item_used: + self._add_invalid_item(self._get_invalid_selection_item()) + self._invalid_selection_item_used = True + + def _remove_invalid_selection_item(self): + if self._invalid_selection_item: + self._remove_invalid_item(self._get_invalid_selection_item()) + self._invalid_selection_item_used = False + + def _add_empty_task_item(self): + if not self._empty_tasks_item_used: + self._add_invalid_item(self._get_empty_task_item()) + self._empty_tasks_item_used = True + + def _remove_empty_task_item(self): + if self._empty_tasks_item_used: + self._remove_invalid_item(self._get_empty_task_item()) + self._empty_tasks_item_used = False + + def _refresh(self, project_name, folder_id): + self._is_refreshing = True + self._last_project_name = project_name + self._last_folder_id = folder_id + if not folder_id: + self._add_invalid_selection_item() + self._current_refresh_thread = None + self._is_refreshing = False + self.refreshed.emit() + return + + thread = self._refresh_threads.get(folder_id) + if thread is not None: + self._current_refresh_thread = thread + return + thread = RefreshThread( + folder_id, + self._controller.get_task_items, + project_name, + folder_id + ) + self._current_refresh_thread = thread + self._refresh_threads[thread.id] = thread + thread.refresh_finished.connect(self._on_refresh_thread) + thread.start() + + def _fill_data_from_thread(self, thread): + task_items = thread.get_result() + # Task items are refreshed + if task_items is None: + return + + # No 
tasks are available for the folder
+        if not task_items:
+            self._add_empty_task_item()
+            return
+        self._remove_invalid_items()
+
+        new_items = []
+        new_names = set()
+        for task_item in task_items:
+            name = task_item.name
+            new_names.add(name)
+            item = self._items_by_name.get(name)
+            if item is None:
+                item = QtGui.QStandardItem()
+                item.setEditable(False)
+                new_items.append(item)
+                self._items_by_name[name] = item
+
+            # TODO cache locally
+            icon = get_qt_icon(task_item.icon)
+            item.setData(task_item.label, QtCore.Qt.DisplayRole)
+            item.setData(name, ITEM_NAME_ROLE)
+            item.setData(task_item.id, ITEM_ID_ROLE)
+            item.setData(task_item.parent_id, PARENT_ID_ROLE)
+            item.setData(icon, QtCore.Qt.DecorationRole)
+
+        root_item = self.invisibleRootItem()
+
+        for name in set(self._items_by_name) - new_names:
+            item = self._items_by_name.pop(name)
+            root_item.removeRow(item.row())
+
+        if new_items:
+            root_item.appendRows(new_items)
+
+    def _on_refresh_thread(self, thread_id):
+        """Callback when a refresh thread is finished.
+
+        Multiple refresh threads can technically run at the same time. To
+        avoid using values from the wrong thread, we check whether the
+        thread id matches the current refresh thread id.
+
+        Tasks are stored by name, so if a folder has the same task name as
+        the previously selected folder, the selection is kept.
+
+        Args:
+            thread_id (str): Thread id.
+        """
+
+        # Make sure to remove thread from '_refresh_threads' dict
+        thread = self._refresh_threads.pop(thread_id)
+        if (
+            self._current_refresh_thread is None
+            or thread_id != self._current_refresh_thread.id
+        ):
+            return
+
+        self._fill_data_from_thread(thread)
+
+        root_item = self.invisibleRootItem()
+        self._has_content = root_item.rowCount() > 0
+        self._current_refresh_thread = None
+        self._is_refreshing = False
+        self.refreshed.emit()
+
+    @property
+    def is_refreshing(self):
+        """Model is refreshing.
+
+        Returns:
+            bool: Model is refreshing.
+        """
+
+        return self._is_refreshing
+
+    @property
+    def has_content(self):
+        """Model has content.
+
+        Returns:
+            bool: Model has at least one task.
+        """
+
+        return self._has_content
+
+    def headerData(self, section, orientation, role):
+        # Show nice labels in the header
+        if (
+            role == QtCore.Qt.DisplayRole
+            and orientation == QtCore.Qt.Horizontal
+        ):
+            if section == 0:
+                return "Tasks"
+
+        return super(TasksQtModel, self).headerData(
+            section, orientation, role
+        )
+
+
+class TasksWidget(QtWidgets.QWidget):
+    """Tasks widget.
+
+    Widget that handles tasks view, model and selection.
+
+    Args:
+        controller (AbstractWorkfilesFrontend): Workfiles controller.
+        parent (QtWidgets.QWidget): Parent widget.
+        handle_expected_selection (Optional[bool]): Handle expected selection.
+    """
+
+    refreshed = QtCore.Signal()
+    selection_changed = QtCore.Signal()
+
+    def __init__(self, controller, parent, handle_expected_selection=False):
+        super(TasksWidget, self).__init__(parent)
+
+        tasks_view = DeselectableTreeView(self)
+        tasks_view.setIndentation(0)
+
+        tasks_model = TasksQtModel(controller)
+        tasks_proxy_model = QtCore.QSortFilterProxyModel()
+        tasks_proxy_model.setSourceModel(tasks_model)
+        tasks_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
+
+        tasks_view.setModel(tasks_proxy_model)
+
+        main_layout = QtWidgets.QHBoxLayout(self)
+        main_layout.setContentsMargins(0, 0, 0, 0)
+        main_layout.addWidget(tasks_view, 1)
+
+        controller.register_event_callback(
+            "tasks.refresh.finished",
+            self._on_tasks_refresh_finished
+        )
+        controller.register_event_callback(
+            "selection.folder.changed",
+            self._folder_selection_changed
+        )
+        controller.register_event_callback(
+            "expected_selection_changed",
+            self._on_expected_selection_change
+        )
+
+        selection_model = tasks_view.selectionModel()
+        selection_model.selectionChanged.connect(self._on_selection_change)
+
+        tasks_model.refreshed.connect(self._on_tasks_model_refresh)
+
+        self._controller = controller
+        self._tasks_view = tasks_view
+        self._tasks_model = tasks_model
+        self._tasks_proxy_model = tasks_proxy_model
+
+        self._selected_folder_id = None
+
+        self._handle_expected_selection = handle_expected_selection
+        self._expected_selection_data = None
+
+    def refresh(self):
+        """Refresh tasks for the last selected project and folder.
+
+        Forces the tasks model to update from the controller. This may or
+        may not trigger a query to the server, based on the controller's
+        cache.
+        """
+
+        self._tasks_model.refresh()
+
+    def _on_tasks_refresh_finished(self, event):
+        """Tasks were refreshed in the controller.
+
+        Ignore the event if the refresh was triggered by the tasks model,
+        or if the refreshed folder is not the currently selected folder.
+
+        Args:
+            event (Event): Event object.
+ """ + + # Refresh only if current folder id is the same + if ( + event["sender"] == TASKS_MODEL_SENDER_NAME + or event["folder_id"] != self._selected_folder_id + ): + return + self._tasks_model.set_context( + event["project_name"], self._selected_folder_id + ) + + def _folder_selection_changed(self, event): + self._selected_folder_id = event["folder_id"] + self._tasks_model.set_context( + event["project_name"], self._selected_folder_id + ) + + def _on_tasks_model_refresh(self): + if not self._set_expected_selection(): + self._on_selection_change() + self._tasks_proxy_model.sort(0) + self.refreshed.emit() + + def _get_selected_item_ids(self): + selection_model = self._tasks_view.selectionModel() + for index in selection_model.selectedIndexes(): + task_id = index.data(ITEM_ID_ROLE) + task_name = index.data(ITEM_NAME_ROLE) + parent_id = index.data(PARENT_ID_ROLE) + if task_name is not None: + return parent_id, task_id, task_name + return self._selected_folder_id, None, None + + def _on_selection_change(self): + # Don't trigger task change during refresh + # - a task was deselected if that happens + # - can cause crash triggered during tasks refreshing + if self._tasks_model.is_refreshing: + return + + parent_id, task_id, task_name = self._get_selected_item_ids() + self._controller.set_selected_task(task_id, task_name) + self.selection_changed.emit() + + # Expected selection handling + def _on_expected_selection_change(self, event): + self._update_expected_selection(event.data) + + def _set_expected_selection(self): + if not self._handle_expected_selection: + return False + + if self._expected_selection_data is None: + return False + folder_id = self._expected_selection_data["folder_id"] + task_name = self._expected_selection_data["task_name"] + self._expected_selection_data = None + model_folder_id = self._tasks_model.get_last_folder_id() + if folder_id != model_folder_id: + return False + if task_name is not None: + index = self._tasks_model.get_index_by_name(task_name) + if index.isValid(): + proxy_index = self._tasks_proxy_model.mapFromSource(index) + self._tasks_view.setCurrentIndex(proxy_index) + self._controller.expected_task_selected(folder_id, task_name) + return True + + def _update_expected_selection(self, expected_data=None): + if not self._handle_expected_selection: + return + if expected_data is None: + expected_data = self._controller.get_expected_selection_data() + folder_data = expected_data.get("folder") + task_data = expected_data.get("task") + if ( + not folder_data + or not task_data + or not task_data["current"] + ): + return + folder_id = folder_data["id"] + self._expected_selection_data = { + "task_name": task_data["name"], + "folder_id": folder_id, + } + model_folder_id = self._tasks_model.get_last_folder_id() + if folder_id != model_folder_id or self._tasks_model.is_refreshing: + return + self._set_expected_selection() diff --git a/client/ayon_core/tools/ayon_utils/widgets/utils.py b/client/ayon_core/tools/ayon_utils/widgets/utils.py new file mode 100644 index 0000000000..ead8f4edb2 --- /dev/null +++ b/client/ayon_core/tools/ayon_utils/widgets/utils.py @@ -0,0 +1,109 @@ +import os +from functools import partial + +from qtpy import QtCore, QtGui + +from ayon_core.tools.utils.lib import get_qta_icon_by_name_and_color + + +class RefreshThread(QtCore.QThread): + refresh_finished = QtCore.Signal(str) + + def __init__(self, thread_id, func, *args, **kwargs): + super(RefreshThread, self).__init__() + self._id = thread_id + self._callback = partial(func, *args, **kwargs) + 
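# Filled by 'run' on the worker thread; read via 'get_result' after
+        # the 'refresh_finished' signal is emitted.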
+        self._exception = None
+        self._result = None
+        self.finished.connect(self._on_finish_callback)
+
+    @property
+    def id(self):
+        return self._id
+
+    @property
+    def failed(self):
+        return self._exception is not None
+
+    def run(self):
+        try:
+            self._result = self._callback()
+        except Exception as exc:
+            self._exception = exc
+
+    def get_result(self):
+        return self._result
+
+    def _on_finish_callback(self):
+        """Trigger custom signal with thread id.
+
+        By listening to the 'finished' signal we make sure that the thread
+        execution has finished and the QThread object can be safely deleted.
+        """
+
+        self.refresh_finished.emit(self.id)
+
+
+class _IconsCache:
+    """Cache for icons."""
+
+    _cache = {}
+    _default = None
+
+    @classmethod
+    def _get_cache_key(cls, icon_def):
+        parts = []
+        icon_type = icon_def["type"]
+        if icon_type == "path":
+            parts = [icon_type, icon_def["path"]]
+
+        elif icon_type == "awesome-font":
+            parts = [icon_type, icon_def["name"], icon_def["color"]]
+        return "|".join(parts)
+
+    @classmethod
+    def get_icon(cls, icon_def):
+        if not icon_def:
+            return None
+        icon_type = icon_def["type"]
+        cache_key = cls._get_cache_key(icon_def)
+        cache = cls._cache.get(cache_key)
+        if cache is not None:
+            return cache
+
+        icon = None
+        if icon_type == "path":
+            path = icon_def["path"]
+            if os.path.exists(path):
+                icon = QtGui.QIcon(path)
+
+        elif icon_type == "awesome-font":
+            icon_name = icon_def["name"]
+            icon_color = icon_def["color"]
+            icon = get_qta_icon_by_name_and_color(icon_name, icon_color)
+            if icon is None:
+                icon = get_qta_icon_by_name_and_color(
+                    "fa.{}".format(icon_name), icon_color)
+        if icon is None:
+            icon = cls.get_default()
+        cls._cache[cache_key] = icon
+        return icon
+
+    @classmethod
+    def get_default(cls):
+        # Create the default (transparent) icon only once and reuse it.
+        if cls._default is None:
+            pix = QtGui.QPixmap(1, 1)
+            pix.fill(QtCore.Qt.transparent)
+            cls._default = QtGui.QIcon(pix)
+        return cls._default
+
+
+def get_qt_icon(icon_def):
+    """Return icon from cache or create a new one.
+
+    Args:
+        icon_def (dict[str, Any]): Icon definition.
+
+    Returns:
+        QtGui.QIcon: Icon.
+    """
+
+    return _IconsCache.get_icon(icon_def)
diff --git a/client/ayon_core/tools/context_dialog/__init__.py b/client/ayon_core/tools/context_dialog/__init__.py
new file mode 100644
index 0000000000..4fb912fb62
--- /dev/null
+++ b/client/ayon_core/tools/context_dialog/__init__.py
@@ -0,0 +1,7 @@
+from .window import ContextDialog, main
+
+
+__all__ = (
+    "ContextDialog",
+    "main",
+)
diff --git a/client/ayon_core/tools/context_dialog/window.py b/client/ayon_core/tools/context_dialog/window.py
new file mode 100644
index 0000000000..e2c9f71aaa
--- /dev/null
+++ b/client/ayon_core/tools/context_dialog/window.py
@@ -0,0 +1,799 @@
+import os
+import json
+
+import ayon_api
+from qtpy import QtWidgets, QtCore, QtGui
+
+from ayon_core import style
+from ayon_core.lib.events import QueuedEventSystem
+from ayon_core.tools.ayon_utils.models import (
+    ProjectsModel,
+    HierarchyModel,
+)
+from ayon_core.tools.ayon_utils.widgets import (
+    ProjectsCombobox,
+    FoldersWidget,
+    TasksWidget,
+)
+from ayon_core.tools.utils.lib import (
+    center_window,
+    get_ayon_qt_app,
+)
+
+
+class SelectionModel(object):
+    """Model handling selection changes.
+ + Triggering events: + - "selection.project.changed" + - "selection.folder.changed" + - "selection.task.changed" + """ + + event_source = "selection.model" + + def __init__(self, controller): + self._controller = controller + + self._project_name = None + self._folder_id = None + self._task_id = None + self._task_name = None + + def get_selected_project_name(self): + return self._project_name + + def set_selected_project(self, project_name): + self._project_name = project_name + self._controller.emit_event( + "selection.project.changed", + {"project_name": project_name}, + self.event_source + ) + + def get_selected_folder_id(self): + return self._folder_id + + def set_selected_folder(self, folder_id): + if folder_id == self._folder_id: + return + self._folder_id = folder_id + self._controller.emit_event( + "selection.folder.changed", + { + "project_name": self._project_name, + "folder_id": folder_id, + }, + self.event_source + ) + + def get_selected_task_name(self): + return self._task_name + + def get_selected_task_id(self): + return self._task_id + + def set_selected_task(self, task_id, task_name): + if task_id == self._task_id: + return + + self._task_name = task_name + self._task_id = task_id + self._controller.emit_event( + "selection.task.changed", + { + "project_name": self._project_name, + "folder_id": self._folder_id, + "task_name": task_name, + "task_id": task_id, + }, + self.event_source + ) + + +class ExpectedSelection: + def __init__(self, controller): + self._project_name = None + self._folder_id = None + + self._project_selected = True + self._folder_selected = True + + self._controller = controller + + def _emit_change(self): + self._controller.emit_event( + "expected_selection_changed", + self.get_expected_selection_data(), + ) + + def set_expected_selection(self, project_name, folder_id): + self._project_name = project_name + self._folder_id = folder_id + + self._project_selected = False + self._folder_selected = False + self._emit_change() + + def get_expected_selection_data(self): + project_current = False + folder_current = False + if not self._project_selected: + project_current = True + elif not self._folder_selected: + folder_current = True + return { + "project": { + "name": self._project_name, + "current": project_current, + "selected": self._project_selected, + }, + "folder": { + "id": self._folder_id, + "current": folder_current, + "selected": self._folder_selected, + }, + } + + def is_expected_project_selected(self, project_name): + return project_name == self._project_name and self._project_selected + + def is_expected_folder_selected(self, folder_id): + return folder_id == self._folder_id and self._folder_selected + + def expected_project_selected(self, project_name): + if project_name != self._project_name: + return False + self._project_selected = True + self._emit_change() + return True + + def expected_folder_selected(self, folder_id): + if folder_id != self._folder_id: + return False + self._folder_selected = True + self._emit_change() + return True + + +class ContextDialogController: + def __init__(self): + self._event_system = None + + self._projects_model = ProjectsModel(self) + self._hierarchy_model = HierarchyModel(self) + self._selection_model = SelectionModel(self) + self._expected_selection = ExpectedSelection(self) + + self._confirmed = False + self._is_strict = False + self._output_path = None + + self._initial_project_name = None + self._initial_folder_id = None + self._initial_folder_label = None + self._initial_project_found = True + 
self._initial_folder_found = True + self._initial_tasks_found = True + + def reset(self): + self._emit_event("controller.reset.started") + + self._confirmed = False + self._output_path = None + + self._initial_project_name = None + self._initial_folder_id = None + self._initial_folder_label = None + self._initial_project_found = True + self._initial_folder_found = True + self._initial_tasks_found = True + + self._projects_model.reset() + self._hierarchy_model.reset() + + self._emit_event("controller.reset.finished") + + def refresh(self): + self._emit_event("controller.refresh.started") + + self._projects_model.reset() + self._hierarchy_model.reset() + + self._emit_event("controller.refresh.finished") + + # Event handling + def emit_event(self, topic, data=None, source=None): + """Use implemented event system to trigger event.""" + + if data is None: + data = {} + self._get_event_system().emit(topic, data, source) + + def register_event_callback(self, topic, callback): + self._get_event_system().add_callback(topic, callback) + + def set_output_json_path(self, output_path): + self._output_path = output_path + + def is_strict(self): + return self._is_strict + + def set_strict(self, enabled): + if self._is_strict is enabled: + return + self._is_strict = enabled + self._emit_event("strict.changed", {"strict": enabled}) + + # Data model functions + def get_project_items(self, sender=None): + return self._projects_model.get_project_items(sender) + + def get_folder_items(self, project_name, sender=None): + return self._hierarchy_model.get_folder_items(project_name, sender) + + def get_task_items(self, project_name, folder_id, sender=None): + return self._hierarchy_model.get_task_items( + project_name, folder_id, sender + ) + + # Expected selection helpers + def set_expected_selection(self, project_name, folder_id): + return self._expected_selection.set_expected_selection( + project_name, folder_id + ) + + def get_expected_selection_data(self): + return self._expected_selection.get_expected_selection_data() + + def expected_project_selected(self, project_name): + self._expected_selection.expected_project_selected(project_name) + + def expected_folder_selected(self, folder_id): + self._expected_selection.expected_folder_selected(folder_id) + + # Selection handling + def get_selected_project_name(self): + return self._selection_model.get_selected_project_name() + + def set_selected_project(self, project_name): + self._selection_model.set_selected_project(project_name) + + def get_selected_folder_id(self): + return self._selection_model.get_selected_folder_id() + + def set_selected_folder(self, folder_id): + self._selection_model.set_selected_folder(folder_id) + + def get_selected_task_name(self): + return self._selection_model.get_selected_task_name() + + def get_selected_task_id(self): + return self._selection_model.get_selected_task_id() + + def set_selected_task(self, task_id, task_name): + self._selection_model.set_selected_task(task_id, task_name) + + def is_initial_context_valid(self): + return self._initial_folder_found and self._initial_project_found + + def set_initial_context(self, project_name=None, asset_name=None): + result = self._prepare_initial_context(project_name, asset_name) + + self._initial_project_name = project_name + self._initial_folder_id = result["folder_id"] + self._initial_folder_label = result["folder_label"] + self._initial_project_found = result["project_found"] + self._initial_folder_found = result["folder_found"] + self._initial_tasks_found = result["tasks_found"] + 
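# Broadcast the change so the UI can react (e.g. show or hide the
+        # invalid-context overlay).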
+        self._emit_event(
+            "initial.context.changed",
+            self.get_initial_context()
+        )
+
+    def get_initial_context(self):
+        return {
+            "project_name": self._initial_project_name,
+            "folder_id": self._initial_folder_id,
+            "folder_label": self._initial_folder_label,
+            "project_found": self._initial_project_found,
+            "folder_found": self._initial_folder_found,
+            "tasks_found": self._initial_tasks_found,
+            "valid": (
+                self._initial_project_found
+                and self._initial_folder_found
+                and self._initial_tasks_found
+            )
+        }
+
+    # Result of this tool
+    def get_selected_context(self):
+        project_name = None
+        folder_id = None
+        task_id = None
+        task_name = None
+        folder_path = None
+        folder_name = None
+        if self._confirmed:
+            project_name = self.get_selected_project_name()
+            folder_id = self.get_selected_folder_id()
+            task_id = self.get_selected_task_id()
+            task_name = self.get_selected_task_name()
+
+            folder_item = None
+            if folder_id:
+                folder_item = self._hierarchy_model.get_folder_item(
+                    project_name, folder_id)
+
+            if folder_item:
+                folder_path = folder_item.path
+                folder_name = folder_item.name
+        return {
+            "project": project_name,
+            "project_name": project_name,
+            "asset": folder_name,
+            "folder_id": folder_id,
+            "folder_path": folder_path,
+            "task": task_name,
+            "task_name": task_name,
+            "task_id": task_id,
+            "initial_context_valid": self.is_initial_context_valid(),
+        }
+
+    def confirm_selection(self):
+        self._confirmed = True
+
+    def store_output(self):
+        if not self._output_path:
+            return
+
+        dirpath = os.path.dirname(self._output_path)
+        os.makedirs(dirpath, exist_ok=True)
+        with open(self._output_path, "w") as stream:
+            json.dump(self.get_selected_context(), stream, indent=4)
+
+    def _prepare_initial_context(self, project_name, asset_name):
+        project_found = True
+        output = {
+            "project_found": project_found,
+            "folder_id": None,
+            "folder_label": None,
+            "folder_found": True,
+            "tasks_found": True,
+        }
+        if project_name is None:
+            asset_name = None
+        else:
+            project = ayon_api.get_project(project_name)
+            project_found = project is not None
+            output["project_found"] = project_found
+        if not project_found or not asset_name:
+            return output
+
+        output["folder_label"] = asset_name
+
+        folder_id = None
+        folder_found = False
+        # First try to find by path
+        folder = ayon_api.get_folder_by_path(project_name, asset_name)
+        # Try to find by name if folder was not found by path
+        # - avoid querying by name if 'asset_name' contains '/'
+        if not folder and "/" not in asset_name:
+            folder = next(
+                ayon_api.get_folders(
+                    project_name, folder_names=[asset_name], fields=["id"]),
+                None
+            )
+
+        if folder:
+            folder_id = folder["id"]
+            folder_found = True
+
+        output["folder_id"] = folder_id
+        output["folder_found"] = folder_found
+        if not folder_found:
+            return output
+
+        tasks = list(ayon_api.get_tasks(
+            project_name, folder_ids=[folder_id], fields=["id"]
+        ))
+        output["tasks_found"] = bool(tasks)
+        return output
+
+    def _get_event_system(self):
+        """Inner event system for the context dialog controller.
+
+        It is used for communication with the UI. The event system is
+        created on demand.
+
+        Returns:
+            QueuedEventSystem: Event system which can trigger callbacks
+                for topics.
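+
+        Example (illustrative topic and callback names):
+
+            controller.register_event_callback("some.topic", _on_some_topic)
+            controller.emit_event("some.topic", {"key": "value"}, "sender")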
+ """ + + if self._event_system is None: + self._event_system = QueuedEventSystem() + return self._event_system + + def _emit_event(self, topic, data=None): + self.emit_event(topic, data, "controller") + + +class InvalidContextOverlay(QtWidgets.QFrame): + confirmed = QtCore.Signal() + + def __init__(self, parent): + super(InvalidContextOverlay, self).__init__(parent) + self.setObjectName("OverlayFrame") + + mid_widget = QtWidgets.QWidget(self) + label_widget = QtWidgets.QLabel( + "Requested context was not found...", + mid_widget + ) + + confirm_btn = QtWidgets.QPushButton("Close", mid_widget) + + mid_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + mid_layout = QtWidgets.QVBoxLayout(mid_widget) + mid_layout.setContentsMargins(0, 0, 0, 0) + mid_layout.addWidget(label_widget, 0) + mid_layout.addSpacing(30) + mid_layout.addWidget(confirm_btn, 0) + + main_layout = QtWidgets.QGridLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(mid_widget, 1, 1) + main_layout.setRowStretch(0, 1) + main_layout.setRowStretch(1, 0) + main_layout.setRowStretch(2, 1) + main_layout.setColumnStretch(0, 1) + main_layout.setColumnStretch(1, 0) + main_layout.setColumnStretch(2, 1) + + confirm_btn.clicked.connect(self.confirmed) + + self._label_widget = label_widget + self._confirm_btn = confirm_btn + + def set_context( + self, + project_name, + folder_label, + project_found, + folder_found, + tasks_found, + ): + lines = [] + if not project_found: + lines.extend([ + "Requested project '{}' was not found...".format( + project_name), + ]) + + elif not folder_found: + lines.extend([ + "Requested folder was not found...", + "", + "Project: {}".format(project_name), + "Folder: {}".format(folder_label), + ]) + elif not tasks_found: + lines.extend([ + "Requested folder does not have any tasks...", + "", + "Project: {}".format(project_name), + "Folder: {}".format(folder_label), + ]) + else: + lines.append("Requested context was not found...") + self._label_widget.setText("
".join(lines)) + + +class ContextDialog(QtWidgets.QDialog): + """Dialog to select a context. + + Context has 3 parts: + - Project + - Asset + - Task + + It is possible to predefine project and asset. In that case their widgets + will have passed preselected values and will be disabled. + """ + def __init__(self, controller=None, parent=None): + super(ContextDialog, self).__init__(parent) + + self.setWindowTitle("Select Context") + self.setWindowIcon(QtGui.QIcon(style.app_icon_path())) + + if controller is None: + controller = ContextDialogController() + + # Enable minimize and maximize for app + window_flags = QtCore.Qt.Window + if not parent: + window_flags |= QtCore.Qt.WindowStaysOnTopHint + self.setWindowFlags(window_flags) + self.setFocusPolicy(QtCore.Qt.StrongFocus) + + # UI initialization + main_splitter = QtWidgets.QSplitter(self) + + # Left side widget contains project combobox and asset widget + left_side_widget = QtWidgets.QWidget(main_splitter) + + project_combobox = ProjectsCombobox( + controller, + parent=left_side_widget, + handle_expected_selection=True + ) + project_combobox.set_select_item_visible(True) + + # Assets widget + folders_widget = FoldersWidget( + controller, + parent=left_side_widget, + handle_expected_selection=True + ) + + left_side_layout = QtWidgets.QVBoxLayout(left_side_widget) + left_side_layout.setContentsMargins(0, 0, 0, 0) + left_side_layout.addWidget(project_combobox, 0) + left_side_layout.addWidget(folders_widget, 1) + + # Right side of window contains only tasks + tasks_widget = TasksWidget(controller, parent=main_splitter) + + # Add widgets to main splitter + main_splitter.addWidget(left_side_widget) + main_splitter.addWidget(tasks_widget) + + # Set stretch of both sides + main_splitter.setStretchFactor(0, 7) + main_splitter.setStretchFactor(1, 3) + + # Add confimation button to bottom right + ok_btn = QtWidgets.QPushButton("OK", self) + + buttons_layout = QtWidgets.QHBoxLayout() + buttons_layout.setContentsMargins(0, 0, 0, 0) + buttons_layout.addStretch(1) + buttons_layout.addWidget(ok_btn, 0) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(main_splitter, 1) + main_layout.addLayout(buttons_layout, 0) + + overlay_widget = InvalidContextOverlay(self) + overlay_widget.setVisible(False) + + ok_btn.clicked.connect(self._on_ok_click) + project_combobox.refreshed.connect(self._on_projects_refresh) + overlay_widget.confirmed.connect(self._on_overlay_confirm) + + controller.register_event_callback( + "selection.project.changed", + self._on_project_selection_change + ) + controller.register_event_callback( + "selection.folder.changed", + self._on_folder_selection_change + ) + controller.register_event_callback( + "selection.task.changed", + self._on_task_selection_change + ) + controller.register_event_callback( + "initial.context.changed", + self._on_init_context_change + ) + controller.register_event_callback( + "strict.changed", + self._on_strict_changed + ) + controller.register_event_callback( + "controller.reset.finished", + self._on_controller_reset + ) + controller.register_event_callback( + "controller.refresh.finished", + self._on_controller_refresh + ) + + # Set stylehseet and resize window on first show + self._first_show = True + self._visible = False + + self._controller = controller + + self._project_combobox = project_combobox + self._folders_widget = folders_widget + self._tasks_widget = tasks_widget + + self._ok_btn = ok_btn + + self._overlay_widget = overlay_widget + + self._apply_strict_changes(self.is_strict()) + + 
+    def is_strict(self):
+        return self._controller.is_strict()
+
+    def showEvent(self, event):
+        """Override show event to do some callbacks."""
+        super(ContextDialog, self).showEvent(event)
+        self._visible = True
+
+        if self._first_show:
+            self._first_show = False
+            # Set stylesheet and resize
+            self.setStyleSheet(style.load_stylesheet())
+            self.resize(600, 700)
+            center_window(self)
+            self._controller.refresh()
+
+            initial_context = self._controller.get_initial_context()
+            self._set_init_context(initial_context)
+        self._overlay_widget.resize(self.size())
+
+    def resizeEvent(self, event):
+        super(ContextDialog, self).resizeEvent(event)
+        self._overlay_widget.resize(self.size())
+
+    def closeEvent(self, event):
+        """Ignore the close event in strict state with unconfirmed context."""
+        if self.is_strict() and not self._ok_btn.isEnabled():
+            # Only allow closing the window when the initial context is
+            # not valid
+            if self._controller.is_initial_context_valid():
+                event.ignore()
+                return
+
+        if self.is_strict():
+            self._confirm_selection()
+        self._visible = False
+        super(ContextDialog, self).closeEvent(event)
+
+    def set_strict(self, enabled):
+        """Change strictness of dialog."""
+
+        self._controller.set_strict(enabled)
+
+    def refresh(self):
+        """Refresh all widgets one by one.
+
+        The controller reset triggers model refreshes which continue
+        asynchronously, so widgets are updated as the models finish
+        refreshing.
+        """
+
+        self._controller.reset()
+
+    def get_context(self):
+        """Result of dialog."""
+        return self._controller.get_selected_context()
+
+    def set_context(self, project_name=None, asset_name=None):
+        """Set context which will be used and locked in dialog."""
+
+        self._controller.set_initial_context(project_name, asset_name)
+
+    def _on_projects_refresh(self):
+        initial_context = self._controller.get_initial_context()
+        self._controller.set_expected_selection(
+            initial_context["project_name"],
+            initial_context["folder_id"]
+        )
+
+    def _on_overlay_confirm(self):
+        self.close()
+
+    def _on_ok_click(self):
+        # Store values to output
+        self._confirm_selection()
+        # Close dialog
+        self.accept()
+
+    def _confirm_selection(self):
+        self._controller.confirm_selection()
+
+    def _on_project_selection_change(self, event):
+        self._on_selection_change(
+            event["project_name"],
+        )
+
+    def _on_folder_selection_change(self, event):
+        self._on_selection_change(
+            event["project_name"],
+            event["folder_id"],
+        )
+
+    def _on_task_selection_change(self, event):
+        self._on_selection_change(
+            event["project_name"],
+            event["folder_id"],
+            event["task_name"],
+        )
+
+    def _on_selection_change(
+        self, project_name, folder_id=None, task_name=None
+    ):
+        self._validate_strict(project_name, folder_id, task_name)
+
+    def _on_init_context_change(self, event):
+        self._set_init_context(event.data)
+        if self._visible:
+            self._controller.set_expected_selection(
+                event["project_name"], event["folder_id"]
+            )
+
+    def _set_init_context(self, init_context):
+        project_name = init_context["project_name"]
+        if not init_context["valid"]:
+            self._overlay_widget.setVisible(True)
+            self._overlay_widget.set_context(
+                project_name,
+                init_context["folder_label"],
+                init_context["project_found"],
+                init_context["folder_found"],
+                init_context["tasks_found"]
+            )
+            return
+
+        self._overlay_widget.setVisible(False)
+        if project_name:
+            self._project_combobox.setEnabled(False)
+            if init_context["folder_id"]:
+                self._folders_widget.setEnabled(False)
+        else:
+            self._project_combobox.setEnabled(True)
+            self._folders_widget.setEnabled(True)
+
+    def 
_on_strict_changed(self, event): + self._apply_strict_changes(event["strict"]) + + def _on_controller_reset(self): + self._apply_strict_changes(self.is_strict()) + self._project_combobox.refresh() + + def _on_controller_refresh(self): + self._project_combobox.refresh() + + def _apply_strict_changes(self, is_strict): + if not is_strict: + if not self._ok_btn.isEnabled(): + self._ok_btn.setEnabled(True) + return + context = self._controller.get_selected_context() + self._validate_strict( + context["project_name"], + context["folder_id"], + context["task_name"] + ) + + def _validate_strict(self, project_name, folder_id, task_name): + if not self.is_strict(): + return + + enabled = True + if not project_name or not folder_id or not task_name: + enabled = False + self._ok_btn.setEnabled(enabled) + + +def main( + path_to_store, + project_name=None, + asset_name=None, + strict=True +): + # Run Qt application + app = get_ayon_qt_app() + controller = ContextDialogController() + controller.set_strict(strict) + controller.set_initial_context(project_name, asset_name) + controller.set_output_json_path(path_to_store) + window = ContextDialog(controller=controller) + window.show() + app.exec_() + controller.store_output() diff --git a/openpype/tools/creator/__init__.py b/client/ayon_core/tools/creator/__init__.py similarity index 100% rename from openpype/tools/creator/__init__.py rename to client/ayon_core/tools/creator/__init__.py diff --git a/openpype/tools/creator/constants.py b/client/ayon_core/tools/creator/constants.py similarity index 100% rename from openpype/tools/creator/constants.py rename to client/ayon_core/tools/creator/constants.py diff --git a/client/ayon_core/tools/creator/model.py b/client/ayon_core/tools/creator/model.py new file mode 100644 index 0000000000..3650993b9e --- /dev/null +++ b/client/ayon_core/tools/creator/model.py @@ -0,0 +1,61 @@ +import uuid +from qtpy import QtGui, QtCore + +from ayon_core.pipeline import discover_legacy_creator_plugins + +from . 
constants import ( + FAMILY_ROLE, + ITEM_ID_ROLE +) + + +class CreatorsModel(QtGui.QStandardItemModel): + def __init__(self, *args, **kwargs): + super(CreatorsModel, self).__init__(*args, **kwargs) + + self._creators_by_id = {} + + def reset(self): + # TODO change to refresh when clearing is not needed + self.clear() + self._creators_by_id = {} + + items = [] + creators = discover_legacy_creator_plugins() + for creator in creators: + if not creator.enabled: + continue + item_id = str(uuid.uuid4()) + self._creators_by_id[item_id] = creator + + label = creator.label or creator.family + item = QtGui.QStandardItem(label) + item.setEditable(False) + item.setData(item_id, ITEM_ID_ROLE) + item.setData(creator.family, FAMILY_ROLE) + items.append(item) + + if not items: + item = QtGui.QStandardItem("No registered families") + item.setEnabled(False) + item.setData(False, QtCore.Qt.ItemIsEnabled) + items.append(item) + + items.sort(key=lambda item: item.text()) + self.invisibleRootItem().appendRows(items) + + def get_creator_by_id(self, item_id): + return self._creators_by_id.get(item_id) + + def get_indexes_by_family(self, family): + indexes = [] + for row in range(self.rowCount()): + index = self.index(row, 0) + item_id = index.data(ITEM_ID_ROLE) + creator_plugin = self._creators_by_id.get(item_id) + if creator_plugin and ( + creator_plugin.label.lower() == family.lower() + or creator_plugin.family.lower() == family.lower() + ): + indexes.append(index) + return indexes diff --git a/client/ayon_core/tools/creator/widgets.py b/client/ayon_core/tools/creator/widgets.py new file mode 100644 index 0000000000..05b5469151 --- /dev/null +++ b/client/ayon_core/tools/creator/widgets.py @@ -0,0 +1,272 @@ +import re +import inspect + +from qtpy import QtWidgets, QtCore, QtGui + +import qtawesome + +from ayon_core.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from ayon_core.tools.utils import ErrorMessageBox + +if hasattr(QtGui, "QRegularExpressionValidator"): + RegularExpressionValidatorClass = QtGui.QRegularExpressionValidator + RegularExpressionClass = QtCore.QRegularExpression +else: + RegularExpressionValidatorClass = QtGui.QRegExpValidator + RegularExpressionClass = QtCore.QRegExp + + +class CreateErrorMessageBox(ErrorMessageBox): + def __init__( + self, + family, + subset_name, + asset_name, + exc_msg, + formatted_traceback, + parent + ): + self._family = family + self._subset_name = subset_name + self._asset_name = asset_name + self._exc_msg = exc_msg + self._formatted_traceback = formatted_traceback + super(CreateErrorMessageBox, self).__init__("Creation failed", parent) + + def _create_top_widget(self, parent_widget): + label_widget = QtWidgets.QLabel(parent_widget) + label_widget.setText( + "Failed to create" + ) + return label_widget + + def _get_report_data(self): + report_message = ( + "Failed to create Product: \"{subset}\"" + " Type: \"{family}\"" + " in Asset: \"{asset}\"" + "\n\nError: {message}" + ).format( + subset=self._subset_name, + family=self._family, + asset=self._asset_name, + message=self._exc_msg + ) + if self._formatted_traceback: + report_message += "\n\n{}".format(self._formatted_traceback) + return [report_message] + + def _create_content(self, content_layout): + item_name_template = ( + "{}: {{}}
" + "{}: {{}}
" + "{}: {{}}
" + ).format( + "Product type", + "Product name", + "Folder" + ) + exc_msg_template = "{}" + + line = self._create_line() + content_layout.addWidget(line) + + item_name_widget = QtWidgets.QLabel(self) + item_name_widget.setText( + item_name_template.format( + self._family, self._subset_name, self._asset_name + ) + ) + content_layout.addWidget(item_name_widget) + + message_label_widget = QtWidgets.QLabel(self) + message_label_widget.setText( + exc_msg_template.format(self.convert_text_for_html(self._exc_msg)) + ) + content_layout.addWidget(message_label_widget) + + if self._formatted_traceback: + line_widget = self._create_line() + tb_widget = self._create_traceback_widget( + self._formatted_traceback + ) + content_layout.addWidget(line_widget) + content_layout.addWidget(tb_widget) + + +class SubsetNameValidator(RegularExpressionValidatorClass): + invalid = QtCore.Signal(set) + pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) + + def __init__(self): + reg = RegularExpressionClass(self.pattern) + super(SubsetNameValidator, self).__init__(reg) + + def validate(self, text, pos): + results = super(SubsetNameValidator, self).validate(text, pos) + if results[0] == self.Invalid: + self.invalid.emit(self.invalid_chars(text)) + return results + + def invalid_chars(self, text): + invalid = set() + re_valid = re.compile(self.pattern) + for char in text: + if char == " ": + invalid.add("' '") + continue + if not re_valid.match(char): + invalid.add(char) + return invalid + + +class VariantLineEdit(QtWidgets.QLineEdit): + report = QtCore.Signal(str) + colors = { + "empty": (QtGui.QColor("#78879b"), ""), + "exists": (QtGui.QColor("#4E76BB"), "border-color: #4E76BB;"), + "new": (QtGui.QColor("#7AAB8F"), "border-color: #7AAB8F;"), + } + + def __init__(self, *args, **kwargs): + super(VariantLineEdit, self).__init__(*args, **kwargs) + + validator = SubsetNameValidator() + self.setValidator(validator) + self.setToolTip("Only alphanumeric characters (A-Z a-z 0-9), " + "'_' and '.' are allowed.") + + self._status_color = self.colors["empty"][0] + + anim = QtCore.QPropertyAnimation() + anim.setTargetObject(self) + anim.setPropertyName(b"status_color") + anim.setEasingCurve(QtCore.QEasingCurve.InCubic) + anim.setDuration(300) + anim.setStartValue(QtGui.QColor("#C84747")) # `Invalid` status color + self.animation = anim + + validator.invalid.connect(self.on_invalid) + + def on_invalid(self, invalid): + message = "Invalid character: %s" % ", ".join(invalid) + self.report.emit(message) + self.animation.stop() + self.animation.start() + + def as_empty(self): + self._set_border("empty") + self.report.emit("Empty product name ..") + + def as_exists(self): + self._set_border("exists") + self.report.emit("Existing product, appending next version.") + + def as_new(self): + self._set_border("new") + self.report.emit("New product, creating first version.") + + def _set_border(self, status): + qcolor, style = self.colors[status] + self.animation.setEndValue(qcolor) + self.setStyleSheet(style) + + def _get_status_color(self): + return self._status_color + + def _set_status_color(self, color): + self._status_color = color + self.setStyleSheet("border-color: %s;" % color.name()) + + status_color = QtCore.Property( + QtGui.QColor, _get_status_color, _set_status_color + ) + + +class FamilyDescriptionWidget(QtWidgets.QWidget): + """A family description widget. + + Shows a family icon, family name and a help description. + Used in creator header. 
+ + _________________ + | ____ | + | |icon| FAMILY | + | |____| help | + |_________________| + + """ + + SIZE = 35 + + def __init__(self, parent=None): + super(FamilyDescriptionWidget, self).__init__(parent=parent) + + icon_label = QtWidgets.QLabel(self) + icon_label.setSizePolicy( + QtWidgets.QSizePolicy.Maximum, + QtWidgets.QSizePolicy.Maximum + ) + + # Add 4 pixel padding to avoid icon being cut off + icon_label.setFixedWidth(self.SIZE + 4) + icon_label.setFixedHeight(self.SIZE + 4) + + label_layout = QtWidgets.QVBoxLayout() + label_layout.setSpacing(0) + + family_label = QtWidgets.QLabel(self) + family_label.setObjectName("CreatorFamilyLabel") + family_label.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft) + + help_label = QtWidgets.QLabel(self) + help_label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft) + + label_layout.addWidget(family_label) + label_layout.addWidget(help_label) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + layout.addWidget(icon_label) + layout.addLayout(label_layout) + + self._help_label = help_label + self._family_label = family_label + self._icon_label = icon_label + + def set_item(self, creator_plugin): + """Update elements to display information of a family item. + + Args: + item (dict): A family item as registered with name, help and icon + + Returns: + None + + """ + if not creator_plugin: + self._icon_label.setPixmap(None) + self._family_label.setText("") + self._help_label.setText("") + return + + # Support a font-awesome icon + icon_name = getattr(creator_plugin, "icon", None) or "info-circle" + try: + icon = qtawesome.icon("fa.{}".format(icon_name), color="white") + pixmap = icon.pixmap(self.SIZE, self.SIZE) + except Exception: + print("BUG: Couldn't load icon \"fa.{}\"".format(str(icon_name))) + # Create transparent pixmap + pixmap = QtGui.QPixmap() + pixmap.fill(QtCore.Qt.transparent) + pixmap = pixmap.scaled(self.SIZE, self.SIZE) + + # Parse a clean line from the Creator's docstring + docstring = inspect.getdoc(creator_plugin) + creator_help = docstring.splitlines()[0] if docstring else "" + + self._icon_label.setPixmap(pixmap) + self._family_label.setText(creator_plugin.family) + self._help_label.setText(creator_help) diff --git a/client/ayon_core/tools/creator/window.py b/client/ayon_core/tools/creator/window.py new file mode 100644 index 0000000000..676e1c3959 --- /dev/null +++ b/client/ayon_core/tools/creator/window.py @@ -0,0 +1,501 @@ +import sys +import traceback +import re + +from qtpy import QtWidgets, QtCore + +from ayon_core.client import get_asset_by_name, get_subsets +from ayon_core import style +from ayon_core.settings import get_current_project_settings +from ayon_core.tools.utils.lib import qt_app_context +from ayon_core.pipeline import ( + get_current_project_name, + get_current_asset_name, + get_current_task_name, +) +from ayon_core.pipeline.create import ( + SUBSET_NAME_ALLOWED_SYMBOLS, + legacy_create, + CreatorError, +) + +from .model import CreatorsModel +from .widgets import ( + CreateErrorMessageBox, + VariantLineEdit, + FamilyDescriptionWidget +) +from .constants import ( + ITEM_ID_ROLE, + SEPARATOR, + SEPARATORS +) + +module = sys.modules[__name__] +module.window = None + + +class CreatorWindow(QtWidgets.QDialog): + def __init__(self, parent=None): + super(CreatorWindow, self).__init__(parent) + self.setWindowTitle("Instance Creator") + self.setFocusPolicy(QtCore.Qt.StrongFocus) + if not parent: + self.setWindowFlags( + self.windowFlags() | 
QtCore.Qt.WindowStaysOnTopHint + ) + + creator_info = FamilyDescriptionWidget(self) + + creators_model = CreatorsModel() + + creators_proxy = QtCore.QSortFilterProxyModel() + creators_proxy.setSourceModel(creators_model) + + creators_view = QtWidgets.QListView(self) + creators_view.setObjectName("CreatorsView") + creators_view.setModel(creators_proxy) + + asset_name_input = QtWidgets.QLineEdit(self) + variant_input = VariantLineEdit(self) + subset_name_input = QtWidgets.QLineEdit(self) + subset_name_input.setEnabled(False) + + subset_button = QtWidgets.QPushButton() + subset_button.setFixedWidth(18) + subset_menu = QtWidgets.QMenu(subset_button) + subset_button.setMenu(subset_menu) + + name_layout = QtWidgets.QHBoxLayout() + name_layout.addWidget(variant_input) + name_layout.addWidget(subset_button) + name_layout.setSpacing(3) + name_layout.setContentsMargins(0, 0, 0, 0) + + body_layout = QtWidgets.QVBoxLayout() + body_layout.setContentsMargins(0, 0, 0, 0) + + body_layout.addWidget(creator_info, 0) + body_layout.addWidget(QtWidgets.QLabel("Family", self), 0) + body_layout.addWidget(creators_view, 1) + body_layout.addWidget(QtWidgets.QLabel("Asset", self), 0) + body_layout.addWidget(asset_name_input, 0) + body_layout.addWidget(QtWidgets.QLabel("Subset", self), 0) + body_layout.addLayout(name_layout, 0) + body_layout.addWidget(subset_name_input, 0) + + useselection_chk = QtWidgets.QCheckBox("Use selection", self) + useselection_chk.setCheckState(QtCore.Qt.Checked) + + create_btn = QtWidgets.QPushButton("Create", self) + # Need to store error_msg to prevent garbage collection + msg_label = QtWidgets.QLabel(self) + + footer_layout = QtWidgets.QVBoxLayout() + footer_layout.addWidget(create_btn, 0) + footer_layout.addWidget(msg_label, 0) + footer_layout.setContentsMargins(0, 0, 0, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.addLayout(body_layout, 1) + layout.addWidget(useselection_chk, 0, QtCore.Qt.AlignLeft) + layout.addLayout(footer_layout, 0) + + msg_timer = QtCore.QTimer() + msg_timer.setSingleShot(True) + msg_timer.setInterval(5000) + + validation_timer = QtCore.QTimer() + validation_timer.setSingleShot(True) + validation_timer.setInterval(300) + + msg_timer.timeout.connect(self._on_msg_timer) + validation_timer.timeout.connect(self._on_validation_timer) + + create_btn.clicked.connect(self._on_create) + variant_input.returnPressed.connect(self._on_create) + variant_input.textChanged.connect(self._on_data_changed) + variant_input.report.connect(self.echo) + asset_name_input.textChanged.connect(self._on_data_changed) + creators_view.selectionModel().currentChanged.connect( + self._on_selection_changed + ) + + # Store valid states and + self._is_valid = False + create_btn.setEnabled(self._is_valid) + + self._first_show = True + + # Message dialog when something goes wrong during creation + self._message_dialog = None + + self._creator_info = creator_info + self._create_btn = create_btn + self._useselection_chk = useselection_chk + self._variant_input = variant_input + self._subset_name_input = subset_name_input + self._asset_name_input = asset_name_input + + self._creators_model = creators_model + self._creators_proxy = creators_proxy + self._creators_view = creators_view + + self._subset_btn = subset_button + self._subset_menu = subset_menu + + self._msg_label = msg_label + + self._validation_timer = validation_timer + self._msg_timer = msg_timer + + # Defaults + self.resize(300, 500) + variant_input.setFocus() + + def _set_valid_state(self, valid): + if self._is_valid == valid: + 
return + self._is_valid = valid + self._create_btn.setEnabled(valid) + + def _build_menu(self, default_names=None): + """Create optional predefined subset names + + Args: + default_names(list): all predefined names + + Returns: + None + """ + if not default_names: + default_names = [] + + menu = self._subset_menu + button = self._subset_btn + + # Get and destroy the action group + group = button.findChild(QtWidgets.QActionGroup) + if group: + group.deleteLater() + + state = any(default_names) + button.setEnabled(state) + if state is False: + return + + # Build new action group + group = QtWidgets.QActionGroup(button) + for name in default_names: + if name in SEPARATORS: + menu.addSeparator() + continue + action = group.addAction(name) + menu.addAction(action) + + group.triggered.connect(self._on_action_clicked) + + def _on_action_clicked(self, action): + self._variant_input.setText(action.text()) + + def _on_data_changed(self, *args): + # Set invalid state until it's reconfirmed to be valid by the + # scheduled callback so any form of creation is held back until + # valid again + self._set_valid_state(False) + + self._validation_timer.start() + + def _on_validation_timer(self): + index = self._creators_view.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + creator_plugin = self._creators_model.get_creator_by_id(item_id) + user_input_text = self._variant_input.text() + asset_name = self._asset_name_input.text() + + # Early exit if no asset name + if not asset_name: + self._build_menu() + self.echo("Asset name is required ..") + self._set_valid_state(False) + return + + project_name = get_current_project_name() + asset_doc = None + if creator_plugin: + # Get the asset from the database which match with the name + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["_id"] + ) + + # Get plugin + if not asset_doc or not creator_plugin: + subset_name = user_input_text + self._build_menu() + + if not creator_plugin: + self.echo("No registered families ..") + else: + self.echo("Asset '%s' not found .." % asset_name) + self._set_valid_state(False) + return + + asset_id = asset_doc["_id"] + task_name = get_current_task_name() + + # Calculate subset name with Creator plugin + subset_name = creator_plugin.get_subset_name( + user_input_text, task_name, asset_id, project_name + ) + # Force replacement of prohibited symbols + # QUESTION should Creator care about this and here should be only + # validated with schema regex? 
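+        # Illustrative example (assuming the default allowed symbols):
+        #     'fx smoke! {layer}' -> 'fxsmoke{layer}'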
+ + # Allow curly brackets in subset name for dynamic keys + curly_left = "__cbl__" + curly_right = "__cbr__" + tmp_subset_name = ( + subset_name + .replace("{", curly_left) + .replace("}", curly_right) + ) + # Replace prohibited symbols + tmp_subset_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + tmp_subset_name + ) + subset_name = ( + tmp_subset_name + .replace(curly_left, "{") + .replace(curly_right, "}") + ) + self._subset_name_input.setText(subset_name) + + # Get all subsets of the current asset + subset_docs = get_subsets( + project_name, asset_ids=[asset_id], fields=["name"] + ) + existing_subset_names = { + subset_doc["name"] + for subset_doc in subset_docs + } + existing_subset_names_low = set( + _name.lower() + for _name in existing_subset_names + ) + + # Defaults to dropdown + defaults = [] + # Check if Creator plugin has set defaults + if ( + creator_plugin.defaults + and isinstance(creator_plugin.defaults, (list, tuple, set)) + ): + defaults = list(creator_plugin.defaults) + + # Replace + compare_regex = re.compile(re.sub( + user_input_text, "(.+)", subset_name, flags=re.IGNORECASE + )) + subset_hints = set() + if user_input_text: + for _name in existing_subset_names: + _result = compare_regex.search(_name) + if _result: + subset_hints |= set(_result.groups()) + + if subset_hints: + if defaults: + defaults.append(SEPARATOR) + defaults.extend(subset_hints) + self._build_menu(defaults) + + # Indicate subset existence + if not user_input_text: + self._variant_input.as_empty() + elif subset_name.lower() in existing_subset_names_low: + # validate existence of subset name with lowered text + # - "renderMain" vs. "rensermain" mean same path item for + # windows + self._variant_input.as_exists() + else: + self._variant_input.as_new() + + # Update the valid state + valid = subset_name.strip() != "" + + self._set_valid_state(valid) + + def _on_selection_changed(self, old_idx, new_idx): + index = self._creators_view.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + + creator_plugin = self._creators_model.get_creator_by_id(item_id) + + self._creator_info.set_item(creator_plugin) + + if creator_plugin is None: + return + + default = None + if hasattr(creator_plugin, "get_default_variant"): + default = creator_plugin.get_default_variant() + + if not default: + if ( + creator_plugin.defaults + and isinstance(creator_plugin.defaults, list) + ): + default = creator_plugin.defaults[0] + else: + default = "Default" + + self._variant_input.setText(default) + + self._on_data_changed() + + def keyPressEvent(self, event): + """Custom keyPressEvent. + + Override keyPressEvent to do nothing so that Maya's panels won't + take focus when pressing "SHIFT" whilst mouse is over viewport or + outliner. This way users don't accidentally perform Maya commands + whilst trying to name an instance. 
+ + """ + pass + + def showEvent(self, event): + super(CreatorWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) + + def refresh(self): + self._asset_name_input.setText(get_current_asset_name()) + + self._creators_model.reset() + + pype_project_setting = ( + get_current_project_settings() + ["global"] + ["tools"] + ["creator"] + ["families_smart_select"] + ) + current_index = None + family = None + task_name = get_current_task_name() or None + lowered_task_name = task_name.lower() + if task_name: + for _family, _task_names in pype_project_setting.items(): + _low_task_names = {name.lower() for name in _task_names} + for _task_name in _low_task_names: + if _task_name in lowered_task_name: + family = _family + break + if family: + break + + if family: + indexes = self._creators_model.get_indexes_by_family(family) + if indexes: + index = indexes[0] + current_index = self._creators_proxy.mapFromSource(index) + + if current_index is None or not current_index.isValid(): + current_index = self._creators_proxy.index(0, 0) + + self._creators_view.setCurrentIndex(current_index) + + def _on_create(self): + # Do not allow creation in an invalid state + if not self._is_valid: + return + + index = self._creators_view.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + creator_plugin = self._creators_model.get_creator_by_id(item_id) + if creator_plugin is None: + return + + subset_name = self._subset_name_input.text() + asset_name = self._asset_name_input.text() + use_selection = self._useselection_chk.isChecked() + + variant = self._variant_input.text() + + error_info = None + try: + legacy_create( + creator_plugin, + subset_name, + asset_name, + options={"useSelection": use_selection}, + data={"variant": variant} + ) + + except CreatorError as exc: + self.echo("Creator error: {}".format(str(exc))) + error_info = (str(exc), None) + + except Exception as exc: + self.echo("Program error: %s" % str(exc)) + + exc_type, exc_value, exc_traceback = sys.exc_info() + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + error_info = (str(exc), formatted_traceback) + + if error_info: + box = CreateErrorMessageBox( + creator_plugin.family, + subset_name, + asset_name, + *error_info, + parent=self + ) + box.show() + # Store dialog so is not garbage collected before is shown + self._message_dialog = box + + else: + self.echo("Created %s .." % subset_name) + + def _on_msg_timer(self): + self._msg_label.setText("") + + def echo(self, message): + self._msg_label.setText(str(message)) + self._msg_timer.start() + + +def show(parent=None): + """Display asset creator GUI + + Arguments: + debug (bool, optional): Run loader in debug-mode, + defaults to False + parent (QtCore.QObject, optional): When provided parent the interface + to this QObject. + + """ + + try: + module.window.close() + del(module.window) + except (AttributeError, RuntimeError): + pass + + with qt_app_context(): + window = CreatorWindow(parent) + window.refresh() + window.show() + + module.window = window + + # Pull window to the front. 
+ module.window.raise_() + module.window.activateWindow() diff --git a/openpype/tools/experimental_tools/__init__.py b/client/ayon_core/tools/experimental_tools/__init__.py similarity index 100% rename from openpype/tools/experimental_tools/__init__.py rename to client/ayon_core/tools/experimental_tools/__init__.py diff --git a/client/ayon_core/tools/experimental_tools/dialog.py b/client/ayon_core/tools/experimental_tools/dialog.py new file mode 100644 index 0000000000..39789c859e --- /dev/null +++ b/client/ayon_core/tools/experimental_tools/dialog.py @@ -0,0 +1,216 @@ +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core.style import ( + load_stylesheet, + app_icon_path +) + +from .tools_def import ExperimentalTools + + +class ToolButton(QtWidgets.QPushButton): + triggered = QtCore.Signal(str) + + def __init__(self, identifier, *args, **kwargs): + super(ToolButton, self).__init__(*args, **kwargs) + self._identifier = identifier + + self.clicked.connect(self._on_click) + + def _on_click(self): + self.triggered.emit(self._identifier) + + +class ExperimentalToolsDialog(QtWidgets.QDialog): + refresh_interval = 3000 + + def __init__(self, parent=None): + super(ExperimentalToolsDialog, self).__init__(parent) + self.setWindowTitle("AYON Experimental tools") + icon = QtGui.QIcon(app_icon_path()) + self.setWindowIcon(icon) + self.setStyleSheet(load_stylesheet()) + + # Widgets for cases there are not available experimental tools + empty_widget = QtWidgets.QWidget(self) + + empty_label = QtWidgets.QLabel( + "There are no experimental tools available...", empty_widget + ) + + empty_btns_layout = QtWidgets.QHBoxLayout() + ok_btn = QtWidgets.QPushButton("OK", empty_widget) + + empty_btns_layout.setContentsMargins(0, 0, 0, 0) + empty_btns_layout.addStretch(1) + empty_btns_layout.addWidget(ok_btn, 0) + + empty_layout = QtWidgets.QVBoxLayout(empty_widget) + empty_layout.setContentsMargins(0, 0, 0, 0) + empty_layout.addWidget(empty_label) + empty_layout.addStretch(1) + empty_layout.addLayout(empty_btns_layout) + + # Content of Experimental tools + + # Layout where buttons are added + content_layout = QtWidgets.QVBoxLayout() + content_layout.setContentsMargins(0, 0, 0, 0) + + # Separator line + separator_widget = QtWidgets.QWidget(self) + separator_widget.setObjectName("Separator") + separator_widget.setMinimumHeight(2) + separator_widget.setMaximumHeight(2) + + # Label describing how to turn off tools + tool_btns_widget = QtWidgets.QWidget(self) + tool_btns_label = QtWidgets.QLabel( + ( + "You can enable these features in" + "
AYON tray -> Settings -> Experimental tools" + ), + tool_btns_widget + ) + tool_btns_label.setAlignment(QtCore.Qt.AlignCenter) + + tool_btns_layout = QtWidgets.QVBoxLayout(tool_btns_widget) + tool_btns_layout.setContentsMargins(0, 0, 0, 0) + tool_btns_layout.addLayout(content_layout) + tool_btns_layout.addStretch(1) + tool_btns_layout.addWidget(separator_widget, 0) + tool_btns_layout.addWidget(tool_btns_label, 0) + + experimental_tools = ExperimentalTools( + parent_widget=parent, refresh=False + ) + + # Main layout + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(empty_widget, 1) + layout.addWidget(tool_btns_widget, 1) + + refresh_timer = QtCore.QTimer() + refresh_timer.setInterval(self.refresh_interval) + refresh_timer.timeout.connect(self._on_refresh_timeout) + + ok_btn.clicked.connect(self._on_ok_click) + + self._empty_widget = empty_widget + self._tool_btns_widget = tool_btns_widget + self._content_layout = content_layout + + self._experimental_tools = experimental_tools + self._buttons_by_tool_identifier = {} + + self._refresh_timer = refresh_timer + + # Is dialog first shown + self._first_show = True + # Trigger refresh when window gets activity + self._refresh_on_active = True + # Is window active + self._window_is_active = False + + def refresh(self): + self._experimental_tools.refresh_availability() + + buttons_to_remove = set(self._buttons_by_tool_identifier.keys()) + tools = self._experimental_tools.get_tools_for_host() + for idx, tool in enumerate(tools): + identifier = tool.identifier + if identifier in buttons_to_remove: + buttons_to_remove.remove(identifier) + is_new = False + button = self._buttons_by_tool_identifier[identifier] + else: + is_new = True + button = ToolButton(identifier, self._tool_btns_widget) + button.triggered.connect(self._on_btn_trigger) + self._buttons_by_tool_identifier[identifier] = button + self._content_layout.insertWidget(idx, button) + + if button.text() != tool.label: + button.setText(tool.label) + + if tool.enabled: + button.setToolTip(tool.tooltip) + + elif is_new or button.isEnabled(): + button.setToolTip(( + "You can enable this tool in local settings." 
+ "\n\nAYON Tray > Settings > Experimental Tools" + )) + + if tool.enabled != button.isEnabled(): + button.setEnabled(tool.enabled) + + for identifier in buttons_to_remove: + button = self._buttons_by_tool_identifier.pop(identifier) + button.setVisible(False) + idx = self._content_layout.indexOf(button) + self._content_layout.takeAt(idx) + button.deleteLater() + + self._set_visibility() + + def _is_content_visible(self): + return len(self._buttons_by_tool_identifier) > 0 + + def _set_visibility(self): + content_visible = self._is_content_visible() + self._tool_btns_widget.setVisible(content_visible) + self._empty_widget.setVisible(not content_visible) + + def _on_ok_click(self): + self.close() + + def _on_btn_trigger(self, identifier): + tool = self._experimental_tools.tools_by_identifier.get(identifier) + if tool is not None: + tool.execute() + + def showEvent(self, event): + super(ExperimentalToolsDialog, self).showEvent(event) + + if self._refresh_on_active: + # Start/Restart timer + self._refresh_timer.start() + # Refresh + self.refresh() + + elif not self._refresh_timer.isActive(): + self._refresh_timer.start() + + if self._first_show: + self._first_show = False + # Set stylesheet + self.setStyleSheet(load_stylesheet()) + # Resize dialog if there is not content + if not self._is_content_visible(): + size = self.size() + size.setWidth(size.width() + size.width() / 3) + self.resize(size) + + def changeEvent(self, event): + if event.type() == QtCore.QEvent.ActivationChange: + self._window_is_active = self.isActiveWindow() + if self._window_is_active and self._refresh_on_active: + self._refresh_timer.start() + self.refresh() + + super(ExperimentalToolsDialog, self).changeEvent(event) + + def _on_refresh_timeout(self): + # Stop timer if window is not visible + if not self.isVisible(): + self._refresh_on_active = True + self._refresh_timer.stop() + + # Skip refreshing if window is not active + elif not self._window_is_active: + self._refresh_on_active = True + + # Window is active and visible so we're refreshing buttons + else: + self.refresh() diff --git a/openpype/tools/experimental_tools/tools_def.py b/client/ayon_core/tools/experimental_tools/tools_def.py similarity index 97% rename from openpype/tools/experimental_tools/tools_def.py rename to client/ayon_core/tools/experimental_tools/tools_def.py index 5a5eec09ed..568c7032d0 100644 --- a/openpype/tools/experimental_tools/tools_def.py +++ b/client/ayon_core/tools/experimental_tools/tools_def.py @@ -1,5 +1,5 @@ import os -from openpype.settings import get_local_settings +from ayon_core.settings import get_local_settings # Constant key under which local settings are stored LOCAL_EXPERIMENTAL_KEY = "experimental_tools" @@ -162,7 +162,7 @@ def refresh_availability(self): def _show_publisher(self): if self._publisher_tool is None: - from openpype.tools.publisher.window import PublisherWindow + from ayon_core.tools.publisher.window import PublisherWindow self._publisher_tool = PublisherWindow( parent=self._parent_widget diff --git a/openpype/tools/flickcharm.py b/client/ayon_core/tools/flickcharm.py similarity index 100% rename from openpype/tools/flickcharm.py rename to client/ayon_core/tools/flickcharm.py diff --git a/openpype/tools/ayon_launcher/abstract.py b/client/ayon_core/tools/launcher/abstract.py similarity index 100% rename from openpype/tools/ayon_launcher/abstract.py rename to client/ayon_core/tools/launcher/abstract.py diff --git a/client/ayon_core/tools/launcher/control.py b/client/ayon_core/tools/launcher/control.py new file 
mode 100644 index 0000000000..8780b211f1 --- /dev/null +++ b/client/ayon_core/tools/launcher/control.py @@ -0,0 +1,161 @@ +from ayon_core.lib import Logger +from ayon_core.lib.events import QueuedEventSystem +from ayon_core.settings import get_project_settings +from ayon_core.tools.ayon_utils.models import ProjectsModel, HierarchyModel + +from .abstract import AbstractLauncherFrontEnd, AbstractLauncherBackend +from .models import LauncherSelectionModel, ActionsModel + + +class BaseLauncherController( + AbstractLauncherFrontEnd, AbstractLauncherBackend +): + def __init__(self): + self._project_settings = {} + self._event_system = None + self._log = None + + self._selection_model = LauncherSelectionModel(self) + self._projects_model = ProjectsModel(self) + self._hierarchy_model = HierarchyModel(self) + self._actions_model = ActionsModel(self) + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @property + def event_system(self): + """Inner event system for workfiles tool controller. + + Is used for communication with UI. Event system is created on demand. + + Returns: + QueuedEventSystem: Event system which can trigger callbacks + for topics. + """ + + if self._event_system is None: + self._event_system = QueuedEventSystem() + return self._event_system + + # --------------------------------- + # Implementation of abstract methods + # --------------------------------- + # Events system + def emit_event(self, topic, data=None, source=None): + """Use implemented event system to trigger event.""" + + if data is None: + data = {} + self.event_system.emit(topic, data, source) + + def register_event_callback(self, topic, callback): + self.event_system.add_callback(topic, callback) + + # Entity items for UI + def get_project_items(self, sender=None): + return self._projects_model.get_project_items(sender) + + def get_folder_items(self, project_name, sender=None): + return self._hierarchy_model.get_folder_items(project_name, sender) + + def get_task_items(self, project_name, folder_id, sender=None): + return self._hierarchy_model.get_task_items( + project_name, folder_id, sender) + + # Project settings for applications actions + def get_project_settings(self, project_name): + if project_name in self._project_settings: + return self._project_settings[project_name] + settings = get_project_settings(project_name) + self._project_settings[project_name] = settings + return settings + + # Entity for backend + def get_project_entity(self, project_name): + return self._projects_model.get_project_entity(project_name) + + def get_folder_entity(self, project_name, folder_id): + return self._hierarchy_model.get_folder_entity( + project_name, folder_id) + + def get_task_entity(self, project_name, task_id): + return self._hierarchy_model.get_task_entity(project_name, task_id) + + # Selection methods + def get_selected_project_name(self): + return self._selection_model.get_selected_project_name() + + def set_selected_project(self, project_name): + self._selection_model.set_selected_project(project_name) + + def get_selected_folder_id(self): + return self._selection_model.get_selected_folder_id() + + def set_selected_folder(self, folder_id): + self._selection_model.set_selected_folder(folder_id) + + def get_selected_task_id(self): + return self._selection_model.get_selected_task_id() + + def get_selected_task_name(self): + return self._selection_model.get_selected_task_name() + + def set_selected_task(self, task_id, task_name): + 
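+        # Note: the setters, like the getters above, only delegate to the
+        # selection model; that model is expected to emit the
+        # "selection.*.changed" events the UI widgets listen to.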
self._selection_model.set_selected_task(task_id, task_name) + + def get_selected_context(self): + return { + "project_name": self.get_selected_project_name(), + "folder_id": self.get_selected_folder_id(), + "task_id": self.get_selected_task_id(), + "task_name": self.get_selected_task_name(), + } + + # Actions + def get_action_items(self, project_name, folder_id, task_id): + return self._actions_model.get_action_items( + project_name, folder_id, task_id) + + def set_application_force_not_open_workfile( + self, project_name, folder_id, task_id, action_ids, enabled + ): + self._actions_model.set_application_force_not_open_workfile( + project_name, folder_id, task_id, action_ids, enabled + ) + + def trigger_action(self, project_name, folder_id, task_id, identifier): + self._actions_model.trigger_action( + project_name, folder_id, task_id, identifier) + + # General methods + def refresh(self): + self._emit_event("controller.refresh.started") + + self._project_settings = {} + + self._projects_model.reset() + self._hierarchy_model.reset() + + self._actions_model.refresh() + self._projects_model.refresh() + + self._emit_event("controller.refresh.finished") + + def refresh_actions(self): + self._emit_event("controller.refresh.actions.started") + + # Refresh project settings (used for actions discovery) + self._project_settings = {} + # Refresh projects - they define applications + self._projects_model.reset() + # Refresh actions + self._actions_model.refresh() + + self._emit_event("controller.refresh.actions.finished") + + def _emit_event(self, topic, data=None): + self.emit_event(topic, data, "controller") diff --git a/openpype/tools/ayon_launcher/models/__init__.py b/client/ayon_core/tools/launcher/models/__init__.py similarity index 100% rename from openpype/tools/ayon_launcher/models/__init__.py rename to client/ayon_core/tools/launcher/models/__init__.py diff --git a/client/ayon_core/tools/launcher/models/actions.py b/client/ayon_core/tools/launcher/models/actions.py new file mode 100644 index 0000000000..37024b5810 --- /dev/null +++ b/client/ayon_core/tools/launcher/models/actions.py @@ -0,0 +1,509 @@ +import os + +from ayon_core import resources +from ayon_core.lib import Logger, AYONSettingsRegistry +from ayon_core.pipeline.actions import ( + discover_launcher_actions, + LauncherAction, +) + + +# class Action: +# def __init__(self, label, icon=None, identifier=None): +# self._label = label +# self._icon = icon +# self._callbacks = [] +# self._identifier = identifier or uuid.uuid4().hex +# self._checked = True +# self._checkable = False +# +# def set_checked(self, checked): +# self._checked = checked +# +# def set_checkable(self, checkable): +# self._checkable = checkable +# +# def set_label(self, label): +# self._label = label +# +# def add_callback(self, callback): +# self._callbacks = callback +# +# +# class Menu: +# def __init__(self, label, icon=None): +# self.label = label +# self.icon = icon +# self._actions = [] +# +# def add_action(self, action): +# self._actions.append(action) + + +class ApplicationAction(LauncherAction): + """Action to launch an application. + + Application action based on 'ApplicationManager' system. + + Handling of applications in launcher is not ideal and should be completely + redone from scratch. This is just a temporary solution to keep backwards + compatibility with AYON launcher. + + Todos: + Move handling of errors to frontend. 
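+
+    Example:
+        Illustrative only - the session keys a compatible context must
+        provide (values are hypothetical)::
+
+            {"AVALON_PROJECT": "demo", "AVALON_ASSET": "sh010",
+             "AVALON_TASK": "comp"}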
+ """ + + # Application object + application = None + # Action attributes + name = None + label = None + label_variant = None + group = None + icon = None + color = None + order = 0 + data = {} + project_settings = {} + project_entities = {} + + _log = None + required_session_keys = ( + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK" + ) + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + def is_compatible(self, session): + for key in self.required_session_keys: + if not session.get(key): + return False + + project_name = session["AVALON_PROJECT"] + project_entity = self.project_entities[project_name] + apps = project_entity["attrib"].get("applications") + if not apps or self.application.full_name not in apps: + return False + + project_settings = self.project_settings[project_name] + only_available = project_settings["applications"]["only_available"] + if only_available and not self.application.find_executable(): + return False + return True + + def _show_message_box(self, title, message, details=None): + from qtpy import QtWidgets, QtGui + from ayon_core import style + + dialog = QtWidgets.QMessageBox() + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + dialog.setWindowIcon(icon) + dialog.setStyleSheet(style.load_stylesheet()) + dialog.setWindowTitle(title) + dialog.setText(message) + if details: + dialog.setDetailedText(details) + dialog.exec_() + + def process(self, session, **kwargs): + """Process the full Application action""" + + from ayon_core.lib import ( + ApplictionExecutableNotFound, + ApplicationLaunchFailed, + ) + + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + try: + self.application.launch( + project_name=project_name, + asset_name=asset_name, + task_name=task_name, + **self.data + ) + + except ApplictionExecutableNotFound as exc: + details = exc.details + msg = exc.msg + log_msg = str(msg) + if details: + log_msg += "\n" + details + self.log.warning(log_msg) + self._show_message_box( + "Application executable not found", msg, details + ) + + except ApplicationLaunchFailed as exc: + msg = str(exc) + self.log.warning(msg, exc_info=True) + self._show_message_box("Application launch failed", msg) + + +class ActionItem: + """Item representing single action to trigger. + + Todos: + Get rid of application specific logic. + + Args: + identifier (str): Unique identifier of action item. + label (str): Action label. + variant_label (Union[str, None]): Variant label, full label is + concatenated with space. Actions are grouped under single + action if it has same 'label' and have set 'variant_label'. + icon (dict[str, str]): Icon definition. + order (int): Action ordering. + is_application (bool): Is action application action. + force_not_open_workfile (bool): Force not open workfile. Application + related. + full_label (Optional[str]): Full label, if not set it is generated + from 'label' and 'variant_label'. 
+ """ + + def __init__( + self, + identifier, + label, + variant_label, + icon, + order, + is_application, + force_not_open_workfile, + full_label=None + ): + self.identifier = identifier + self.label = label + self.variant_label = variant_label + self.icon = icon + self.order = order + self.is_application = is_application + self.force_not_open_workfile = force_not_open_workfile + self._full_label = full_label + + def copy(self): + return self.from_data(self.to_data()) + + @property + def full_label(self): + if self._full_label is None: + if self.variant_label: + self._full_label = " ".join([self.label, self.variant_label]) + else: + self._full_label = self.label + return self._full_label + + def to_data(self): + return { + "identifier": self.identifier, + "label": self.label, + "variant_label": self.variant_label, + "icon": self.icon, + "order": self.order, + "is_application": self.is_application, + "force_not_open_workfile": self.force_not_open_workfile, + "full_label": self._full_label, + } + + @classmethod + def from_data(cls, data): + return cls(**data) + + +def get_action_icon(action): + """Get action icon info. + + Args: + action (LacunherAction): Action instance. + + Returns: + dict[str, str]: Icon info. + """ + + icon = action.icon + if not icon: + return { + "type": "awesome-font", + "name": "fa.cube", + "color": "white" + } + + if isinstance(icon, dict): + return icon + + icon_path = resources.get_resource(icon) + if not os.path.exists(icon_path): + try: + icon_path = icon.format(resources.RESOURCES_DIR) + except Exception: + pass + + if os.path.exists(icon_path): + return { + "type": "path", + "path": icon_path, + } + + return { + "type": "awesome-font", + "name": icon, + "color": action.color or "white" + } + + +class ActionsModel: + """Actions model. + + Args: + controller (AbstractLauncherBackend): Controller instance. + """ + + _not_open_workfile_reg_key = "force_not_open_workfile" + + def __init__(self, controller): + self._controller = controller + + self._log = None + + self._discovered_actions = None + self._actions = None + self._action_items = {} + + self._launcher_tool_reg = AYONSettingsRegistry("launcher_tool") + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + def refresh(self): + self._discovered_actions = None + self._actions = None + self._action_items = {} + + self._controller.emit_event("actions.refresh.started") + self._get_action_objects() + self._controller.emit_event("actions.refresh.finished") + + def get_action_items(self, project_name, folder_id, task_id): + """Get actions for project. + + Args: + project_name (Union[str, None]): Project name. + folder_id (Union[str, None]): Folder id. + task_id (Union[str, None]): Task id. + + Returns: + list[ActionItem]: List of actions. 
+ """ + + not_open_workfile_actions = self._get_no_last_workfile_for_context( + project_name, folder_id, task_id) + session = self._prepare_session(project_name, folder_id, task_id) + output = [] + action_items = self._get_action_items(project_name) + for identifier, action in self._get_action_objects().items(): + if not action.is_compatible(session): + continue + + action_item = action_items[identifier] + # Handling of 'force_not_open_workfile' for applications + if action_item.is_application: + action_item = action_item.copy() + action_item.force_not_open_workfile = ( + not_open_workfile_actions.get(identifier, False) + ) + + output.append(action_item) + return output + + def set_application_force_not_open_workfile( + self, project_name, folder_id, task_id, action_ids, enabled + ): + no_workfile_reg_data = self._get_no_last_workfile_reg_data() + project_data = no_workfile_reg_data.setdefault(project_name, {}) + folder_data = project_data.setdefault(folder_id, {}) + task_data = folder_data.setdefault(task_id, {}) + for action_id in action_ids: + task_data[action_id] = enabled + self._launcher_tool_reg.set_item( + self._not_open_workfile_reg_key, no_workfile_reg_data + ) + + def trigger_action(self, project_name, folder_id, task_id, identifier): + session = self._prepare_session(project_name, folder_id, task_id) + failed = False + error_message = None + action_label = identifier + action_items = self._get_action_items(project_name) + try: + action = self._actions[identifier] + action_item = action_items[identifier] + action_label = action_item.full_label + self._controller.emit_event( + "action.trigger.started", + { + "identifier": identifier, + "full_label": action_label, + } + ) + if isinstance(action, ApplicationAction): + per_action = self._get_no_last_workfile_for_context( + project_name, folder_id, task_id + ) + force_not_open_workfile = per_action.get(identifier, False) + if force_not_open_workfile: + action.data["start_last_workfile"] = False + else: + action.data.pop("start_last_workfile", None) + action.process(session) + except Exception as exc: + self.log.warning("Action trigger failed.", exc_info=True) + failed = True + error_message = str(exc) + + self._controller.emit_event( + "action.trigger.finished", + { + "identifier": identifier, + "failed": failed, + "error_message": error_message, + "full_label": action_label, + } + ) + + def _get_no_last_workfile_reg_data(self): + try: + no_workfile_reg_data = self._launcher_tool_reg.get_item( + self._not_open_workfile_reg_key) + except ValueError: + no_workfile_reg_data = {} + self._launcher_tool_reg.set_item( + self._not_open_workfile_reg_key, no_workfile_reg_data) + return no_workfile_reg_data + + def _get_no_last_workfile_for_context( + self, project_name, folder_id, task_id + ): + not_open_workfile_reg_data = self._get_no_last_workfile_reg_data() + return ( + not_open_workfile_reg_data + .get(project_name, {}) + .get(folder_id, {}) + .get(task_id, {}) + ) + + def _prepare_session(self, project_name, folder_id, task_id): + folder_path = None + if folder_id: + folder = self._controller.get_folder_entity( + project_name, folder_id) + if folder: + folder_path = folder["path"] + + task_name = None + if task_id: + task = self._controller.get_task_entity(project_name, task_id) + if task: + task_name = task["name"] + + return { + "AVALON_PROJECT": project_name, + "AVALON_ASSET": folder_path, + "AVALON_TASK": task_name, + } + + def _get_discovered_action_classes(self): + if self._discovered_actions is None: + self._discovered_actions = ( 
+ discover_launcher_actions() + + self._get_applications_action_classes() + ) + return self._discovered_actions + + def _get_action_objects(self): + if self._actions is None: + actions = {} + for cls in self._get_discovered_action_classes(): + obj = cls() + identifier = getattr(obj, "identifier", None) + if identifier is None: + identifier = cls.__name__ + actions[identifier] = obj + self._actions = actions + return self._actions + + def _get_action_items(self, project_name): + action_items = self._action_items.get(project_name) + if action_items is not None: + return action_items + + project_entity = None + if project_name: + project_entity = self._controller.get_project_entity(project_name) + project_settings = self._controller.get_project_settings(project_name) + + action_items = {} + for identifier, action in self._get_action_objects().items(): + is_application = isinstance(action, ApplicationAction) + if is_application: + action.project_entities[project_name] = project_entity + action.project_settings[project_name] = project_settings + label = action.label or identifier + variant_label = getattr(action, "label_variant", None) + icon = get_action_icon(action) + item = ActionItem( + identifier, + label, + variant_label, + icon, + action.order, + is_application, + False + ) + action_items[identifier] = item + self._action_items[project_name] = action_items + return action_items + + def _get_applications_action_classes(self): + from ayon_core.lib.applications import ( + CUSTOM_LAUNCH_APP_GROUPS, + ApplicationManager, + ) + + actions = [] + + manager = ApplicationManager() + for full_name, application in manager.applications.items(): + if ( + application.group.name in CUSTOM_LAUNCH_APP_GROUPS + or not application.enabled + ): + continue + + action = type( + "app_{}".format(full_name), + (ApplicationAction,), + { + "identifier": "application.{}".format(full_name), + "application": application, + "name": application.name, + "label": application.group.label, + "label_variant": application.label, + "group": None, + "icon": application.icon, + "color": getattr(application, "color", None), + "order": getattr(application, "order", None) or 0, + "data": {} + } + ) + actions.append(action) + return actions diff --git a/openpype/tools/ayon_launcher/models/selection.py b/client/ayon_core/tools/launcher/models/selection.py similarity index 100% rename from openpype/tools/ayon_launcher/models/selection.py rename to client/ayon_core/tools/launcher/models/selection.py diff --git a/openpype/tools/ayon_launcher/ui/__init__.py b/client/ayon_core/tools/launcher/ui/__init__.py similarity index 100% rename from openpype/tools/ayon_launcher/ui/__init__.py rename to client/ayon_core/tools/launcher/ui/__init__.py diff --git a/openpype/tools/ayon_launcher/ui/actions_widget.py b/client/ayon_core/tools/launcher/ui/actions_widget.py similarity index 99% rename from openpype/tools/ayon_launcher/ui/actions_widget.py rename to client/ayon_core/tools/launcher/ui/actions_widget.py index 2a1a06695d..6667b4ed5f 100644 --- a/openpype/tools/ayon_launcher/ui/actions_widget.py +++ b/client/ayon_core/tools/launcher/ui/actions_widget.py @@ -3,8 +3,8 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.tools.flickcharm import FlickCharm -from openpype.tools.ayon_utils.widgets import get_qt_icon +from ayon_core.tools.flickcharm import FlickCharm +from ayon_core.tools.ayon_utils.widgets import get_qt_icon from .resources import get_options_image_path diff --git a/openpype/tools/ayon_launcher/ui/hierarchy_page.py 
b/client/ayon_core/tools/launcher/ui/hierarchy_page.py similarity index 97% rename from openpype/tools/ayon_launcher/ui/hierarchy_page.py rename to client/ayon_core/tools/launcher/ui/hierarchy_page.py index d56d43fdec..5b5f88a802 100644 --- a/openpype/tools/ayon_launcher/ui/hierarchy_page.py +++ b/client/ayon_core/tools/launcher/ui/hierarchy_page.py @@ -1,12 +1,12 @@ import qtawesome from qtpy import QtWidgets, QtCore -from openpype.tools.utils import ( +from ayon_core.tools.utils import ( PlaceholderLineEdit, SquareButton, RefreshButton, ) -from openpype.tools.ayon_utils.widgets import ( +from ayon_core.tools.ayon_utils.widgets import ( ProjectsCombobox, FoldersWidget, TasksWidget, diff --git a/client/ayon_core/tools/launcher/ui/projects_widget.py b/client/ayon_core/tools/launcher/ui/projects_widget.py new file mode 100644 index 0000000000..729caf3232 --- /dev/null +++ b/client/ayon_core/tools/launcher/ui/projects_widget.py @@ -0,0 +1,153 @@ +from qtpy import QtWidgets, QtCore + +from ayon_core.tools.flickcharm import FlickCharm +from ayon_core.tools.utils import PlaceholderLineEdit, RefreshButton +from ayon_core.tools.ayon_utils.widgets import ( + ProjectsQtModel, + ProjectSortFilterProxy, +) +from ayon_core.tools.ayon_utils.models import PROJECTS_MODEL_SENDER + + +class ProjectIconView(QtWidgets.QListView): + """Styled ListView that allows to toggle between icon and list mode. + + Toggling between the two modes is done by Right Mouse Click. + """ + + IconMode = 0 + ListMode = 1 + + def __init__(self, parent=None, mode=ListMode): + super(ProjectIconView, self).__init__(parent=parent) + + # Workaround for scrolling being super slow or fast when + # toggling between the two visual modes + self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel) + self.setObjectName("IconView") + + self._mode = None + self.set_mode(mode) + + def set_mode(self, mode): + if mode == self._mode: + return + + self._mode = mode + + if mode == self.IconMode: + self.setViewMode(QtWidgets.QListView.IconMode) + self.setResizeMode(QtWidgets.QListView.Adjust) + self.setWrapping(True) + self.setWordWrap(True) + self.setGridSize(QtCore.QSize(151, 90)) + self.setIconSize(QtCore.QSize(50, 50)) + self.setSpacing(0) + self.setAlternatingRowColors(False) + + self.setProperty("mode", "icon") + self.style().polish(self) + + self.verticalScrollBar().setSingleStep(30) + + elif self.ListMode: + self.setProperty("mode", "list") + self.style().polish(self) + + self.setViewMode(QtWidgets.QListView.ListMode) + self.setResizeMode(QtWidgets.QListView.Adjust) + self.setWrapping(False) + self.setWordWrap(False) + self.setIconSize(QtCore.QSize(20, 20)) + self.setGridSize(QtCore.QSize(100, 25)) + self.setSpacing(0) + self.setAlternatingRowColors(False) + + self.verticalScrollBar().setSingleStep(34) + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.RightButton: + self.set_mode(int(not self._mode)) + return super(ProjectIconView, self).mousePressEvent(event) + + +class ProjectsWidget(QtWidgets.QWidget): + """Projects Page""" + + refreshed = QtCore.Signal() + + def __init__(self, controller, parent=None): + super(ProjectsWidget, self).__init__(parent=parent) + + header_widget = QtWidgets.QWidget(self) + + projects_filter_text = PlaceholderLineEdit(header_widget) + projects_filter_text.setPlaceholderText("Filter projects...") + + refresh_btn = RefreshButton(header_widget) + + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + 
header_layout.addWidget(projects_filter_text, 1) + header_layout.addWidget(refresh_btn, 0) + + projects_view = ProjectIconView(parent=self) + projects_view.setSelectionMode(QtWidgets.QListView.NoSelection) + flick = FlickCharm(parent=self) + flick.activateOn(projects_view) + projects_model = ProjectsQtModel(controller) + projects_proxy_model = ProjectSortFilterProxy() + projects_proxy_model.setSourceModel(projects_model) + + projects_view.setModel(projects_proxy_model) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(header_widget, 0) + main_layout.addWidget(projects_view, 1) + + projects_view.clicked.connect(self._on_view_clicked) + projects_model.refreshed.connect(self.refreshed) + projects_filter_text.textChanged.connect( + self._on_project_filter_change) + refresh_btn.clicked.connect(self._on_refresh_clicked) + + controller.register_event_callback( + "projects.refresh.finished", + self._on_projects_refresh_finished + ) + + self._controller = controller + + self._projects_view = projects_view + self._projects_model = projects_model + self._projects_proxy_model = projects_proxy_model + + def has_content(self): + """Model has at least one project. + + Returns: + bool: True if there is any content in the model. + """ + + return self._projects_model.has_content() + + def _on_view_clicked(self, index): + if not index.isValid(): + return + model = index.model() + flags = model.flags(index) + if not flags & QtCore.Qt.ItemIsEnabled: + return + project_name = index.data(QtCore.Qt.DisplayRole) + self._controller.set_selected_project(project_name) + + def _on_project_filter_change(self, text): + self._projects_proxy_model.setFilterFixedString(text) + + def _on_refresh_clicked(self): + self._controller.refresh() + + def _on_projects_refresh_finished(self, event): + if event["sender"] != PROJECTS_MODEL_SENDER: + self._projects_model.refresh() diff --git a/openpype/tools/ayon_launcher/ui/resources/__init__.py b/client/ayon_core/tools/launcher/ui/resources/__init__.py similarity index 100% rename from openpype/tools/ayon_launcher/ui/resources/__init__.py rename to client/ayon_core/tools/launcher/ui/resources/__init__.py diff --git a/openpype/tools/ayon_launcher/ui/resources/options.png b/client/ayon_core/tools/launcher/ui/resources/options.png similarity index 100% rename from openpype/tools/ayon_launcher/ui/resources/options.png rename to client/ayon_core/tools/launcher/ui/resources/options.png diff --git a/client/ayon_core/tools/launcher/ui/window.py b/client/ayon_core/tools/launcher/ui/window.py new file mode 100644 index 0000000000..34aeab35bb --- /dev/null +++ b/client/ayon_core/tools/launcher/ui/window.py @@ -0,0 +1,312 @@ +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core import style +from ayon_core import resources + +from ayon_core.tools.launcher.control import BaseLauncherController + +from .projects_widget import ProjectsWidget +from .hierarchy_page import HierarchyPage +from .actions_widget import ActionsWidget + + +class LauncherWindow(QtWidgets.QWidget): + """Launcher interface""" + message_interval = 5000 + refresh_interval = 10000 + page_side_anim_interval = 250 + + def __init__(self, controller=None, parent=None): + super(LauncherWindow, self).__init__(parent) + + if controller is None: + controller = BaseLauncherController() + + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + self.setWindowIcon(icon) + self.setWindowTitle("Launcher") + self.setFocusPolicy(QtCore.Qt.StrongFocus) + 
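+        # Keep the widget instance alive on close (WA_DeleteOnClose is
+        # disabled below) so the launcher can be re-shown, e.g. from the
+        # tray, without being rebuilt.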
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, False) + + self.setStyleSheet(style.load_stylesheet()) + + # Allow minimize + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.CustomizeWindowHint + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowMinimizeButtonHint + | QtCore.Qt.WindowCloseButtonHint + ) + + self._controller = controller + + # Main content - Pages & Actions + content_body = QtWidgets.QSplitter(self) + + # Pages + pages_widget = QtWidgets.QWidget(content_body) + + # - First page - Projects + projects_page = ProjectsWidget(controller, pages_widget) + + # - Second page - Hierarchy (folders & tasks) + hierarchy_page = HierarchyPage(controller, pages_widget) + + pages_layout = QtWidgets.QHBoxLayout(pages_widget) + pages_layout.setContentsMargins(0, 0, 0, 0) + pages_layout.addWidget(projects_page, 1) + pages_layout.addWidget(hierarchy_page, 1) + + # Actions + actions_widget = ActionsWidget(controller, content_body) + + # Vertically split Pages and Actions + content_body.setContentsMargins(0, 0, 0, 0) + content_body.setSizePolicy( + QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding + ) + content_body.setOrientation(QtCore.Qt.Vertical) + content_body.addWidget(pages_widget) + content_body.addWidget(actions_widget) + + # Set useful default sizes and set stretch + # for the pages so that is the only one that + # stretches on UI resize. + content_body.setStretchFactor(0, 10) + content_body.setSizes([580, 160]) + + # Footer + footer_widget = QtWidgets.QWidget(self) + + # - Message label + message_label = QtWidgets.QLabel(footer_widget) + + # action_history = ActionHistory(footer_widget) + # action_history.setStatusTip("Show Action History") + + footer_layout = QtWidgets.QHBoxLayout(footer_widget) + footer_layout.setContentsMargins(0, 0, 0, 0) + footer_layout.addWidget(message_label, 1) + # footer_layout.addWidget(action_history, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(content_body, 1) + layout.addWidget(footer_widget, 0) + + message_timer = QtCore.QTimer() + message_timer.setInterval(self.message_interval) + message_timer.setSingleShot(True) + + actions_refresh_timer = QtCore.QTimer() + actions_refresh_timer.setInterval(self.refresh_interval) + + page_slide_anim = QtCore.QVariantAnimation(self) + page_slide_anim.setDuration(self.page_side_anim_interval) + page_slide_anim.setStartValue(0.0) + page_slide_anim.setEndValue(1.0) + page_slide_anim.setEasingCurve(QtCore.QEasingCurve.OutQuad) + + projects_page.refreshed.connect(self._on_projects_refresh) + message_timer.timeout.connect(self._on_message_timeout) + actions_refresh_timer.timeout.connect( + self._on_actions_refresh_timeout) + page_slide_anim.valueChanged.connect( + self._on_page_slide_value_changed) + page_slide_anim.finished.connect(self._on_page_slide_finished) + + controller.register_event_callback( + "selection.project.changed", + self._on_project_selection_change, + ) + controller.register_event_callback( + "action.trigger.started", + self._on_action_trigger_started, + ) + controller.register_event_callback( + "action.trigger.finished", + self._on_action_trigger_finished, + ) + + self._controller = controller + + self._is_on_projects_page = True + self._window_is_active = False + self._refresh_on_activate = False + self._selected_project_name = None + + self._pages_widget = pages_widget + self._pages_layout = pages_layout + self._projects_page = projects_page + self._hierarchy_page = hierarchy_page + self._actions_widget = actions_widget + + self._message_label = message_label + # 
self._action_history = action_history + + self._message_timer = message_timer + self._actions_refresh_timer = actions_refresh_timer + self._page_slide_anim = page_slide_anim + + hierarchy_page.setVisible(not self._is_on_projects_page) + self.resize(520, 740) + + def showEvent(self, event): + super(LauncherWindow, self).showEvent(event) + self._window_is_active = True + if not self._actions_refresh_timer.isActive(): + self._actions_refresh_timer.start() + self._controller.refresh() + + def closeEvent(self, event): + super(LauncherWindow, self).closeEvent(event) + self._window_is_active = False + self._actions_refresh_timer.stop() + + def changeEvent(self, event): + if event.type() in ( + QtCore.QEvent.Type.WindowStateChange, + QtCore.QEvent.ActivationChange, + ): + is_active = self.isActiveWindow() and not self.isMinimized() + self._window_is_active = is_active + if is_active and self._refresh_on_activate: + self._refresh_on_activate = False + self._on_actions_refresh_timeout() + self._actions_refresh_timer.start() + + super(LauncherWindow, self).changeEvent(event) + + def _on_actions_refresh_timeout(self): + # Stop timer if widget is not visible + if self._window_is_active: + self._controller.refresh_actions() + else: + self._refresh_on_activate = True + + def _echo(self, message): + self._message_label.setText(str(message)) + self._message_timer.start() + + def _on_message_timeout(self): + self._message_label.setText("") + + def _on_project_selection_change(self, event): + project_name = event["project_name"] + self._selected_project_name = project_name + if not project_name: + self._go_to_projects_page() + + elif self._is_on_projects_page: + self._go_to_hierarchy_page(project_name) + + def _on_projects_refresh(self): + # There is nothing to do, we're on projects page + if self._is_on_projects_page: + return + + # No projects were found -> go back to projects page + if not self._projects_page.has_content(): + self._go_to_projects_page() + return + + self._hierarchy_page.refresh() + self._actions_widget.refresh() + + def _on_action_trigger_started(self, event): + self._echo("Running action: {}".format(event["full_label"])) + + def _on_action_trigger_finished(self, event): + if not event["failed"]: + return + self._echo("Failed: {}".format(event["error_message"])) + + def _is_page_slide_anim_running(self): + return ( + self._page_slide_anim.state() == QtCore.QAbstractAnimation.Running + ) + + def _go_to_projects_page(self): + if self._is_on_projects_page: + return + self._is_on_projects_page = True + self._hierarchy_page.set_page_visible(False) + + self._start_page_slide_animation() + + def _go_to_hierarchy_page(self, project_name): + if not self._is_on_projects_page: + return + self._is_on_projects_page = False + self._hierarchy_page.set_page_visible(True, project_name) + + self._start_page_slide_animation() + + def _start_page_slide_animation(self): + if self._is_on_projects_page: + direction = QtCore.QAbstractAnimation.Backward + else: + direction = QtCore.QAbstractAnimation.Forward + self._page_slide_anim.setDirection(direction) + if self._is_page_slide_anim_running(): + return + + layout_spacing = self._pages_layout.spacing() + if self._is_on_projects_page: + hierarchy_geo = self._hierarchy_page.geometry() + projects_geo = QtCore.QRect(hierarchy_geo) + projects_geo.moveRight( + hierarchy_geo.left() - (layout_spacing + 1)) + + self._projects_page.setVisible(True) + + else: + projects_geo = self._projects_page.geometry() + hierarchy_geo = QtCore.QRect(projects_geo) + 
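+            # Park the incoming hierarchy page just past the right edge;
+            # the slide animation then moves both pages left until the
+            # hierarchy page fills the view.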
hierarchy_geo.moveLeft(projects_geo.right() + layout_spacing) + self._hierarchy_page.setVisible(True) + + while self._pages_layout.count(): + self._pages_layout.takeAt(0) + + self._projects_page.setGeometry(projects_geo) + self._hierarchy_page.setGeometry(hierarchy_geo) + + self._page_slide_anim.start() + + def _on_page_slide_value_changed(self, value): + layout_spacing = self._pages_layout.spacing() + content_width = self._pages_widget.width() - layout_spacing + content_height = self._pages_widget.height() + + # Visible widths of other widgets + hierarchy_width = int(content_width * value) + + hierarchy_geo = QtCore.QRect( + content_width - hierarchy_width, 0, content_width, content_height + ) + projects_geo = QtCore.QRect(hierarchy_geo) + projects_geo.moveRight(hierarchy_geo.left() - (layout_spacing + 1)) + + self._projects_page.setGeometry(projects_geo) + self._hierarchy_page.setGeometry(hierarchy_geo) + + def _on_page_slide_finished(self): + self._pages_layout.addWidget(self._projects_page, 1) + self._pages_layout.addWidget(self._hierarchy_page, 1) + self._projects_page.setVisible(self._is_on_projects_page) + self._hierarchy_page.setVisible(not self._is_on_projects_page) + + # def _on_history_action(self, history_data): + # action, session = history_data + # app = QtWidgets.QApplication.instance() + # modifiers = app.keyboardModifiers() + # + # is_control_down = QtCore.Qt.ControlModifier & modifiers + # if is_control_down: + # # Revert to that "session" location + # self.set_session(session) + # else: + # # User is holding control, rerun the action + # self.run_action(action, session=session) diff --git a/openpype/tools/ayon_loader/__init__.py b/client/ayon_core/tools/loader/__init__.py similarity index 100% rename from openpype/tools/ayon_loader/__init__.py rename to client/ayon_core/tools/loader/__init__.py diff --git a/client/ayon_core/tools/loader/abstract.py b/client/ayon_core/tools/loader/abstract.py new file mode 100644 index 0000000000..33add0213b --- /dev/null +++ b/client/ayon_core/tools/loader/abstract.py @@ -0,0 +1,947 @@ +from abc import ABCMeta, abstractmethod +import six + +from ayon_core.lib.attribute_definitions import ( + AbstractAttrDef, + serialize_attr_defs, + deserialize_attr_defs, +) + + +class ProductTypeItem: + """Item representing product type. + + Args: + name (str): Product type name. + icon (dict[str, Any]): Product type icon definition. + checked (bool): Is product type checked for filtering. + """ + + def __init__(self, name, icon, checked): + self.name = name + self.icon = icon + self.checked = checked + + def to_data(self): + return { + "name": self.name, + "icon": self.icon, + "checked": self.checked, + } + + @classmethod + def from_data(cls, data): + return cls(**data) + + +class ProductItem: + """Product item with it versions. + + Args: + product_id (str): Product id. + product_type (str): Product type. + product_name (str): Product name. + product_icon (dict[str, Any]): Product icon definition. + product_type_icon (dict[str, Any]): Product type icon definition. + product_in_scene (bool): Is product in scene (only when used in DCC). + group_name (str): Group name. + folder_id (str): Folder id. + folder_label (str): Folder label. + version_items (dict[str, VersionItem]): Version items by id. 
+ """ + + def __init__( + self, + product_id, + product_type, + product_name, + product_icon, + product_type_icon, + product_in_scene, + group_name, + folder_id, + folder_label, + version_items, + ): + self.product_id = product_id + self.product_type = product_type + self.product_name = product_name + self.product_icon = product_icon + self.product_type_icon = product_type_icon + self.product_in_scene = product_in_scene + self.group_name = group_name + self.folder_id = folder_id + self.folder_label = folder_label + self.version_items = version_items + + def to_data(self): + return { + "product_id": self.product_id, + "product_type": self.product_type, + "product_name": self.product_name, + "product_icon": self.product_icon, + "product_type_icon": self.product_type_icon, + "product_in_scene": self.product_in_scene, + "group_name": self.group_name, + "folder_id": self.folder_id, + "folder_label": self.folder_label, + "version_items": { + version_id: version_item.to_data() + for version_id, version_item in self.version_items.items() + }, + } + + @classmethod + def from_data(cls, data): + version_items = { + version_id: VersionItem.from_data(version) + for version_id, version in data["version_items"].items() + } + data["version_items"] = version_items + return cls(**data) + + +class VersionItem: + """Version item. + + Object have implemented comparison operators to be sortable. + + Args: + version_id (str): Version id. + version (int): Version. Can be negative when is hero version. + is_hero (bool): Is hero version. + product_id (str): Product id. + thumbnail_id (Union[str, None]): Thumbnail id. + published_time (Union[str, None]): Published time in format + '%Y%m%dT%H%M%SZ'. + author (Union[str, None]): Author. + frame_range (Union[str, None]): Frame range. + duration (Union[int, None]): Duration. + handles (Union[str, None]): Handles. + step (Union[int, None]): Step. + comment (Union[str, None]): Comment. + source (Union[str, None]): Source. 
+ """ + + def __init__( + self, + version_id, + version, + is_hero, + product_id, + thumbnail_id, + published_time, + author, + frame_range, + duration, + handles, + step, + comment, + source, + ): + self.version_id = version_id + self.product_id = product_id + self.thumbnail_id = thumbnail_id + self.version = version + self.is_hero = is_hero + self.published_time = published_time + self.author = author + self.frame_range = frame_range + self.duration = duration + self.handles = handles + self.step = step + self.comment = comment + self.source = source + + def __eq__(self, other): + if not isinstance(other, VersionItem): + return False + return ( + self.is_hero == other.is_hero + and self.version == other.version + and self.version_id == other.version_id + and self.product_id == other.product_id + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def __gt__(self, other): + if not isinstance(other, VersionItem): + return False + if ( + other.version == self.version + and self.is_hero + ): + return True + return other.version < self.version + + def to_data(self): + return { + "version_id": self.version_id, + "product_id": self.product_id, + "thumbnail_id": self.thumbnail_id, + "version": self.version, + "is_hero": self.is_hero, + "published_time": self.published_time, + "author": self.author, + "frame_range": self.frame_range, + "duration": self.duration, + "handles": self.handles, + "step": self.step, + "comment": self.comment, + "source": self.source, + } + + @classmethod + def from_data(cls, data): + return cls(**data) + + +class RepreItem: + """Representation item. + + Args: + representation_id (str): Representation id. + representation_name (str): Representation name. + representation_icon (dict[str, Any]): Representation icon definition. + product_name (str): Product name. + folder_label (str): Folder label. + """ + + def __init__( + self, + representation_id, + representation_name, + representation_icon, + product_name, + folder_label + ): + self.representation_id = representation_id + self.representation_name = representation_name + self.representation_icon = representation_icon + self.product_name = product_name + self.folder_label = folder_label + + def to_data(self): + return { + "representation_id": self.representation_id, + "representation_name": self.representation_name, + "representation_icon": self.representation_icon, + "product_name": self.product_name, + "folder_label": self.folder_label, + } + + @classmethod + def from_data(cls, data): + return cls(**data) + + +class ActionItem: + """Action item that can be triggered. + + Action item is defined for a specific context. To trigger the action + use 'identifier' and context, it necessary also use 'options'. + + Args: + identifier (str): Action identifier. + label (str): Action label. + icon (dict[str, Any]): Action icon definition. + tooltip (str): Action tooltip. + options (Union[list[AbstractAttrDef], list[qargparse.QArgument]]): + Action options. Note: 'qargparse' is considered as deprecated. + order (int): Action order. + project_name (str): Project name. + folder_ids (list[str]): Folder ids. + product_ids (list[str]): Product ids. + version_ids (list[str]): Version ids. + representation_ids (list[str]): Representation ids. 
+ """ + + def __init__( + self, + identifier, + label, + icon, + tooltip, + options, + order, + project_name, + folder_ids, + product_ids, + version_ids, + representation_ids, + ): + self.identifier = identifier + self.label = label + self.icon = icon + self.tooltip = tooltip + self.options = options + self.order = order + self.project_name = project_name + self.folder_ids = folder_ids + self.product_ids = product_ids + self.version_ids = version_ids + self.representation_ids = representation_ids + + def _options_to_data(self): + options = self.options + if not options: + return options + if isinstance(options[0], AbstractAttrDef): + return serialize_attr_defs(options) + # NOTE: Data conversion is not used by default in loader tool. But for + # future development of detached UI tools it would be better to be + # prepared for it. + raise NotImplementedError( + "{}.to_data is not implemented. Use Attribute definitions" + " from 'ayon_core.lib' instead of 'qargparse'.".format( + self.__class__.__name__ + ) + ) + + def to_data(self): + options = self._options_to_data() + return { + "identifier": self.identifier, + "label": self.label, + "icon": self.icon, + "tooltip": self.tooltip, + "options": options, + "order": self.order, + "project_name": self.project_name, + "folder_ids": self.folder_ids, + "product_ids": self.product_ids, + "version_ids": self.version_ids, + "representation_ids": self.representation_ids, + } + + @classmethod + def from_data(cls, data): + options = data["options"] + if options: + options = deserialize_attr_defs(options) + data["options"] = options + return cls(**data) + + +@six.add_metaclass(ABCMeta) +class _BaseLoaderController(object): + """Base loader controller abstraction. + + Abstract base class that is required for both frontend and backed. + """ + + @abstractmethod + def get_current_context(self): + """Current context is a context of the current scene. + + Example output: + { + "project_name": "MyProject", + "folder_id": "0011223344-5566778-99", + "task_name": "Compositing", + } + + Returns: + dict[str, Union[str, None]]: Context data. + """ + + pass + + @abstractmethod + def reset(self): + """Reset all cached data to reload everything. + + Triggers events "controller.reset.started" and + "controller.reset.finished". + """ + + pass + + # Model wrappers + @abstractmethod + def get_folder_items(self, project_name, sender=None): + """Folder items for a project. + + Args: + project_name (str): Project name. + sender (Optional[str]): Sender who requested the name. + + Returns: + list[FolderItem]: Folder items for the project. + """ + + pass + + # Expected selection helpers + @abstractmethod + def get_expected_selection_data(self): + """Full expected selection information. + + Expected selection is a selection that may not be yet selected in UI + e.g. because of refreshing, this data tell the UI what should be + selected when they finish their refresh. + + Returns: + dict[str, Any]: Expected selection data. + """ + + pass + + @abstractmethod + def set_expected_selection(self, project_name, folder_id): + """Set expected selection. + + Args: + project_name (str): Name of project to be selected. + folder_id (str): Id of folder to be selected. + """ + + pass + + +class BackendLoaderController(_BaseLoaderController): + """Backend loader controller abstraction. + + What backend logic requires from a controller for proper logic. + """ + + @abstractmethod + def emit_event(self, topic, data=None, source=None): + """Emit event with a certain topic, data and source. 
+
+        The event should be sent to both frontend and backend.
+
+        Args:
+            topic (str): Event topic name.
+            data (Optional[dict[str, Any]]): Event data.
+            source (Optional[str]): Event source.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_loaded_product_ids(self):
+        """Return set of loaded product ids.
+
+        Returns:
+            set[str]: Set of loaded product ids.
+        """
+
+        pass
+
+
+class FrontendLoaderController(_BaseLoaderController):
+    @abstractmethod
+    def register_event_callback(self, topic, callback):
+        """Register callback for an event topic.
+
+        Args:
+            topic (str): Event topic name.
+            callback (func): Callback triggered when the event is emitted.
+        """
+
+        pass
+
+    # Expected selection helpers
+    @abstractmethod
+    def expected_project_selected(self, project_name):
+        """Expected project was selected in frontend.
+
+        Args:
+            project_name (str): Project name.
+        """
+
+        pass
+
+    @abstractmethod
+    def expected_folder_selected(self, folder_id):
+        """Expected folder was selected in frontend.
+
+        Args:
+            folder_id (str): Folder id.
+        """
+
+        pass
+
+    # Model wrapper calls
+    @abstractmethod
+    def get_project_items(self, sender=None):
+        """Items for all projects available on the server.
+
+        Triggers event topics "projects.refresh.started" and
+        "projects.refresh.finished" with data:
+            {
+                "sender": sender
+            }
+
+        Notes:
+            Filtering of projects is done in the UI.
+
+        Args:
+            sender (Optional[str]): Sender who requested the items.
+
+        Returns:
+            list[ProjectItem]: List of project items.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_product_items(self, project_name, folder_ids, sender=None):
+        """Product items for folder ids.
+
+        Triggers event topics "products.refresh.started" and
+        "products.refresh.finished" with data:
+            {
+                "project_name": project_name,
+                "folder_ids": folder_ids,
+                "sender": sender
+            }
+
+        Args:
+            project_name (str): Project name.
+            folder_ids (Iterable[str]): Folder ids.
+            sender (Optional[str]): Sender who requested the items.
+
+        Returns:
+            list[ProductItem]: List of product items.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_product_item(self, project_name, product_id):
+        """Receive single product item.
+
+        Args:
+            project_name (str): Project name.
+            product_id (str): Product id.
+
+        Returns:
+            Union[ProductItem, None]: Product info or None if not found.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_product_type_items(self, project_name):
+        """Product type items for a project.
+
+        Product type items carry the information whether the product type
+        is checked for filtering or not.
+
+        Args:
+            project_name (Union[str, None]): Project name.
+
+        Returns:
+            list[ProductTypeItem]: List of product type items for a project.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_representation_items(
+        self, project_name, version_ids, sender=None
+    ):
+        """Representation items for version ids.
+
+        Triggers event topics "model.representations.refresh.started" and
+        "model.representations.refresh.finished" with data:
+            {
+                "project_name": project_name,
+                "version_ids": version_ids,
+                "sender": sender
+            }
+
+        Args:
+            project_name (str): Project name.
+            version_ids (Iterable[str]): Version ids.
+            sender (Optional[str]): Sender who requested the items.
+
+        Returns:
+            list[RepreItem]: List of representation items.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_version_thumbnail_ids(self, project_name, version_ids):
+        """Get thumbnail ids for version ids.
+
+        Args:
+            project_name (str): Project name.
+            version_ids (Iterable[str]): Version ids.
+
+        Returns:
+            dict[str, Union[str, Any]]: Thumbnail id by version id.
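+
+        Example:
+            Output shape sketch (ids are illustrative; versions without a
+            thumbnail presumably map to None)::
+
+                {
+                    "<version_id_1>": "<thumbnail_id>",
+                    "<version_id_2>": None,
+                }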
+ """ + + pass + + @abstractmethod + def get_folder_thumbnail_ids(self, project_name, folder_ids): + """Get thumbnail ids for folder ids. + + Args: + project_name (str): Project name. + folder_ids (Iterable[str]): Folder ids. + + Returns: + dict[str, Union[str, Any]]: Thumbnail id by folder id. + """ + + pass + + @abstractmethod + def get_versions_representation_count( + self, project_name, version_ids, sender=None + ): + """ + Args: + project_name (str): Project name. + version_ids (Iterable[str]): Version ids. + sender (Optional[str]): Sender who requested the items. + + Returns: + dict[str, int]: Representation count by version id. + """ + + pass + + @abstractmethod + def get_thumbnail_path(self, project_name, thumbnail_id): + """Get thumbnail path for thumbnail id. + + This method should get a path to a thumbnail based on thumbnail id. + Which probably means to download the thumbnail from server and store + it locally. + + Args: + project_name (str): Project name. + thumbnail_id (str): Thumbnail id. + + Returns: + Union[str, None]: Thumbnail path or None if not found. + """ + + pass + + # Selection model wrapper calls + @abstractmethod + def get_selected_project_name(self): + """Get selected project name. + + The information is based on last selection from UI. + + Returns: + Union[str, None]: Selected project name. + """ + + pass + + @abstractmethod + def get_selected_folder_ids(self): + """Get selected folder ids. + + The information is based on last selection from UI. + + Returns: + list[str]: Selected folder ids. + """ + + pass + + @abstractmethod + def get_selected_version_ids(self): + """Get selected version ids. + + The information is based on last selection from UI. + + Returns: + list[str]: Selected version ids. + """ + + pass + + @abstractmethod + def get_selected_representation_ids(self): + """Get selected representation ids. + + The information is based on last selection from UI. + + Returns: + list[str]: Selected representation ids. + """ + + pass + + @abstractmethod + def set_selected_project(self, project_name): + """Set selected project. + + Project selection changed in UI. Method triggers event with topic + "selection.project.changed" with data: + { + "project_name": self._project_name + } + + Args: + project_name (Union[str, None]): Selected project name. + """ + + pass + + @abstractmethod + def set_selected_folders(self, folder_ids): + """Set selected folders. + + Folder selection changed in UI. Method triggers event with topic + "selection.folders.changed" with data: + { + "project_name": project_name, + "folder_ids": folder_ids + } + + Args: + folder_ids (Iterable[str]): Selected folder ids. + """ + + pass + + @abstractmethod + def set_selected_versions(self, version_ids): + """Set selected versions. + + Version selection changed in UI. Method triggers event with topic + "selection.versions.changed" with data: + { + "project_name": project_name, + "folder_ids": folder_ids, + "version_ids": version_ids + } + + Args: + version_ids (Iterable[str]): Selected version ids. + """ + + pass + + @abstractmethod + def set_selected_representations(self, repre_ids): + """Set selected representations. + + Representation selection changed in UI. Method triggers event with + topic "selection.representations.changed" with data: + { + "project_name": project_name, + "folder_ids": folder_ids, + "version_ids": version_ids, + "representation_ids": representation_ids + } + + Args: + repre_ids (Iterable[str]): Selected representation ids. 
+ """ + + pass + + # Load action items + @abstractmethod + def get_versions_action_items(self, project_name, version_ids): + """Action items for versions selection. + + Args: + project_name (str): Project name. + version_ids (Iterable[str]): Version ids. + + Returns: + list[ActionItem]: List of action items. + """ + + pass + + @abstractmethod + def get_representations_action_items( + self, project_name, representation_ids + ): + """Action items for representations selection. + + Args: + project_name (str): Project name. + representation_ids (Iterable[str]): Representation ids. + + Returns: + list[ActionItem]: List of action items. + """ + + pass + + @abstractmethod + def trigger_action_item( + self, + identifier, + options, + project_name, + version_ids, + representation_ids + ): + """Trigger action item. + + Triggers event "load.started" with data: + { + "identifier": identifier, + "id": , + } + + And triggers "load.finished" with data: + { + "identifier": identifier, + "id": , + "error_info": [...], + } + + Args: + identifier (str): Action identifier. + options (dict[str, Any]): Action option values from UI. + project_name (str): Project name. + version_ids (Iterable[str]): Version ids. + representation_ids (Iterable[str]): Representation ids. + """ + + pass + + @abstractmethod + def change_products_group(self, project_name, product_ids, group_name): + """Change group of products. + + Triggers event "products.group.changed" with data: + { + "project_name": project_name, + "folder_ids": folder_ids, + "product_ids": product_ids, + "group_name": group_name, + } + + Args: + project_name (str): Project name. + product_ids (Iterable[str]): Product ids. + group_name (str): New group name. + """ + + pass + + @abstractmethod + def fill_root_in_source(self, source): + """Fill root in source path. + + Args: + source (Union[str, None]): Source of a published version. Usually + rootless workfile path. + """ + + pass + + # NOTE: Methods 'is_loaded_products_supported' and + # 'is_standard_projects_filter_enabled' are both based on being in host + # or not. Maybe we could implement only single method 'is_in_host'? + @abstractmethod + def is_loaded_products_supported(self): + """Is capable to get information about loaded products. + + Returns: + bool: True if it is supported. + """ + + pass + + @abstractmethod + def is_standard_projects_filter_enabled(self): + """Is standard projects filter enabled. + + This is used for filtering out when loader tool is used in a host. In + that case only current project and library projects should be shown. + + Returns: + bool: Frontend should filter out non-library projects, except + current context project. + """ + + pass + + # Site sync functions + @abstractmethod + def is_site_sync_enabled(self, project_name=None): + """Is site sync enabled. + + Site sync addon can be enabled but can be disabled per project. + + When asked for enabled state without project name, it should return + True if site sync addon is available and enabled. + + Args: + project_name (Optional[str]): Project name. + + Returns: + bool: True if site sync is enabled. + """ + + pass + + @abstractmethod + def get_active_site_icon_def(self, project_name): + """Active site icon definition. + + Args: + project_name (Union[str, None]): Project name. + + Returns: + Union[dict[str, Any], None]: Icon definition or None if site sync + is not enabled for the project. + """ + + pass + + @abstractmethod + def get_remote_site_icon_def(self, project_name): + """Remote site icon definition. 
+ + Args: + project_name (Union[str, None]): Project name. + + Returns: + Union[dict[str, Any], None]: Icon definition or None if site sync + is not enabled for the project. + """ + + pass + + @abstractmethod + def get_version_sync_availability(self, project_name, version_ids): + """Version sync availability. + + Args: + project_name (str): Project name. + version_ids (Iterable[str]): Version ids. + + Returns: + dict[str, tuple[int, int]]: Sync availability by version id. + """ + + pass + + @abstractmethod + def get_representations_sync_status( + self, project_name, representation_ids + ): + """Representations sync status. + + Args: + project_name (str): Project name. + representation_ids (Iterable[str]): Representation ids. + + Returns: + dict[str, tuple[int, int]]: Sync status by representation id. + """ + + pass diff --git a/client/ayon_core/tools/loader/control.py b/client/ayon_core/tools/loader/control.py new file mode 100644 index 0000000000..29ca06e3e2 --- /dev/null +++ b/client/ayon_core/tools/loader/control.py @@ -0,0 +1,410 @@ +import logging +import uuid + +import ayon_api + +from ayon_core.lib.events import QueuedEventSystem +from ayon_core.pipeline import Anatomy, get_current_context +from ayon_core.host import ILoadHost +from ayon_core.tools.ayon_utils.models import ( + ProjectsModel, + HierarchyModel, + NestedCacheItem, + CacheItem, + ThumbnailsModel, +) + +from .abstract import BackendLoaderController, FrontendLoaderController +from .models import ( + SelectionModel, + ProductsModel, + LoaderActionsModel, + SiteSyncModel +) + + +class ExpectedSelection: + def __init__(self, controller): + self._project_name = None + self._folder_id = None + + self._project_selected = True + self._folder_selected = True + + self._controller = controller + + def _emit_change(self): + self._controller.emit_event( + "expected_selection_changed", + self.get_expected_selection_data(), + ) + + def set_expected_selection(self, project_name, folder_id): + self._project_name = project_name + self._folder_id = folder_id + + self._project_selected = False + self._folder_selected = False + self._emit_change() + + def get_expected_selection_data(self): + project_current = False + folder_current = False + if not self._project_selected: + project_current = True + elif not self._folder_selected: + folder_current = True + return { + "project": { + "name": self._project_name, + "current": project_current, + "selected": self._project_selected, + }, + "folder": { + "id": self._folder_id, + "current": folder_current, + "selected": self._folder_selected, + }, + } + + def is_expected_project_selected(self, project_name): + return project_name == self._project_name and self._project_selected + + def is_expected_folder_selected(self, folder_id): + return folder_id == self._folder_id and self._folder_selected + + def expected_project_selected(self, project_name): + if project_name != self._project_name: + return False + self._project_selected = True + self._emit_change() + return True + + def expected_folder_selected(self, folder_id): + if folder_id != self._folder_id: + return False + self._folder_selected = True + self._emit_change() + return True + + +class LoaderController(BackendLoaderController, FrontendLoaderController): + """ + + Args: + host (Optional[AbstractHost]): Host object. Defaults to None. 
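+
+    Example:
+        Minimal standalone usage sketch (assumes a configured connection
+        to an AYON server)::
+
+            controller = LoaderController()
+            controller.reset()
+            project_items = controller.get_project_items()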
+ """ + + def __init__(self, host=None): + self._log = None + self._host = host + + self._event_system = self._create_event_system() + + self._project_anatomy_cache = NestedCacheItem( + levels=1, lifetime=60) + self._loaded_products_cache = CacheItem( + default_factory=set, lifetime=60) + + self._selection_model = SelectionModel(self) + self._expected_selection = ExpectedSelection(self) + self._projects_model = ProjectsModel(self) + self._hierarchy_model = HierarchyModel(self) + self._products_model = ProductsModel(self) + self._loader_actions_model = LoaderActionsModel(self) + self._thumbnails_model = ThumbnailsModel() + self._site_sync_model = SiteSyncModel(self) + + @property + def log(self): + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + # --------------------------------- + # Implementation of abstract methods + # --------------------------------- + # Events system + def emit_event(self, topic, data=None, source=None): + """Use implemented event system to trigger event.""" + + if data is None: + data = {} + self._event_system.emit(topic, data, source) + + def register_event_callback(self, topic, callback): + self._event_system.add_callback(topic, callback) + + def reset(self): + self._emit_event("controller.reset.started") + + project_name = self.get_selected_project_name() + folder_ids = self.get_selected_folder_ids() + + self._project_anatomy_cache.reset() + self._loaded_products_cache.reset() + + self._products_model.reset() + self._hierarchy_model.reset() + self._loader_actions_model.reset() + self._projects_model.reset() + self._thumbnails_model.reset() + self._site_sync_model.reset() + + self._projects_model.refresh() + + if not project_name and not folder_ids: + context = self.get_current_context() + project_name = context["project_name"] + folder_id = context["folder_id"] + self.set_expected_selection(project_name, folder_id) + + self._emit_event("controller.reset.finished") + + # Expected selection helpers + def get_expected_selection_data(self): + return self._expected_selection.get_expected_selection_data() + + def set_expected_selection(self, project_name, folder_id): + self._expected_selection.set_expected_selection( + project_name, folder_id + ) + + def expected_project_selected(self, project_name): + self._expected_selection.expected_project_selected(project_name) + + def expected_folder_selected(self, folder_id): + self._expected_selection.expected_folder_selected(folder_id) + + # Entity model wrappers + def get_project_items(self, sender=None): + return self._projects_model.get_project_items(sender) + + def get_folder_items(self, project_name, sender=None): + return self._hierarchy_model.get_folder_items(project_name, sender) + + def get_product_items(self, project_name, folder_ids, sender=None): + return self._products_model.get_product_items( + project_name, folder_ids, sender) + + def get_product_item(self, project_name, product_id): + return self._products_model.get_product_item( + project_name, product_id + ) + + def get_product_type_items(self, project_name): + return self._products_model.get_product_type_items(project_name) + + def get_representation_items( + self, project_name, version_ids, sender=None + ): + return self._products_model.get_repre_items( + project_name, version_ids, sender + ) + + def get_versions_representation_count( + self, project_name, version_ids, sender=None + ): + return self._products_model.get_versions_repre_count( + project_name, version_ids, sender + ) + + def 
get_folder_thumbnail_ids(self, project_name, folder_ids): + return self._thumbnails_model.get_folder_thumbnail_ids( + project_name, folder_ids + ) + + def get_version_thumbnail_ids(self, project_name, version_ids): + return self._thumbnails_model.get_version_thumbnail_ids( + project_name, version_ids + ) + + def get_thumbnail_path(self, project_name, thumbnail_id): + return self._thumbnails_model.get_thumbnail_path( + project_name, thumbnail_id + ) + + def change_products_group(self, project_name, product_ids, group_name): + self._products_model.change_products_group( + project_name, product_ids, group_name + ) + + def get_versions_action_items(self, project_name, version_ids): + return self._loader_actions_model.get_versions_action_items( + project_name, version_ids) + + def get_representations_action_items( + self, project_name, representation_ids): + action_items = ( + self._loader_actions_model.get_representations_action_items( + project_name, representation_ids) + ) + + action_items.extend(self._site_sync_model.get_site_sync_action_items( + project_name, representation_ids) + ) + + return action_items + + def trigger_action_item( + self, + identifier, + options, + project_name, + version_ids, + representation_ids + ): + if self._site_sync_model.is_site_sync_action(identifier): + self._site_sync_model.trigger_action_item( + identifier, + project_name, + representation_ids + ) + return + + self._loader_actions_model.trigger_action_item( + identifier, + options, + project_name, + version_ids, + representation_ids + ) + + # Selection model wrappers + def get_selected_project_name(self): + return self._selection_model.get_selected_project_name() + + def set_selected_project(self, project_name): + self._selection_model.set_selected_project(project_name) + + # Selection model wrappers + def get_selected_folder_ids(self): + return self._selection_model.get_selected_folder_ids() + + def set_selected_folders(self, folder_ids): + self._selection_model.set_selected_folders(folder_ids) + + def get_selected_version_ids(self): + return self._selection_model.get_selected_version_ids() + + def set_selected_versions(self, version_ids): + self._selection_model.set_selected_versions(version_ids) + + def get_selected_representation_ids(self): + return self._selection_model.get_selected_representation_ids() + + def set_selected_representations(self, repre_ids): + self._selection_model.set_selected_representations(repre_ids) + + def fill_root_in_source(self, source): + project_name = self.get_selected_project_name() + anatomy = self._get_project_anatomy(project_name) + if anatomy is None: + return source + + try: + return anatomy.fill_root(source) + except Exception: + return source + + def get_current_context(self): + if self._host is None: + return { + "project_name": None, + "folder_id": None, + "task_name": None, + } + if hasattr(self._host, "get_current_context"): + context = self._host.get_current_context() + else: + context = get_current_context() + folder_id = None + project_name = context.get("project_name") + asset_name = context.get("asset_name") + if project_name and asset_name: + folder = ayon_api.get_folder_by_path( + project_name, asset_name, fields=["id"] + ) + if folder: + folder_id = folder["id"] + return { + "project_name": project_name, + "folder_id": folder_id, + "task_name": context.get("task_name"), + } + + def get_loaded_product_ids(self): + if self._host is None: + return set() + + context = self.get_current_context() + project_name = context["project_name"] + if not project_name: + 
return set()
+
+        if not self._loaded_products_cache.is_valid:
+            if isinstance(self._host, ILoadHost):
+                containers = self._host.get_containers()
+            else:
+                containers = self._host.ls()
+            repre_ids = set()
+            for container in containers:
+                repre_id = container.get("representation")
+                # Ignore invalid representation ids.
+                # - invalid representation ids may be present if e.g. a scene
+                #   created with OpenPype is opened, when 'ObjectId' was used
+                #   instead of 'uuid'.
+                # NOTE: The server call would crash on any invalid id and we
+                #   would not get any information at all.
+                try:
+                    uuid.UUID(repre_id)
+                    repre_ids.add(repre_id)
+                except ValueError:
+                    pass
+
+            product_ids = self._products_model.get_product_ids_by_repre_ids(
+                project_name, repre_ids
+            )
+            self._loaded_products_cache.update_data(product_ids)
+        return self._loaded_products_cache.get_data()
+
+    def is_site_sync_enabled(self, project_name=None):
+        return self._site_sync_model.is_site_sync_enabled(project_name)
+
+    def get_active_site_icon_def(self, project_name):
+        return self._site_sync_model.get_active_site_icon_def(project_name)
+
+    def get_remote_site_icon_def(self, project_name):
+        return self._site_sync_model.get_remote_site_icon_def(project_name)
+
+    def get_version_sync_availability(self, project_name, version_ids):
+        return self._site_sync_model.get_version_sync_availability(
+            project_name, version_ids
+        )
+
+    def get_representations_sync_status(
+        self, project_name, representation_ids
+    ):
+        return self._site_sync_model.get_representations_sync_status(
+            project_name, representation_ids
+        )
+
+    def is_loaded_products_supported(self):
+        return self._host is not None
+
+    def is_standard_projects_filter_enabled(self):
+        return self._host is not None
+
+    def _get_project_anatomy(self, project_name):
+        if not project_name:
+            return None
+        cache = self._project_anatomy_cache[project_name]
+        if not cache.is_valid:
+            cache.update_data(Anatomy(project_name))
+        return cache.get_data()
+
+    def _create_event_system(self):
+        return QueuedEventSystem()
+
+    def _emit_event(self, topic, data=None):
+        self._event_system.emit(topic, data or {}, "controller")
diff --git a/openpype/tools/ayon_loader/models/__init__.py b/client/ayon_core/tools/loader/models/__init__.py
similarity index 100%
rename from openpype/tools/ayon_loader/models/__init__.py
rename to client/ayon_core/tools/loader/models/__init__.py
diff --git a/client/ayon_core/tools/loader/models/actions.py b/client/ayon_core/tools/loader/models/actions.py
new file mode 100644
index 0000000000..c70ccb3e18
--- /dev/null
+++ b/client/ayon_core/tools/loader/models/actions.py
@@ -0,0 +1,867 @@
+import sys
+import traceback
+import inspect
+import copy
+import collections
+import uuid
+
+from ayon_core.client import (
+    get_project,
+    get_assets,
+    get_subsets,
+    get_versions,
+    get_representations,
+)
+from ayon_core.pipeline.load import (
+    discover_loader_plugins,
+    SubsetLoaderPlugin,
+    filter_repre_contexts_by_loader,
+    get_loader_identifier,
+    load_with_repre_context,
+    load_with_subset_context,
+    load_with_subset_contexts,
+    LoadError,
+    IncompatibleLoaderError,
+)
+from ayon_core.tools.ayon_utils.models import NestedCacheItem
+from ayon_core.tools.loader.abstract import ActionItem
+
+ACTIONS_MODEL_SENDER = "actions.model"
+NOT_SET = object()
+
+
+class LoaderActionsModel:
+    """Model for loader actions.
+
+    This is probably the only part of the models that requires the codebase
+    from 'ayon_core.client', because of backwards compatibility with loader
+    logic which expects mongo documents.
+
+    TODOs:
+        Deprecate 'qargparse' usage in loaders and implement conversion
+            of 'ActionItem' to data (and 'from_data').
+        Use controller to get entities (documents) -> possible only when
+            loaders are able to handle AYON vs. OpenPype logic.
+        Add missing site sync logic, and if possible remove it from loaders.
+        Implement loader actions to replace load plugins.
+        Ask loader actions to return action items instead of guessing them.
+    """
+
+    # Cache loader plugins for some time
+    # NOTE Set to '0' for development
+    loaders_cache_lifetime = 30
+
+    def __init__(self, controller):
+        self._controller = controller
+        self._current_context_project = NOT_SET
+        self._loaders_by_identifier = NestedCacheItem(
+            levels=1, lifetime=self.loaders_cache_lifetime)
+        self._product_loaders = NestedCacheItem(
+            levels=1, lifetime=self.loaders_cache_lifetime)
+        self._repre_loaders = NestedCacheItem(
+            levels=1, lifetime=self.loaders_cache_lifetime)
+
+    def reset(self):
+        """Reset the model with all cached items."""
+
+        self._current_context_project = NOT_SET
+        self._loaders_by_identifier.reset()
+        self._product_loaders.reset()
+        self._repre_loaders.reset()
+
+    def get_versions_action_items(self, project_name, version_ids):
+        """Get action items for given version ids.
+
+        Args:
+            project_name (str): Project name.
+            version_ids (Iterable[str]): Version ids.
+
+        Returns:
+            list[ActionItem]: List of action items.
+        """
+
+        (
+            version_context_by_id,
+            repre_context_by_id
+        ) = self._contexts_for_versions(
+            project_name,
+            version_ids
+        )
+        return self._get_action_items_for_contexts(
+            project_name,
+            version_context_by_id,
+            repre_context_by_id
+        )
+
+    def get_representations_action_items(
+        self, project_name, representation_ids
+    ):
+        """Get action items for given representation ids.
+
+        Args:
+            project_name (str): Project name.
+            representation_ids (Iterable[str]): Representation ids.
+
+        Returns:
+            list[ActionItem]: List of action items.
+        """
+
+        (
+            product_context_by_id,
+            repre_context_by_id
+        ) = self._contexts_for_representations(
+            project_name,
+            representation_ids
+        )
+        return self._get_action_items_for_contexts(
+            project_name,
+            product_context_by_id,
+            repre_context_by_id
+        )
+
+    def trigger_action_item(
+        self,
+        identifier,
+        options,
+        project_name,
+        version_ids,
+        representation_ids
+    ):
+        """Trigger action by identifier.
+
+        Triggers the action by identifier for given contexts.
+
+        Triggers events "load.started" and "load.finished". The finished
+        event also contains an "error_info" key with error information if
+        anything went wrong.
+
+        Args:
+            identifier (str): Loader identifier.
+            options (dict[str, Any]): Loader option values.
+            project_name (str): Project name.
+            version_ids (Iterable[str]): Version ids.
+            representation_ids (Iterable[str]): Representation ids.
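+
+        Example:
+            Triggering a loader for a single representation; the identifier
+            and id are illustrative::
+
+                model.trigger_action_item(
+                    "FileLoader",
+                    {},
+                    "MyProject",
+                    version_ids=None,
+                    representation_ids=["<repre_id>"],
+                )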
+ """ + + event_data = { + "identifier": identifier, + "id": uuid.uuid4().hex, + } + self._controller.emit_event( + "load.started", + event_data, + ACTIONS_MODEL_SENDER, + ) + loader = self._get_loader_by_identifier(project_name, identifier) + if representation_ids is not None: + error_info = self._trigger_representation_loader( + loader, + options, + project_name, + representation_ids, + ) + elif version_ids is not None: + error_info = self._trigger_version_loader( + loader, + options, + project_name, + version_ids, + ) + else: + raise NotImplementedError( + "Invalid arguments to trigger action item") + + event_data["error_info"] = error_info + self._controller.emit_event( + "load.finished", + event_data, + ACTIONS_MODEL_SENDER, + ) + + def _get_current_context_project(self): + """Get current context project name. + + The value is based on controller (host) and cached. + + Returns: + Union[str, None]: Current context project. + """ + + if self._current_context_project is NOT_SET: + context = self._controller.get_current_context() + self._current_context_project = context["project_name"] + return self._current_context_project + + def _get_action_label(self, loader, representation=None): + """Pull label info from loader class. + + Args: + loader (LoaderPlugin): Plugin class. + representation (Optional[dict[str, Any]]): Representation data. + + Returns: + str: Action label. + """ + + label = getattr(loader, "label", None) + if label is None: + label = loader.__name__ + if representation: + # Add the representation as suffix + label = "{} ({})".format(label, representation["name"]) + return label + + def _get_action_icon(self, loader): + """Pull icon info from loader class. + + Args: + loader (LoaderPlugin): Plugin class. + + Returns: + Union[dict[str, Any], None]: Icon definition based on + loader plugin. + """ + + # Support font-awesome icons using the `.icon` and `.color` + # attributes on plug-ins. + icon = getattr(loader, "icon", None) + if icon is not None and not isinstance(icon, dict): + icon = { + "type": "awesome-font", + "name": icon, + "color": getattr(loader, "color", None) or "white" + } + return icon + + def _get_action_tooltip(self, loader): + """Pull tooltip info from loader class. + + Args: + loader (LoaderPlugin): Plugin class. + + Returns: + str: Action tooltip. + """ + + # Add tooltip and statustip from Loader docstring + return inspect.getdoc(loader) + + def _filter_loaders_by_tool_name(self, project_name, loaders): + """Filter loaders by tool name. + + Tool names are based on AYON tools loader tool and library + loader tool. The new tool merged both into one tool and the difference + is based only on current project name. + + Args: + project_name (str): Project name. + loaders (list[LoaderPlugin]): List of loader plugins. + + Returns: + list[LoaderPlugin]: Filtered list of loader plugins. + """ + + # Keep filtering by tool name + # - if current context project name is same as project name we do + # expect the tool is used as AYON loader tool, otherwise + # as library loader tool. 
+ if project_name == self._get_current_context_project(): + tool_name = "loader" + else: + tool_name = "library_loader" + filtered_loaders = [] + for loader in loaders: + tool_names = getattr(loader, "tool_names", None) + if ( + tool_names is None + or "*" in tool_names + or tool_name in tool_names + ): + filtered_loaders.append(loader) + return filtered_loaders + + def _create_loader_action_item( + self, + loader, + contexts, + project_name, + folder_ids=None, + product_ids=None, + version_ids=None, + representation_ids=None, + repre_name=None, + ): + label = self._get_action_label(loader) + if repre_name: + label = "{} ({})".format(label, repre_name) + return ActionItem( + get_loader_identifier(loader), + label=label, + icon=self._get_action_icon(loader), + tooltip=self._get_action_tooltip(loader), + options=loader.get_options(contexts), + order=loader.order, + project_name=project_name, + folder_ids=folder_ids, + product_ids=product_ids, + version_ids=version_ids, + representation_ids=representation_ids, + ) + + def _get_loaders(self, project_name): + """Loaders with loaded settings for a project. + + Questions: + Project name is required because of settings. Should we actually + pass in current project name instead of project name where + we want to show loaders for? + + Returns: + tuple[list[SubsetLoaderPlugin], list[LoaderPlugin]]: Discovered + loader plugins. + """ + + loaders_by_identifier_c = self._loaders_by_identifier[project_name] + product_loaders_c = self._product_loaders[project_name] + repre_loaders_c = self._repre_loaders[project_name] + if loaders_by_identifier_c.is_valid: + return product_loaders_c.get_data(), repre_loaders_c.get_data() + + # Get all representation->loader combinations available for the + # index under the cursor, so we can list the user the options. + available_loaders = self._filter_loaders_by_tool_name( + project_name, discover_loader_plugins(project_name) + ) + + repre_loaders = [] + product_loaders = [] + loaders_by_identifier = {} + for loader_cls in available_loaders: + if not loader_cls.enabled: + continue + + identifier = get_loader_identifier(loader_cls) + loaders_by_identifier[identifier] = loader_cls + if issubclass(loader_cls, SubsetLoaderPlugin): + product_loaders.append(loader_cls) + else: + repre_loaders.append(loader_cls) + + loaders_by_identifier_c.update_data(loaders_by_identifier) + product_loaders_c.update_data(product_loaders) + repre_loaders_c.update_data(repre_loaders) + return product_loaders, repre_loaders + + def _get_loader_by_identifier(self, project_name, identifier): + if not self._loaders_by_identifier[project_name].is_valid: + self._get_loaders(project_name) + loaders_by_identifier_c = self._loaders_by_identifier[project_name] + loaders_by_identifier = loaders_by_identifier_c.get_data() + return loaders_by_identifier.get(identifier) + + def _actions_sorter(self, action_item): + """Sort the Loaders by their order and then their name. + + Returns: + tuple[int, str]: Sort keys. + """ + + return action_item.order, action_item.label + + def _get_version_docs(self, project_name, version_ids): + """Get version documents for given version ids. + + This function also handles hero versions and copies data from + source version to it. + + Todos: + Remove this function when this is completely rewritten to + use AYON calls. 
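+
+        Example:
+            A hero version document has '"type": "hero_version"' and its
+            'version_id' field points to the source version. This function
+            deep-copies the source version's 'data' onto the hero document
+            so that loaders receive complete documents.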
+ """ + + version_docs = list(get_versions( + project_name, version_ids=version_ids, hero=True + )) + hero_versions_by_src_id = collections.defaultdict(list) + src_hero_version = set() + for version_doc in version_docs: + if version_doc["type"] != "hero": + continue + version_id = "" + src_hero_version.add(version_id) + hero_versions_by_src_id[version_id].append(version_doc) + + src_versions = [] + if src_hero_version: + src_versions = get_versions(project_name, version_ids=version_ids) + for src_version in src_versions: + src_version_id = src_version["_id"] + for hero_version in hero_versions_by_src_id[src_version_id]: + hero_version["data"] = copy.deepcopy(src_version["data"]) + + return version_docs + + def _contexts_for_versions(self, project_name, version_ids): + """Get contexts for given version ids. + + Prepare version contexts for 'SubsetLoaderPlugin' and representation + contexts for 'LoaderPlugin' for all children representations of + given versions. + + This method is very similar to '_contexts_for_representations' but the + queries of documents are called in a different order. + + Args: + project_name (str): Project name. + version_ids (Iterable[str]): Version ids. + + Returns: + tuple[list[dict[str, Any]], list[dict[str, Any]]]: Version and + representation contexts. + """ + + # TODO fix hero version + version_context_by_id = {} + repre_context_by_id = {} + if not project_name and not version_ids: + return version_context_by_id, repre_context_by_id + + version_docs = self._get_version_docs(project_name, version_ids) + version_docs_by_id = {} + version_docs_by_product_id = collections.defaultdict(list) + for version_doc in version_docs: + version_id = version_doc["_id"] + product_id = version_doc["parent"] + version_docs_by_id[version_id] = version_doc + version_docs_by_product_id[product_id].append(version_doc) + + _product_ids = set(version_docs_by_product_id.keys()) + _product_docs = get_subsets(project_name, subset_ids=_product_ids) + product_docs_by_id = {p["_id"]: p for p in _product_docs} + + _folder_ids = {p["parent"] for p in product_docs_by_id.values()} + _folder_docs = get_assets(project_name, asset_ids=_folder_ids) + folder_docs_by_id = {f["_id"]: f for f in _folder_docs} + + project_doc = get_project(project_name) + project_doc["code"] = project_doc["data"]["code"] + + for version_doc in version_docs: + version_id = version_doc["_id"] + product_id = version_doc["parent"] + product_doc = product_docs_by_id[product_id] + folder_id = product_doc["parent"] + folder_doc = folder_docs_by_id[folder_id] + version_context_by_id[version_id] = { + "project": project_doc, + "asset": folder_doc, + "subset": product_doc, + "version": version_doc, + } + + repre_docs = get_representations( + project_name, version_ids=version_ids) + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = version_docs_by_id[version_id] + product_id = version_doc["parent"] + product_doc = product_docs_by_id[product_id] + folder_id = product_doc["parent"] + folder_doc = folder_docs_by_id[folder_id] + + repre_context_by_id[repre_doc["_id"]] = { + "project": project_doc, + "asset": folder_doc, + "subset": product_doc, + "version": version_doc, + "representation": repre_doc, + } + + return version_context_by_id, repre_context_by_id + + def _contexts_for_representations(self, project_name, repre_ids): + """Get contexts for given representation ids. 
+ + Prepare version contexts for 'SubsetLoaderPlugin' and representation + contexts for 'LoaderPlugin' for all children representations of + given versions. + + This method is very similar to '_contexts_for_versions' but the + queries of documents are called in a different order. + + Args: + project_name (str): Project name. + repre_ids (Iterable[str]): Representation ids. + + Returns: + tuple[list[dict[str, Any]], list[dict[str, Any]]]: Version and + representation contexts. + """ + + product_context_by_id = {} + repre_context_by_id = {} + if not project_name and not repre_ids: + return product_context_by_id, repre_context_by_id + + repre_docs = list(get_representations( + project_name, representation_ids=repre_ids + )) + version_ids = {r["parent"] for r in repre_docs} + version_docs = self._get_version_docs(project_name, version_ids) + version_docs_by_id = { + v["_id"]: v for v in version_docs + } + + product_ids = {v["parent"] for v in version_docs_by_id.values()} + product_docs = get_subsets(project_name, subset_ids=product_ids) + product_docs_by_id = { + p["_id"]: p for p in product_docs + } + + folder_ids = {p["parent"] for p in product_docs_by_id.values()} + folder_docs = get_assets(project_name, asset_ids=folder_ids) + folder_docs_by_id = { + f["_id"]: f for f in folder_docs + } + + project_doc = get_project(project_name) + project_doc["code"] = project_doc["data"]["code"] + + for product_id, product_doc in product_docs_by_id.items(): + folder_id = product_doc["parent"] + folder_doc = folder_docs_by_id[folder_id] + product_context_by_id[product_id] = { + "project": project_doc, + "asset": folder_doc, + "subset": product_doc, + } + + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = version_docs_by_id[version_id] + product_id = version_doc["parent"] + product_doc = product_docs_by_id[product_id] + folder_id = product_doc["parent"] + folder_doc = folder_docs_by_id[folder_id] + + repre_context_by_id[repre_doc["_id"]] = { + "project": project_doc, + "asset": folder_doc, + "subset": product_doc, + "version": version_doc, + "representation": repre_doc, + } + return product_context_by_id, repre_context_by_id + + def _get_action_items_for_contexts( + self, + project_name, + version_context_by_id, + repre_context_by_id + ): + """Prepare action items based on contexts. + + Actions are prepared based on discovered loader plugins and contexts. + The context must be valid for the loader plugin. + + Args: + project_name (str): Project name. + version_context_by_id (dict[str, dict[str, Any]]): Version + contexts by version id. 
+            repre_context_by_id (dict[str, dict[str, Any]]): Representation
+                contexts by representation id.
+
+        Returns:
+            list[ActionItem]: Action items for the given contexts.
+        """
+
+        action_items = []
+        if not version_context_by_id and not repre_context_by_id:
+            return action_items
+
+        product_loaders, repre_loaders = self._get_loaders(project_name)
+
+        repre_contexts_by_name = collections.defaultdict(list)
+        for repre_context in repre_context_by_id.values():
+            repre_name = repre_context["representation"]["name"]
+            repre_contexts_by_name[repre_name].append(repre_context)
+
+        for loader in repre_loaders:
+            for repre_name, repre_contexts in repre_contexts_by_name.items():
+                filtered_repre_contexts = filter_repre_contexts_by_loader(
+                    repre_contexts, loader)
+                if not filtered_repre_contexts:
+                    continue
+
+                repre_ids = set()
+                repre_version_ids = set()
+                repre_product_ids = set()
+                repre_folder_ids = set()
+                for repre_context in filtered_repre_contexts:
+                    repre_ids.add(repre_context["representation"]["_id"])
+                    repre_product_ids.add(repre_context["subset"]["_id"])
+                    repre_version_ids.add(repre_context["version"]["_id"])
+                    repre_folder_ids.add(repre_context["asset"]["_id"])
+
+                item = self._create_loader_action_item(
+                    loader,
+                    repre_contexts,
+                    project_name=project_name,
+                    folder_ids=repre_folder_ids,
+                    product_ids=repre_product_ids,
+                    version_ids=repre_version_ids,
+                    representation_ids=repre_ids,
+                    repre_name=repre_name,
+                )
+                action_items.append(item)
+
+        # Subset Loaders.
+        version_ids = set(version_context_by_id.keys())
+        product_folder_ids = set()
+        product_ids = set()
+        for product_context in version_context_by_id.values():
+            product_ids.add(product_context["subset"]["_id"])
+            product_folder_ids.add(product_context["asset"]["_id"])
+
+        version_contexts = list(version_context_by_id.values())
+        for loader in product_loaders:
+            item = self._create_loader_action_item(
+                loader,
+                version_contexts,
+                project_name=project_name,
+                folder_ids=product_folder_ids,
+                product_ids=product_ids,
+                version_ids=version_ids,
+            )
+            action_items.append(item)
+
+        action_items.sort(key=self._actions_sorter)
+        return action_items
+
+    def _trigger_version_loader(
+        self,
+        loader,
+        options,
+        project_name,
+        version_ids,
+    ):
+        """Trigger version loader.
+
+        This triggers 'load' method of 'SubsetLoaderPlugin' for given version
+        ids.
+
+        Note:
+            Even though the plugin is named 'SubsetLoaderPlugin' it actually
+            expects versions and could be named 'VersionLoaderPlugin'. Because
+            a refactor of the load system introducing 'LoaderAction' plugins
+            is planned, renaming it is no longer worthwhile.
+
+        Args:
+            loader (SubsetLoaderPlugin): Loader plugin to use.
+            options (dict): Option values for loader.
+            project_name (str): Project name.
+            version_ids (Iterable[str]): Version ids.
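+
+        Example:
+            Each context passed to the loader has this shape, where the
+            values are the queried documents::
+
+                {
+                    "project": project_doc,
+                    "asset": folder_doc,
+                    "subset": product_doc,
+                    "version": version_doc,
+                }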
+ """ + + project_doc = get_project(project_name) + project_doc["code"] = project_doc["data"]["code"] + + version_docs = self._get_version_docs(project_name, version_ids) + product_ids = {v["parent"] for v in version_docs} + product_docs = get_subsets(project_name, subset_ids=product_ids) + product_docs_by_id = {f["_id"]: f for f in product_docs} + folder_ids = {p["parent"] for p in product_docs_by_id.values()} + folder_docs = get_assets(project_name, asset_ids=folder_ids) + folder_docs_by_id = {f["_id"]: f for f in folder_docs} + product_contexts = [] + for version_doc in version_docs: + product_id = version_doc["parent"] + product_doc = product_docs_by_id[product_id] + folder_id = product_doc["parent"] + folder_doc = folder_docs_by_id[folder_id] + product_contexts.append({ + "project": project_doc, + "asset": folder_doc, + "subset": product_doc, + "version": version_doc, + }) + + return self._load_products_by_loader( + loader, product_contexts, options + ) + + def _trigger_representation_loader( + self, + loader, + options, + project_name, + representation_ids, + ): + """Trigger representation loader. + + This triggers 'load' method of 'LoaderPlugin' for given representation + ids. For that are prepared contexts for each representation, with + all parent documents. + + Args: + loader (LoaderPlugin): Loader plugin to use. + options (dict): Option values for loader. + project_name (str): Project name. + representation_ids (Iterable[str]): Representation ids. + """ + + project_doc = get_project(project_name) + project_doc["code"] = project_doc["data"]["code"] + repre_docs = list(get_representations( + project_name, representation_ids=representation_ids + )) + version_ids = {r["parent"] for r in repre_docs} + version_docs = self._get_version_docs(project_name, version_ids) + version_docs_by_id = {v["_id"]: v for v in version_docs} + product_ids = {v["parent"] for v in version_docs_by_id.values()} + product_docs = get_subsets(project_name, subset_ids=product_ids) + product_docs_by_id = {p["_id"]: p for p in product_docs} + folder_ids = {p["parent"] for p in product_docs_by_id.values()} + folder_docs = get_assets(project_name, asset_ids=folder_ids) + folder_docs_by_id = {f["_id"]: f for f in folder_docs} + repre_contexts = [] + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + version_doc = version_docs_by_id[version_id] + product_id = version_doc["parent"] + product_doc = product_docs_by_id[product_id] + folder_id = product_doc["parent"] + folder_doc = folder_docs_by_id[folder_id] + repre_contexts.append({ + "project": project_doc, + "asset": folder_doc, + "subset": product_doc, + "version": version_doc, + "representation": repre_doc, + }) + + return self._load_representations_by_loader( + loader, repre_contexts, options + ) + + def _load_representations_by_loader(self, loader, repre_contexts, options): + """Loops through list of repre_contexts and loads them with one loader + + Args: + loader (LoaderPlugin): Loader plugin to use. + repre_contexts (list[dict]): Full info about selected + representations, containing repre, version, subset, asset and + project documents. + options (dict): Data from options. 
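+
+        Returns:
+            list[tuple]: Error information as
+                '(message, formatted_traceback, representation name,
+                product name, version name)' tuples for each representation
+                that failed to load; empty when all loads succeeded.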
+ """ + + error_info = [] + for repre_context in repre_contexts: + version_doc = repre_context["version"] + if version_doc["type"] == "hero_version": + version_name = "Hero" + else: + version_name = version_doc.get("name") + try: + load_with_repre_context( + loader, + repre_context, + options=options + ) + + except IncompatibleLoaderError as exc: + print(exc) + error_info.append(( + "Incompatible Loader", + None, + repre_context["representation"]["name"], + repre_context["subset"]["name"], + version_name + )) + + except Exception as exc: + formatted_traceback = None + if not isinstance(exc, LoadError): + exc_type, exc_value, exc_traceback = sys.exc_info() + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + + error_info.append(( + str(exc), + formatted_traceback, + repre_context["representation"]["name"], + repre_context["subset"]["name"], + version_name + )) + return error_info + + def _load_products_by_loader(self, loader, version_contexts, options): + """Triggers load with SubsetLoader type of loaders. + + Warning: + Plugin is named 'SubsetLoader' but version is passed to context + too. + + Args: + loader (SubsetLoder): Loader used to load. + version_contexts (list[dict[str, Any]]): For context for each + version. + options (dict[str, Any]): Options for loader that user could fill. + """ + + error_info = [] + if loader.is_multiple_contexts_compatible: + subset_names = [] + for context in version_contexts: + subset_name = context.get("subset", {}).get("name") or "N/A" + subset_names.append(subset_name) + try: + load_with_subset_contexts( + loader, + version_contexts, + options=options + ) + + except Exception as exc: + formatted_traceback = None + if not isinstance(exc, LoadError): + exc_type, exc_value, exc_traceback = sys.exc_info() + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + error_info.append(( + str(exc), + formatted_traceback, + None, + ", ".join(subset_names), + None + )) + else: + for version_context in version_contexts: + subset_name = ( + version_context.get("subset", {}).get("name") or "N/A" + ) + try: + load_with_subset_context( + loader, + version_context, + options=options + ) + + except Exception as exc: + formatted_traceback = None + if not isinstance(exc, LoadError): + exc_type, exc_value, exc_traceback = sys.exc_info() + formatted_traceback = "".join( + traceback.format_exception( + exc_type, exc_value, exc_traceback + ) + ) + + error_info.append(( + str(exc), + formatted_traceback, + None, + subset_name, + None + )) + + return error_info diff --git a/openpype/tools/ayon_loader/models/products.py b/client/ayon_core/tools/loader/models/products.py similarity index 99% rename from openpype/tools/ayon_loader/models/products.py rename to client/ayon_core/tools/loader/models/products.py index 40b6474d12..63547bef8b 100644 --- a/openpype/tools/ayon_loader/models/products.py +++ b/client/ayon_core/tools/loader/models/products.py @@ -5,9 +5,9 @@ import ayon_api from ayon_api.operations import OperationsSession -from openpype.style import get_default_entity_icon_color -from openpype.tools.ayon_utils.models import NestedCacheItem -from openpype.tools.ayon_loader.abstract import ( +from ayon_core.style import get_default_entity_icon_color +from ayon_core.tools.ayon_utils.models import NestedCacheItem +from ayon_core.tools.loader.abstract import ( ProductTypeItem, ProductItem, VersionItem, diff --git a/openpype/tools/ayon_loader/models/selection.py 
b/client/ayon_core/tools/loader/models/selection.py
similarity index 100%
rename from openpype/tools/ayon_loader/models/selection.py
rename to client/ayon_core/tools/loader/models/selection.py
diff --git a/client/ayon_core/tools/loader/models/site_sync.py b/client/ayon_core/tools/loader/models/site_sync.py
new file mode 100644
index 0000000000..e6158ea280
--- /dev/null
+++ b/client/ayon_core/tools/loader/models/site_sync.py
@@ -0,0 +1,514 @@
+import collections
+
+from ayon_core.lib import Logger
+from ayon_core.client.entities import get_representations
+from ayon_core.client import get_linked_representation_id
+from ayon_core.addon import AddonsManager
+from ayon_core.tools.ayon_utils.models import NestedCacheItem
+from ayon_core.tools.loader.abstract import ActionItem
+
+DOWNLOAD_IDENTIFIER = "sitesync.download"
+UPLOAD_IDENTIFIER = "sitesync.upload"
+REMOVE_IDENTIFIER = "sitesync.remove"
+
+log = Logger.get_logger(__name__)
+
+
+def _default_version_availability():
+    return 0, 0
+
+
+def _default_repre_status():
+    return 0.0, 0.0
+
+
+class SiteSyncModel:
+    """Model handling site sync logic.
+
+    The model takes care of the site sync functionality. All public
+    functions should be safe to call even if site sync is not available.
+    """
+
+    lifetime = 60  # In seconds (minute by default)
+    status_lifetime = 20
+
+    def __init__(self, controller):
+        self._controller = controller
+
+        self._site_icons = None
+        self._site_sync_enabled_cache = NestedCacheItem(
+            levels=1, lifetime=self.lifetime
+        )
+        self._active_site_cache = NestedCacheItem(
+            levels=1, lifetime=self.lifetime
+        )
+        self._remote_site_cache = NestedCacheItem(
+            levels=1, lifetime=self.lifetime
+        )
+        self._version_availability_cache = NestedCacheItem(
+            levels=2,
+            default_factory=_default_version_availability,
+            lifetime=self.status_lifetime
+        )
+        self._repre_status_cache = NestedCacheItem(
+            levels=2,
+            default_factory=_default_repre_status,
+            lifetime=self.status_lifetime
+        )
+
+        manager = AddonsManager()
+        self._site_sync_addon = manager.get("sync_server")
+
+    def reset(self):
+        self._site_icons = None
+        self._site_sync_enabled_cache.reset()
+        self._active_site_cache.reset()
+        self._remote_site_cache.reset()
+        self._version_availability_cache.reset()
+        self._repre_status_cache.reset()
+
+    def is_site_sync_enabled(self, project_name=None):
+        """Site sync is enabled for a project.
+
+        Returns False if the site sync addon is not available, not enabled,
+        or the project has disabled it.
+
+        Args:
+            project_name (Union[str, None]): Project name. If project name
+                is 'None', True is returned if the site sync addon
+                is available and enabled.
+
+        Returns:
+            bool: Site sync is enabled.
+        """
+
+        if not self._is_site_sync_addon_enabled():
+            return False
+        cache = self._site_sync_enabled_cache[project_name]
+        if not cache.is_valid:
+            enabled = True
+            if project_name:
+                enabled = self._site_sync_addon.is_project_enabled(
+                    project_name, single=True
+                )
+            cache.update_data(enabled)
+        return cache.get_data()
+
+    def get_active_site(self, project_name):
+        """Active site name for a project.
+
+        Args:
+            project_name (str): Project name.
+
+        Returns:
+            Union[str, None]: Active site name.
+        """
+
+        cache = self._active_site_cache[project_name]
+        if not cache.is_valid:
+            site_name = None
+            if project_name and self._is_site_sync_addon_enabled():
+                site_name = self._site_sync_addon.get_active_site(
+                    project_name
+                )
+            cache.update_data(site_name)
+        return cache.get_data()
+
+    def get_remote_site(self, project_name):
+        """Remote site name for a project.
+ + Args: + project_name (str): Project name. + + Returns: + Union[str, None]: Remote site name. + """ + + cache = self._remote_site_cache[project_name] + if not cache.is_valid: + site_name = None + if project_name and self._is_site_sync_addon_enabled(): + site_name = self._site_sync_addon.get_remote_site(project_name) + cache.update_data(site_name) + return cache.get_data() + + def get_active_site_icon_def(self, project_name): + """Active site icon definition. + + Args: + project_name (Union[str, None]): Name of project. + + Returns: + Union[dict[str, Any], None]: Site icon definition. + """ + + if not project_name or not self.is_site_sync_enabled(project_name): + return None + active_site = self.get_active_site(project_name) + return self._get_site_icon_def(project_name, active_site) + + def get_remote_site_icon_def(self, project_name): + """Remote site icon definition. + + Args: + project_name (Union[str, None]): Name of project. + + Returns: + Union[dict[str, Any], None]: Site icon definition. + """ + + if not project_name or not self.is_site_sync_enabled(project_name): + return None + remote_site = self.get_remote_site(project_name) + return self._get_site_icon_def(project_name, remote_site) + + def _get_site_icon_def(self, project_name, site_name): + # use different icon for studio even if provider is 'local_drive' + if site_name == self._site_sync_addon.DEFAULT_SITE: + provider = "studio" + else: + provider = self._get_provider_for_site(project_name, site_name) + return self._get_provider_icon(provider) + + def get_version_sync_availability(self, project_name, version_ids): + """Returns how many representations are available on sites. + + Returned value `{version_id: (4, 6)}` denotes that locally are + available 4 and remotely 6 representation. + NOTE: Available means they were synced to site. + + Returns: + dict[str, tuple[int, int]] + """ + + if not self.is_site_sync_enabled(project_name): + return { + version_id: _default_version_availability() + for version_id in version_ids + } + + output = {} + project_cache = self._version_availability_cache[project_name] + invalid_ids = set() + for version_id in version_ids: + repre_cache = project_cache[version_id] + if repre_cache.is_valid: + output[version_id] = repre_cache.get_data() + else: + invalid_ids.add(version_id) + + if invalid_ids: + self._refresh_version_availability( + project_name, invalid_ids + ) + for version_id in invalid_ids: + version_cache = project_cache[version_id] + output[version_id] = version_cache.get_data() + return output + + def get_representations_sync_status( + self, project_name, representation_ids + ): + """ + + Args: + project_name (str): Project name. + representation_ids (Iterable[str]): Representation ids. 
+ + Returns: + dict[str, tuple[float, float]] + """ + + if not self.is_site_sync_enabled(project_name): + return { + repre_id: _default_repre_status() + for repre_id in representation_ids + } + + output = {} + project_cache = self._repre_status_cache[project_name] + invalid_ids = set() + for repre_id in representation_ids: + repre_cache = project_cache[repre_id] + if repre_cache.is_valid: + output[repre_id] = repre_cache.get_data() + else: + invalid_ids.add(repre_id) + + if invalid_ids: + self._refresh_representations_sync_status( + project_name, invalid_ids + ) + for repre_id in invalid_ids: + repre_cache = project_cache[repre_id] + output[repre_id] = repre_cache.get_data() + return output + + def get_site_sync_action_items(self, project_name, representation_ids): + """ + + Args: + project_name (str): Project name. + representation_ids (Iterable[str]): Representation ids. + + Returns: + list[ActionItem]: Actions that can be shown in loader. + """ + + if not self.is_site_sync_enabled(project_name): + return [] + + repres_status = self.get_representations_sync_status( + project_name, representation_ids + ) + + repre_ids_per_identifier = collections.defaultdict(set) + for repre_id in representation_ids: + repre_status = repres_status[repre_id] + local_status, remote_status = repre_status + + if local_status: + repre_ids_per_identifier[UPLOAD_IDENTIFIER].add(repre_id) + repre_ids_per_identifier[REMOVE_IDENTIFIER].add(repre_id) + + if remote_status: + repre_ids_per_identifier[DOWNLOAD_IDENTIFIER].add(repre_id) + + action_items = [] + for identifier, repre_ids in repre_ids_per_identifier.items(): + if identifier == DOWNLOAD_IDENTIFIER: + action_items.append(self._create_download_action_item( + project_name, repre_ids + )) + elif identifier == UPLOAD_IDENTIFIER: + action_items.append(self._create_upload_action_item( + project_name, repre_ids + )) + elif identifier == REMOVE_IDENTIFIER: + action_items.append(self._create_delete_action_item( + project_name, repre_ids + )) + + return action_items + + def is_site_sync_action(self, identifier): + """Should be `identifier` handled by SiteSync. + + Args: + identifier (str): Action identifier. + + Returns: + bool: Should action be handled by SiteSync. + """ + + return identifier in { + UPLOAD_IDENTIFIER, + DOWNLOAD_IDENTIFIER, + REMOVE_IDENTIFIER, + } + + def trigger_action_item( + self, + identifier, + project_name, + representation_ids + ): + """Resets status for site_name or remove local files. + + Args: + identifier (str): Action identifier. + project_name (str): Project name. + representation_ids (Iterable[str]): Representation ids. + """ + + active_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) + + repre_docs = list(get_representations( + project_name, representation_ids=representation_ids + )) + families_per_repre_id = { + item["_id"]: item["context"]["family"] + for item in repre_docs + } + + for repre_id in representation_ids: + family = families_per_repre_id[repre_id] + if identifier == DOWNLOAD_IDENTIFIER: + self._add_site( + project_name, repre_id, active_site, family + ) + + elif identifier == UPLOAD_IDENTIFIER: + self._add_site( + project_name, repre_id, remote_site, family + ) + + elif identifier == REMOVE_IDENTIFIER: + self._site_sync_addon.remove_site( + project_name, + repre_id, + active_site, + remove_local_files=True + ) + + def _is_site_sync_addon_enabled(self): + """ + Returns: + bool: Site sync addon is enabled. 
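+
+        Example:
+            Returns False when the 'sync_server' addon is not available::
+
+                >>> model._is_site_sync_addon_enabled()
+                False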
+        """
+
+        if self._site_sync_addon is None:
+            return False
+        return self._site_sync_addon.enabled
+
+    def _get_provider_for_site(self, project_name, site_name):
+        """Provider for a site.
+
+        Args:
+            project_name (str): Project name.
+            site_name (str): Site name.
+
+        Returns:
+            Union[str, None]: Provider name.
+        """
+
+        if not self._is_site_sync_addon_enabled():
+            return None
+        return self._site_sync_addon.get_provider_for_site(
+            project_name, site_name
+        )
+
+    def _get_provider_icon(self, provider):
+        """Icon definition of a site provider.
+
+        Args:
+            provider (Union[str, None]): Provider name.
+
+        Returns:
+            Union[dict[str, Any], None]: Icon of site provider.
+        """
+
+        if not provider:
+            return None
+
+        if self._site_icons is None:
+            self._site_icons = self._site_sync_addon.get_site_icons()
+        return self._site_icons.get(provider)
+
+    def _refresh_version_availability(self, project_name, version_ids):
+        if not project_name or not version_ids:
+            return
+        project_cache = self._version_availability_cache[project_name]
+
+        avail_by_id = self._site_sync_addon.get_version_availability(
+            project_name,
+            version_ids,
+            self.get_active_site(project_name),
+            self.get_remote_site(project_name),
+        )
+        for version_id in version_ids:
+            status = avail_by_id.get(version_id)
+            if status is None:
+                status = _default_version_availability()
+            project_cache[version_id].update_data(status)
+
+    def _refresh_representations_sync_status(
+        self, project_name, representation_ids
+    ):
+        if not project_name or not representation_ids:
+            return
+        project_cache = self._repre_status_cache[project_name]
+        status_by_repre_id = (
+            self._site_sync_addon.get_representations_sync_state(
+                project_name,
+                representation_ids,
+                self.get_active_site(project_name),
+                self.get_remote_site(project_name),
+            )
+        )
+        for repre_id in representation_ids:
+            status = status_by_repre_id.get(repre_id)
+            if status is None:
+                status = _default_repre_status()
+            project_cache[repre_id].update_data(status)
+
+    def _create_download_action_item(self, project_name, representation_ids):
+        return self._create_action_item(
+            project_name,
+            representation_ids,
+            DOWNLOAD_IDENTIFIER,
+            "Download",
+            "Mark representation for download locally",
+            "fa.download"
+        )
+
+    def _create_upload_action_item(self, project_name, representation_ids):
+        return self._create_action_item(
+            project_name,
+            representation_ids,
+            UPLOAD_IDENTIFIER,
+            "Upload",
+            "Mark representation for upload remotely",
+            "fa.upload"
+        )
+
+    def _create_delete_action_item(self, project_name, representation_ids):
+        return self._create_action_item(
+            project_name,
+            representation_ids,
+            REMOVE_IDENTIFIER,
+            "Remove from local",
+            "Remove local synchronization",
+            "fa.trash"
+        )
+
+    def _create_action_item(
+        self,
+        project_name,
+        representation_ids,
+        identifier,
+        label,
+        tooltip,
+        icon_name
+    ):
+        return ActionItem(
+            identifier,
+            label,
+            icon={
+                "type": "awesome-font",
+                "name": icon_name,
+                "color": "#999999"
+            },
+            tooltip=tooltip,
+            options={},
+            order=1,
+            project_name=project_name,
+            folder_ids=[],
+            product_ids=[],
+            version_ids=[],
+            representation_ids=representation_ids,
+        )
+
+    def _add_site(self, project_name, repre_id, site_name, family):
+        self._site_sync_addon.add_site(
+            project_name, repre_id, site_name, force=True
+        )
+
+        # TODO this should happen in site sync addon
+        if family != "workfile":
+            return
+
+        links = get_linked_representation_id(
+            project_name,
+            repre_id=repre_id,
+            link_type="reference"
+        )
+        for link_repre_id in links:
+            try:
+                print("Adding {} to linked representation: {}".format(
+                    site_name, link_repre_id))
+
self._site_sync_addon.add_site(
+                    project_name,
+                    link_repre_id,
+                    site_name,
+                    force=False
+                )
+            except Exception:
+                # Do not add/reset the working site for references
+                log.debug("Site present", exc_info=True)
diff --git a/openpype/tools/ayon_loader/ui/__init__.py b/client/ayon_core/tools/loader/ui/__init__.py
similarity index 100%
rename from openpype/tools/ayon_loader/ui/__init__.py
rename to client/ayon_core/tools/loader/ui/__init__.py
diff --git a/openpype/tools/ayon_loader/ui/actions_utils.py b/client/ayon_core/tools/loader/ui/actions_utils.py
similarity index 93%
rename from openpype/tools/ayon_loader/ui/actions_utils.py
rename to client/ayon_core/tools/loader/ui/actions_utils.py
index a269b643dc..bf6ab6eeb5 100644
--- a/openpype/tools/ayon_loader/ui/actions_utils.py
+++ b/client/ayon_core/tools/loader/ui/actions_utils.py
@@ -3,14 +3,14 @@
 from qtpy import QtWidgets, QtGui
 import qtawesome
 
-from openpype.lib.attribute_definitions import AbstractAttrDef
-from openpype.tools.attribute_defs import AttributeDefinitionsDialog
-from openpype.tools.utils.widgets import (
+from ayon_core.lib.attribute_definitions import AbstractAttrDef
+from ayon_core.tools.attribute_defs import AttributeDefinitionsDialog
+from ayon_core.tools.utils.widgets import (
     OptionalMenu,
     OptionalAction,
     OptionDialog,
 )
-from openpype.tools.ayon_utils.widgets import get_qt_icon
+from ayon_core.tools.ayon_utils.widgets import get_qt_icon
 
 
 def show_actions_menu(action_items, global_point, one_item_selected, parent):
diff --git a/client/ayon_core/tools/loader/ui/folders_widget.py b/client/ayon_core/tools/loader/ui/folders_widget.py
new file mode 100644
index 0000000000..9d5b95b2a6
--- /dev/null
+++ b/client/ayon_core/tools/loader/ui/folders_widget.py
@@ -0,0 +1,407 @@
+import qtpy
+from qtpy import QtWidgets, QtCore, QtGui
+
+from ayon_core.tools.utils import (
+    RecursiveSortFilterProxyModel,
+    DeselectableTreeView,
+)
+from ayon_core.style import get_objected_colors
+
+from ayon_core.tools.ayon_utils.widgets import (
+    FoldersQtModel,
+    FOLDERS_MODEL_SENDER_NAME,
+)
+from ayon_core.tools.ayon_utils.widgets.folders_widget import FOLDER_ID_ROLE
+
+if qtpy.API == "pyside":
+    from PySide.QtGui import QStyleOptionViewItemV4
+elif qtpy.API == "pyqt4":
+    from PyQt4.QtGui import QStyleOptionViewItemV4
+
+UNDERLINE_COLORS_ROLE = QtCore.Qt.UserRole + 50
+
+
+class UnderlinesFolderDelegate(QtWidgets.QItemDelegate):
+    """Item delegate drawing color bars under the folder label.
+
+    This delegate is used in the loader tool. Multiselection of folders may
+    group products by name under colored groups; the selected color groups
+    are then propagated back to the selected folders as underlines.
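+
+    Example:
+        A minimal wiring sketch; the view and item come from the widget and
+        model code below, and the color list is hypothetical:
+
+            view.setItemDelegate(UnderlinesFolderDelegate(view))
+            item.setData(["#ff0000", None], UNDERLINE_COLORS_ROLE)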
+    """
+    bar_height = 3
+
+    def __init__(self, *args, **kwargs):
+        super(UnderlinesFolderDelegate, self).__init__(*args, **kwargs)
+        colors = get_objected_colors("loader", "asset-view")
+        self._selected_color = colors["selected"].get_qcolor()
+        self._hover_color = colors["hover"].get_qcolor()
+        self._selected_hover_color = colors["selected-hover"].get_qcolor()
+
+    def sizeHint(self, option, index):
+        """Add bar height to size hint."""
+        result = super(UnderlinesFolderDelegate, self).sizeHint(option, index)
+        height = result.height()
+        result.setHeight(height + self.bar_height)
+
+        return result
+
+    def paint(self, painter, option, index):
+        """Replicate painting of an item and draw color bars if needed."""
+        # Qt4 compat
+        if qtpy.API in ("pyside", "pyqt4"):
+            option = QStyleOptionViewItemV4(option)
+
+        painter.save()
+
+        item_rect = QtCore.QRect(option.rect)
+        item_rect.setHeight(option.rect.height() - self.bar_height)
+
+        subset_colors = index.data(UNDERLINE_COLORS_ROLE) or []
+
+        subset_colors_width = 0
+        if subset_colors:
+            subset_colors_width = option.rect.width() / len(subset_colors)
+
+        subset_rects = []
+        counter = 0
+        for subset_c in subset_colors:
+            new_color = None
+            new_rect = None
+            if subset_c:
+                new_color = QtGui.QColor(subset_c)
+
+                new_rect = QtCore.QRect(
+                    option.rect.left() + (counter * subset_colors_width),
+                    option.rect.top() + (
+                        option.rect.height() - self.bar_height
+                    ),
+                    subset_colors_width,
+                    self.bar_height
+                )
+            subset_rects.append((new_color, new_rect))
+            counter += 1
+
+        # Background
+        if option.state & QtWidgets.QStyle.State_Selected:
+            if len(subset_colors) == 0:
+                item_rect.setTop(item_rect.top() + (self.bar_height / 2))
+
+            if option.state & QtWidgets.QStyle.State_MouseOver:
+                bg_color = self._selected_hover_color
+            else:
+                bg_color = self._selected_color
+        else:
+            item_rect.setTop(item_rect.top() + (self.bar_height / 2))
+            if option.state & QtWidgets.QStyle.State_MouseOver:
+                bg_color = self._hover_color
+            else:
+                bg_color = QtGui.QColor()
+                bg_color.setAlpha(0)
+
+        # Rounded corners are not needed, so a plain rect fill is enough
+        #   (simpler, and avoids a painter restore)
+        painter.fillRect(
+            option.rect,
+            QtGui.QBrush(bg_color)
+        )
+
+        if option.state & QtWidgets.QStyle.State_Selected:
+            for color, subset_rect in subset_rects:
+                if not color or not subset_rect:
+                    continue
+                painter.fillRect(subset_rect, QtGui.QBrush(color))
+
+        # Icon
+        icon_index = index.model().index(
+            index.row(), index.column(), index.parent()
+        )
+        # - Default icon_rect if not icon
+        icon_rect = QtCore.QRect(
+            item_rect.left(),
+            item_rect.top(),
+            # To make sure it's same size all the time
+            option.rect.height() - self.bar_height,
+            option.rect.height() - self.bar_height
+        )
+        icon = index.model().data(icon_index, QtCore.Qt.DecorationRole)
+
+        if icon:
+            mode = QtGui.QIcon.Normal
+            if not (option.state & QtWidgets.QStyle.State_Enabled):
+                mode = QtGui.QIcon.Disabled
+            elif option.state & QtWidgets.QStyle.State_Selected:
+                mode = QtGui.QIcon.Selected
+
+            if isinstance(icon, QtGui.QPixmap):
+                icon = QtGui.QIcon(icon)
+                option.decorationSize = icon.size() / icon.devicePixelRatio()
+
+            elif isinstance(icon, QtGui.QColor):
+                pixmap = QtGui.QPixmap(option.decorationSize)
+                pixmap.fill(icon)
+                icon = QtGui.QIcon(pixmap)
+
+            elif isinstance(icon, QtGui.QImage):
+                icon = QtGui.QIcon(QtGui.QPixmap.fromImage(icon))
+                option.decorationSize = icon.size() / icon.devicePixelRatio()
+
+            elif isinstance(icon, QtGui.QIcon):
+                state = QtGui.QIcon.Off
+                if option.state & QtWidgets.QStyle.State_Open:
+                    state = QtGui.QIcon.On
+                actual_size = option.icon.actualSize(
+                    option.decorationSize, mode, state
+                )
+                option.decorationSize = QtCore.QSize(
+                    min(option.decorationSize.width(), actual_size.width()),
+                    min(option.decorationSize.height(), actual_size.height())
+                )
+
+            state = QtGui.QIcon.Off
+            if option.state & QtWidgets.QStyle.State_Open:
+                state = QtGui.QIcon.On
+
+            icon.paint(
+                painter, icon_rect,
+                QtCore.Qt.AlignLeft, mode, state
+            )
+
+        # Text
+        text_rect = QtCore.QRect(
+            icon_rect.left() + icon_rect.width() + 2,
+            item_rect.top(),
+            item_rect.width(),
+            item_rect.height()
+        )
+
+        painter.drawText(
+            text_rect, QtCore.Qt.AlignVCenter,
+            index.data(QtCore.Qt.DisplayRole)
+        )
+
+        painter.restore()
+
+
+class LoaderFoldersModel(FoldersQtModel):
+    def __init__(self, *args, **kwargs):
+        super(LoaderFoldersModel, self).__init__(*args, **kwargs)
+
+        self._colored_items = set()
+
+    def _fill_item_data(self, item, folder_item):
+        """Fill item data based on folder item.
+
+        Args:
+            item (QtGui.QStandardItem): Item to fill data.
+            folder_item (FolderItem): Folder item.
+        """
+
+        super(LoaderFoldersModel, self)._fill_item_data(item, folder_item)
+
+    def set_merged_products_selection(self, items):
+        changes = {
+            folder_id: None
+            for folder_id in self._colored_items
+        }
+
+        all_folder_ids = set()
+        for item in items:
+            folder_ids = item["folder_ids"]
+            all_folder_ids.update(folder_ids)
+
+        for folder_id in all_folder_ids:
+            changes[folder_id] = []
+
+        for item in items:
+            item_color = item["color"]
+            item_folder_ids = item["folder_ids"]
+            for folder_id in all_folder_ids:
+                folder_color = (
+                    item_color
+                    if folder_id in item_folder_ids
+                    else None
+                )
+                changes[folder_id].append(folder_color)
+
+        for folder_id, color_value in changes.items():
+            item = self._items_by_id.get(folder_id)
+            if item is not None:
+                item.setData(color_value, UNDERLINE_COLORS_ROLE)
+
+        self._colored_items = all_folder_ids
+
+
+class LoaderFoldersWidget(QtWidgets.QWidget):
+    """Folders widget.
+
+    Widget that handles folders view, model and selection.
+
+    Expected selection handling is disabled by default. If enabled, the
+    widget handles the expected selection in a predefined way. The widget
+    listens to the 'expected_selection_changed' event with the event data
+    below, and the same data must be available from the controller method
+    'get_expected_selection_data'.
+
+    {
+        "folder": {
+            "current": bool,        # Folder is what should be set now
+            "id": Union[str, None], # Folder id that should be selected
+        },
+        ...
+    }
+
+    Selection is confirmed by calling method 'expected_folder_selected' on
+    the controller.
+
+    Args:
+        controller (LoaderController): The control object.
+        parent (QtWidgets.QWidget): The parent widget.
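+
+    Example:
+        A sketch of expected selection data consumed by this widget (the
+        id value is hypothetical):
+
+            {
+                "folder": {
+                    "current": True,
+                    "id": "0123456789abcdef",
+                },
+            }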
+ """ + + refreshed = QtCore.Signal() + + def __init__(self, controller, parent): + super(LoaderFoldersWidget, self).__init__(parent) + + folders_view = DeselectableTreeView(self) + folders_view.setHeaderHidden(True) + folders_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + + folders_model = LoaderFoldersModel(controller) + folders_proxy_model = RecursiveSortFilterProxyModel() + folders_proxy_model.setSourceModel(folders_model) + folders_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + folders_label_delegate = UnderlinesFolderDelegate(folders_view) + + folders_view.setModel(folders_proxy_model) + folders_view.setItemDelegate(folders_label_delegate) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(folders_view, 1) + + controller.register_event_callback( + "selection.project.changed", + self._on_project_selection_change, + ) + controller.register_event_callback( + "folders.refresh.finished", + self._on_folders_refresh_finished + ) + controller.register_event_callback( + "controller.refresh.finished", + self._on_controller_refresh + ) + controller.register_event_callback( + "expected_selection_changed", + self._on_expected_selection_change + ) + + selection_model = folders_view.selectionModel() + selection_model.selectionChanged.connect(self._on_selection_change) + + folders_model.refreshed.connect(self._on_model_refresh) + + self._controller = controller + self._folders_view = folders_view + self._folders_model = folders_model + self._folders_proxy_model = folders_proxy_model + self._folders_label_delegate = folders_label_delegate + + self._expected_selection = None + + def set_name_filter(self, name): + """Set filter of folder name. + + Args: + name (str): The string filter. + """ + + self._folders_proxy_model.setFilterFixedString(name) + + def set_merged_products_selection(self, items): + """ + + Args: + items (list[dict[str, Any]]): List of merged items with folder + ids. 
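+
+        Example:
+            Shape of one merged item (color and id values are
+            hypothetical):
+
+                [{"color": "#ff0000", "folder_ids": {"1a2b3c4d5e6f"}}]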
+ """ + + self._folders_model.set_merged_products_selection(items) + + def refresh(self): + self._folders_model.refresh() + + def _on_project_selection_change(self, event): + project_name = event["project_name"] + self._set_project_name(project_name) + + def _set_project_name(self, project_name): + self._folders_model.set_project_name(project_name) + + def _clear(self): + self._folders_model.clear() + + def _on_folders_refresh_finished(self, event): + if event["sender"] != FOLDERS_MODEL_SENDER_NAME: + self._set_project_name(event["project_name"]) + + def _on_controller_refresh(self): + self._update_expected_selection() + + def _on_model_refresh(self): + if self._expected_selection: + self._set_expected_selection() + self._folders_proxy_model.sort(0) + self.refreshed.emit() + + def _get_selected_item_ids(self): + selection_model = self._folders_view.selectionModel() + item_ids = [] + for index in selection_model.selectedIndexes(): + item_id = index.data(FOLDER_ID_ROLE) + if item_id is not None: + item_ids.append(item_id) + return item_ids + + def _on_selection_change(self): + item_ids = self._get_selected_item_ids() + self._controller.set_selected_folders(item_ids) + + # Expected selection handling + def _on_expected_selection_change(self, event): + self._update_expected_selection(event.data) + + def _update_expected_selection(self, expected_data=None): + if expected_data is None: + expected_data = self._controller.get_expected_selection_data() + + folder_data = expected_data.get("folder") + if not folder_data or not folder_data["current"]: + return + + folder_id = folder_data["id"] + self._expected_selection = folder_id + if not self._folders_model.is_refreshing: + self._set_expected_selection() + + def _set_expected_selection(self): + folder_id = self._expected_selection + selected_ids = self._get_selected_item_ids() + self._expected_selection = None + skip_selection = ( + folder_id is None + or ( + folder_id in selected_ids + and len(selected_ids) == 1 + ) + ) + if not skip_selection: + index = self._folders_model.get_index_by_id(folder_id) + if index.isValid(): + proxy_index = self._folders_proxy_model.mapFromSource(index) + self._folders_view.setCurrentIndex(proxy_index) + self._controller.expected_folder_selected(folder_id) diff --git a/client/ayon_core/tools/loader/ui/info_widget.py b/client/ayon_core/tools/loader/ui/info_widget.py new file mode 100644 index 0000000000..16b817a38d --- /dev/null +++ b/client/ayon_core/tools/loader/ui/info_widget.py @@ -0,0 +1,141 @@ +import datetime + +from qtpy import QtWidgets + +from ayon_core.tools.utils.lib import format_version + + +class VersionTextEdit(QtWidgets.QTextEdit): + """QTextEdit that displays version specific information. + + This also overrides the context menu to add actions like copying + source path to clipboard or copying the raw data of the version + to clipboard. + + """ + def __init__(self, controller, parent): + super(VersionTextEdit, self).__init__(parent=parent) + + self._version_item = None + self._product_item = None + + self._controller = controller + + # Reset + self.set_current_item() + + def set_current_item(self, product_item=None, version_item=None): + """ + + Args: + product_item (Union[ProductItem, None]): Product item. + version_item (Union[VersionItem, None]): Version item to display. 
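+
+        Example:
+            A usage sketch; 'widget' and both items are assumed to exist:
+
+                widget.set_current_item()  # reset to empty state
+                widget.set_current_item(product_item, version_item)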
+        """
+
+        self._product_item = product_item
+        self._version_item = version_item
+
+        if version_item is None:
+            # Reset state to empty
+            self.setText("")
+            return
+
+        version_label = format_version(abs(version_item.version))
+        if version_item.version < 0:
+            version_label = "Hero version {}".format(version_label)
+
+        # Define readable creation timestamp
+        created = version_item.published_time
+        created = datetime.datetime.strptime(created, "%Y%m%dT%H%M%SZ")
+        created = datetime.datetime.strftime(created, "%b %d %Y %H:%M")
+
+        comment = version_item.comment or "No comment"
+        source = version_item.source or "No source"
+
+        self.setHtml(
+            (
+                "<h2>{product_name}</h2>"
+                "<h3>{version_label}</h3>"
+                "<b>Comment</b><br>"
+                "{comment}<br><br>"
+
+                "<b>Created</b><br>"
+                "{created}<br><br>"
+
+                "<b>Source</b><br>"
+                "{source}"
+            ).format(
+                product_name=product_item.product_name,
+                version_label=version_label,
+                comment=comment,
+                created=created,
+                source=source,
+            )
+        )
+
+    def contextMenuEvent(self, event):
+        """Context menu with additional actions"""
+        menu = self.createStandardContextMenu()
+
+        # Add additional actions only when a version with a source is set
+        source = None
+        if self._version_item is not None:
+            source = self._version_item.source
+
+        if source:
+            menu.addSeparator()
+            action = QtWidgets.QAction(
+                "Copy source path to clipboard", menu
+            )
+            action.triggered.connect(self._on_copy_source)
+            menu.addAction(action)
+
+        menu.exec_(event.globalPos())
+
+    def _on_copy_source(self):
+        """Copy formatted source path to clipboard."""
+
+        source = self._version_item.source
+        if not source:
+            return
+
+        filled_source = self._controller.fill_root_in_source(source)
+        clipboard = QtWidgets.QApplication.clipboard()
+        clipboard.setText(filled_source)
+
+
+class InfoWidget(QtWidgets.QWidget):
+    """A widget that displays information about a specific version"""
+    def __init__(self, controller, parent):
+        super(InfoWidget, self).__init__(parent=parent)
+
+        label_widget = QtWidgets.QLabel("Version Info", self)
+        info_text_widget = VersionTextEdit(controller, self)
+        info_text_widget.setReadOnly(True)
+
+        layout = QtWidgets.QVBoxLayout(self)
+        layout.setContentsMargins(0, 0, 0, 0)
+        layout.addWidget(label_widget, 0)
+        layout.addWidget(info_text_widget, 1)
+
+        self._controller = controller
+
+        self._info_text_widget = info_text_widget
+        self._label_widget = label_widget
+
+    def set_selected_version_info(self, project_name, items):
+        if not items or not project_name:
+            self._info_text_widget.set_current_item()
+            return
+        first_item = next(iter(items))
+        product_item = self._controller.get_product_item(
+            project_name,
+            first_item["product_id"],
+        )
+        version_id = first_item["version_id"]
+        version_item = None
+        if product_item is not None:
+            version_item = product_item.version_items.get(version_id)
+
+        self._info_text_widget.set_current_item(product_item, version_item)
diff --git a/openpype/tools/ayon_loader/ui/product_group_dialog.py b/client/ayon_core/tools/loader/ui/product_group_dialog.py
similarity index 96%
rename from openpype/tools/ayon_loader/ui/product_group_dialog.py
rename to client/ayon_core/tools/loader/ui/product_group_dialog.py
index 5737ce58a4..edae78c9a1 100644
--- a/openpype/tools/ayon_loader/ui/product_group_dialog.py
+++ b/client/ayon_core/tools/loader/ui/product_group_dialog.py
@@ -1,6 +1,6 @@
 from qtpy import QtWidgets
 
-from openpype.tools.utils import PlaceholderLineEdit
+from ayon_core.tools.utils import PlaceholderLineEdit
 
 
 class ProductGroupDialog(QtWidgets.QDialog):
diff --git a/openpype/tools/ayon_loader/ui/product_types_widget.py b/client/ayon_core/tools/loader/ui/product_types_widget.py
similarity index 99%
rename from openpype/tools/ayon_loader/ui/product_types_widget.py
rename to client/ayon_core/tools/loader/ui/product_types_widget.py
index a84a7ff846..26244517ec 100644
--- a/openpype/tools/ayon_loader/ui/product_types_widget.py
+++ b/client/ayon_core/tools/loader/ui/product_types_widget.py
@@ -1,6 +1,6 @@
 from qtpy import QtWidgets, QtGui, QtCore
 
-from openpype.tools.ayon_utils.widgets import get_qt_icon
+from ayon_core.tools.ayon_utils.widgets import get_qt_icon
 
 
 PRODUCT_TYPE_ROLE = QtCore.Qt.UserRole + 1
diff --git a/openpype/tools/ayon_loader/ui/products_delegates.py b/client/ayon_core/tools/loader/ui/products_delegates.py
similarity index 99%
rename from openpype/tools/ayon_loader/ui/products_delegates.py
rename to client/ayon_core/tools/loader/ui/products_delegates.py
index 979fa57fd2..53d35c2bb7 100644
--- a/openpype/tools/ayon_loader/ui/products_delegates.py
+++ b/client/ayon_core/tools/loader/ui/products_delegates.py
@@ -1,7 +1,7 @@
 import numbers
 
 from qtpy import QtWidgets, QtCore, QtGui
 
-from openpype.tools.utils.lib import format_version
+from ayon_core.tools.utils.lib import format_version
 
 from .products_model import (
     PRODUCT_ID_ROLE,
diff --git a/openpype/tools/ayon_loader/ui/products_model.py b/client/ayon_core/tools/loader/ui/products_model.py
similarity index 99%
rename from openpype/tools/ayon_loader/ui/products_model.py
rename to client/ayon_core/tools/loader/ui/products_model.py
index 84f5bc9a5f..331efad68a 100644
--- a/openpype/tools/ayon_loader/ui/products_model.py
+++ b/client/ayon_core/tools/loader/ui/products_model.py
@@ -3,8 +3,8 @@
 import qtawesome
 from qtpy import QtGui, QtCore
 
-from openpype.style import get_default_entity_icon_color
-from openpype.tools.ayon_utils.widgets import get_qt_icon
+from ayon_core.style import get_default_entity_icon_color
+from ayon_core.tools.ayon_utils.widgets import get_qt_icon
 
 PRODUCTS_MODEL_SENDER_NAME = "qt_products_model"
diff --git a/openpype/tools/ayon_loader/ui/products_widget.py b/client/ayon_core/tools/loader/ui/products_widget.py
similarity index 99%
rename from openpype/tools/ayon_loader/ui/products_widget.py
rename to client/ayon_core/tools/loader/ui/products_widget.py
index 99faefe693..5a29f3f762 100644
--- a/openpype/tools/ayon_loader/ui/products_widget.py
+++ b/client/ayon_core/tools/loader/ui/products_widget.py
@@ -2,11 +2,11 @@
 
 from qtpy import QtWidgets, QtCore
 
-from openpype.tools.utils import (
+from ayon_core.tools.utils import (
     RecursiveSortFilterProxyModel,
     DeselectableTreeView,
 )
-from openpype.tools.utils.delegates import PrettyTimeDelegate
+from ayon_core.tools.utils.delegates import PrettyTimeDelegate
 
 from .products_model import (
     ProductsModel,
diff --git a/openpype/tools/ayon_loader/ui/repres_widget.py b/client/ayon_core/tools/loader/ui/repres_widget.py
similarity index 98%
rename from openpype/tools/ayon_loader/ui/repres_widget.py
rename to client/ayon_core/tools/loader/ui/repres_widget.py
index efc1bb89a4..27db8dda40 100644
--- a/openpype/tools/ayon_loader/ui/repres_widget.py
+++ b/client/ayon_core/tools/loader/ui/repres_widget.py
@@ -3,9 +3,9 @@
 from qtpy import QtWidgets, QtGui, QtCore
 import qtawesome
 
-from openpype.style import get_default_entity_icon_color
-from openpype.tools.ayon_utils.widgets import get_qt_icon
-from openpype.tools.utils import DeselectableTreeView
+from ayon_core.style import get_default_entity_icon_color
+from ayon_core.tools.ayon_utils.widgets import get_qt_icon
+from ayon_core.tools.utils import DeselectableTreeView
 
 from .actions_utils import show_actions_menu
diff --git a/client/ayon_core/tools/loader/ui/window.py b/client/ayon_core/tools/loader/ui/window.py
new file mode 100644
index 0000000000..104b64d81c
--- /dev/null
+++ b/client/ayon_core/tools/loader/ui/window.py
@@ -0,0 +1,518 @@
+from qtpy import QtWidgets, QtCore, QtGui
+
+from ayon_core.resources import get_ayon_icon_filepath
+from ayon_core.style import load_stylesheet
+from ayon_core.tools.utils import (
+    PlaceholderLineEdit,
+    ErrorMessageBox,
+    ThumbnailPainterWidget,
+    RefreshButton,
+    GoToCurrentButton,
+)
+from ayon_core.tools.utils.lib import center_window
+from ayon_core.tools.ayon_utils.widgets import ProjectsCombobox
+from ayon_core.tools.loader.control import LoaderController
+
+from .folders_widget import LoaderFoldersWidget
+from .products_widget import ProductsWidget
+from .product_types_widget import ProductTypesView
+from .product_group_dialog import ProductGroupDialog
+from .info_widget import InfoWidget
+from .repres_widget import RepresentationsWidget
+
+
+class LoadErrorMessageBox(ErrorMessageBox):
+    def __init__(self, messages, parent=None):
+        self._messages = messages
+        super(LoadErrorMessageBox, self).__init__("Loading failed", parent)
+
+    def _create_top_widget(self, parent_widget):
+        label_widget = QtWidgets.QLabel(parent_widget)
+        label_widget.setText(
+            "Failed to load items"
+        )
+        return label_widget
+
+    def _get_report_data(self):
+        report_data = []
+        for exc_msg, tb_text, repre, product, version in self._messages:
+            report_message = (
+                "An error happened during load on Product: \"{product}\""
+                " Representation: \"{repre}\" Version: {version}"
+                "\n\nError message: {message}"
+            ).format(
+                product=product,
+                repre=repre,
+                version=version,
+                message=exc_msg
+            )
+            if tb_text:
+                report_message += "\n\n{}".format(tb_text)
+            report_data.append(report_message)
+        return report_data
+
+    def _create_content(self, content_layout):
+        item_name_template = (
+            "Product: {}<br>"
+            "Version: {}<br>"
+            "Representation: {}<br>"
+        )
+        exc_msg_template = "{}"
+
+        for exc_msg, tb_text, repre, product, version in self._messages:
+            line = self._create_line()
+            content_layout.addWidget(line)
+
+            item_name = item_name_template.format(product, version, repre)
+            item_name_widget = QtWidgets.QLabel(
+                item_name.replace("\n", "<br>"), self
+            )
+            item_name_widget.setWordWrap(True)
+            content_layout.addWidget(item_name_widget)
+
+            exc_msg = exc_msg_template.format(exc_msg.replace("\n", "<br>"))
+            message_label_widget = QtWidgets.QLabel(exc_msg, self)
+            message_label_widget.setWordWrap(True)
+            content_layout.addWidget(message_label_widget)
+
+            if tb_text:
+                line = self._create_line()
+                tb_widget = self._create_traceback_widget(tb_text, self)
+                content_layout.addWidget(line)
+                content_layout.addWidget(tb_widget)
+
+
+class RefreshHandler:
+    def __init__(self):
+        self._project_refreshed = False
+        self._folders_refreshed = False
+        self._products_refreshed = False
+
+    @property
+    def project_refreshed(self):
+        return self._project_refreshed
+
+    @property
+    def folders_refreshed(self):
+        return self._folders_refreshed
+
+    @property
+    def products_refreshed(self):
+        return self._products_refreshed
+
+    def reset(self):
+        self._project_refreshed = False
+        self._folders_refreshed = False
+        self._products_refreshed = False
+
+    def set_project_refreshed(self):
+        self._project_refreshed = True
+
+    def set_folders_refreshed(self):
+        self._folders_refreshed = True
+
+    def set_products_refreshed(self):
+        self._products_refreshed = True
+
+
+class LoaderWindow(QtWidgets.QWidget):
+    def __init__(self, controller=None, parent=None):
+        super(LoaderWindow, self).__init__(parent)
+
+        icon = QtGui.QIcon(get_ayon_icon_filepath())
+        self.setWindowIcon(icon)
+        self.setWindowTitle("AYON Loader")
+        self.setFocusPolicy(QtCore.Qt.StrongFocus)
+        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, False)
+        self.setWindowFlags(self.windowFlags() | QtCore.Qt.Window)
+
+        if controller is None:
+            controller = LoaderController()
+
+        main_splitter = QtWidgets.QSplitter(self)
+
+        context_splitter = QtWidgets.QSplitter(main_splitter)
+        context_splitter.setOrientation(QtCore.Qt.Vertical)
+
+        # Context selection widget
+        context_widget = QtWidgets.QWidget(context_splitter)
+
+        context_top_widget = QtWidgets.QWidget(context_widget)
+        projects_combobox = ProjectsCombobox(
+            controller,
+            context_top_widget,
+            handle_expected_selection=True
+        )
+        projects_combobox.set_select_item_visible(True)
+        projects_combobox.set_libraries_separator_visible(True)
+        projects_combobox.set_standard_filter_enabled(
+            controller.is_standard_projects_filter_enabled()
+        )
+
+        go_to_current_btn = GoToCurrentButton(context_top_widget)
+        refresh_btn = RefreshButton(context_top_widget)
+
+        context_top_layout = QtWidgets.QHBoxLayout(context_top_widget)
+        context_top_layout.setContentsMargins(0, 0, 0, 0)
+        context_top_layout.addWidget(projects_combobox, 1)
+        context_top_layout.addWidget(go_to_current_btn, 0)
+        context_top_layout.addWidget(refresh_btn, 0)
+
+        folders_filter_input = PlaceholderLineEdit(context_widget)
+        folders_filter_input.setPlaceholderText("Folder name filter...")
+
+        folders_widget = LoaderFoldersWidget(controller, context_widget)
+
+        product_types_widget = ProductTypesView(controller, context_splitter)
+
+        context_layout = QtWidgets.QVBoxLayout(context_widget)
+        context_layout.setContentsMargins(0, 0, 0, 0)
+        context_layout.addWidget(context_top_widget, 0)
+        context_layout.addWidget(folders_filter_input, 0)
+        context_layout.addWidget(folders_widget, 1)
+
+        context_splitter.addWidget(context_widget)
+        context_splitter.addWidget(product_types_widget)
+        context_splitter.setStretchFactor(0, 65)
+        context_splitter.setStretchFactor(1, 35)
+
+        # Product + version selection item
+        products_wrap_widget = QtWidgets.QWidget(main_splitter)
+
+        products_inputs_widget = QtWidgets.QWidget(products_wrap_widget)
+
+        products_filter_input = PlaceholderLineEdit(products_inputs_widget)
+        products_filter_input.setPlaceholderText("Product name 
filter...") + product_group_checkbox = QtWidgets.QCheckBox( + "Enable grouping", products_inputs_widget) + product_group_checkbox.setChecked(True) + + products_widget = ProductsWidget(controller, products_wrap_widget) + + products_inputs_layout = QtWidgets.QHBoxLayout(products_inputs_widget) + products_inputs_layout.setContentsMargins(0, 0, 0, 0) + products_inputs_layout.addWidget(products_filter_input, 1) + products_inputs_layout.addWidget(product_group_checkbox, 0) + + products_wrap_layout = QtWidgets.QVBoxLayout(products_wrap_widget) + products_wrap_layout.setContentsMargins(0, 0, 0, 0) + products_wrap_layout.addWidget(products_inputs_widget, 0) + products_wrap_layout.addWidget(products_widget, 1) + + right_panel_splitter = QtWidgets.QSplitter(main_splitter) + right_panel_splitter.setOrientation(QtCore.Qt.Vertical) + + thumbnails_widget = ThumbnailPainterWidget(right_panel_splitter) + thumbnails_widget.set_use_checkboard(False) + + info_widget = InfoWidget(controller, right_panel_splitter) + + repre_widget = RepresentationsWidget(controller, right_panel_splitter) + + right_panel_splitter.addWidget(thumbnails_widget) + right_panel_splitter.addWidget(info_widget) + right_panel_splitter.addWidget(repre_widget) + + right_panel_splitter.setStretchFactor(0, 1) + right_panel_splitter.setStretchFactor(1, 1) + right_panel_splitter.setStretchFactor(2, 2) + + main_splitter.addWidget(context_splitter) + main_splitter.addWidget(products_wrap_widget) + main_splitter.addWidget(right_panel_splitter) + + main_splitter.setStretchFactor(0, 4) + main_splitter.setStretchFactor(1, 6) + main_splitter.setStretchFactor(2, 1) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.addWidget(main_splitter) + + show_timer = QtCore.QTimer() + show_timer.setInterval(1) + + show_timer.timeout.connect(self._on_show_timer) + + projects_combobox.refreshed.connect(self._on_projects_refresh) + folders_widget.refreshed.connect(self._on_folders_refresh) + products_widget.refreshed.connect(self._on_products_refresh) + folders_filter_input.textChanged.connect( + self._on_folder_filter_change + ) + product_types_widget.filter_changed.connect( + self._on_product_type_filter_change + ) + products_filter_input.textChanged.connect( + self._on_product_filter_change + ) + product_group_checkbox.stateChanged.connect( + self._on_product_group_change + ) + products_widget.merged_products_selection_changed.connect( + self._on_merged_products_selection_change + ) + products_widget.selection_changed.connect( + self._on_products_selection_change + ) + go_to_current_btn.clicked.connect( + self._on_go_to_current_context_click + ) + refresh_btn.clicked.connect( + self._on_refresh_click + ) + controller.register_event_callback( + "load.finished", + self._on_load_finished, + ) + controller.register_event_callback( + "selection.project.changed", + self._on_project_selection_changed, + ) + controller.register_event_callback( + "selection.folders.changed", + self._on_folders_selection_changed, + ) + controller.register_event_callback( + "selection.versions.changed", + self._on_versions_selection_changed, + ) + controller.register_event_callback( + "controller.reset.started", + self._on_controller_reset_start, + ) + controller.register_event_callback( + "controller.reset.finished", + self._on_controller_reset_finish, + ) + + self._group_dialog = ProductGroupDialog(controller, self) + + self._main_splitter = main_splitter + + self._go_to_current_btn = go_to_current_btn + self._refresh_btn = refresh_btn + self._projects_combobox = 
projects_combobox + + self._folders_filter_input = folders_filter_input + self._folders_widget = folders_widget + + self._product_types_widget = product_types_widget + + self._products_filter_input = products_filter_input + self._product_group_checkbox = product_group_checkbox + self._products_widget = products_widget + + self._right_panel_splitter = right_panel_splitter + self._thumbnails_widget = thumbnails_widget + self._info_widget = info_widget + self._repre_widget = repre_widget + + self._controller = controller + self._refresh_handler = RefreshHandler() + self._first_show = True + self._reset_on_show = True + self._show_counter = 0 + self._show_timer = show_timer + self._selected_project_name = None + self._selected_folder_ids = set() + self._selected_version_ids = set() + + self._products_widget.set_enable_grouping( + self._product_group_checkbox.isChecked() + ) + + def refresh(self): + self._reset_on_show = False + self._controller.reset() + + def showEvent(self, event): + super(LoaderWindow, self).showEvent(event) + + if self._first_show: + self._on_first_show() + + self._show_timer.start() + + def closeEvent(self, event): + super(LoaderWindow, self).closeEvent(event) + # Deselect project so current context will be selected + # on next 'showEvent' + self._controller.set_selected_project(None) + self._reset_on_show = True + + def keyPressEvent(self, event): + modifiers = event.modifiers() + ctrl_pressed = QtCore.Qt.ControlModifier & modifiers + + # Grouping products on pressing Ctrl + G + if ( + ctrl_pressed + and event.key() == QtCore.Qt.Key_G + and not event.isAutoRepeat() + ): + self._show_group_dialog() + event.setAccepted(True) + return + + super(LoaderWindow, self).keyPressEvent(event) + + def _on_first_show(self): + self._first_show = False + # width, height = 1800, 900 + width, height = 1500, 750 + + self.resize(width, height) + + mid_width = int(width / 1.8) + sides_width = int((width - mid_width) * 0.5) + self._main_splitter.setSizes( + [sides_width, mid_width, sides_width] + ) + + thumbnail_height = int(height / 3.6) + info_height = int((height - thumbnail_height) * 0.5) + self._right_panel_splitter.setSizes( + [thumbnail_height, info_height, info_height] + ) + self.setStyleSheet(load_stylesheet()) + center_window(self) + + def _on_show_timer(self): + if self._show_counter < 2: + self._show_counter += 1 + return + + self._show_counter = 0 + self._show_timer.stop() + + if self._reset_on_show: + self.refresh() + + def _show_group_dialog(self): + project_name = self._projects_combobox.get_selected_project_name() + if not project_name: + return + + product_ids = { + i["product_id"] + for i in self._products_widget.get_selected_version_info() + } + if not product_ids: + return + + self._group_dialog.set_product_ids(project_name, product_ids) + self._group_dialog.show() + + def _on_folder_filter_change(self, text): + self._folders_widget.set_name_filter(text) + + def _on_product_group_change(self): + self._products_widget.set_enable_grouping( + self._product_group_checkbox.isChecked() + ) + + def _on_product_filter_change(self, text): + self._products_widget.set_name_filter(text) + + def _on_product_type_filter_change(self): + self._products_widget.set_product_type_filter( + self._product_types_widget.get_filter_info() + ) + + def _on_merged_products_selection_change(self): + items = self._products_widget.get_selected_merged_products() + self._folders_widget.set_merged_products_selection(items) + + def _on_products_selection_change(self): + items = 
self._products_widget.get_selected_version_info() + self._info_widget.set_selected_version_info( + self._projects_combobox.get_selected_project_name(), + items + ) + + def _on_go_to_current_context_click(self): + context = self._controller.get_current_context() + self._controller.set_expected_selection( + context["project_name"], + context["folder_id"], + ) + + def _on_refresh_click(self): + self._controller.reset() + + def _on_controller_reset_start(self): + self._refresh_handler.reset() + + def _on_controller_reset_finish(self): + context = self._controller.get_current_context() + project_name = context["project_name"] + self._go_to_current_btn.setVisible(bool(project_name)) + self._projects_combobox.set_current_context_project(project_name) + if not self._refresh_handler.project_refreshed: + self._projects_combobox.refresh() + + def _on_load_finished(self, event): + error_info = event["error_info"] + if not error_info: + return + + box = LoadErrorMessageBox(error_info, self) + box.show() + + def _on_project_selection_changed(self, event): + self._selected_project_name = event["project_name"] + + def _on_folders_selection_changed(self, event): + self._selected_folder_ids = set(event["folder_ids"]) + self._update_thumbnails() + + def _on_versions_selection_changed(self, event): + self._selected_version_ids = set(event["version_ids"]) + self._update_thumbnails() + + def _update_thumbnails(self): + project_name = self._selected_project_name + thumbnail_ids = set() + if self._selected_version_ids: + thumbnail_id_by_entity_id = ( + self._controller.get_version_thumbnail_ids( + project_name, + self._selected_version_ids + ) + ) + thumbnail_ids = set(thumbnail_id_by_entity_id.values()) + elif self._selected_folder_ids: + thumbnail_id_by_entity_id = ( + self._controller.get_folder_thumbnail_ids( + project_name, + self._selected_folder_ids + ) + ) + thumbnail_ids = set(thumbnail_id_by_entity_id.values()) + + thumbnail_ids.discard(None) + + if not thumbnail_ids: + self._thumbnails_widget.set_current_thumbnails(None) + return + + thumbnail_paths = set() + for thumbnail_id in thumbnail_ids: + thumbnail_path = self._controller.get_thumbnail_path( + project_name, thumbnail_id) + thumbnail_paths.add(thumbnail_path) + thumbnail_paths.discard(None) + self._thumbnails_widget.set_current_thumbnail_paths(thumbnail_paths) + + def _on_projects_refresh(self): + self._refresh_handler.set_project_refreshed() + if not self._refresh_handler.folders_refreshed: + self._folders_widget.refresh() + + def _on_folders_refresh(self): + self._refresh_handler.set_folders_refreshed() + if not self._refresh_handler.products_refreshed: + self._products_widget.refresh() + + def _on_products_refresh(self): + self._refresh_handler.set_products_refreshed() diff --git a/openpype/scripts/__init__.py b/client/ayon_core/tools/publisher/__init__.py similarity index 100% rename from openpype/scripts/__init__.py rename to client/ayon_core/tools/publisher/__init__.py diff --git a/openpype/tools/publisher/app.py b/client/ayon_core/tools/publisher/app.py similarity index 100% rename from openpype/tools/publisher/app.py rename to client/ayon_core/tools/publisher/app.py diff --git a/openpype/tools/publisher/constants.py b/client/ayon_core/tools/publisher/constants.py similarity index 100% rename from openpype/tools/publisher/constants.py rename to client/ayon_core/tools/publisher/constants.py diff --git a/client/ayon_core/tools/publisher/control.py b/client/ayon_core/tools/publisher/control.py new file mode 100644 index 0000000000..988362fee4 
--- /dev/null
+++ b/client/ayon_core/tools/publisher/control.py
@@ -0,0 +1,2576 @@
+import os
+import copy
+import logging
+import traceback
+import collections
+import uuid
+import tempfile
+import shutil
+import inspect
+from abc import ABCMeta, abstractmethod
+
+import six
+import arrow
+import pyblish.api
+
+from ayon_core.client import (
+    get_assets,
+    get_asset_by_id,
+    get_subsets,
+    get_asset_name_identifier,
+)
+from ayon_core.lib.events import EventSystem
+from ayon_core.lib.attribute_definitions import (
+    UIDef,
+    serialize_attr_defs,
+    deserialize_attr_defs,
+)
+from ayon_core.pipeline import (
+    PublishValidationError,
+    KnownPublishError,
+    registered_host,
+    get_process_id,
+    OptionalPyblishPluginMixin,
+)
+from ayon_core.pipeline.create import (
+    CreateContext,
+    AutoCreator,
+    HiddenCreator,
+    Creator,
+)
+from ayon_core.pipeline.create.context import (
+    CreatorsOperationFailed,
+    ConvertorsOperationFailed,
+)
+from ayon_core.pipeline.publish import get_publish_instance_label
+
+# Constant for the plugin order offset
+PLUGIN_ORDER_OFFSET = 0.5
+
+
+class CardMessageTypes:
+    standard = None
+    info = "info"
+    error = "error"
+
+
+class MainThreadItem:
+    """Callback with args and kwargs."""
+
+    def __init__(self, callback, *args, **kwargs):
+        self.callback = callback
+        self.args = args
+        self.kwargs = kwargs
+
+    def process(self):
+        self.callback(*self.args, **self.kwargs)
+
+
+class AssetDocsCache:
+    """Cache asset documents for the creation part."""
+
+    projection = {
+        "_id": True,
+        "name": True,
+        "data.visualParent": True,
+        "data.tasks": True,
+        "data.parents": True,
+    }
+
+    def __init__(self, controller):
+        self._controller = controller
+        self._asset_docs = None
+        self._asset_docs_hierarchy = None
+        self._task_names_by_asset_name = {}
+        self._asset_docs_by_name = {}
+        self._full_asset_docs_by_name = {}
+
+    def reset(self):
+        self._asset_docs = None
+        self._asset_docs_hierarchy = None
+        self._task_names_by_asset_name = {}
+        self._asset_docs_by_name = {}
+        self._full_asset_docs_by_name = {}
+
+    def _query(self):
+        if self._asset_docs is not None:
+            return
+
+        project_name = self._controller.project_name
+        asset_docs = list(get_assets(
+            project_name, fields=self.projection.keys()
+        ))
+        asset_docs_by_name = {}
+        task_names_by_asset_name = {}
+        for asset_doc in asset_docs:
+            if "data" not in asset_doc:
+                asset_doc["data"] = {"tasks": {}, "visualParent": None}
+            elif "tasks" not in asset_doc["data"]:
+                asset_doc["data"]["tasks"] = {}
+
+            asset_name = get_asset_name_identifier(asset_doc)
+            asset_tasks = asset_doc["data"]["tasks"]
+            task_names_by_asset_name[asset_name] = list(asset_tasks.keys())
+            asset_docs_by_name[asset_name] = asset_doc
+
+        self._asset_docs = asset_docs
+        self._asset_docs_by_name = asset_docs_by_name
+        self._task_names_by_asset_name = task_names_by_asset_name
+
+    def get_asset_docs(self):
+        self._query()
+        return copy.deepcopy(self._asset_docs)
+
+    def get_asset_hierarchy(self):
+        """Prepare asset documents into a hierarchy.
+
+        ObjectIds are converted to strings. Asset ids are not used during
+        the publishing process; asset names are used instead.
+
+        Returns:
+            Dict[Union[str, None], List[dict]]: Mapping of parent id to its
+                children. Top level assets have parent id 'None'.
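+
+        Example:
+            Shape of the returned mapping (ids are hypothetical):
+
+                {
+                    None: [root_asset_doc],
+                    "62c3a1b2c3d4e5f6a7b8c9d0": [child_asset_doc],
+                }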
+ """ + + if self._asset_docs_hierarchy is None: + _queue = collections.deque(self.get_asset_docs()) + + output = collections.defaultdict(list) + while _queue: + asset_doc = _queue.popleft() + asset_doc["_id"] = str(asset_doc["_id"]) + parent_id = asset_doc["data"]["visualParent"] + if parent_id is not None: + parent_id = str(parent_id) + asset_doc["data"]["visualParent"] = parent_id + output[parent_id].append(asset_doc) + self._asset_docs_hierarchy = output + return copy.deepcopy(self._asset_docs_hierarchy) + + def get_task_names_by_asset_name(self): + self._query() + return copy.deepcopy(self._task_names_by_asset_name) + + def get_asset_by_name(self, asset_name): + self._query() + asset_doc = self._asset_docs_by_name.get(asset_name) + if asset_doc is None: + return None + return copy.deepcopy(asset_doc) + + def get_full_asset_by_name(self, asset_name): + self._query() + if asset_name not in self._full_asset_docs_by_name: + asset_doc = self._asset_docs_by_name.get(asset_name) + project_name = self._controller.project_name + full_asset_doc = get_asset_by_id(project_name, asset_doc["_id"]) + self._full_asset_docs_by_name[asset_name] = full_asset_doc + return copy.deepcopy(self._full_asset_docs_by_name[asset_name]) + + +class PublishReportMaker: + """Report for single publishing process. + + Report keeps current state of publishing and currently processed plugin. + """ + + def __init__(self, controller): + self.controller = controller + self._create_discover_result = None + self._convert_discover_result = None + self._publish_discover_result = None + + self._plugin_data_by_id = {} + self._current_plugin = None + self._current_plugin_data = {} + self._all_instances_by_id = {} + self._current_context = None + + def reset(self, context, create_context): + """Reset report and clear all data.""" + + self._create_discover_result = create_context.creator_discover_result + self._convert_discover_result = ( + create_context.convertor_discover_result + ) + self._publish_discover_result = create_context.publish_discover_result + + self._plugin_data_by_id = {} + self._current_plugin = None + self._current_plugin_data = {} + self._all_instances_by_id = {} + self._current_context = context + + for plugin in create_context.publish_plugins_mismatch_targets: + plugin_data = self._add_plugin_data_item(plugin) + plugin_data["skipped"] = True + + def add_plugin_iter(self, plugin, context): + """Add report about single iteration of plugin.""" + for instance in context: + self._all_instances_by_id[instance.id] = instance + + if self._current_plugin_data: + self._current_plugin_data["passed"] = True + + self._current_plugin = plugin + self._current_plugin_data = self._add_plugin_data_item(plugin) + + def _add_plugin_data_item(self, plugin): + if plugin.id in self._plugin_data_by_id: + # A plugin would be processed more than once. 
What can cause it: + # - there is a bug in controller + # - plugin class is imported into multiple files + # - this can happen even with base classes from 'pyblish' + raise ValueError( + "Plugin '{}' is already stored".format(str(plugin))) + + plugin_data_item = self._create_plugin_data_item(plugin) + self._plugin_data_by_id[plugin.id] = plugin_data_item + + return plugin_data_item + + def _create_plugin_data_item(self, plugin): + label = None + if hasattr(plugin, "label"): + label = plugin.label + + return { + "id": plugin.id, + "name": plugin.__name__, + "label": label, + "order": plugin.order, + "targets": list(plugin.targets), + "instances_data": [], + "actions_data": [], + "skipped": False, + "passed": False + } + + def set_plugin_skipped(self): + """Set that current plugin has been skipped.""" + self._current_plugin_data["skipped"] = True + + def add_result(self, result): + """Handle result of one plugin and it's instance.""" + + instance = result["instance"] + instance_id = None + if instance is not None: + instance_id = instance.id + self._current_plugin_data["instances_data"].append({ + "id": instance_id, + "logs": self._extract_instance_log_items(result), + "process_time": result["duration"] + }) + + def add_action_result(self, action, result): + """Add result of single action.""" + plugin = result["plugin"] + + store_item = self._plugin_data_by_id.get(plugin.id) + if store_item is None: + store_item = self._add_plugin_data_item(plugin) + + action_name = action.__name__ + action_label = action.label or action_name + log_items = self._extract_log_items(result) + store_item["actions_data"].append({ + "success": result["success"], + "name": action_name, + "label": action_label, + "logs": log_items + }) + + def get_report(self, publish_plugins=None): + """Report data with all details of current state.""" + + now = arrow.utcnow().to("local") + instances_details = {} + for instance in self._all_instances_by_id.values(): + instances_details[instance.id] = self._extract_instance_data( + instance, instance in self._current_context + ) + + plugins_data_by_id = copy.deepcopy( + self._plugin_data_by_id + ) + + # Ensure the current plug-in is marked as `passed` in the result + # so that it shows on reports for paused publishes + if self._current_plugin is not None: + current_plugin_data = plugins_data_by_id.get( + self._current_plugin.id + ) + if current_plugin_data and not current_plugin_data["passed"]: + current_plugin_data["passed"] = True + + if publish_plugins: + for plugin in publish_plugins: + if plugin.id not in plugins_data_by_id: + plugins_data_by_id[plugin.id] = \ + self._create_plugin_data_item(plugin) + + reports = [] + if self._create_discover_result is not None: + reports.append(self._create_discover_result) + + if self._convert_discover_result is not None: + reports.append(self._convert_discover_result) + + if self._publish_discover_result is not None: + reports.append(self._publish_discover_result) + + crashed_file_paths = {} + for report in reports: + items = report.crashed_file_paths.items() + for filepath, exc_info in items: + crashed_file_paths[filepath] = "".join( + traceback.format_exception(*exc_info) + ) + + return { + "plugins_data": list(plugins_data_by_id.values()), + "instances": instances_details, + "context": self._extract_context_data(self._current_context), + "crashed_file_paths": crashed_file_paths, + "id": uuid.uuid4().hex, + "created_at": now.isoformat(), + "report_version": "1.0.1", + } + + def _extract_context_data(self, context): + context_label = 
"Context" + if context is not None: + context_label = context.data.get("label") + return { + "label": context_label + } + + def _extract_instance_data(self, instance, exists): + return { + "name": instance.data.get("name"), + "label": get_publish_instance_label(instance), + "family": instance.data["family"], + "families": instance.data.get("families") or [], + "exists": exists, + "creator_identifier": instance.data.get("creator_identifier"), + "instance_id": instance.data.get("instance_id"), + } + + def _extract_instance_log_items(self, result): + instance = result["instance"] + instance_id = None + if instance: + instance_id = instance.id + + log_items = self._extract_log_items(result) + for item in log_items: + item["instance_id"] = instance_id + return log_items + + def _extract_log_items(self, result): + output = [] + records = result.get("records") or [] + for record in records: + record_exc_info = record.exc_info + if record_exc_info is not None: + record_exc_info = "".join( + traceback.format_exception(*record_exc_info) + ) + + try: + msg = record.getMessage() + except Exception: + msg = str(record.msg) + + output.append({ + "type": "record", + "msg": msg, + "name": record.name, + "lineno": record.lineno, + "levelno": record.levelno, + "levelname": record.levelname, + "threadName": record.threadName, + "filename": record.filename, + "pathname": record.pathname, + "msecs": record.msecs, + "exc_info": record_exc_info + }) + + exception = result.get("error") + if exception: + fname, line_no, func, exc = exception.traceback + + # Conversion of exception into string may crash + try: + msg = str(exception) + except BaseException: + msg = ( + "Publisher Controller: ERROR" + " - Failed to get exception message" + ) + + # Action result does not have 'is_validation_error' + is_validation_error = result.get("is_validation_error", False) + output.append({ + "type": "error", + "is_validation_error": is_validation_error, + "msg": msg, + "filename": str(fname), + "lineno": str(line_no), + "func": str(func), + "traceback": exception.formatted_traceback + }) + + return output + + +class PublishPluginsProxy: + """Wrapper around publish plugin. + + Prepare mapping for publish plugins and actions. Also can create + serializable data for plugin actions so UI don't have to have access to + them. + + This object is created in process where publishing is actually running. + + Notes: + Actions have id but single action can be used on multiple plugins so + to run an action is needed combination of plugin and action. + + Args: + plugins [List[pyblish.api.Plugin]]: Discovered plugins that will be + processed. 
+    """
+
+    def __init__(self, plugins):
+        plugins_by_id = {}
+        actions_by_plugin_id = {}
+        action_ids_by_plugin_id = {}
+        for plugin in plugins:
+            plugin_id = plugin.id
+            plugins_by_id[plugin_id] = plugin
+
+            action_ids = []
+            actions_by_id = {}
+            action_ids_by_plugin_id[plugin_id] = action_ids
+            actions_by_plugin_id[plugin_id] = actions_by_id
+
+            actions = getattr(plugin, "actions", None) or []
+            for action in actions:
+                action_id = action.id
+                action_ids.append(action_id)
+                actions_by_id[action_id] = action
+
+        self._plugins_by_id = plugins_by_id
+        self._actions_by_plugin_id = actions_by_plugin_id
+        self._action_ids_by_plugin_id = action_ids_by_plugin_id
+
+    def get_action(self, plugin_id, action_id):
+        return self._actions_by_plugin_id[plugin_id][action_id]
+
+    def get_plugin(self, plugin_id):
+        return self._plugins_by_id[plugin_id]
+
+    def get_plugin_id(self, plugin):
+        """Get id of plugin based on plugin object.
+
+        It is used for the validation errors report.
+
+        Args:
+            plugin (pyblish.api.Plugin): Publish plugin for which id should be
+                returned.
+
+        Returns:
+            str: Plugin id.
+        """
+
+        return plugin.id
+
+    def get_plugin_action_items(self, plugin_id):
+        """Get plugin action items for plugin by its id.
+
+        Args:
+            plugin_id (str): Publish plugin id.
+
+        Returns:
+            List[PublishPluginActionItem]: Items with information about publish
+                plugin actions.
+        """
+
+        return [
+            self._create_action_item(
+                self.get_action(plugin_id, action_id), plugin_id
+            )
+            for action_id in self._action_ids_by_plugin_id[plugin_id]
+        ]
+
+    def _create_action_item(self, action, plugin_id):
+        label = action.label or action.__name__
+        icon = getattr(action, "icon", None)
+        return PublishPluginActionItem(
+            action.id,
+            plugin_id,
+            action.active,
+            action.on,
+            label,
+            icon
+        )
+
+
+class PublishPluginActionItem:
+    """Representation of publish plugin action.
+
+    Data driven object which is used as proxy for controller and UI.
+
+    Args:
+        action_id (str): Action id.
+        plugin_id (str): Plugin id.
+        active (bool): Action is active.
+        on_filter (str): Actions have an 'on' attribute which defines when
+            the action can be triggered (e.g. 'all', 'failed', ...).
+        label (str): Action's label.
+        icon (Union[str, None]): Action's icon.
+    """
+
+    def __init__(self, action_id, plugin_id, active, on_filter, label, icon):
+        self.action_id = action_id
+        self.plugin_id = plugin_id
+        self.active = active
+        self.on_filter = on_filter
+        self.label = label
+        self.icon = icon
+
+    def to_data(self):
+        """Serialize object to dictionary.
+
+        Returns:
+            Dict[str, Union[str, bool, None]]: Serialized object.
+        """
+
+        return {
+            "action_id": self.action_id,
+            "plugin_id": self.plugin_id,
+            "active": self.active,
+            "on_filter": self.on_filter,
+            "label": self.label,
+            "icon": self.icon
+        }
+
+    @classmethod
+    def from_data(cls, data):
+        """Create object from data.
+
+        Args:
+            data (Dict[str, Union[str, bool, None]]): Data used to recreate
+                object.
+
+        Returns:
+            PublishPluginActionItem: Object created using data.
+        """
+
+        return cls(**data)
+
+
+class ValidationErrorItem:
+    """Data driven validation error item.
+
+    Prepared data container with information about a validation error and its
+    source plugin.
+
+    Can be converted to raw data and recreated, which is used for the
+    controller and UI connection.
+
+    Args:
+        instance_id (str): Id of the pyblish instance to which the validation
+            error is connected.
+        instance_label (str): Prepared instance label.
+        plugin_id (str): Id of pyblish Plugin which triggered the validation
Id is generated using 'PublishPluginsProxy'.
+    """
+
+    def __init__(
+        self,
+        instance_id,
+        instance_label,
+        plugin_id,
+        context_validation,
+        title,
+        description,
+        detail
+    ):
+        self.instance_id = instance_id
+        self.instance_label = instance_label
+        self.plugin_id = plugin_id
+        self.context_validation = context_validation
+        self.title = title
+        self.description = description
+        self.detail = detail
+
+    def to_data(self):
+        """Serialize object to dictionary.
+
+        Returns:
+            Dict[str, Union[str, bool, None]]: Serialized object data.
+        """
+
+        return {
+            "instance_id": self.instance_id,
+            "instance_label": self.instance_label,
+            "plugin_id": self.plugin_id,
+            "context_validation": self.context_validation,
+            "title": self.title,
+            "description": self.description,
+            "detail": self.detail,
+        }
+
+    @classmethod
+    def from_result(cls, plugin_id, error, instance):
+        """Create new object based on result from controller.
+
+        Returns:
+            ValidationErrorItem: New object with filled data.
+        """
+
+        instance_label = None
+        instance_id = None
+        if instance is not None:
+            instance_label = (
+                instance.data.get("label") or instance.data.get("name")
+            )
+            instance_id = instance.id
+
+        return cls(
+            instance_id,
+            instance_label,
+            plugin_id,
+            instance is None,
+            error.title,
+            error.description,
+            error.detail,
+        )
+
+    @classmethod
+    def from_data(cls, data):
+        return cls(**data)
+
+
+class PublishValidationErrorsReport:
+    """Publish validation errors report that can be converted to raw data.
+
+    Args:
+        error_items (List[ValidationErrorItem]): List of validation errors.
+        plugin_action_items (Dict[str, List[PublishPluginActionItem]]): Action
+            items by plugin id.
+    """
+
+    def __init__(self, error_items, plugin_action_items):
+        self._error_items = error_items
+        self._plugin_action_items = plugin_action_items
+
+    def __iter__(self):
+        for item in self._error_items:
+            yield item
+
+    def group_items_by_title(self):
+        """Group errors by plugin and their titles.
+
+        Items are grouped by plugin and title -> the same title from a
+        different plugin is a different item. Items are ordered by plugin
+        order.
+
+        Returns:
+            List[Dict[str, Any]]: List where each item contains a title,
+                instance information related to the title and possible plugin
+                actions.
+        """
+
+        ordered_plugin_ids = []
+        error_items_by_plugin_id = collections.defaultdict(list)
+        for error_item in self._error_items:
+            plugin_id = error_item.plugin_id
+            if plugin_id not in ordered_plugin_ids:
+                ordered_plugin_ids.append(plugin_id)
+            error_items_by_plugin_id[plugin_id].append(error_item)
+
+        grouped_error_items = []
+        for plugin_id in ordered_plugin_ids:
+            plugin_action_items = self._plugin_action_items[plugin_id]
+            error_items = error_items_by_plugin_id[plugin_id]
+
+            titles = []
+            error_items_by_title = collections.defaultdict(list)
+            for error_item in error_items:
+                title = error_item.title
+                if title not in titles:
+                    titles.append(error_item.title)
+                error_items_by_title[title].append(error_item)
+
+            for title in titles:
+                grouped_error_items.append({
+                    "id": uuid.uuid4().hex,
+                    "plugin_id": plugin_id,
+                    "plugin_action_items": list(plugin_action_items),
+                    "error_items": error_items_by_title[title],
+                    "title": title
+                })
+        return grouped_error_items
+
+    def to_data(self):
+        """Serialize object to dictionary.
+
+        Returns:
+            Dict[str, Any]: Serialized data.
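+
+        Example:
+            Hedged round-trip sketch; 'report' stands for an existing
+            'PublishValidationErrorsReport' instance:
+
+                data = report.to_data()
+                recreated = PublishValidationErrorsReport.from_data(data)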
+        """
+
+        error_items = [
+            item.to_data()
+            for item in self._error_items
+        ]
+
+        plugin_action_items = {
+            plugin_id: [
+                action_item.to_data()
+                for action_item in action_items
+            ]
+            for plugin_id, action_items in self._plugin_action_items.items()
+        }
+
+        return {
+            "error_items": error_items,
+            "plugin_action_items": plugin_action_items
+        }
+
+    @classmethod
+    def from_data(cls, data):
+        """Recreate object from data.
+
+        Args:
+            data (dict[str, Any]): Data to recreate object. Can be created
+                using 'to_data' method.
+
+        Returns:
+            PublishValidationErrorsReport: New object based on data.
+        """
+
+        error_items = [
+            ValidationErrorItem.from_data(error_item)
+            for error_item in data["error_items"]
+        ]
+        plugin_action_items = {
+            plugin_id: [
+                PublishPluginActionItem.from_data(action_item)
+                for action_item in action_items
+            ]
+            for plugin_id, action_items in data["plugin_action_items"].items()
+        }
+        return cls(error_items, plugin_action_items)
+
+
+class PublishValidationErrors:
+    """Object to keep track of validation errors by plugin."""
+
+    def __init__(self):
+        self._plugins_proxy = None
+        self._error_items = []
+        self._plugin_action_items = {}
+
+    def __bool__(self):
+        return self.has_errors
+
+    @property
+    def has_errors(self):
+        """At least one error was added."""
+
+        return bool(self._error_items)
+
+    def reset(self, plugins_proxy):
+        """Reset object to default state.
+
+        Args:
+            plugins_proxy (PublishPluginsProxy): Proxy which stores plugins
+                and actions by ids and creates mapping of action ids by
+                plugin ids.
+        """
+
+        self._plugins_proxy = plugins_proxy
+        self._error_items = []
+        self._plugin_action_items = {}
+
+    def create_report(self):
+        """Create report based on currently existing errors.
+
+        Returns:
+            PublishValidationErrorsReport: Validation error report with all
+                error information and publish plugin action items.
+        """
+
+        return PublishValidationErrorsReport(
+            self._error_items, self._plugin_action_items
+        )
+
+    def add_error(self, plugin, error, instance):
+        """Add error from pyblish result.
+
+        Args:
+            plugin (pyblish.api.Plugin): Plugin which triggered error.
+            error (PublishValidationError): Validation error.
+            instance (Union[pyblish.api.Instance, None]): Instance on which
+                the error was raised, or None if it was raised on the context.
+        """
+
+        # Make sure the cached report is cleared
+        plugin_id = self._plugins_proxy.get_plugin_id(plugin)
+        if not error.title:
+            if hasattr(plugin, "label") and plugin.label:
+                plugin_label = plugin.label
+            else:
+                plugin_label = plugin.__name__
+            error.title = plugin_label
+
+        self._error_items.append(
+            ValidationErrorItem.from_result(plugin_id, error, instance)
+        )
+        if plugin_id in self._plugin_action_items:
+            return
+
+        plugin_actions = self._plugins_proxy.get_plugin_action_items(
+            plugin_id
+        )
+        self._plugin_action_items[plugin_id] = plugin_actions
+
+
+class CreatorType:
+    def __init__(self, name):
+        self.name = name
+
+    def __str__(self):
+        return self.name
+
+    def __eq__(self, other):
+        return self.name == str(other)
+
+    def __ne__(self, other):
+        # This is implemented only because of Python 2
+        return not self == other
+
+
+class CreatorTypes:
+    base = CreatorType("base")
+    auto = CreatorType("auto")
+    hidden = CreatorType("hidden")
+    artist = CreatorType("artist")
+
+    @classmethod
+    def from_str(cls, value):
+        for creator_type in (
+            cls.base,
+            cls.auto,
+            cls.hidden,
+            cls.artist
+        ):
+            if value == creator_type:
+                return creator_type
+        raise ValueError("Unknown type \"{}\"".format(str(value)))
+
+
+class CreatorItem:
+    """Wrapper around Creator plugin.
+
+    Object can be serialized and recreated.
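+
+    Example:
+        Hedged round-trip sketch; 'creator' stands for any discovered
+        Creator plugin instance:
+
+            item = CreatorItem.from_creator(creator)
+            data = item.to_data()
+            recreated = CreatorItem.from_data(data)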
+ """ + + def __init__( + self, + identifier, + creator_type, + family, + label, + group_label, + icon, + description, + detailed_description, + default_variant, + default_variants, + create_allow_context_change, + create_allow_thumbnail, + show_order, + pre_create_attributes_defs, + ): + self.identifier = identifier + self.creator_type = creator_type + self.family = family + self.label = label + self.group_label = group_label + self.icon = icon + self.description = description + self.detailed_description = detailed_description + self.default_variant = default_variant + self.default_variants = default_variants + self.create_allow_context_change = create_allow_context_change + self.create_allow_thumbnail = create_allow_thumbnail + self.show_order = show_order + self.pre_create_attributes_defs = pre_create_attributes_defs + + def get_group_label(self): + return self.group_label + + @classmethod + def from_creator(cls, creator): + if isinstance(creator, AutoCreator): + creator_type = CreatorTypes.auto + elif isinstance(creator, HiddenCreator): + creator_type = CreatorTypes.hidden + elif isinstance(creator, Creator): + creator_type = CreatorTypes.artist + else: + creator_type = CreatorTypes.base + + description = None + detail_description = None + default_variant = None + default_variants = None + pre_create_attr_defs = None + create_allow_context_change = None + create_allow_thumbnail = None + show_order = creator.order + if creator_type is CreatorTypes.artist: + description = creator.get_description() + detail_description = creator.get_detail_description() + default_variant = creator.get_default_variant() + default_variants = creator.get_default_variants() + pre_create_attr_defs = creator.get_pre_create_attr_defs() + create_allow_context_change = creator.create_allow_context_change + create_allow_thumbnail = creator.create_allow_thumbnail + show_order = creator.show_order + + identifier = creator.identifier + return cls( + identifier, + creator_type, + creator.family, + creator.label or identifier, + creator.get_group_label(), + creator.get_icon(), + description, + detail_description, + default_variant, + default_variants, + create_allow_context_change, + create_allow_thumbnail, + show_order, + pre_create_attr_defs, + ) + + def to_data(self): + pre_create_attributes_defs = None + if self.pre_create_attributes_defs is not None: + pre_create_attributes_defs = serialize_attr_defs( + self.pre_create_attributes_defs + ) + + return { + "identifier": self.identifier, + "creator_type": str(self.creator_type), + "family": self.family, + "label": self.label, + "group_label": self.group_label, + "icon": self.icon, + "description": self.description, + "detailed_description": self.detailed_description, + "default_variant": self.default_variant, + "default_variants": self.default_variants, + "create_allow_context_change": self.create_allow_context_change, + "create_allow_thumbnail": self.create_allow_thumbnail, + "show_order": self.show_order, + "pre_create_attributes_defs": pre_create_attributes_defs, + } + + @classmethod + def from_data(cls, data): + pre_create_attributes_defs = data["pre_create_attributes_defs"] + if pre_create_attributes_defs is not None: + data["pre_create_attributes_defs"] = deserialize_attr_defs( + pre_create_attributes_defs + ) + + data["creator_type"] = CreatorTypes.from_str(data["creator_type"]) + return cls(**data) + + +@six.add_metaclass(ABCMeta) +class AbstractPublisherController(object): + """Publisher tool controller. 
+
+    Define what must be implemented to be able to use the Publisher
+    functionality.
+
+    The goal is to have a "data driven" controller that can be used to
+    control a UI running in a different process. That leads to some
+    disadvantages, e.g. the UI can't access objects directly but only through
+    wrappers that can be serialized.
+    """
+
+    @property
+    @abstractmethod
+    def log(self):
+        """Controller's logger object.
+
+        Returns:
+            logging.Logger: Logger object that can be used for logging.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def event_system(self):
+        """Inner event system for publisher controller."""
+
+        pass
+
+    @property
+    @abstractmethod
+    def project_name(self):
+        """Current context project name.
+
+        Returns:
+            str: Name of project.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def current_asset_name(self):
+        """Current context asset name.
+
+        Returns:
+            Union[str, None]: Name of asset.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def current_task_name(self):
+        """Current context task name.
+
+        Returns:
+            Union[str, None]: Name of task.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def host_context_has_changed(self):
+        """Host context changed after last reset.
+
+        'CreateContext' has this option available using 'context_has_changed'.
+
+        Returns:
+            bool: Context has changed.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def host_is_valid(self):
+        """Host is valid for creation part.
+
+        Host must have implemented certain functionality to be able to create
+        in the Publisher tool.
+
+        Returns:
+            bool: Host can handle creation of instances.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def instances(self):
+        """Collected/created instances.
+
+        Returns:
+            List[CreatedInstance]: List of created instances.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_context_title(self):
+        """Get context title for artist shown at the top of main window.
+
+        Returns:
+            Union[str, None]: Context title for window or None. In case of None
+                a warning is displayed (not nice for artists).
+        """
+
+        pass
+
+    @abstractmethod
+    def get_asset_docs(self):
+        pass
+
+    @abstractmethod
+    def get_asset_hierarchy(self):
+        pass
+
+    @abstractmethod
+    def get_task_names_by_asset_names(self, asset_names):
+        pass
+
+    @abstractmethod
+    def get_existing_subset_names(self, asset_name):
+        pass
+
+    @abstractmethod
+    def reset(self):
+        """Reset whole controller.
+
+        This should reset create context, publish context and all variables
+        that are related to it.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_creator_attribute_definitions(self, instances):
+        pass
+
+    @abstractmethod
+    def get_publish_attribute_definitions(self, instances, include_context):
+        pass
+
+    @abstractmethod
+    def get_creator_icon(self, identifier):
+        """Receive creator's icon by identifier.
+
+        Args:
+            identifier (str): Creator's identifier.
+
+        Returns:
+            Union[str, None]: Creator's icon string.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_subset_name(
+        self,
+        creator_identifier,
+        variant,
+        task_name,
+        asset_name,
+        instance_id=None
+    ):
+        """Get subset name based on passed data.
+
+        Args:
+            creator_identifier (str): Identifier of creator which should be
+                responsible for subset name creation.
+            variant (str): Variant value from user's input.
+            task_name (str): Name of task for which the instance is created.
+            asset_name (str): Name of asset for which the instance is created.
+            instance_id (Union[str, None]): Existing instance id when subset
+                name is updated.
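+
+        Example:
+            Illustrative call; the creator identifier and names below are
+            hypothetical:
+
+                subset_name = controller.get_subset_name(
+                    "render", "Main", "compositing", "sh010"
+                )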
+        """
+
+        pass
+
+    @abstractmethod
+    def create(
+        self, creator_identifier, subset_name, instance_data, options
+    ):
+        """Trigger creation by creator identifier.
+
+        Should also trigger refresh of instances.
+
+        Args:
+            creator_identifier (str): Identifier of Creator plugin.
+            subset_name (str): Calculated subset name.
+            instance_data (Dict[str, Any]): Base instance data with variant,
+                asset name and task name.
+            options (Dict[str, Any]): Data from pre-create attributes.
+        """
+
+        pass
+
+    @abstractmethod
+    def save_changes(self):
+        """Save changes in create context.
+
+        Save can crash because of unexpected errors.
+
+        Returns:
+            bool: Save was successful.
+        """
+
+        pass
+
+    @abstractmethod
+    def remove_instances(self, instance_ids):
+        """Remove list of instances from create context."""
+        # TODO expect instance ids
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_has_started(self):
+        """Has publishing started.
+
+        Returns:
+            bool: If publishing has started.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_has_finished(self):
+        """Has publishing finished.
+
+        Returns:
+            bool: If publishing finished and all plugins were iterated.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_is_running(self):
+        """Publishing is running right now.
+
+        Returns:
+            bool: If publishing is in progress.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_has_validated(self):
+        """Publish validation passed.
+
+        Returns:
+            bool: If publishing passed last possible validation order.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_has_crashed(self):
+        """Publishing crashed for any reason.
+
+        Returns:
+            bool: Publishing crashed.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_has_validation_errors(self):
+        """At least one validation error happened during validation.
+
+        Returns:
+            bool: Validation error was raised during validation.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_max_progress(self):
+        """Get maximum possible progress number.
+
+        Returns:
+            int: Number that can be used as 100% of publish progress bar.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_progress(self):
+        """Current progress number.
+
+        Returns:
+            int: Current progress value from 0 to 'publish_max_progress'.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def publish_error_msg(self):
+        """Current error message which caused publishing to fail.
+
+        Returns:
+            Union[str, None]: Message which will be shown to the artist, or
+                None.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_publish_report(self):
+        pass
+
+    @abstractmethod
+    def get_validation_errors(self):
+        pass
+
+    @abstractmethod
+    def publish(self):
+        """Trigger publishing without any order limitations."""
+
+        pass
+
+    @abstractmethod
+    def validate(self):
+        """Trigger publishing which will stop after validation order."""
+
+        pass
+
+    @abstractmethod
+    def stop_publish(self):
+        """Stop publishing; can also be used to pause publishing.
+
+        Pausing of publishing is possible only if all processed plugins
+        finished successfully.
+        """
+
+        pass
+
+    @abstractmethod
+    def run_action(self, plugin_id, action_id):
+        """Trigger pyblish action on a plugin.
+
+        Args:
+            plugin_id (str): Id of publish plugin.
+            action_id (str): Id of publish action.
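+
+        Example:
+            Hedged sketch; both ids would typically come from a validation
+            error report:
+
+                controller.run_action(
+                    error_item.plugin_id, action_item.action_id
+                )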
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def convertor_items(self):
+        pass
+
+    @abstractmethod
+    def trigger_convertor_items(self, convertor_identifiers):
+        pass
+
+    @abstractmethod
+    def get_thumbnail_paths_for_instances(self, instance_ids):
+        pass
+
+    @abstractmethod
+    def set_thumbnail_paths_for_instances(self, thumbnail_path_mapping):
+        pass
+
+    @abstractmethod
+    def set_comment(self, comment):
+        """Set comment on pyblish context.
+
+        Set "comment" key on current pyblish.api.Context data.
+
+        Args:
+            comment (str): Artist's comment.
+        """
+
+        pass
+
+    @abstractmethod
+    def emit_card_message(
+        self, message, message_type=CardMessageTypes.standard
+    ):
+        """Emit a card message which can have a lifetime.
+
+        This is for UI purposes. The method can be extended with more
+        arguments in the future, e.g. a different message timeout or type
+        (color).
+
+        Args:
+            message (str): Message that will be shown.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_thumbnail_temp_dir_path(self):
+        """Return path to directory where thumbnails can be temporarily
+        stored.
+
+        Returns:
+            str: Path to a directory.
+        """
+
+        pass
+
+    @abstractmethod
+    def clear_thumbnail_temp_dir_path(self):
+        """Remove content of thumbnail temp directory."""
+
+        pass
+
+
+class BasePublisherController(AbstractPublisherController):
+    """Implement common logic for controllers.
+
+    Implement event system, logger and common attributes. Attributes trigger
+    change events so anyone can listen to their topics.
+
+    Prepare implementation for creator items. A controller must implement
+    just their filling via '_collect_creator_items'.
+
+    All prepared implementation is based on calling super '__init__'.
+    """
+
+    def __init__(self):
+        self._log = None
+        self._event_system = None
+
+        # Host is valid for creation
+        self._host_is_valid = False
+
+        # Any other exception that happened during publishing
+        self._publish_error_msg = None
+        # Publishing is in progress
+        self._publish_is_running = False
+        # Publishing is over validation order
+        self._publish_has_validated = False
+
+        self._publish_has_validation_errors = False
+        self._publish_has_crashed = False
+        # All publish plugins are processed
+        self._publish_has_started = False
+        self._publish_has_finished = False
+        self._publish_max_progress = 0
+        self._publish_progress = 0
+
+        # Controller must implement '_collect_creator_items' to fill the value
+        self._creator_items = None
+
+    @property
+    def log(self):
+        """Controller's logger object.
+
+        Returns:
+            logging.Logger: Logger object that can be used for logging.
+        """
+
+        if self._log is None:
+            self._log = logging.getLogger(self.__class__.__name__)
+        return self._log
+
+    @property
+    def event_system(self):
+        """Inner event system for publisher controller.
+
+        Used for communication with the UI. The event system is autocreated.
+
+        Known topics:
+            "show.detailed.help" - Detailed help requested (UI related).
+            "show.card.message" - Show card message request (UI related).
+            "instances.refresh.finished" - Instances are refreshed.
+            "plugins.refresh.finished" - Plugins refreshed.
+            "publish.reset.finished" - Reset finished.
+            "controller.reset.started" - Controller reset started.
+            "controller.reset.finished" - Controller reset finished.
+            "publish.process.started" - Publishing started. Can be started from
+                paused state.
+            "publish.process.stopped" - Publishing stopped/paused process.
+            "publish.process.plugin.changed" - Plugin state has changed.
+            "publish.process.instance.changed" - Instance state has changed.
+            "publish.has_validated.changed" - Attr 'publish_has_validated'
+                changed.
+            "publish.is_running.changed" - Attr 'publish_is_running' changed.
+            "publish.has_crashed.changed" - Attr 'publish_has_crashed' changed.
+            "publish.publish_error.changed" - Attr 'publish_error_msg' changed.
+            "publish.has_validation_errors.changed" - Attr
+                'has_validation_errors' changed.
+            "publish.max_progress.changed" - Attr 'publish_max_progress'
+                changed.
+            "publish.progress.changed" - Attr 'publish_progress' changed.
+            "publish.host_is_valid.changed" - Attr 'host_is_valid' changed.
+            "publish.finished.changed" - Attr 'publish_has_finished' changed.
+
+        Returns:
+            EventSystem: Event system which can trigger callbacks for topics.
+        """
+
+        if self._event_system is None:
+            self._event_system = EventSystem()
+        return self._event_system
+
+    def _emit_event(self, topic, data=None):
+        if data is None:
+            data = {}
+        self.event_system.emit(topic, data, "controller")
+
+    def _get_host_is_valid(self):
+        return self._host_is_valid
+
+    def _set_host_is_valid(self, value):
+        if self._host_is_valid != value:
+            self._host_is_valid = value
+            self._emit_event(
+                "publish.host_is_valid.changed", {"value": value}
+            )
+
+    def _get_publish_has_started(self):
+        return self._publish_has_started
+
+    def _set_publish_has_started(self, value):
+        if value != self._publish_has_started:
+            self._publish_has_started = value
+
+    def _get_publish_has_finished(self):
+        return self._publish_has_finished
+
+    def _set_publish_has_finished(self, value):
+        if self._publish_has_finished != value:
+            self._publish_has_finished = value
+            self._emit_event("publish.finished.changed", {"value": value})
+
+    def _get_publish_is_running(self):
+        return self._publish_is_running
+
+    def _set_publish_is_running(self, value):
+        if self._publish_is_running != value:
+            self._publish_is_running = value
+            self._emit_event("publish.is_running.changed", {"value": value})
+
+    def _get_publish_has_validated(self):
+        return self._publish_has_validated
+
+    def _set_publish_has_validated(self, value):
+        if self._publish_has_validated != value:
+            self._publish_has_validated = value
+            self._emit_event(
+                "publish.has_validated.changed", {"value": value}
+            )
+
+    def _get_publish_has_crashed(self):
+        return self._publish_has_crashed
+
+    def _set_publish_has_crashed(self, value):
+        if self._publish_has_crashed != value:
+            self._publish_has_crashed = value
+            self._emit_event("publish.has_crashed.changed", {"value": value})
+
+    def _get_publish_has_validation_errors(self):
+        return self._publish_has_validation_errors
+
+    def _set_publish_has_validation_errors(self, value):
+        if self._publish_has_validation_errors != value:
+            self._publish_has_validation_errors = value
+            self._emit_event(
+                "publish.has_validation_errors.changed",
+                {"value": value}
+            )
+
+    def _get_publish_max_progress(self):
+        return self._publish_max_progress
+
+    def _set_publish_max_progress(self, value):
+        if self._publish_max_progress != value:
+            self._publish_max_progress = value
+            self._emit_event("publish.max_progress.changed", {"value": value})
+
+    def _get_publish_progress(self):
+        return self._publish_progress
+
+    def _set_publish_progress(self, value):
+        if self._publish_progress != value:
+            self._publish_progress = value
+            self._emit_event("publish.progress.changed", {"value": value})
+
+    def _get_publish_error_msg(self):
+        return self._publish_error_msg
+
+    def _set_publish_error_msg(self, value):
+        if self._publish_error_msg != value:
+            self._publish_error_msg = value
+            self._emit_event(
+                "publish.publish_error.changed", {"value": value}
+            )
+
+    host_is_valid = property(
+        _get_host_is_valid, _set_host_is_valid
+    )
+    publish_has_started = property(
+        _get_publish_has_started, _set_publish_has_started
+    )
+    publish_has_finished = property(
+        _get_publish_has_finished, _set_publish_has_finished
+    )
+    publish_is_running = property(
+        _get_publish_is_running, _set_publish_is_running
+    )
+    publish_has_validated = property(
+        _get_publish_has_validated, _set_publish_has_validated
+    )
+    publish_has_crashed = property(
+        _get_publish_has_crashed, _set_publish_has_crashed
+    )
+    publish_has_validation_errors = property(
+        _get_publish_has_validation_errors, _set_publish_has_validation_errors
+    )
+    publish_max_progress = property(
+        _get_publish_max_progress, _set_publish_max_progress
+    )
+    publish_progress = property(
+        _get_publish_progress, _set_publish_progress
+    )
+    publish_error_msg = property(
+        _get_publish_error_msg, _set_publish_error_msg
+    )
+
+    def _reset_attributes(self):
+        """Reset most of the attributes that can be reset."""
+
+        self.publish_is_running = False
+        self.publish_has_started = False
+        self.publish_has_validated = False
+        self.publish_has_crashed = False
+        self.publish_has_validation_errors = False
+        self.publish_has_finished = False
+
+        self.publish_error_msg = None
+        self.publish_progress = 0
+
+    @property
+    def creator_items(self):
+        """Creators that can be shown in create dialog."""
+        if self._creator_items is None:
+            self._creator_items = self._collect_creator_items()
+        return self._creator_items
+
+    @abstractmethod
+    def _collect_creator_items(self):
+        """Receive CreatorItems to work with.
+
+        Returns:
+            Dict[str, CreatorItem]: Creator items by their identifier.
+        """
+
+        pass
+
+    def get_creator_icon(self, identifier):
+        """Receive icon for creator identifier.
+
+        Args:
+            identifier (str): Creator's identifier for which the icon should
+                be returned.
+
+        Returns:
+            Union[str, None]: Creator's icon string.
+        """
+
+        creator_item = self.creator_items.get(identifier)
+        if creator_item is not None:
+            return creator_item.icon
+        return None
+
+    def get_thumbnail_temp_dir_path(self):
+        """Return path to directory where thumbnails can be temporarily
+        stored.
+
+        Returns:
+            str: Path to a directory.
+        """
+
+        return os.path.join(
+            tempfile.gettempdir(),
+            "publisher_thumbnails",
+            get_process_id()
+        )
+
+    def clear_thumbnail_temp_dir_path(self):
+        """Remove content of thumbnail temp directory."""
+
+        dirpath = self.get_thumbnail_temp_dir_path()
+        if os.path.exists(dirpath):
+            shutil.rmtree(dirpath)
+
+
+class PublisherController(BasePublisherController):
+    """Middleware between UI, CreateContext and publish Context.
+
+    Handle both creation and publishing parts.
+
+    Args:
+        headless (bool): Headless publishing. ATM not implemented or used.
+    """
+
+    _log = None
+
+    def __init__(self, headless=False):
+        super(PublisherController, self).__init__()
+
+        self._host = registered_host()
+        self._headless = headless
+
+        self._create_context = CreateContext(
+            self._host, headless=headless, reset=False
+        )
+
+        self._publish_plugins_proxy = None
+
+        # pyblish.api.Context
+        self._publish_context = None
+        # Pyblish report
+        self._publish_report = PublishReportMaker(self)
+        # Store exceptions of validation error
+        self._publish_validation_errors = PublishValidationErrors()
+
+        # Publishing should stop at validation stage
+        self._publish_up_validation = False
+        # This information is not that important for the controller but for
+        # the widget which can change (and set) the comment.
+ self._publish_comment_is_set = False + + # Validation order + # - plugin with order same or higher than this value is extractor or + # higher + self._validation_order = ( + pyblish.api.ValidatorOrder + PLUGIN_ORDER_OFFSET + ) + + # Plugin iterator + self._main_thread_iter = None + + # State flags to prevent executing method which is already in progress + self._resetting_plugins = False + self._resetting_instances = False + + # Cacher of avalon documents + self._asset_docs_cache = AssetDocsCache(self) + + @property + def project_name(self): + """Current project context defined by host. + + Returns: + str: Project name. + """ + + return self._create_context.get_current_project_name() + + @property + def current_asset_name(self): + """Current context asset name defined by host. + + Returns: + Union[str, None]: Asset name or None if asset is not set. + """ + + return self._create_context.get_current_asset_name() + + @property + def current_task_name(self): + """Current context task name defined by host. + + Returns: + Union[str, None]: Task name or None if task is not set. + """ + + return self._create_context.get_current_task_name() + + @property + def host_context_has_changed(self): + return self._create_context.context_has_changed + + @property + def instances(self): + """Current instances in create context.""" + return self._create_context.instances_by_id + + @property + def convertor_items(self): + return self._create_context.convertor_items_by_id + + @property + def _creators(self): + """All creators loaded in create context.""" + + return self._create_context.creators + + @property + def _publish_plugins(self): + """Publish plugins.""" + return self._create_context.publish_plugins + + # --- Publish specific callbacks --- + def get_asset_docs(self): + """Get asset documents from cache for whole project.""" + return self._asset_docs_cache.get_asset_docs() + + def get_context_title(self): + """Get context title for artist shown at the top of main window.""" + + context_title = None + if hasattr(self._host, "get_context_title"): + context_title = self._host.get_context_title() + + if context_title is None: + context_title = os.environ.get("AVALON_APP_NAME") + if context_title is None: + context_title = os.environ.get("AVALON_APP") + + return context_title + + def get_asset_hierarchy(self): + """Prepare asset documents into hierarchy.""" + + return self._asset_docs_cache.get_asset_hierarchy() + + def get_task_names_by_asset_names(self, asset_names): + """Prepare task names by asset name.""" + task_names_by_asset_name = ( + self._asset_docs_cache.get_task_names_by_asset_name() + ) + result = {} + for asset_name in asset_names: + result[asset_name] = set( + task_names_by_asset_name.get(asset_name) or [] + ) + return result + + def get_existing_subset_names(self, asset_name): + project_name = self.project_name + asset_doc = self._asset_docs_cache.get_asset_by_name(asset_name) + if not asset_doc: + return None + + asset_id = asset_doc["_id"] + subset_docs = get_subsets( + project_name, asset_ids=[asset_id], fields=["name"] + ) + return { + subset_doc["name"] + for subset_doc in subset_docs + } + + def reset(self): + """Reset everything related to creation and publishing.""" + self.stop_publish() + + self._emit_event("controller.reset.started") + + self.host_is_valid = self._create_context.host_is_valid + + self._create_context.reset_preparation() + + # Reset avalon context + self._create_context.reset_current_context() + + self._asset_docs_cache.reset() + + self._reset_plugins() + # Publish 
part must be reset after plugins
+        self._reset_publish()
+        self._reset_instances()
+
+        self._create_context.reset_finalization()
+
+        self._emit_event("controller.reset.finished")
+
+        self.emit_card_message("Refreshed..")
+
+    def _reset_plugins(self):
+        """Reset to initial state."""
+        if self._resetting_plugins:
+            return
+
+        self._resetting_plugins = True
+
+        self._create_context.reset_plugins()
+        # Reset creator items
+        self._creator_items = None
+
+        self._resetting_plugins = False
+
+        self._emit_event("plugins.refresh.finished")
+
+    def _collect_creator_items(self):
+        # TODO add crashed initialization of create plugins to report
+        output = {}
+        for identifier, creator in self._create_context.creators.items():
+            try:
+                output[identifier] = CreatorItem.from_creator(creator)
+            except Exception:
+                self.log.error(
+                    "Failed to create creator item for '%s'",
+                    identifier,
+                    exc_info=True
+                )
+
+        return output
+
+    def _reset_instances(self):
+        """Reset create instances."""
+        if self._resetting_instances:
+            return
+
+        self._resetting_instances = True
+
+        self._create_context.reset_context_data()
+        with self._create_context.bulk_instances_collection():
+            try:
+                self._create_context.reset_instances()
+            except CreatorsOperationFailed as exc:
+                self._emit_event(
+                    "instances.collection.failed",
+                    {
+                        "title": "Instance collection failed",
+                        "failed_info": exc.failed_info
+                    }
+                )
+
+            try:
+                self._create_context.find_convertor_items()
+            except ConvertorsOperationFailed as exc:
+                self._emit_event(
+                    "convertors.find.failed",
+                    {
+                        "title": "Collection of unsupported subset failed",
+                        "failed_info": exc.failed_info
+                    }
+                )
+
+            try:
+                self._create_context.execute_autocreators()
+
+            except CreatorsOperationFailed as exc:
+                self._emit_event(
+                    "instances.create.failed",
+                    {
+                        "title": "AutoCreation failed",
+                        "failed_info": exc.failed_info
+                    }
+                )
+
+        self._resetting_instances = False
+
+        self._on_create_instance_change()
+
+    def get_thumbnail_paths_for_instances(self, instance_ids):
+        thumbnail_paths_by_instance_id = (
+            self._create_context.thumbnail_paths_by_instance_id
+        )
+        return {
+            instance_id: thumbnail_paths_by_instance_id.get(instance_id)
+            for instance_id in instance_ids
+        }
+
+    def set_thumbnail_paths_for_instances(self, thumbnail_path_mapping):
+        thumbnail_paths_by_instance_id = (
+            self._create_context.thumbnail_paths_by_instance_id
+        )
+        for instance_id, thumbnail_path in thumbnail_path_mapping.items():
+            thumbnail_paths_by_instance_id[instance_id] = thumbnail_path
+
+        self._emit_event(
+            "instance.thumbnail.changed",
+            {
+                "mapping": thumbnail_path_mapping
+            }
+        )
+
+    def emit_card_message(
+        self, message, message_type=CardMessageTypes.standard
+    ):
+        self._emit_event(
+            "show.card.message",
+            {
+                "message": message,
+                "message_type": message_type
+            }
+        )
+
+    def get_creator_attribute_definitions(self, instances):
+        """Collect creator attribute definitions for multiple instances.
+
+        Args:
+            instances(List[CreatedInstance]): List of created instances for
+                which the attribute definitions should be returned.
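+
+        Example:
+            Hedged sketch of consuming the output; each item is a tuple of
+            attribute definition, instances using it and their values:
+
+                defs = controller.get_creator_attribute_definitions(instances)
+                for attr_def, attr_instances, values in defs:
+                    print(attr_def.key, values)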
+        """
+
+        # NOTE it would be great if attrdefs would have hash method
+        #   implemented so they could be used as keys in dictionary
+        output = []
+        _attr_defs = {}
+        for instance in instances:
+            for attr_def in instance.creator_attribute_defs:
+                found_idx = None
+                for idx, _attr_def in _attr_defs.items():
+                    if attr_def == _attr_def:
+                        found_idx = idx
+                        break
+
+                value = None
+                if attr_def.is_value_def:
+                    value = instance.creator_attributes[attr_def.key]
+                if found_idx is None:
+                    idx = len(output)
+                    output.append((attr_def, [instance], [value]))
+                    _attr_defs[idx] = attr_def
+                else:
+                    item = output[found_idx]
+                    item[1].append(instance)
+                    item[2].append(value)
+        return output
+
+    def get_publish_attribute_definitions(self, instances, include_context):
+        """Collect publish attribute definitions for passed instances.
+
+        Args:
+            instances(list): List of created instances for which the
+                attribute definitions should be returned.
+            include_context(bool): Add context specific attribute definitions.
+        """
+
+        _tmp_items = []
+        if include_context:
+            _tmp_items.append(self._create_context)
+
+        for instance in instances:
+            _tmp_items.append(instance)
+
+        all_defs_by_plugin_name = {}
+        all_plugin_values = {}
+        for item in _tmp_items:
+            for plugin_name, attr_val in item.publish_attributes.items():
+                attr_defs = attr_val.attr_defs
+                if not attr_defs:
+                    continue
+
+                if plugin_name not in all_defs_by_plugin_name:
+                    all_defs_by_plugin_name[plugin_name] = attr_val.attr_defs
+
+                if plugin_name not in all_plugin_values:
+                    all_plugin_values[plugin_name] = {}
+
+                plugin_values = all_plugin_values[plugin_name]
+
+                for attr_def in attr_defs:
+                    if isinstance(attr_def, UIDef):
+                        continue
+                    if attr_def.key not in plugin_values:
+                        plugin_values[attr_def.key] = []
+                    attr_values = plugin_values[attr_def.key]
+
+                    value = attr_val[attr_def.key]
+                    attr_values.append((item, value))
+
+        output = []
+        for plugin in self._create_context.plugins_with_defs:
+            plugin_name = plugin.__name__
+            if plugin_name not in all_defs_by_plugin_name:
+                continue
+            output.append((
+                plugin_name,
+                all_defs_by_plugin_name[plugin_name],
+                all_plugin_values
+            ))
+        return output
+
+    def get_subset_name(
+        self,
+        creator_identifier,
+        variant,
+        task_name,
+        asset_name,
+        instance_id=None
+    ):
+        """Get subset name based on passed data.
+
+        Args:
+            creator_identifier (str): Identifier of creator which should be
+                responsible for subset name creation.
+            variant (str): Variant value from user's input.
+            task_name (str): Name of task for which the instance is created.
+            asset_name (str): Name of asset for which the instance is created.
+            instance_id (Union[str, None]): Existing instance id when subset
+                name is updated.
+        """
+
+        creator = self._creators[creator_identifier]
+        project_name = self.project_name
+        asset_doc = self._asset_docs_cache.get_full_asset_by_name(asset_name)
+        instance = None
+        if instance_id:
+            instance = self.instances[instance_id]
+
+        return creator.get_subset_name(
+            variant, task_name, asset_doc, project_name, instance=instance
+        )
+
+    def trigger_convertor_items(self, convertor_identifiers):
+        """Trigger legacy item convertors.
+
+        This functionality requires a save and reset of the CreateContext.
+        The reset is needed so Creators can collect converted items.
+
+        Args:
+            convertor_identifiers (list[str]): Identifiers of convertor
+                plugins.
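+
+        Example:
+            Illustrative call; the identifier is hypothetical and would come
+            from 'convertor_items':
+
+                controller.trigger_convertor_items(["legacy_render"])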
+        """
+
+        success = True
+        try:
+            self._create_context.run_convertors(convertor_identifiers)
+
+        except ConvertorsOperationFailed as exc:
+            success = False
+            self._emit_event(
+                "convertors.convert.failed",
+                {
+                    "title": "Conversion failed",
+                    "failed_info": exc.failed_info
+                }
+            )
+
+        if success:
+            self.emit_card_message("Conversion finished")
+        else:
+            self.emit_card_message("Conversion failed", CardMessageTypes.error)
+
+        self.reset()
+
+    def create(
+        self, creator_identifier, subset_name, instance_data, options
+    ):
+        """Trigger creation and refresh of instances in UI."""
+
+        success = True
+        try:
+            self._create_context.create_with_unified_error(
+                creator_identifier, subset_name, instance_data, options
+            )
+
+        except CreatorsOperationFailed as exc:
+            success = False
+            self._emit_event(
+                "instances.create.failed",
+                {
+                    "title": "Creation failed",
+                    "failed_info": exc.failed_info
+                }
+            )
+
+        self._on_create_instance_change()
+        return success
+
+    def save_changes(self, show_message=True):
+        """Save changes that happened during creation.
+
+        Trigger save of changes using host api. This functionality does not
+        validate anything. It is required to do checks before this method is
+        called to be able to give the user an actionable response, e.g. a
+        check of context using 'host_context_has_changed'.
+
+        Args:
+            show_message (bool): Show message that changes were
+                saved successfully.
+
+        Returns:
+            bool: Save of changes was successful.
+        """
+
+        if not self._create_context.host_is_valid:
+            # TODO remove
+            # Fake success save when host is not valid for CreateContext
+            #   this is for testing as experimental feature
+            return True
+
+        try:
+            self._create_context.save_changes()
+            if show_message:
+                self.emit_card_message("Saved changes..")
+            return True
+
+        except CreatorsOperationFailed as exc:
+            self._emit_event(
+                "instances.save.failed",
+                {
+                    "title": "Instances save failed",
+                    "failed_info": exc.failed_info
+                }
+            )
+
+        return False
+
+    def remove_instances(self, instance_ids):
+        """Remove instances based on instance ids.
+
+        Args:
+            instance_ids (List[str]): List of instance ids to remove.
+        """
+
+        # QUESTION Expect that instances are really removed? In that case
+        #   reset is not required.
+        self._remove_instances_from_context(instance_ids)
+
+        self._on_create_instance_change()
+
+    def _remove_instances_from_context(self, instance_ids):
+        instances_by_id = self._create_context.instances_by_id
+        instances = [
+            instances_by_id[instance_id]
+            for instance_id in instance_ids
+        ]
+        try:
+            self._create_context.remove_instances(instances)
+        except CreatorsOperationFailed as exc:
+            self._emit_event(
+                "instances.remove.failed",
+                {
+                    "title": "Instance removal failed",
+                    "failed_info": exc.failed_info
+                }
+            )
+
+    def _on_create_instance_change(self):
+        self._emit_event("instances.refresh.finished")
+
+    def get_publish_report(self):
+        return self._publish_report.get_report(self._publish_plugins)
+
+    def get_validation_errors(self):
+        return self._publish_validation_errors.create_report()
+
+    def _reset_publish(self):
+        self._reset_attributes()
+
+        self._publish_up_validation = False
+        self._publish_comment_is_set = False
+
+        self._main_thread_iter = self._publish_iterator()
+        self._publish_context = pyblish.api.Context()
+        # Make sure "comment" is set on publish context
+        self._publish_context.data["comment"] = ""
+        # Add access to create context during publishing
+        # - must not be used for changing CreatedInstances during publishing!
+        # QUESTION
+        # - popping the key after the first collector using it would be the
+        #   safest option?
+        self._publish_context.data["create_context"] = self._create_context
+
+        self._publish_plugins_proxy = PublishPluginsProxy(
+            self._publish_plugins
+        )
+
+        self._publish_report.reset(self._publish_context, self._create_context)
+        self._publish_validation_errors.reset(self._publish_plugins_proxy)
+
+        self.publish_max_progress = len(self._publish_plugins)
+
+        self._emit_event("publish.reset.finished")
+
+    def set_comment(self, comment):
+        """Set comment from UI to pyblish context.
+
+        This should always be called before publishing starts, but should
+        happen only once, on the first publish start, thus the variable
+        '_publish_comment_is_set' is used to keep track of that.
+        """
+
+        if not self._publish_comment_is_set:
+            self._publish_context.data["comment"] = comment
+            self._publish_comment_is_set = True
+
+    def publish(self):
+        """Run publishing.
+
+        Make sure all changes are saved before method is called (Call
+        'save_changes' and check output).
+        """
+
+        self._publish_up_validation = False
+        self._start_publish()
+
+    def validate(self):
+        """Run publishing and stop after Validation.
+
+        Make sure all changes are saved before method is called (Call
+        'save_changes' and check output).
+        """
+
+        if self.publish_has_validated:
+            return
+        self._publish_up_validation = True
+        self._start_publish()
+
+    def _start_publish(self):
+        """Start or continue in publishing."""
+        if self.publish_is_running:
+            return
+
+        self.publish_is_running = True
+        self.publish_has_started = True
+
+        self._emit_event("publish.process.started")
+
+        self._publish_next_process()
+
+    def _stop_publish(self):
+        """Stop or pause publishing."""
+        self.publish_is_running = False
+
+        self._emit_event("publish.process.stopped")
+
+    def stop_publish(self):
+        """Stop publishing process (any reason)."""
+
+        if self.publish_is_running:
+            self._stop_publish()
+
+    def run_action(self, plugin_id, action_id):
+        # TODO handle result in UI
+        plugin = self._publish_plugins_proxy.get_plugin(plugin_id)
+        action = self._publish_plugins_proxy.get_action(plugin_id, action_id)
+
+        result = pyblish.plugin.process(
+            plugin, self._publish_context, None, action.id
+        )
+        exception = result.get("error")
+        if exception:
+            self._emit_event(
+                "publish.action.failed",
+                {
+                    "title": "Action failed",
+                    "message": "Action failed.",
+                    "traceback": "".join(
+                        traceback.format_exception(exception)
+                    ),
+                    "label": action.__name__,
+                    "identifier": action.id
+                }
+            )
+
+        self._publish_report.add_action_result(action, result)
+
+        self.emit_card_message("Action finished.")
+
+    def _publish_next_process(self):
+        # Validations of progress before using iterator
+        # - same conditions may be inside iterator but they may be used
+        #   only in specific cases (e.g. when it happens for the first time)
+
+        # There are validation errors and validation is passed
+        # - can't make any progress
+        if (
+            self.publish_has_validated
+            and self.publish_has_validation_errors
+        ):
+            item = MainThreadItem(self.stop_publish)
+
+        # Any unexpected error happened
+        # - everything should stop
+        elif self.publish_has_crashed:
+            item = MainThreadItem(self.stop_publish)
+
+        # Everything is ok so try to get new processing item
+        else:
+            item = next(self._main_thread_iter)
+
+        self._process_main_thread_item(item)
+
+    def _process_main_thread_item(self, item):
+        item()
+
+    def _is_publish_plugin_active(self, plugin):
+        """Decide if publish plugin is active.
+
+        This is a hack because 'active' is misused in the mixin
+        'OptionalPyblishPluginMixin', where 'active' is used for the default
+        value of optional plugins. Because of that, the 'active' state of
+        plugins which inherit from 'OptionalPyblishPluginMixin' is ignored.
+        That affects headless publishing inside a host and potentially remote
+        publishing.
+
+        We have to change that to match pyblish base, but we can do that
+        only when all hosts use Publisher because the change requires
+        change of settings schemas.
+
+        Args:
+            plugin (pyblish.Plugin): Plugin which should be checked whether
+                it is active.
+
+        Returns:
+            bool: Is plugin active.
+        """
+
+        if plugin.active:
+            return True
+
+        if not plugin.optional:
+            return False
+
+        if OptionalPyblishPluginMixin in inspect.getmro(plugin):
+            return True
+        return False
+
+    def _publish_iterator(self):
+        """Main logic center of publishing.
+
+        The iterator returns `MainThreadItem` objects with callbacks that
+        should be processed in the main thread (threaded in future?). It
+        takes care of changing the states of the currently processed publish
+        plugin and instance, and also changes the state of processed orders,
+        e.g. when the validation order has passed.
+
+        It also stops publishing if it should stop at validation.
+        """
+
+        for idx, plugin in enumerate(self._publish_plugins):
+            self._publish_progress = idx
+
+            # Check if plugin is over validation order
+            if not self.publish_has_validated:
+                self.publish_has_validated = (
+                    plugin.order >= self._validation_order
+                )
+
+            # Stop if plugin is over validation order and process
+            #   should process up to validation.
+            if self._publish_up_validation and self.publish_has_validated:
+                yield MainThreadItem(self.stop_publish)
+
+            # Stop if validation is over and validation errors happened
+            if (
+                self.publish_has_validated
+                and self.publish_has_validation_errors
+            ):
+                yield MainThreadItem(self.stop_publish)
+
+            # Add plugin to publish report
+            self._publish_report.add_plugin_iter(
+                plugin, self._publish_context)
+
+            # WARNING This is hack fix for optional plugins
+            if not self._is_publish_plugin_active(plugin):
+                self._publish_report.set_plugin_skipped()
+                continue
+
+            # Trigger callback that new plugin is going to be processed
+            plugin_label = plugin.__name__
+            if hasattr(plugin, "label") and plugin.label:
+                plugin_label = plugin.label
+            self._emit_event(
+                "publish.process.plugin.changed",
+                {"plugin_label": plugin_label}
+            )
+
+            # Plugin is instance plugin
+            if plugin.__instanceEnabled__:
+                instances = pyblish.logic.instances_by_plugin(
+                    self._publish_context, plugin
+                )
+                if not instances:
+                    self._publish_report.set_plugin_skipped()
+                    continue
+
+                for instance in instances:
+                    if instance.data.get("publish") is False:
+                        continue
+
+                    instance_label = (
+                        instance.data.get("label")
+                        or instance.data["name"]
+                    )
+                    self._emit_event(
+                        "publish.process.instance.changed",
+                        {"instance_label": instance_label}
+                    )
+
+                    yield MainThreadItem(
+                        self._process_and_continue, plugin, instance
+                    )
+            else:
+                families = collect_families_from_instances(
+                    self._publish_context, only_active=True
+                )
+                plugins = pyblish.logic.plugins_by_families(
+                    [plugin], families
+                )
+                if plugins:
+                    instance_label = (
+                        self._publish_context.data.get("label")
+                        or self._publish_context.data.get("name")
+                        or "Context"
+                    )
+                    self._emit_event(
+                        "publish.process.instance.changed",
+                        {"instance_label": instance_label}
+                    )
+                    yield MainThreadItem(
+                        self._process_and_continue, plugin, None
+                    )
+                else:
+                    self._publish_report.set_plugin_skipped()
+
+        # Cleanup of publishing process
+        self.publish_has_finished = 
True + self.publish_progress = self.publish_max_progress + yield MainThreadItem(self.stop_publish) + + def _add_validation_error(self, result): + self.publish_has_validation_errors = True + self._publish_validation_errors.add_error( + result["plugin"], + result["error"], + result["instance"] + ) + + def _process_and_continue(self, plugin, instance): + result = pyblish.plugin.process( + plugin, self._publish_context, instance + ) + + exception = result.get("error") + if exception: + has_validation_error = False + if ( + isinstance(exception, PublishValidationError) + and not self.publish_has_validated + ): + has_validation_error = True + self._add_validation_error(result) + + else: + if isinstance(exception, KnownPublishError): + msg = str(exception) + else: + msg = ( + "Something went wrong. Send report" + " to your supervisor or Ynput team." + ) + self.publish_error_msg = msg + self.publish_has_crashed = True + + result["is_validation_error"] = has_validation_error + + self._publish_report.add_result(result) + + self._publish_next_process() + + +def collect_families_from_instances(instances, only_active=False): + """Collect all families for passed publish instances. + + Args: + instances(list): List of publish instances from + which are families collected. + only_active(bool): Return families only for active instances. + + Returns: + list[str]: Families available on instances. + """ + + all_families = set() + for instance in instances: + if only_active: + if instance.data.get("publish") is False: + continue + family = instance.data.get("family") + if family: + all_families.add(family) + + families = instance.data.get("families") or tuple() + for family in families: + all_families.add(family) + + return list(all_families) diff --git a/openpype/tools/publisher/control_qt.py b/client/ayon_core/tools/publisher/control_qt.py similarity index 99% rename from openpype/tools/publisher/control_qt.py rename to client/ayon_core/tools/publisher/control_qt.py index 132b42f9ec..3d56c08131 100644 --- a/openpype/tools/publisher/control_qt.py +++ b/client/ayon_core/tools/publisher/control_qt.py @@ -3,8 +3,8 @@ from qtpy import QtCore -from openpype.lib.events import Event -from openpype.pipeline.create import CreatedInstance +from ayon_core.lib.events import Event +from ayon_core.pipeline.create import CreatedInstance from .control import ( MainThreadItem, diff --git a/openpype/tools/publisher/publish_report_viewer/__init__.py b/client/ayon_core/tools/publisher/publish_report_viewer/__init__.py similarity index 100% rename from openpype/tools/publisher/publish_report_viewer/__init__.py rename to client/ayon_core/tools/publisher/publish_report_viewer/__init__.py diff --git a/openpype/tools/publisher/publish_report_viewer/constants.py b/client/ayon_core/tools/publisher/publish_report_viewer/constants.py similarity index 100% rename from openpype/tools/publisher/publish_report_viewer/constants.py rename to client/ayon_core/tools/publisher/publish_report_viewer/constants.py diff --git a/openpype/tools/publisher/publish_report_viewer/delegates.py b/client/ayon_core/tools/publisher/publish_report_viewer/delegates.py similarity index 100% rename from openpype/tools/publisher/publish_report_viewer/delegates.py rename to client/ayon_core/tools/publisher/publish_report_viewer/delegates.py diff --git a/client/ayon_core/tools/publisher/publish_report_viewer/model.py b/client/ayon_core/tools/publisher/publish_report_viewer/model.py new file mode 100644 index 0000000000..9ed1bf555d --- /dev/null +++ 
b/client/ayon_core/tools/publisher/publish_report_viewer/model.py @@ -0,0 +1,212 @@ +import uuid +from qtpy import QtCore, QtGui + +import pyblish.api + +from ayon_core.tools.utils.lib import html_escape +from .constants import ( + ITEM_ID_ROLE, + ITEM_IS_GROUP_ROLE, + ITEM_LABEL_ROLE, + ITEM_ERRORED_ROLE, + PLUGIN_SKIPPED_ROLE, + PLUGIN_PASSED_ROLE, + INSTANCE_REMOVED_ROLE +) + + +class InstancesModel(QtGui.QStandardItemModel): + def __init__(self, *args, **kwargs): + super(InstancesModel, self).__init__(*args, **kwargs) + + self._items_by_id = {} + self._plugin_items_by_id = {} + + def get_items_by_id(self): + return self._items_by_id + + def set_report(self, report_item): + root_item = self.invisibleRootItem() + if root_item.rowCount() > 0: + root_item.removeRows(0, root_item.rowCount()) + self._items_by_id.clear() + self._plugin_items_by_id.clear() + if not report_item: + return + + families = set(report_item.instance_items_by_family.keys()) + families.remove(None) + all_families = list(sorted(families)) + all_families.insert(0, None) + + family_items = [] + for family in all_families: + items = [] + instance_items = report_item.instance_items_by_family[family] + all_removed = True + for instance_item in instance_items: + src_instance_label = instance_item.label + if src_instance_label is None: + # Do not cause UI crash if label is 'None' + src_instance_label = "No label" + instance_label = html_escape(src_instance_label) + + item = QtGui.QStandardItem(src_instance_label) + item.setData(instance_label, ITEM_LABEL_ROLE) + item.setData(instance_item.errored, ITEM_ERRORED_ROLE) + item.setData(instance_item.id, ITEM_ID_ROLE) + item.setData(instance_item.removed, INSTANCE_REMOVED_ROLE) + if all_removed and not instance_item.removed: + all_removed = False + item.setData(False, ITEM_IS_GROUP_ROLE) + items.append(item) + self._items_by_id[instance_item.id] = item + self._plugin_items_by_id[instance_item.id] = item + + if family is None: + family_items.extend(items) + continue + + family_item = QtGui.QStandardItem(family) + family_item.setData(family, ITEM_LABEL_ROLE) + family_item.setFlags(QtCore.Qt.ItemIsEnabled) + family_id = uuid.uuid4() + family_item.setData(family_id, ITEM_ID_ROLE) + family_item.setData(all_removed, INSTANCE_REMOVED_ROLE) + family_item.setData(True, ITEM_IS_GROUP_ROLE) + family_item.appendRows(items) + family_items.append(family_item) + self._items_by_id[family_id] = family_item + + root_item.appendRows(family_items) + + +class InstanceProxyModel(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(InstanceProxyModel, self).__init__(*args, **kwargs) + + self._ignore_removed = True + + @property + def ignore_removed(self): + return self._ignore_removed + + def set_ignore_removed(self, value): + if value == self._ignore_removed: + return + self._ignore_removed = value + + if self.sourceModel(): + self.invalidateFilter() + + def filterAcceptsRow(self, row, parent): + source_index = self.sourceModel().index(row, 0, parent) + if self._ignore_removed and source_index.data(INSTANCE_REMOVED_ROLE): + return False + return True + + +class PluginsModel(QtGui.QStandardItemModel): + order_label_mapping = ( + (pyblish.api.CollectorOrder + 0.5, "Collect"), + (pyblish.api.ValidatorOrder + 0.5, "Validate"), + (pyblish.api.ExtractorOrder + 0.5, "Extract"), + (pyblish.api.IntegratorOrder + 0.5, "Integrate"), + (None, "Other") + ) + + def __init__(self, *args, **kwargs): + super(PluginsModel, self).__init__(*args, **kwargs) + + self._items_by_id = {} + 
self._plugin_items_by_id = {} + + def get_items_by_id(self): + return self._items_by_id + + def set_report(self, report_item): + root_item = self.invisibleRootItem() + if root_item.rowCount() > 0: + root_item.removeRows(0, root_item.rowCount()) + self._items_by_id.clear() + self._plugin_items_by_id.clear() + if not report_item: + return + + labels_iter = iter(self.order_label_mapping) + cur_order, cur_label = next(labels_iter) + cur_plugin_items = [] + + plugin_items_by_group_labels = [] + plugin_items_by_group_labels.append((cur_label, cur_plugin_items)) + for plugin_id in report_item.plugins_id_order: + plugin_item = report_item.plugins_items_by_id[plugin_id] + if cur_order is not None and plugin_item.order >= cur_order: + cur_order, cur_label = next(labels_iter) + cur_plugin_items = [] + plugin_items_by_group_labels.append( + (cur_label, cur_plugin_items) + ) + + cur_plugin_items.append(plugin_item) + + group_items = [] + for group_label, plugin_items in plugin_items_by_group_labels: + group_id = uuid.uuid4() + group_item = QtGui.QStandardItem(group_label) + group_item.setData(group_label, ITEM_LABEL_ROLE) + group_item.setData(group_id, ITEM_ID_ROLE) + group_item.setData(True, ITEM_IS_GROUP_ROLE) + group_item.setFlags(QtCore.Qt.ItemIsEnabled) + group_items.append(group_item) + + self._items_by_id[group_id] = group_item + + if not plugin_items: + continue + + items = [] + for plugin_item in plugin_items: + label = plugin_item.label or plugin_item.name + item = QtGui.QStandardItem(label) + item.setData(False, ITEM_IS_GROUP_ROLE) + item.setData(plugin_item.label, ITEM_LABEL_ROLE) + item.setData(plugin_item.id, ITEM_ID_ROLE) + item.setData(plugin_item.skipped, PLUGIN_SKIPPED_ROLE) + item.setData(plugin_item.passed, PLUGIN_PASSED_ROLE) + item.setData(plugin_item.errored, ITEM_ERRORED_ROLE) + items.append(item) + self._items_by_id[plugin_item.id] = item + self._plugin_items_by_id[plugin_item.id] = item + group_item.appendRows(items) + + root_item.appendRows(group_items) + + +class PluginProxyModel(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(PluginProxyModel, self).__init__(*args, **kwargs) + + self._ignore_skipped = True + + @property + def ignore_skipped(self): + return self._ignore_skipped + + def set_ignore_skipped(self, value): + if value == self._ignore_skipped: + return + self._ignore_skipped = value + + if self.sourceModel(): + self.invalidateFilter() + + def filterAcceptsRow(self, row, parent): + model = self.sourceModel() + source_index = model.index(row, 0, parent) + if source_index.data(ITEM_IS_GROUP_ROLE): + return model.rowCount(source_index) > 0 + + if self._ignore_skipped and source_index.data(PLUGIN_SKIPPED_ROLE): + return False + return True diff --git a/openpype/tools/publisher/publish_report_viewer/report_items.py b/client/ayon_core/tools/publisher/publish_report_viewer/report_items.py similarity index 100% rename from openpype/tools/publisher/publish_report_viewer/report_items.py rename to client/ayon_core/tools/publisher/publish_report_viewer/report_items.py diff --git a/client/ayon_core/tools/publisher/publish_report_viewer/widgets.py b/client/ayon_core/tools/publisher/publish_report_viewer/widgets.py new file mode 100644 index 0000000000..544d45ce89 --- /dev/null +++ b/client/ayon_core/tools/publisher/publish_report_viewer/widgets.py @@ -0,0 +1,563 @@ +from math import ceil +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core.tools.utils import NiceCheckbox + +# from ayon_core.tools.utils import DeselectableTreeView +from 
.constants import ( + ITEM_ID_ROLE, + ITEM_IS_GROUP_ROLE +) +from .delegates import GroupItemDelegate +from .model import ( + InstancesModel, + InstanceProxyModel, + PluginsModel, + PluginProxyModel +) +from .report_items import PublishReport + +FILEPATH_ROLE = QtCore.Qt.UserRole + 1 +TRACEBACK_ROLE = QtCore.Qt.UserRole + 2 +IS_DETAIL_ITEM_ROLE = QtCore.Qt.UserRole + 3 + + +class PluginLoadReportModel(QtGui.QStandardItemModel): + def set_report(self, report): + parent = self.invisibleRootItem() + parent.removeRows(0, parent.rowCount()) + + if report is None: + return + + new_items = [] + new_items_by_filepath = {} + for filepath in report.crashed_plugin_paths.keys(): + item = QtGui.QStandardItem(filepath) + new_items.append(item) + new_items_by_filepath[filepath] = item + + if not new_items: + return + + parent.appendRows(new_items) + for filepath, item in new_items_by_filepath.items(): + traceback_txt = report.crashed_plugin_paths[filepath] + detail_item = QtGui.QStandardItem() + detail_item.setData(filepath, FILEPATH_ROLE) + detail_item.setData(traceback_txt, TRACEBACK_ROLE) + detail_item.setData(True, IS_DETAIL_ITEM_ROLE) + item.appendRow(detail_item) + + +class DetailWidget(QtWidgets.QTextEdit): + def __init__(self, text, *args, **kwargs): + super(DetailWidget, self).__init__(*args, **kwargs) + + self.setReadOnly(True) + self.setHtml(text) + self.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) + self.setWordWrapMode( + QtGui.QTextOption.WrapAtWordBoundaryOrAnywhere + ) + + def sizeHint(self): + content_margins = ( + self.contentsMargins().top() + + self.contentsMargins().bottom() + ) + size = self.document().documentLayout().documentSize().toSize() + size.setHeight(size.height() + content_margins) + return size + + +class PluginLoadReportWidget(QtWidgets.QWidget): + def __init__(self, parent): + super(PluginLoadReportWidget, self).__init__(parent) + + view = QtWidgets.QTreeView(self) + view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) + view.setTextElideMode(QtCore.Qt.ElideLeft) + view.setHeaderHidden(True) + view.setAlternatingRowColors(True) + view.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel) + + model = PluginLoadReportModel() + view.setModel(model) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(view, 1) + + view.expanded.connect(self._on_expand) + + self._view = view + self._model = model + self._widgets_by_filepath = {} + + def _on_expand(self, index): + for row in range(self._model.rowCount(index)): + child_index = self._model.index(row, index.column(), index) + self._create_widget(child_index) + + def showEvent(self, event): + super(PluginLoadReportWidget, self).showEvent(event) + self._update_widgets_size_hints() + + def resizeEvent(self, event): + super(PluginLoadReportWidget, self).resizeEvent(event) + self._update_widgets_size_hints() + + def _update_widgets_size_hints(self): + for item in self._widgets_by_filepath.values(): + widget, index = item + if not widget.isVisible(): + continue + self._model.setData( + index, widget.sizeHint(), QtCore.Qt.SizeHintRole + ) + + def _create_widget(self, index): + if not index.data(IS_DETAIL_ITEM_ROLE): + return + + filepath = index.data(FILEPATH_ROLE) + if filepath in self._widgets_by_filepath: + return + + traceback_txt = index.data(TRACEBACK_ROLE) + detail_text = ( + "Filepath:
" + "{}

" + "Traceback:
" + "{}" + ).format(filepath, traceback_txt.replace("\n", "
")) + widget = DetailWidget(detail_text, self) + self._view.setIndexWidget(index, widget) + self._widgets_by_filepath[filepath] = (widget, index) + + def set_report(self, report): + self._widgets_by_filepath = {} + self._model.set_report(report) + + +class ZoomPlainText(QtWidgets.QPlainTextEdit): + min_point_size = 1.0 + max_point_size = 200.0 + + def __init__(self, *args, **kwargs): + super(ZoomPlainText, self).__init__(*args, **kwargs) + + anim_timer = QtCore.QTimer() + anim_timer.setInterval(20) + + anim_timer.timeout.connect(self._scaling_callback) + + self._anim_timer = anim_timer + self._scheduled_scalings = 0 + self._point_size = None + + def wheelEvent(self, event): + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers != QtCore.Qt.ControlModifier: + super(ZoomPlainText, self).wheelEvent(event) + return + + if hasattr(event, "angleDelta"): + delta = event.angleDelta().y() + else: + delta = event.delta() + degrees = float(delta) / 8 + steps = int(ceil(degrees / 5)) + self._scheduled_scalings += steps + if (self._scheduled_scalings * steps < 0): + self._scheduled_scalings = steps + + self._anim_timer.start() + + def _scaling_callback(self): + if self._scheduled_scalings == 0: + self._anim_timer.stop() + return + + factor = 1.0 + (self._scheduled_scalings / 300) + font = self.font() + + if self._point_size is None: + point_size = font.pointSizeF() + else: + point_size = self._point_size + + point_size *= factor + min_hit = False + max_hit = False + if point_size < self.min_point_size: + point_size = self.min_point_size + min_hit = True + elif point_size > self.max_point_size: + point_size = self.max_point_size + max_hit = True + + self._point_size = point_size + + font.setPointSizeF(point_size) + # Using 'self.setFont(font)' would not be propagated when stylesheets + # are applied on this widget + self.setStyleSheet("font-size: {}pt".format(font.pointSize())) + + if ( + (max_hit and self._scheduled_scalings > 0) + or (min_hit and self._scheduled_scalings < 0) + ): + self._scheduled_scalings = 0 + + elif self._scheduled_scalings > 0: + self._scheduled_scalings -= 1 + else: + self._scheduled_scalings += 1 + + +class DetailsWidget(QtWidgets.QWidget): + def __init__(self, parent): + super(DetailsWidget, self).__init__(parent) + + output_widget = ZoomPlainText(self) + output_widget.setObjectName("PublishLogConsole") + output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(output_widget) + + self._output_widget = output_widget + self._report_item = None + self._instance_filter = set() + self._plugin_filter = set() + + def clear(self): + self._output_widget.setPlainText("") + + def set_report(self, report): + self._report_item = report + self._plugin_filter = set() + self._instance_filter = set() + self._update_logs() + + def set_plugin_filter(self, plugin_filter): + self._plugin_filter = plugin_filter + self._update_logs() + + def set_instance_filter(self, instance_filter): + self._instance_filter = instance_filter + self._update_logs() + + def _update_logs(self): + if not self._report_item: + self._output_widget.setPlainText("") + return + + filtered_logs = [] + for log in self._report_item.logs: + if ( + self._instance_filter + and log.instance_id not in self._instance_filter + ): + continue + + if ( + self._plugin_filter + and log.plugin_id not in self._plugin_filter + ): + continue + filtered_logs.append(log) + + self._set_logs(filtered_logs) + + def 
_set_logs(self, logs): + lines = [] + for log in logs: + if log["type"] == "record": + message = "{}: {}".format(log["levelname"], log["msg"]) + + lines.append(message) + exc_info = log["exc_info"] + if exc_info: + lines.append(exc_info) + + elif log["type"] == "error": + lines.append(log["traceback"]) + + else: + print(log["type"]) + + text = "\n".join(lines) + self._output_widget.setPlainText(text) + + +class DeselectableTreeView(QtWidgets.QTreeView): + """A tree view that deselects on clicking on an empty area in the view""" + + def mousePressEvent(self, event): + index = self.indexAt(event.pos()) + clear_selection = False + if not index.isValid(): + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers == QtCore.Qt.ShiftModifier: + return + elif modifiers == QtCore.Qt.ControlModifier: + return + clear_selection = True + else: + indexes = self.selectedIndexes() + if len(indexes) == 1 and index in indexes: + clear_selection = True + + if clear_selection: + # clear the selection + self.clearSelection() + # clear the current index + self.setCurrentIndex(QtCore.QModelIndex()) + event.accept() + return + + QtWidgets.QTreeView.mousePressEvent(self, event) + + +class DetailsPopup(QtWidgets.QDialog): + closed = QtCore.Signal() + + def __init__(self, parent, center_widget): + super(DetailsPopup, self).__init__(parent) + self.setWindowTitle("Report Details") + layout = QtWidgets.QHBoxLayout(self) + + self._center_widget = center_widget + self._first_show = True + self._layout = layout + + def showEvent(self, event): + layout = self.layout() + layout.insertWidget(0, self._center_widget) + super(DetailsPopup, self).showEvent(event) + if self._first_show: + self._first_show = False + self.resize(700, 400) + + def closeEvent(self, event): + super(DetailsPopup, self).closeEvent(event) + self.closed.emit() + + +class PublishReportViewerWidget(QtWidgets.QFrame): + def __init__(self, parent=None): + super(PublishReportViewerWidget, self).__init__(parent) + + instances_model = InstancesModel() + instances_proxy = InstanceProxyModel() + instances_proxy.setSourceModel(instances_model) + + plugins_model = PluginsModel() + plugins_proxy = PluginProxyModel() + plugins_proxy.setSourceModel(plugins_model) + + removed_instances_check = NiceCheckbox(parent=self) + removed_instances_check.setChecked(instances_proxy.ignore_removed) + removed_instances_label = QtWidgets.QLabel( + "Hide removed instances", self + ) + + removed_instances_layout = QtWidgets.QHBoxLayout() + removed_instances_layout.setContentsMargins(0, 0, 0, 0) + removed_instances_layout.addWidget(removed_instances_check, 0) + removed_instances_layout.addWidget(removed_instances_label, 1) + + instances_view = DeselectableTreeView(self) + instances_view.setObjectName("PublishDetailViews") + instances_view.setModel(instances_proxy) + instances_view.setIndentation(0) + instances_view.setHeaderHidden(True) + instances_view.setEditTriggers( + QtWidgets.QAbstractItemView.NoEditTriggers) + instances_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + instances_view.setExpandsOnDoubleClick(False) + + instances_delegate = GroupItemDelegate(instances_view) + instances_view.setItemDelegate(instances_delegate) + + skipped_plugins_check = NiceCheckbox(parent=self) + skipped_plugins_check.setChecked(plugins_proxy.ignore_skipped) + skipped_plugins_label = QtWidgets.QLabel("Hide skipped plugins", self) + + skipped_plugins_layout = QtWidgets.QHBoxLayout() + skipped_plugins_layout.setContentsMargins(0, 0, 0, 0) + 
skipped_plugins_layout.addWidget(skipped_plugins_check, 0) + skipped_plugins_layout.addWidget(skipped_plugins_label, 1) + + plugins_view = DeselectableTreeView(self) + plugins_view.setObjectName("PublishDetailViews") + plugins_view.setModel(plugins_proxy) + plugins_view.setIndentation(0) + plugins_view.setHeaderHidden(True) + plugins_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + plugins_view.setEditTriggers( + QtWidgets.QAbstractItemView.NoEditTriggers) + plugins_view.setExpandsOnDoubleClick(False) + + plugins_delegate = GroupItemDelegate(plugins_view) + plugins_view.setItemDelegate(plugins_delegate) + + details_widget = QtWidgets.QWidget(self) + details_tab_widget = QtWidgets.QTabWidget(details_widget) + details_popup_btn = QtWidgets.QPushButton("PopUp", details_widget) + + details_layout = QtWidgets.QVBoxLayout(details_widget) + details_layout.setContentsMargins(0, 0, 0, 0) + details_layout.addWidget(details_tab_widget, 1) + details_layout.addWidget(details_popup_btn, 0) + + details_popup = DetailsPopup(self, details_tab_widget) + + logs_text_widget = DetailsWidget(details_tab_widget) + plugin_load_report_widget = PluginLoadReportWidget(details_tab_widget) + + details_tab_widget.addTab(logs_text_widget, "Logs") + details_tab_widget.addTab(plugin_load_report_widget, "Crashed plugins") + + middle_widget = QtWidgets.QWidget(self) + middle_layout = QtWidgets.QGridLayout(middle_widget) + middle_layout.setContentsMargins(0, 0, 0, 0) + # Row 1 + middle_layout.addLayout(removed_instances_layout, 0, 0) + middle_layout.addLayout(skipped_plugins_layout, 0, 1) + # Row 2 + middle_layout.addWidget(instances_view, 1, 0) + middle_layout.addWidget(plugins_view, 1, 1) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(middle_widget, 0) + layout.addWidget(details_widget, 1) + + instances_view.selectionModel().selectionChanged.connect( + self._on_instance_change + ) + instances_view.clicked.connect(self._on_instance_view_clicked) + plugins_view.clicked.connect(self._on_plugin_view_clicked) + plugins_view.selectionModel().selectionChanged.connect( + self._on_plugin_change + ) + + skipped_plugins_check.stateChanged.connect( + self._on_skipped_plugin_check + ) + removed_instances_check.stateChanged.connect( + self._on_removed_instances_check + ) + details_popup_btn.clicked.connect(self._on_details_popup) + details_popup.closed.connect(self._on_popup_close) + + self._ignore_selection_changes = False + self._report_item = None + self._logs_text_widget = logs_text_widget + self._plugin_load_report_widget = plugin_load_report_widget + + self._removed_instances_check = removed_instances_check + self._instances_view = instances_view + self._instances_model = instances_model + self._instances_proxy = instances_proxy + + self._instances_delegate = instances_delegate + self._plugins_delegate = plugins_delegate + + self._skipped_plugins_check = skipped_plugins_check + self._plugins_view = plugins_view + self._plugins_model = plugins_model + self._plugins_proxy = plugins_proxy + + self._details_widget = details_widget + self._details_tab_widget = details_tab_widget + self._details_popup = details_popup + + def _on_instance_view_clicked(self, index): + if not index.isValid() or not index.data(ITEM_IS_GROUP_ROLE): + return + + if self._instances_view.isExpanded(index): + self._instances_view.collapse(index) + else: + self._instances_view.expand(index) + + def _on_plugin_view_clicked(self, index): + if not index.isValid() or not 
index.data(ITEM_IS_GROUP_ROLE): + return + + if self._plugins_view.isExpanded(index): + self._plugins_view.collapse(index) + else: + self._plugins_view.expand(index) + + def set_report_data(self, report_data): + report = PublishReport(report_data) + self.set_report(report) + + def set_report(self, report): + self._ignore_selection_changes = True + + self._report_item = report + + self._instances_model.set_report(report) + self._plugins_model.set_report(report) + self._logs_text_widget.set_report(report) + self._plugin_load_report_widget.set_report(report) + + self._ignore_selection_changes = False + + self._instances_view.expandAll() + self._plugins_view.expandAll() + + def _on_instance_change(self, *_args): + if self._ignore_selection_changes: + return + + instance_ids = set() + for index in self._instances_view.selectedIndexes(): + if index.isValid(): + instance_ids.add(index.data(ITEM_ID_ROLE)) + + self._logs_text_widget.set_instance_filter(instance_ids) + + def _on_plugin_change(self, *_args): + if self._ignore_selection_changes: + return + + plugin_ids = set() + for index in self._plugins_view.selectedIndexes(): + if index.isValid(): + plugin_ids.add(index.data(ITEM_ID_ROLE)) + + self._logs_text_widget.set_plugin_filter(plugin_ids) + + def _on_skipped_plugin_check(self): + self._plugins_proxy.set_ignore_skipped( + self._skipped_plugins_check.isChecked() + ) + + def _on_removed_instances_check(self): + self._instances_proxy.set_ignore_removed( + self._removed_instances_check.isChecked() + ) + + def _on_details_popup(self): + self._details_widget.setVisible(False) + self._details_popup.show() + + def _on_popup_close(self): + self._details_widget.setVisible(True) + layout = self._details_widget.layout() + layout.insertWidget(0, self._details_tab_widget) + + def close_details_popup(self): + if self._details_popup.isVisible(): + self._details_popup.close() diff --git a/client/ayon_core/tools/publisher/publish_report_viewer/window.py b/client/ayon_core/tools/publisher/publish_report_viewer/window.py new file mode 100644 index 0000000000..6427b915a8 --- /dev/null +++ b/client/ayon_core/tools/publisher/publish_report_viewer/window.py @@ -0,0 +1,638 @@ +import os +import json +import six +import uuid + +import appdirs +import arrow +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core import style +from ayon_core.resources import get_ayon_icon_filepath +from ayon_core.tools import resources +from ayon_core.tools.utils import ( + IconButton, + paint_image_with_color +) + +from ayon_core.tools.utils.delegates import PrettyTimeDelegate + +if __package__: + from .widgets import PublishReportViewerWidget + from .report_items import PublishReport +else: + from widgets import PublishReportViewerWidget + from report_items import PublishReport + + +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 +ITEM_CREATED_AT_ROLE = QtCore.Qt.UserRole + 2 + + +def get_reports_dir(): + """Root directory where publish reports are stored for next session. + + Returns: + str: Path to directory where reports are stored. 
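+
+    Example (typical locations; the exact path depends on the platform and
+    the 'appdirs' package's conventions):
+        Windows: C:/Users/<user>/AppData/Local/Ynput/AYON/publish_report_viewer
+        Linux: ~/.local/share/AYON/publish_report_viewer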
+ """ + + report_dir = os.path.join( + appdirs.user_data_dir("AYON", "Ynput"), + "publish_report_viewer" + ) + if not os.path.exists(report_dir): + os.makedirs(report_dir) + return report_dir + + +class PublishReportItem: + """Report item representing one file in report directory.""" + + def __init__(self, content): + changed = self._fix_content(content) + + report_path = os.path.join(get_reports_dir(), content["id"]) + file_modified = None + if os.path.exists(report_path): + file_modified = os.path.getmtime(report_path) + + created_at_obj = arrow.get(content["created_at"]).to("local") + created_at = created_at_obj.float_timestamp + + self.content = content + self.report_path = report_path + self.file_modified = file_modified + self.created_at = float(created_at) + self._loaded_label = content.get("label") + self._changed = changed + self.publish_report = PublishReport(content) + + @property + def version(self): + """Publish report version. + + Returns: + str: Publish report version. + """ + return self.content["report_version"] + + @property + def id(self): + """Publish report id. + + Returns: + str: Publish report id. + """ + + return self.content["id"] + + def get_label(self): + """Publish report label. + + Returns: + str: Publish report label showed in UI. + """ + + return self.content.get("label") or "Unfilled label" + + def set_label(self, label): + """Set publish report label. + + Args: + label (str): New publish report label. + """ + + if not label: + self.content.pop("label", None) + self.content["label"] = label + + label = property(get_label, set_label) + + @property + def loaded_label(self): + return self._loaded_label + + def mark_as_changed(self): + """Mark report as changed.""" + + self._changed = True + + def save(self): + """Save publish report to file.""" + + save = False + if ( + self._changed + or self._loaded_label != self.label + or not os.path.exists(self.report_path) + or self.file_modified != os.path.getmtime(self.report_path) + ): + save = True + + if not save: + return + + with open(self.report_path, "w") as stream: + json.dump(self.content, stream) + + self._loaded_label = self.content.get("label") + self._changed = False + self.file_modified = os.path.getmtime(self.report_path) + + @classmethod + def from_filepath(cls, filepath): + """Create report item from file. + + Args: + filepath (str): Path to report file. Content must be json. + + Returns: + PublishReportItem: Report item. 
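+
+        Example (path is illustrative):
+            item = PublishReportItem.from_filepath(
+                "/path/to/publish_report.json")
+            if item is not None:
+                print(item.get_label())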
+ """ + + if not os.path.exists(filepath): + return None + + try: + with open(filepath, "r") as stream: + content = json.load(stream) + + file_modified = os.path.getmtime(filepath) + changed = cls._fix_content(content, file_modified=file_modified) + obj = cls(content) + if changed: + obj.mark_as_changed() + return obj + + except Exception: + return None + + def remove_file(self): + """Remove report file.""" + + if os.path.exists(self.report_path): + os.remove(self.report_path) + + def update_file_content(self): + """Update report content in file.""" + + if not os.path.exists(self.report_path): + return + + file_modified = os.path.getmtime(self.report_path) + if file_modified == self.file_modified: + return + + with open(self.report_path, "r") as stream: + content = json.load(self.content, stream) + + item_id = content.get("id") + version = content.get("report_version") + if not item_id: + item_id = str(uuid.uuid4()) + content["id"] = item_id + + if not version: + version = "0.0.1" + content["report_version"] = version + + self.content = content + self.file_modified = file_modified + + @classmethod + def _fix_content(cls, content, file_modified=None): + """Fix content for backward compatibility of older report items. + + Args: + content (dict[str, Any]): Report content. + file_modified (Optional[float]): File modification time. + + Returns: + bool: True if content was changed, False otherwise. + """ + + # Fix created_at key + changed = cls._fix_created_at(content, file_modified) + + # NOTE backward compatibility for 'id' and 'report_version' is from + # 28.10.2022 https://github.com/ynput/OpenPype/pull/4040 + # We can probably safely remove it + + # Fix missing 'id' + item_id = content.get("id") + if not item_id: + item_id = str(uuid.uuid4()) + changed = True + content["id"] = item_id + + # Fix missing 'report_version' + if not content.get("report_version"): + changed = True + content["report_version"] = "0.0.1" + return changed + + @classmethod + def _fix_created_at(cls, content, file_modified): + # Key 'create_at' was added in report version 1.0.1 + created_at = content.get("created_at") + if created_at: + return False + + # Auto fix 'created_at', use file modification time if it is not set + # or current time if modification could not be received. + if file_modified is not None: + created_at_obj = arrow.Arrow.fromtimestamp(file_modified) + else: + created_at_obj = arrow.utcnow() + content["created_at"] = created_at_obj.to("local").isoformat() + return True + + +class PublisherReportHandler: + """Class handling storing publish report items.""" + + def __init__(self): + self._reports = None + self._reports_by_id = {} + + def reset(self): + self._reports = None + self._reports_by_id = {} + + def list_reports(self): + if self._reports is not None: + return self._reports + + reports = [] + reports_by_id = {} + report_dir = get_reports_dir() + for filename in os.listdir(report_dir): + ext = os.path.splitext(filename)[-1] + if ext == ".json": + continue + filepath = os.path.join(report_dir, filename) + item = PublishReportItem.from_filepath(filepath) + if item is not None: + reports.append(item) + reports_by_id[item.id] = item + + self._reports = reports + self._reports_by_id = reports_by_id + return reports + + def remove_report_item(self, item_id): + """Remove report item by id. + + Remove from cache and also remove the file with the content. + + Args: + item_id (str): Report item id. 
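+
+        Example (sketch, removing all stored reports):
+            handler = PublisherReportHandler()
+            for report in handler.list_reports():
+                handler.remove_report_item(report.id)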
+ """ + + item = self._reports_by_id.get(item_id) + if item: + try: + item.remove_file() + self._reports_by_id.get(item_id) + except Exception: + pass + + +class LoadedFilesModel(QtGui.QStandardItemModel): + header_labels = ("Reports", "Created") + + def __init__(self, *args, **kwargs): + super(LoadedFilesModel, self).__init__(*args, **kwargs) + + # Column count must be set before setting header data + self.setColumnCount(len(self.header_labels)) + for col, label in enumerate(self.header_labels): + self.setHeaderData(col, QtCore.Qt.Horizontal, label) + + self._items_by_id = {} + self._report_items_by_id = {} + + self._handler = PublisherReportHandler() + + self._loading_registry = False + + def refresh(self): + root_item = self.invisibleRootItem() + if root_item.rowCount() > 0: + root_item.removeRows(0, root_item.rowCount()) + self._items_by_id = {} + self._report_items_by_id = {} + + self._handler.reset() + + new_items = [] + for report_item in self._handler.list_reports(): + item = self._create_item(report_item) + self._report_items_by_id[report_item.id] = report_item + self._items_by_id[report_item.id] = item + new_items.append(item) + + if new_items: + root_item = self.invisibleRootItem() + root_item.appendRows(new_items) + + def data(self, index, role=None): + if role is None: + role = QtCore.Qt.DisplayRole + + col = index.column() + if col == 1: + if role in ( + QtCore.Qt.DisplayRole, QtCore.Qt.InitialSortOrderRole + ): + role = ITEM_CREATED_AT_ROLE + + if col != 0: + index = self.index(index.row(), 0, index.parent()) + + return super(LoadedFilesModel, self).data(index, role) + + def setData(self, index, value, role=None): + if role is None: + role = QtCore.Qt.EditRole + + if role == QtCore.Qt.EditRole: + item_id = index.data(ITEM_ID_ROLE) + report_item = self._report_items_by_id.get(item_id) + if report_item is not None: + report_item.label = value + report_item.save() + value = report_item.label + + return super(LoadedFilesModel, self).setData(index, value, role) + + def flags(self, index): + # Allow editable flag only for first column + if index.column() > 0: + return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled + return super(LoadedFilesModel, self).flags(index) + + def _create_item(self, report_item): + if report_item.id in self._items_by_id: + return None + + item = QtGui.QStandardItem(report_item.label) + item.setColumnCount(self.columnCount()) + item.setData(report_item.id, ITEM_ID_ROLE) + item.setData(report_item.created_at, ITEM_CREATED_AT_ROLE) + + return item + + def add_filepaths(self, filepaths): + if not filepaths: + return + + if isinstance(filepaths, six.string_types): + filepaths = [filepaths] + + filtered_paths = [] + for filepath in filepaths: + normalized_path = os.path.normpath(filepath) + if ( + os.path.exists(normalized_path) + and normalized_path not in filtered_paths + ): + filtered_paths.append(normalized_path) + + if not filtered_paths: + return + + new_items = [] + for normalized_path in filtered_paths: + report_item = PublishReportItem.from_filepath(normalized_path) + if report_item is None: + continue + + # Skip already added report items + # QUESTION: Should we replace existing or skip the item? 
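+            # Duplicates happen when the same exported report is loaded from
+            # two different files, because identity is the "id" stored inside
+            # the JSON content, not the filename. Sketch (filenames are
+            # hypothetical):
+            #
+            #     a = PublishReportItem.from_filepath("/tmp/report.json")
+            #     b = PublishReportItem.from_filepath("/tmp/report_copy.json")
+            #     assert a.id == b.id  # same content id -> second is skipped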
+ if report_item.id in self._items_by_id: + continue + + if not report_item.loaded_label: + report_item.label = ( + os.path.splitext(os.path.basename(filepath))[0] + ) + + item = self._create_item(report_item) + if item is None: + continue + + new_items.append(item) + report_item.save() + self._items_by_id[report_item.id] = item + self._report_items_by_id[report_item.id] = report_item + + if new_items: + root_item = self.invisibleRootItem() + root_item.appendRows(new_items) + + def remove_item_by_id(self, item_id): + self._handler.remove_report_item(item_id) + + self._report_items_by_id.pop(item_id, None) + item = self._items_by_id.pop(item_id, None) + if item is not None: + parent = self.invisibleRootItem() + parent.removeRow(item.row()) + + def get_report_by_id(self, item_id): + report_item = self._report_items_by_id.get(item_id) + if report_item: + return report_item.publish_report + return None + + +class LoadedFilesView(QtWidgets.QTreeView): + selection_changed = QtCore.Signal() + + def __init__(self, *args, **kwargs): + super(LoadedFilesView, self).__init__(*args, **kwargs) + self.setEditTriggers( + QtWidgets.QAbstractItemView.EditKeyPressed + | QtWidgets.QAbstractItemView.SelectedClicked + | QtWidgets.QAbstractItemView.DoubleClicked + ) + self.setIndentation(0) + self.setAlternatingRowColors(True) + self.setSortingEnabled(True) + + model = LoadedFilesModel() + proxy_model = QtCore.QSortFilterProxyModel() + proxy_model.setSourceModel(model) + self.setModel(proxy_model) + + time_delegate = PrettyTimeDelegate() + self.setItemDelegateForColumn(1, time_delegate) + + self.sortByColumn(1, QtCore.Qt.AscendingOrder) + + remove_btn = IconButton(self) + remove_icon_path = resources.get_icon_path("delete") + loaded_remove_image = QtGui.QImage(remove_icon_path) + pix = paint_image_with_color(loaded_remove_image, QtCore.Qt.white) + icon = QtGui.QIcon(pix) + remove_btn.setIcon(icon) + + model.rowsInserted.connect(self._on_rows_inserted) + remove_btn.clicked.connect(self._on_remove_clicked) + self.selectionModel().selectionChanged.connect( + self._on_selection_change + ) + + self._model = model + self._proxy_model = proxy_model + self._time_delegate = time_delegate + self._remove_btn = remove_btn + + def _update_remove_btn(self): + viewport = self.viewport() + height = viewport.height() + self.header().height() + pos_x = viewport.width() - self._remove_btn.width() - 5 + pos_y = height - self._remove_btn.height() - 5 + self._remove_btn.move(max(0, pos_x), max(0, pos_y)) + + def _on_rows_inserted(self): + header = self.header() + header.resizeSections(QtWidgets.QHeaderView.ResizeToContents) + self._update_remove_btn() + + def resizeEvent(self, event): + super(LoadedFilesView, self).resizeEvent(event) + self._update_remove_btn() + + def showEvent(self, event): + super(LoadedFilesView, self).showEvent(event) + self._model.refresh() + header = self.header() + header.resizeSections(QtWidgets.QHeaderView.ResizeToContents) + self._update_remove_btn() + + def _on_selection_change(self): + self.selection_changed.emit() + + def add_filepaths(self, filepaths): + self._model.add_filepaths(filepaths) + self._fill_selection() + + def remove_item_by_id(self, item_id): + self._model.remove_item_by_id(item_id) + self._fill_selection() + + def _on_remove_clicked(self): + index = self.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + self.remove_item_by_id(item_id) + + def _fill_selection(self): + index = self.currentIndex() + if index.isValid(): + return + + model = self.model() + index = model.index(0, 0) + if 
index.isValid(): + self.setCurrentIndex(index) + + def get_current_report(self): + index = self.currentIndex() + item_id = index.data(ITEM_ID_ROLE) + return self._model.get_report_by_id(item_id) + + +class LoadedFilesWidget(QtWidgets.QWidget): + report_changed = QtCore.Signal() + + def __init__(self, parent): + super(LoadedFilesWidget, self).__init__(parent) + + self.setAcceptDrops(True) + + view = LoadedFilesView(self) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(view, 1) + + view.selection_changed.connect(self._on_report_change) + + self._view = view + + def dragEnterEvent(self, event): + mime_data = event.mimeData() + if mime_data.hasUrls(): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + def dragLeaveEvent(self, event): + event.accept() + + def dropEvent(self, event): + mime_data = event.mimeData() + if mime_data.hasUrls(): + filepaths = [] + for url in mime_data.urls(): + filepath = url.toLocalFile() + ext = os.path.splitext(filepath)[-1] + if os.path.exists(filepath) and ext == ".json": + filepaths.append(filepath) + self._add_filepaths(filepaths) + event.accept() + + def _on_report_change(self): + self.report_changed.emit() + + def _add_filepaths(self, filepaths): + self._view.add_filepaths(filepaths) + + def get_current_report(self): + return self._view.get_current_report() + + +class PublishReportViewerWindow(QtWidgets.QWidget): + default_width = 1200 + default_height = 600 + + def __init__(self, parent=None): + super(PublishReportViewerWindow, self).__init__(parent) + self.setWindowTitle("Publish report viewer") + icon = QtGui.QIcon(get_ayon_icon_filepath()) + self.setWindowIcon(icon) + + body = QtWidgets.QSplitter(self) + body.setContentsMargins(0, 0, 0, 0) + body.setSizePolicy( + QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding + ) + body.setOrientation(QtCore.Qt.Horizontal) + + loaded_files_widget = LoadedFilesWidget(body) + main_widget = PublishReportViewerWidget(body) + + body.addWidget(loaded_files_widget) + body.addWidget(main_widget) + body.setStretchFactor(0, 70) + body.setStretchFactor(1, 65) + + layout = QtWidgets.QHBoxLayout(self) + layout.addWidget(body, 1) + + loaded_files_widget.report_changed.connect(self._on_report_change) + + self._loaded_files_widget = loaded_files_widget + self._main_widget = main_widget + + self.resize(self.default_width, self.default_height) + self.setStyleSheet(style.load_stylesheet()) + + def _on_report_change(self): + report = self._loaded_files_widget.get_current_report() + self.set_report(report) + + def set_report(self, report_data): + self._main_widget.set_report(report_data) diff --git a/openpype/tools/publisher/widgets/__init__.py b/client/ayon_core/tools/publisher/widgets/__init__.py similarity index 100% rename from openpype/tools/publisher/widgets/__init__.py rename to client/ayon_core/tools/publisher/widgets/__init__.py diff --git a/client/ayon_core/tools/publisher/widgets/assets_widget.py b/client/ayon_core/tools/publisher/widgets/assets_widget.py new file mode 100644 index 0000000000..8a72c03e8b --- /dev/null +++ b/client/ayon_core/tools/publisher/widgets/assets_widget.py @@ -0,0 +1,360 @@ +import collections + +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core.tools.utils import ( + PlaceholderLineEdit, + RecursiveSortFilterProxyModel, + get_asset_icon, +) +from ayon_core.tools.utils.assets_widget import ( + SingleSelectAssetsWidget, + ASSET_ID_ROLE, + ASSET_NAME_ROLE, + ASSET_PATH_ROLE, +) + + +class 
CreateWidgetAssetsWidget(SingleSelectAssetsWidget): + current_context_required = QtCore.Signal() + header_height_changed = QtCore.Signal(int) + + def __init__(self, controller, parent): + self._controller = controller + super(CreateWidgetAssetsWidget, self).__init__(None, parent) + + self.set_refresh_btn_visibility(False) + self.set_current_asset_btn_visibility(False) + + self._last_selection = None + self._enabled = None + + self._last_filter_height = None + + def get_selected_asset_name(self): + selection_model = self._view.selectionModel() + indexes = selection_model.selectedRows() + for index in indexes: + return index.data(ASSET_PATH_ROLE) + return None + + def _check_header_height(self): + """Catch header height changes. + + Label on top of creaters should have same height so Creators view has + same offset. + """ + height = self.header_widget.height() + if height != self._last_filter_height: + self._last_filter_height = height + self.header_height_changed.emit(height) + + def resizeEvent(self, event): + super(CreateWidgetAssetsWidget, self).resizeEvent(event) + self._check_header_height() + + def showEvent(self, event): + super(CreateWidgetAssetsWidget, self).showEvent(event) + self._check_header_height() + + def _on_current_asset_click(self): + self.current_context_required.emit() + + def set_enabled(self, enabled): + if self._enabled == enabled: + return + self._enabled = enabled + if not enabled: + self._last_selection = self.get_selected_asset_id() + self._clear_selection() + elif self._last_selection is not None: + self.select_asset(self._last_selection) + + def _select_indexes(self, *args, **kwargs): + super(CreateWidgetAssetsWidget, self)._select_indexes(*args, **kwargs) + if self._enabled: + return + self._last_selection = self.get_selected_asset_id() + self._clear_selection() + + def update_current_asset(self): + # Hide set current asset if there is no one + asset_name = self._get_current_session_asset() + self.set_current_asset_btn_visibility(bool(asset_name)) + + def _get_current_session_asset(self): + return self._controller.current_asset_name + + def _create_source_model(self): + return AssetsHierarchyModel(self._controller) + + def _refresh_model(self): + self._model.reset() + self._on_model_refresh(self._model.rowCount() > 0) + + +class AssetsHierarchyModel(QtGui.QStandardItemModel): + """Assets hierarchy model. + + For selecting asset for which an instance should be created. + + Uses controller to load asset hierarchy. All asset documents are stored by + their parents. 
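+
+    Example (shape of the controller's 'get_asset_hierarchy' output that
+    'reset' consumes; ids and names are illustrative):
+        {
+            None: [{"_id": "a1", "name": "assets"}],
+            "a1": [
+                {"_id": "b1", "name": "hero"},
+                {"_id": "b2", "name": "city"},
+            ],
+        }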
+ """ + + def __init__(self, controller): + super(AssetsHierarchyModel, self).__init__() + self._controller = controller + + self._items_by_name = {} + self._items_by_path = {} + self._items_by_asset_id = {} + + def reset(self): + self.clear() + + self._items_by_name = {} + self._items_by_path = {} + self._items_by_asset_id = {} + assets_by_parent_id = self._controller.get_asset_hierarchy() + + items_by_name = {} + items_by_path = {} + items_by_asset_id = {} + _queue = collections.deque() + _queue.append((self.invisibleRootItem(), None, None)) + while _queue: + parent_item, parent_id, parent_path = _queue.popleft() + children = assets_by_parent_id.get(parent_id) + if not children: + continue + + children_by_name = { + child["name"]: child + for child in children + } + items = [] + for name in sorted(children_by_name.keys()): + child = children_by_name[name] + child_id = child["_id"] + if parent_path: + child_path = "{}/{}".format(parent_path, name) + else: + child_path = "/{}".format(name) + + has_children = bool(assets_by_parent_id.get(child_id)) + icon = get_asset_icon(child, has_children) + + item = QtGui.QStandardItem(name) + item.setFlags( + QtCore.Qt.ItemIsEnabled + | QtCore.Qt.ItemIsSelectable + ) + item.setData(icon, QtCore.Qt.DecorationRole) + item.setData(child_id, ASSET_ID_ROLE) + item.setData(name, ASSET_NAME_ROLE) + item.setData(child_path, ASSET_PATH_ROLE) + + items_by_name[name] = item + items_by_path[child_path] = item + items_by_asset_id[child_id] = item + items.append(item) + _queue.append((item, child_id, child_path)) + + parent_item.appendRows(items) + + self._items_by_name = items_by_name + self._items_by_path = items_by_path + self._items_by_asset_id = items_by_asset_id + + def get_index_by_asset_id(self, asset_id): + item = self._items_by_asset_id.get(asset_id) + if item is not None: + return item.index() + return QtCore.QModelIndex() + + def get_index_by_asset_name(self, asset_name): + item = self._items_by_path.get(asset_name) + if item is None: + item = self._items_by_name.get(asset_name) + + if item is None: + return QtCore.QModelIndex() + return item.index() + + def name_is_valid(self, item_name): + return item_name in self._items_by_path + + +class AssetDialogView(QtWidgets.QTreeView): + double_clicked = QtCore.Signal(QtCore.QModelIndex) + + def mouseDoubleClickEvent(self, event): + index = self.indexAt(event.pos()) + if index.isValid(): + self.double_clicked.emit(index) + event.accept() + + +class AssetsDialog(QtWidgets.QDialog): + """Dialog to select asset for a context of instance.""" + + def __init__(self, controller, parent): + super(AssetsDialog, self).__init__(parent) + self.setWindowTitle("Select asset") + + model = AssetsHierarchyModel(controller) + proxy_model = RecursiveSortFilterProxyModel() + proxy_model.setSourceModel(model) + proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + + filter_input = PlaceholderLineEdit(self) + filter_input.setPlaceholderText("Filter folders..") + + asset_view = AssetDialogView(self) + asset_view.setModel(proxy_model) + asset_view.setHeaderHidden(True) + asset_view.setFrameShape(QtWidgets.QFrame.NoFrame) + asset_view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) + asset_view.setAlternatingRowColors(True) + asset_view.setSelectionBehavior(QtWidgets.QTreeView.SelectRows) + asset_view.setAllColumnsShowFocus(True) + + ok_btn = QtWidgets.QPushButton("OK", self) + cancel_btn = QtWidgets.QPushButton("Cancel", self) + + btns_layout = QtWidgets.QHBoxLayout() + btns_layout.addStretch(1) + 
btns_layout.addWidget(ok_btn) + btns_layout.addWidget(cancel_btn) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(filter_input, 0) + layout.addWidget(asset_view, 1) + layout.addLayout(btns_layout, 0) + + controller.event_system.add_callback( + "controller.reset.finished", self._on_controller_reset + ) + + asset_view.double_clicked.connect(self._on_ok_clicked) + filter_input.textChanged.connect(self._on_filter_change) + ok_btn.clicked.connect(self._on_ok_clicked) + cancel_btn.clicked.connect(self._on_cancel_clicked) + + self._filter_input = filter_input + self._ok_btn = ok_btn + self._cancel_btn = cancel_btn + + self._model = model + self._proxy_model = proxy_model + + self._asset_view = asset_view + + self._selected_asset = None + # Soft refresh is enabled + # - reset will happen at all cost if soft reset is enabled + # - adds ability to call reset on multiple places without repeating + self._soft_reset_enabled = True + + self._first_show = True + self._default_height = 500 + + def _on_first_show(self): + center = self.rect().center() + size = self.size() + size.setHeight(self._default_height) + + self.resize(size) + new_pos = self.mapToGlobal(center) + new_pos.setX(new_pos.x() - int(self.width() / 2)) + new_pos.setY(new_pos.y() - int(self.height() / 2)) + self.move(new_pos) + + def _on_controller_reset(self): + # Change reset enabled so model is reset on show event + self._soft_reset_enabled = True + + def showEvent(self, event): + """Refresh asset model on show.""" + super(AssetsDialog, self).showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() + # Refresh on show + self.reset(False) + + def reset(self, force=True): + """Reset asset model.""" + if not force and not self._soft_reset_enabled: + return + + if self._soft_reset_enabled: + self._soft_reset_enabled = False + + self._model.reset() + + def name_is_valid(self, name): + """Is asset name valid. + + Args: + name(str): Asset name that should be checked. + """ + # Make sure we're reset + self.reset(False) + # Valid the name by model + return self._model.name_is_valid(name) + + def _on_filter_change(self, text): + """Trigger change of filter of assets.""" + self._proxy_model.setFilterFixedString(text) + + def _on_cancel_clicked(self): + self.done(0) + + def _on_ok_clicked(self): + index = self._asset_view.currentIndex() + asset_name = None + if index.isValid(): + asset_name = index.data(ASSET_PATH_ROLE) + self._selected_asset = asset_name + self.done(1) + + def set_selected_assets(self, asset_names): + """Change preselected asset before showing the dialog. + + This also resets model and clean filter. 
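+
+        Example (folder path is illustrative):
+            dialog.set_selected_assets(["/assets/characters/hero"])
+            if dialog.exec_():
+                print(dialog.get_selected_asset())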
+ """ + self.reset(False) + self._asset_view.collapseAll() + self._filter_input.setText("") + + indexes = [] + for asset_name in asset_names: + index = self._model.get_index_by_asset_name(asset_name) + if index.isValid(): + indexes.append(index) + + if not indexes: + return + + index_deque = collections.deque() + for index in indexes: + index_deque.append(index) + + all_indexes = [] + while index_deque: + index = index_deque.popleft() + all_indexes.append(index) + + parent_index = index.parent() + if parent_index.isValid(): + index_deque.append(parent_index) + + for index in all_indexes: + proxy_index = self._proxy_model.mapFromSource(index) + self._asset_view.expand(proxy_index) + + def get_selected_asset(self): + """Get selected asset name.""" + return self._selected_asset diff --git a/openpype/tools/publisher/widgets/border_label_widget.py b/client/ayon_core/tools/publisher/widgets/border_label_widget.py similarity index 99% rename from openpype/tools/publisher/widgets/border_label_widget.py rename to client/ayon_core/tools/publisher/widgets/border_label_widget.py index e5693368b1..324c70df34 100644 --- a/openpype/tools/publisher/widgets/border_label_widget.py +++ b/client/ayon_core/tools/publisher/widgets/border_label_widget.py @@ -2,7 +2,7 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import get_objected_colors +from ayon_core.style import get_objected_colors class _VLineWidget(QtWidgets.QWidget): diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py similarity index 99% rename from openpype/tools/publisher/widgets/card_view_widgets.py rename to client/ayon_core/tools/publisher/widgets/card_view_widgets.py index 5cdd429cd4..3396110121 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py @@ -25,10 +25,10 @@ from qtpy import QtWidgets, QtCore -from openpype.widgets.nice_checkbox import NiceCheckbox +from ayon_core.tools.utils import NiceCheckbox -from openpype.tools.utils import BaseClickableFrame -from openpype.tools.utils.lib import html_escape +from ayon_core.tools.utils import BaseClickableFrame +from ayon_core.tools.utils.lib import html_escape from .widgets import ( AbstractInstanceView, ContextWarningLabel, diff --git a/openpype/tools/publisher/widgets/create_widget.py b/client/ayon_core/tools/publisher/widgets/create_widget.py similarity index 98% rename from openpype/tools/publisher/widgets/create_widget.py rename to client/ayon_core/tools/publisher/widgets/create_widget.py index 73dcae51a5..12135c6891 100644 --- a/openpype/tools/publisher/widgets/create_widget.py +++ b/client/ayon_core/tools/publisher/widgets/create_widget.py @@ -2,8 +2,7 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline.create import ( +from ayon_core.pipeline.create import ( SUBSET_NAME_ALLOWED_SYMBOLS, PRE_CREATE_THUMBNAIL_KEY, DEFAULT_VARIANT_VALUE, @@ -205,9 +204,7 @@ def __init__(self, controller, parent=None): variant_subset_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING) variant_subset_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING) variant_subset_layout.addRow("Variant", variant_widget) - variant_subset_layout.addRow( - "Product" if AYON_SERVER_ENABLED else "Subset", - subset_name_input) + variant_subset_layout.addRow("Product", subset_name_input) creator_basics_layout = QtWidgets.QVBoxLayout(creator_basics_widget) creator_basics_layout.setContentsMargins(0, 
0, 0, 0) @@ -816,13 +813,8 @@ def _on_create(self): # Where to define these data? # - what data show be stored? - if AYON_SERVER_ENABLED: - asset_key = "folderPath" - else: - asset_key = "asset" - instance_data = { - asset_key: asset_name, + "folderPath": asset_name, "task": task_name, "variant": variant, "family": family diff --git a/openpype/tools/publisher/widgets/help_widget.py b/client/ayon_core/tools/publisher/widgets/help_widget.py similarity index 100% rename from openpype/tools/publisher/widgets/help_widget.py rename to client/ayon_core/tools/publisher/widgets/help_widget.py diff --git a/openpype/tools/publisher/widgets/icons.py b/client/ayon_core/tools/publisher/widgets/icons.py similarity index 100% rename from openpype/tools/publisher/widgets/icons.py rename to client/ayon_core/tools/publisher/widgets/icons.py diff --git a/openpype/tools/publisher/widgets/images/add.png b/client/ayon_core/tools/publisher/widgets/images/add.png similarity index 100% rename from openpype/tools/publisher/widgets/images/add.png rename to client/ayon_core/tools/publisher/widgets/images/add.png diff --git a/openpype/tools/publisher/widgets/images/branch_closed.png b/client/ayon_core/tools/publisher/widgets/images/branch_closed.png similarity index 100% rename from openpype/tools/publisher/widgets/images/branch_closed.png rename to client/ayon_core/tools/publisher/widgets/images/branch_closed.png diff --git a/openpype/tools/publisher/widgets/images/branch_open.png b/client/ayon_core/tools/publisher/widgets/images/branch_open.png similarity index 100% rename from openpype/tools/publisher/widgets/images/branch_open.png rename to client/ayon_core/tools/publisher/widgets/images/branch_open.png diff --git a/openpype/tools/publisher/widgets/images/browse.png b/client/ayon_core/tools/publisher/widgets/images/browse.png similarity index 100% rename from openpype/tools/publisher/widgets/images/browse.png rename to client/ayon_core/tools/publisher/widgets/images/browse.png diff --git a/openpype/tools/publisher/widgets/images/change_view.png b/client/ayon_core/tools/publisher/widgets/images/change_view.png similarity index 100% rename from openpype/tools/publisher/widgets/images/change_view.png rename to client/ayon_core/tools/publisher/widgets/images/change_view.png diff --git a/openpype/tools/publisher/widgets/images/clear_thumbnail.png b/client/ayon_core/tools/publisher/widgets/images/clear_thumbnail.png similarity index 100% rename from openpype/tools/publisher/widgets/images/clear_thumbnail.png rename to client/ayon_core/tools/publisher/widgets/images/clear_thumbnail.png diff --git a/openpype/tools/publisher/widgets/images/create.png b/client/ayon_core/tools/publisher/widgets/images/create.png similarity index 100% rename from openpype/tools/publisher/widgets/images/create.png rename to client/ayon_core/tools/publisher/widgets/images/create.png diff --git a/openpype/tools/publisher/widgets/images/error.png b/client/ayon_core/tools/publisher/widgets/images/error.png similarity index 100% rename from openpype/tools/publisher/widgets/images/error.png rename to client/ayon_core/tools/publisher/widgets/images/error.png diff --git a/openpype/tools/publisher/widgets/images/minus.png b/client/ayon_core/tools/publisher/widgets/images/minus.png similarity index 100% rename from openpype/tools/publisher/widgets/images/minus.png rename to client/ayon_core/tools/publisher/widgets/images/minus.png diff --git a/openpype/tools/publisher/widgets/images/options.png 
b/client/ayon_core/tools/publisher/widgets/images/options.png similarity index 100% rename from openpype/tools/publisher/widgets/images/options.png rename to client/ayon_core/tools/publisher/widgets/images/options.png diff --git a/openpype/tools/publisher/widgets/images/paste.png b/client/ayon_core/tools/publisher/widgets/images/paste.png similarity index 100% rename from openpype/tools/publisher/widgets/images/paste.png rename to client/ayon_core/tools/publisher/widgets/images/paste.png diff --git a/openpype/tools/publisher/widgets/images/play.png b/client/ayon_core/tools/publisher/widgets/images/play.png similarity index 100% rename from openpype/tools/publisher/widgets/images/play.png rename to client/ayon_core/tools/publisher/widgets/images/play.png diff --git a/openpype/tools/publisher/widgets/images/refresh.png b/client/ayon_core/tools/publisher/widgets/images/refresh.png similarity index 100% rename from openpype/tools/publisher/widgets/images/refresh.png rename to client/ayon_core/tools/publisher/widgets/images/refresh.png diff --git a/openpype/tools/publisher/widgets/images/save.png b/client/ayon_core/tools/publisher/widgets/images/save.png similarity index 100% rename from openpype/tools/publisher/widgets/images/save.png rename to client/ayon_core/tools/publisher/widgets/images/save.png diff --git a/openpype/tools/publisher/widgets/images/stop.png b/client/ayon_core/tools/publisher/widgets/images/stop.png similarity index 100% rename from openpype/tools/publisher/widgets/images/stop.png rename to client/ayon_core/tools/publisher/widgets/images/stop.png diff --git a/openpype/tools/publisher/widgets/images/success.png b/client/ayon_core/tools/publisher/widgets/images/success.png similarity index 100% rename from openpype/tools/publisher/widgets/images/success.png rename to client/ayon_core/tools/publisher/widgets/images/success.png diff --git a/openpype/tools/publisher/widgets/images/take_screenshot.png b/client/ayon_core/tools/publisher/widgets/images/take_screenshot.png similarity index 100% rename from openpype/tools/publisher/widgets/images/take_screenshot.png rename to client/ayon_core/tools/publisher/widgets/images/take_screenshot.png diff --git a/openpype/tools/publisher/widgets/images/thumbnail.png b/client/ayon_core/tools/publisher/widgets/images/thumbnail.png similarity index 100% rename from openpype/tools/publisher/widgets/images/thumbnail.png rename to client/ayon_core/tools/publisher/widgets/images/thumbnail.png diff --git a/openpype/tools/publisher/widgets/images/validate.png b/client/ayon_core/tools/publisher/widgets/images/validate.png similarity index 100% rename from openpype/tools/publisher/widgets/images/validate.png rename to client/ayon_core/tools/publisher/widgets/images/validate.png diff --git a/openpype/tools/publisher/widgets/images/view_report.png b/client/ayon_core/tools/publisher/widgets/images/view_report.png similarity index 100% rename from openpype/tools/publisher/widgets/images/view_report.png rename to client/ayon_core/tools/publisher/widgets/images/view_report.png diff --git a/openpype/tools/publisher/widgets/images/warning.png b/client/ayon_core/tools/publisher/widgets/images/warning.png similarity index 100% rename from openpype/tools/publisher/widgets/images/warning.png rename to client/ayon_core/tools/publisher/widgets/images/warning.png diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py similarity index 99% rename from 
openpype/tools/publisher/widgets/list_view_widgets.py rename to client/ayon_core/tools/publisher/widgets/list_view_widgets.py index 3370f71701..fc76c47334 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py @@ -26,9 +26,9 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import get_objected_colors -from openpype.widgets.nice_checkbox import NiceCheckbox -from openpype.tools.utils.lib import html_escape, checkstate_int_to_enum +from ayon_core.style import get_objected_colors +from ayon_core.tools.utils import NiceCheckbox +from ayon_core.tools.utils.lib import html_escape, checkstate_int_to_enum from .widgets import AbstractInstanceView from ..constants import ( INSTANCE_ID_ROLE, diff --git a/openpype/tools/publisher/widgets/overview_widget.py b/client/ayon_core/tools/publisher/widgets/overview_widget.py similarity index 99% rename from openpype/tools/publisher/widgets/overview_widget.py rename to client/ayon_core/tools/publisher/widgets/overview_widget.py index 10151250f6..f1b271850a 100644 --- a/openpype/tools/publisher/widgets/overview_widget.py +++ b/client/ayon_core/tools/publisher/widgets/overview_widget.py @@ -1,7 +1,5 @@ from qtpy import QtWidgets, QtCore -from openpype import AYON_SERVER_ENABLED - from .border_label_widget import BorderedLabelWidget from .card_view_widgets import InstanceCardView @@ -37,9 +35,7 @@ def __init__(self, controller, parent): # --- Created Subsets/Instances --- # Common widget for creation and overview subset_views_widget = BorderedLabelWidget( - "{} to publish".format( - "Products" if AYON_SERVER_ENABLED else "Subsets" - ), + "Products to publish", subset_content_widget ) diff --git a/openpype/tools/publisher/widgets/precreate_widget.py b/client/ayon_core/tools/publisher/widgets/precreate_widget.py similarity index 98% rename from openpype/tools/publisher/widgets/precreate_widget.py rename to client/ayon_core/tools/publisher/widgets/precreate_widget.py index 3bf0bc3657..ae0deb8410 100644 --- a/openpype/tools/publisher/widgets/precreate_widget.py +++ b/client/ayon_core/tools/publisher/widgets/precreate_widget.py @@ -1,6 +1,6 @@ from qtpy import QtWidgets, QtCore -from openpype.tools.attribute_defs import create_widget_for_attr_def +from ayon_core.tools.attribute_defs import create_widget_for_attr_def from ..constants import INPUTS_LAYOUT_HSPACING, INPUTS_LAYOUT_VSPACING diff --git a/openpype/tools/publisher/widgets/publish_frame.py b/client/ayon_core/tools/publisher/widgets/publish_frame.py similarity index 100% rename from openpype/tools/publisher/widgets/publish_frame.py rename to client/ayon_core/tools/publisher/widgets/publish_frame.py diff --git a/openpype/tools/publisher/widgets/report_page.py b/client/ayon_core/tools/publisher/widgets/report_page.py similarity index 99% rename from openpype/tools/publisher/widgets/report_page.py rename to client/ayon_core/tools/publisher/widgets/report_page.py index 50a619f0a8..c4a37da887 100644 --- a/openpype/tools/publisher/widgets/report_page.py +++ b/client/ayon_core/tools/publisher/widgets/report_page.py @@ -9,8 +9,8 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import get_objected_colors -from openpype.tools.utils import ( +from ayon_core.style import get_objected_colors +from ayon_core.tools.utils import ( BaseClickableFrame, ClickableFrame, ExpandingTextEdit, diff --git a/openpype/tools/publisher/widgets/screenshot_widget.py b/client/ayon_core/tools/publisher/widgets/screenshot_widget.py 
similarity index 100% rename from openpype/tools/publisher/widgets/screenshot_widget.py rename to client/ayon_core/tools/publisher/widgets/screenshot_widget.py diff --git a/openpype/tools/publisher/widgets/tabs_widget.py b/client/ayon_core/tools/publisher/widgets/tabs_widget.py similarity index 98% rename from openpype/tools/publisher/widgets/tabs_widget.py rename to client/ayon_core/tools/publisher/widgets/tabs_widget.py index 4b87b76178..e484dc8681 100644 --- a/openpype/tools/publisher/widgets/tabs_widget.py +++ b/client/ayon_core/tools/publisher/widgets/tabs_widget.py @@ -1,5 +1,5 @@ from qtpy import QtWidgets, QtCore -from openpype.tools.utils import set_style_property +from ayon_core.tools.utils import set_style_property class PublisherTabBtn(QtWidgets.QPushButton): diff --git a/client/ayon_core/tools/publisher/widgets/tasks_widget.py b/client/ayon_core/tools/publisher/widgets/tasks_widget.py new file mode 100644 index 0000000000..44e290408a --- /dev/null +++ b/client/ayon_core/tools/publisher/widgets/tasks_widget.py @@ -0,0 +1,183 @@ +from qtpy import QtCore, QtGui + +from ayon_core.tools.utils.tasks_widget import TasksWidget, TASK_NAME_ROLE +from ayon_core.tools.utils.lib import get_default_task_icon + + +class TasksModel(QtGui.QStandardItemModel): + """Tasks model. + + Task model must have set context of asset documents. + + Items in model are based on 0-infinite asset documents. Always contain + an interserction of context asset tasks. When no assets are in context + them model is empty if 2 or more are in context assets that don't have + tasks with same names then model is empty too. + + Args: + controller (PublisherController): Controller which handles creation and + publishing. + """ + def __init__(self, controller, allow_empty_task=False): + super(TasksModel, self).__init__() + + self._allow_empty_task = allow_empty_task + self._controller = controller + self._items_by_name = {} + self._asset_names = [] + self._task_names_by_asset_name = {} + + def set_asset_names(self, asset_names): + """Set assets context.""" + self._asset_names = asset_names + self.reset() + + @staticmethod + def get_intersection_of_tasks(task_names_by_asset_name): + """Calculate intersection of task names from passed data. + + Example: + ``` + # Passed `task_names_by_asset_name` + { + "asset_1": ["compositing", "animation"], + "asset_2": ["compositing", "editorial"] + } + ``` + Result: + ``` + # Set + {"compositing"} + ``` + + Args: + task_names_by_asset_name (dict): Task names in iterable by parent. + """ + tasks = None + for task_names in task_names_by_asset_name.values(): + if tasks is None: + tasks = set(task_names) + else: + tasks &= set(task_names) + + if not tasks: + break + return tasks or set() + + def is_task_name_valid(self, asset_name, task_name): + """Is task name available for asset. + + Args: + asset_name (str): Name of asset where should look for task. + task_name (str): Name of task which should be available in asset's + tasks. 
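+
+        Example (asset and task names are illustrative):
+            model.set_asset_names(["/assets/hero", "/assets/city"])
+            model.is_task_name_valid("/assets/hero", "modeling")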
+ """ + if asset_name not in self._task_names_by_asset_name: + return False + + if self._allow_empty_task and not task_name: + return True + + task_names = self._task_names_by_asset_name[asset_name] + if task_name in task_names: + return True + return False + + def reset(self): + """Update model by current context.""" + if not self._asset_names: + self._items_by_name = {} + self._task_names_by_asset_name = {} + self.clear() + return + + task_names_by_asset_name = ( + self._controller.get_task_names_by_asset_names(self._asset_names) + ) + + self._task_names_by_asset_name = task_names_by_asset_name + + new_task_names = self.get_intersection_of_tasks( + task_names_by_asset_name + ) + if self._allow_empty_task: + new_task_names.add("") + old_task_names = set(self._items_by_name.keys()) + if new_task_names == old_task_names: + return + + root_item = self.invisibleRootItem() + for task_name in old_task_names: + if task_name not in new_task_names: + item = self._items_by_name.pop(task_name) + root_item.removeRow(item.row()) + + new_items = [] + for task_name in new_task_names: + if task_name in self._items_by_name: + continue + + item = QtGui.QStandardItem(task_name) + item.setData(task_name, TASK_NAME_ROLE) + if task_name: + item.setData(get_default_task_icon(), QtCore.Qt.DecorationRole) + self._items_by_name[task_name] = item + new_items.append(item) + + if new_items: + root_item.appendRows(new_items) + + def headerData(self, section, orientation, role=None): + if role is None: + role = QtCore.Qt.EditRole + # Show nice labels in the header + if section == 0: + if ( + role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole) + and orientation == QtCore.Qt.Horizontal + ): + return "Tasks" + + return super(TasksModel, self).headerData(section, orientation, role) + + +class CreateWidgetTasksWidget(TasksWidget): + def __init__(self, controller, parent): + self._controller = controller + super(CreateWidgetTasksWidget, self).__init__(None, parent) + + self._enabled = None + + def _create_source_model(self): + return TasksModel(self._controller) + + def set_asset_name(self, asset_name): + current = self.get_selected_task_name() + if current: + self._last_selected_task_name = current + + self._tasks_model.set_asset_names([asset_name]) + if self._last_selected_task_name and self._enabled: + self.select_task_name(self._last_selected_task_name) + + # Force a task changed emit. 
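+        # Listeners are notified even if the selection did not change, so
+        # dependent widgets can refresh for the newly set asset.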
+ self.task_changed.emit() + + def select_task_name(self, task_name): + super(CreateWidgetTasksWidget, self).select_task_name(task_name) + if not self._enabled: + current = self.get_selected_task_name() + if current: + self._last_selected_task_name = current + self._clear_selection() + + def set_enabled(self, enabled): + self._enabled = enabled + if not enabled: + last_selected_task_name = self.get_selected_task_name() + if last_selected_task_name: + self._last_selected_task_name = last_selected_task_name + self._clear_selection() + + elif self._last_selected_task_name is not None: + self.select_task_name(self._last_selected_task_name) diff --git a/openpype/tools/publisher/widgets/thumbnail_widget.py b/client/ayon_core/tools/publisher/widgets/thumbnail_widget.py similarity index 98% rename from openpype/tools/publisher/widgets/thumbnail_widget.py rename to client/ayon_core/tools/publisher/widgets/thumbnail_widget.py index 60970710d8..07dc532534 100644 --- a/openpype/tools/publisher/widgets/thumbnail_widget.py +++ b/client/ayon_core/tools/publisher/widgets/thumbnail_widget.py @@ -3,23 +3,23 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import get_objected_colors -from openpype.lib import ( +from ayon_core.style import get_objected_colors +from ayon_core.lib import ( run_subprocess, is_oiio_supported, get_oiio_tool_args, get_ffmpeg_tool_args, ) -from openpype.lib.transcoding import ( +from ayon_core.lib.transcoding import ( IMAGE_EXTENSIONS, VIDEO_EXTENSIONS, ) -from openpype.tools.utils import ( +from ayon_core.tools.utils import ( paint_image_with_color, PixmapButton, ) -from openpype.tools.publisher.control import CardMessageTypes +from ayon_core.tools.publisher.control import CardMessageTypes from .icons import get_image from .screenshot_widget import capture_to_file diff --git a/client/ayon_core/tools/publisher/widgets/widgets.py b/client/ayon_core/tools/publisher/widgets/widgets.py new file mode 100644 index 0000000000..bd5ab250bd --- /dev/null +++ b/client/ayon_core/tools/publisher/widgets/widgets.py @@ -0,0 +1,2033 @@ +# -*- coding: utf-8 -*- +import os +import re +import copy +import functools +import uuid +import shutil +import collections +from qtpy import QtWidgets, QtCore, QtGui +import qtawesome + +from ayon_core.lib.attribute_definitions import UnknownDef +from ayon_core.tools.attribute_defs import create_widget_for_attr_def +from ayon_core.tools import resources +from ayon_core.tools.flickcharm import FlickCharm +from ayon_core.tools.utils import ( + PlaceholderLineEdit, + IconButton, + PixmapLabel, + BaseClickableFrame, + set_style_property, +) +from ayon_core.style import get_objected_colors +from ayon_core.pipeline.create import ( + SUBSET_NAME_ALLOWED_SYMBOLS, + TaskNotSetError, +) +from .thumbnail_widget import ThumbnailWidget +from .assets_widget import AssetsDialog +from .tasks_widget import TasksModel +from .icons import ( + get_pixmap, + get_icon_path +) + +from ..constants import ( + VARIANT_TOOLTIP, + ResetKeySequence, + INPUTS_LAYOUT_HSPACING, + INPUTS_LAYOUT_VSPACING, +) + +FA_PREFIXES = ["", "fa.", "fa5.", "fa5b.", "fa5s.", "ei.", "mdi."] + + +def parse_icon_def( + icon_def, default_width=None, default_height=None, color=None +): + if not icon_def: + return None + + if isinstance(icon_def, QtGui.QPixmap): + return icon_def + + color = color or "white" + default_width = default_width or 512 + default_height = default_height or 512 + + if isinstance(icon_def, QtGui.QIcon): + return icon_def.pixmap(default_width, default_height) + + try: + if 
os.path.exists(icon_def):
+            return QtGui.QPixmap(icon_def)
+    except Exception:
+        # TODO logging
+        pass
+
+    for prefix in FA_PREFIXES:
+        try:
+            icon_name = "{}{}".format(prefix, icon_def)
+            icon = qtawesome.icon(icon_name, color=color)
+            return icon.pixmap(default_width, default_height)
+        except Exception:
+            # TODO logging
+            continue
+
+
+class PublishPixmapLabel(PixmapLabel):
+    def _get_pix_size(self):
+        size = self.fontMetrics().height()
+        size += size % 2
+        return size, size
+
+
+class IconValuePixmapLabel(PublishPixmapLabel):
+    """Label resizing to width and height of font.
+
+    Handles icon parsing from creators/instances, using the qtawesome
+    module or a path to an image.
+    """
+    default_size = 200
+
+    def __init__(self, icon_def, parent):
+        source_pixmap = self._parse_icon_def(icon_def)
+
+        super(IconValuePixmapLabel, self).__init__(source_pixmap, parent)
+
+    def set_icon_def(self, icon_def):
+        """Set icon by its definition.
+
+        Args:
+            icon_def (str): Name of FontAwesome icon or path to image.
+        """
+        source_pixmap = self._parse_icon_def(icon_def)
+        self.set_source_pixmap(source_pixmap)
+
+    def _default_pixmap(self):
+        pix = QtGui.QPixmap(1, 1)
+        pix.fill(QtCore.Qt.transparent)
+        return pix
+
+    def _parse_icon_def(self, icon_def):
+        icon = parse_icon_def(icon_def, self.default_size, self.default_size)
+        if icon:
+            return icon
+        return self._default_pixmap()
+
+
+class ContextWarningLabel(PublishPixmapLabel):
+    """Pixmap label with warning icon."""
+    def __init__(self, parent):
+        pix = get_pixmap("warning")
+
+        super(ContextWarningLabel, self).__init__(pix, parent)
+
+        self.setToolTip(
+            "Contains invalid context. Please check details."
+        )
+        self.setObjectName("FamilyIconLabel")
+
+
+class PublishIconBtn(IconButton):
+    """Button using alpha of source image to redraw with different color.
+
+    Main class for buttons shown in the publisher.
+
+    TODO:
+    Add different states:
+    - normal : before publishing
+    - publishing : publishing is running
+    - validation error : validation error happened
+    - error : other error happened
+    - success : publishing finished
+    """
+
+    def __init__(self, pixmap_path, *args, **kwargs):
+        super(PublishIconBtn, self).__init__(*args, **kwargs)
+
+        colors = get_objected_colors()
+        icon = self.generate_icon(
+            pixmap_path,
+            enabled_color=colors["font"].get_qcolor(),
+            disabled_color=colors["font-disabled"].get_qcolor())
+        self.setIcon(icon)
+
+    def generate_icon(self, pixmap_path, enabled_color, disabled_color):
+        icon = QtGui.QIcon()
+        image = QtGui.QImage(pixmap_path)
+        enabled_pixmap = self.paint_image_with_color(image, enabled_color)
+        icon.addPixmap(enabled_pixmap, QtGui.QIcon.Normal)
+        disabled_pixmap = self.paint_image_with_color(image, disabled_color)
+        icon.addPixmap(disabled_pixmap, QtGui.QIcon.Disabled)
+        return icon
+
+    @staticmethod
+    def paint_image_with_color(image, color):
+        """Redraw image with a single color using its alpha.
+
+        It is expected that the input image is a single-color image with
+        an alpha channel.
+
+        Args:
+            image (QImage): Loaded image with alpha.
+            color (QColor): Color that will be used to paint the image.
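+
+        Returns:
+            QtGui.QPixmap: New pixmap of the source image size, filled with
+                `color` and clipped to the (slightly shrunk) alpha region.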
+ """ + width = image.width() + height = image.height() + partition = 8 + part_w = int(width / partition) + part_h = int(height / partition) + part_w -= part_w % 2 + part_h -= part_h % 2 + scaled_image = image.scaled( + width - (2 * part_w), + height - (2 * part_h), + QtCore.Qt.IgnoreAspectRatio, + QtCore.Qt.SmoothTransformation + ) + alpha_mask = scaled_image.createAlphaMask() + alpha_region = QtGui.QRegion(QtGui.QBitmap.fromImage(alpha_mask)) + alpha_region.translate(part_w, part_h) + + pixmap = QtGui.QPixmap(width, height) + pixmap.fill(QtCore.Qt.transparent) + + painter = QtGui.QPainter(pixmap) + painter.setClipRegion(alpha_region) + painter.setPen(QtCore.Qt.NoPen) + painter.setBrush(color) + painter.drawRect(QtCore.QRect(0, 0, width, height)) + painter.end() + + return pixmap + + +class CreateBtn(PublishIconBtn): + """Create instance button.""" + + def __init__(self, parent=None): + icon_path = get_icon_path("create") + super(CreateBtn, self).__init__(icon_path, "Create", parent) + self.setToolTip("Create new product/s") + self.setLayoutDirection(QtCore.Qt.RightToLeft) + + +class SaveBtn(PublishIconBtn): + """Save context and instances information.""" + def __init__(self, parent=None): + icon_path = get_icon_path("save") + super(SaveBtn, self).__init__(icon_path, parent) + self.setToolTip( + "Save changes ({})".format( + QtGui.QKeySequence(QtGui.QKeySequence.Save).toString() + ) + ) + + +class ResetBtn(PublishIconBtn): + """Publish reset button.""" + def __init__(self, parent=None): + icon_path = get_icon_path("refresh") + super(ResetBtn, self).__init__(icon_path, parent) + self.setToolTip( + "Reset & discard changes ({})".format(ResetKeySequence.toString()) + ) + + +class StopBtn(PublishIconBtn): + """Publish stop button.""" + def __init__(self, parent): + icon_path = get_icon_path("stop") + super(StopBtn, self).__init__(icon_path, parent) + self.setToolTip("Stop/Pause publishing") + + +class ValidateBtn(PublishIconBtn): + """Publish validate button.""" + def __init__(self, parent=None): + icon_path = get_icon_path("validate") + super(ValidateBtn, self).__init__(icon_path, parent) + self.setToolTip("Validate") + + +class PublishBtn(PublishIconBtn): + """Publish start publish button.""" + def __init__(self, parent=None): + icon_path = get_icon_path("play") + super(PublishBtn, self).__init__(icon_path, "Publish", parent) + self.setToolTip("Publish") + + +class CreateInstanceBtn(PublishIconBtn): + """Create add button.""" + def __init__(self, parent=None): + icon_path = get_icon_path("add") + super(CreateInstanceBtn, self).__init__(icon_path, parent) + self.setToolTip("Create new instance") + + +class PublishReportBtn(PublishIconBtn): + """Publish report button.""" + + triggered = QtCore.Signal(str) + + def __init__(self, parent=None): + icon_path = get_icon_path("view_report") + super(PublishReportBtn, self).__init__(icon_path, parent) + self.setToolTip("Copy report") + self._actions = [] + + def add_action(self, label, identifier): + self._actions.append( + (label, identifier) + ) + + def _on_action_trigger(self, identifier): + self.triggered.emit(identifier) + + def mouseReleaseEvent(self, event): + super(PublishReportBtn, self).mouseReleaseEvent(event) + menu = QtWidgets.QMenu(self) + actions = [] + for item in self._actions: + label, identifier = item + action = QtWidgets.QAction(label, menu) + action.triggered.connect( + functools.partial(self._on_action_trigger, identifier) + ) + actions.append(action) + menu.addActions(actions) + menu.exec_(event.globalPos()) + + +class 
RemoveInstanceBtn(PublishIconBtn):
+    """Create remove button."""
+    def __init__(self, parent=None):
+        icon_path = resources.get_icon_path("delete")
+        super(RemoveInstanceBtn, self).__init__(icon_path, parent)
+        self.setToolTip("Remove selected instances")
+
+
+class ChangeViewBtn(PublishIconBtn):
+    """Create toggle view button."""
+    def __init__(self, parent=None):
+        icon_path = get_icon_path("change_view")
+        super(ChangeViewBtn, self).__init__(icon_path, parent)
+        self.setToolTip("Swap between views")
+
+
+class AbstractInstanceView(QtWidgets.QWidget):
+    """Abstract class for instance view in creation part."""
+    selection_changed = QtCore.Signal()
+    active_changed = QtCore.Signal()
+    # Refreshed attribute is not changed by view itself
+    # - widget which triggers `refresh` is changing the state
+    # TODO store that information in widget which cares about refreshing
+    refreshed = False
+
+    def set_refreshed(self, refreshed):
+        """View is refreshed with last instances.
+
+        Views are not updated all the time, only when they are visible.
+        """
+        self.refreshed = refreshed
+
+    def refresh(self):
+        """Refresh instances in the view from current `CreatedContext`."""
+        raise NotImplementedError((
+            "{} Method 'refresh' is not implemented."
+        ).format(self.__class__.__name__))
+
+    def has_items(self):
+        """View has at least one item.
+
+        This is more a question for the controller, but is called from a
+        widget which probably should not use the controller directly.
+
+        Returns:
+            bool: There is at least one instance or conversion item.
+        """
+
+        raise NotImplementedError((
+            "{} Method 'has_items' is not implemented."
+        ).format(self.__class__.__name__))
+
+    def get_selected_items(self):
+        """Selected instances required for callbacks.
+
+        Example: When delete button is clicked to know what should be deleted.
+        """
+
+        raise NotImplementedError((
+            "{} Method 'get_selected_items' is not implemented."
+        ).format(self.__class__.__name__))
+
+    def set_selected_items(self, instance_ids, context_selected):
+        """Change selection for instances and context.
+
+        Used to apply selection from one view to the other.
+
+        Args:
+            instance_ids (List[str]): Selected instance ids.
+            context_selected (bool): Context is selected.
+        """
+
+        raise NotImplementedError((
+            "{} Method 'set_selected_items' is not implemented."
+        ).format(self.__class__.__name__))
+
+    def set_active_toggle_enabled(self, enabled):
+        """Instances are disabled for changing enabled state.
+
+        Active state should stay the same until it is "unset".
+
+        Args:
+            enabled (bool): Instance state can be changed.
+        """
+
+        raise NotImplementedError((
+            "{} Method 'set_active_toggle_enabled' is not implemented."
+        ).format(self.__class__.__name__))
+
+
+class ClickableLineEdit(QtWidgets.QLineEdit):
+    """QLineEdit capturing left mouse click.
+
+    Triggers `clicked` signal on mouse click.
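+
+    The signal is emitted only when the mouse button is released inside the
+    widget's rectangle (see `mouseReleaseEvent` below).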
+ """ + clicked = QtCore.Signal() + + def __init__(self, *args, **kwargs): + super(ClickableLineEdit, self).__init__(*args, **kwargs) + self.setReadOnly(True) + self._mouse_pressed = False + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self._mouse_pressed = True + event.accept() + + def mouseMoveEvent(self, event): + event.accept() + + def mouseReleaseEvent(self, event): + if self._mouse_pressed: + self._mouse_pressed = False + if self.rect().contains(event.pos()): + self.clicked.emit() + event.accept() + + def mouseDoubleClickEvent(self, event): + event.accept() + + +class AssetsField(BaseClickableFrame): + """Field where asset name of selected instance/s is showed. + + Click on the field will trigger `AssetsDialog`. + """ + value_changed = QtCore.Signal() + + def __init__(self, controller, parent): + super(AssetsField, self).__init__(parent) + self.setObjectName("AssetNameInputWidget") + + # Don't use 'self' for parent! + # - this widget has specific styles + dialog = AssetsDialog(controller, parent) + + name_input = ClickableLineEdit(self) + name_input.setObjectName("AssetNameInput") + + icon_name = "fa.window-maximize" + icon = qtawesome.icon(icon_name, color="white") + icon_btn = QtWidgets.QPushButton(self) + icon_btn.setIcon(icon) + icon_btn.setObjectName("AssetNameInputButton") + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + layout.addWidget(name_input, 1) + layout.addWidget(icon_btn, 0) + + # Make sure all widgets are vertically extended to highest widget + for widget in ( + name_input, + icon_btn + ): + size_policy = widget.sizePolicy() + size_policy.setVerticalPolicy( + QtWidgets.QSizePolicy.MinimumExpanding) + widget.setSizePolicy(size_policy) + name_input.clicked.connect(self._mouse_release_callback) + icon_btn.clicked.connect(self._mouse_release_callback) + dialog.finished.connect(self._on_dialog_finish) + + self._dialog = dialog + self._name_input = name_input + self._icon_btn = icon_btn + + self._origin_value = [] + self._origin_selection = [] + self._selected_items = [] + self._has_value_changed = False + self._is_valid = True + self._multiselection_text = None + + def _on_dialog_finish(self, result): + if not result: + return + + asset_name = self._dialog.get_selected_asset() + if asset_name is None: + return + + self._selected_items = [asset_name] + self._has_value_changed = ( + self._origin_value != self._selected_items + ) + self.set_text(asset_name) + self._set_is_valid(True) + + self.value_changed.emit() + + def _mouse_release_callback(self): + self._dialog.set_selected_assets(self._selected_items) + self._dialog.open() + + def set_multiselection_text(self, text): + """Change text for multiselection of different assets. + + When there are selected multiple instances at once and they don't have + same asset in context. 
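+
+        Example ("field" is an arbitrary instance name; the text below is
+        the value used by `GlobalAttrsWidget` further down):
+            ```
+            field.set_multiselection_text("< Multiselection >")
+            ```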
+ """ + self._multiselection_text = text + + def _set_is_valid(self, valid): + if valid == self._is_valid: + return + self._is_valid = valid + state = "" + if not valid: + state = "invalid" + self._set_state_property(state) + + def _set_state_property(self, state): + set_style_property(self, "state", state) + set_style_property(self._name_input, "state", state) + set_style_property(self._icon_btn, "state", state) + + def is_valid(self): + """Is asset valid.""" + return self._is_valid + + def has_value_changed(self): + """Value of asset has changed.""" + return self._has_value_changed + + def get_selected_items(self): + """Selected asset names.""" + return list(self._selected_items) + + def set_text(self, text): + """Set text in text field. + + Does not change selected items (assets). + """ + self._name_input.setText(text) + self._name_input.end(False) + + def set_selected_items(self, asset_names=None): + """Set asset names for selection of instances. + + Passed asset names are validated and if there are 2 or more different + asset names then multiselection text is shown. + + Args: + asset_names (list, tuple, set, NoneType): List of asset names. + """ + if asset_names is None: + asset_names = [] + + self._has_value_changed = False + self._origin_value = list(asset_names) + self._selected_items = list(asset_names) + is_valid = True + if not asset_names: + self.set_text("") + + elif len(asset_names) == 1: + asset_name = tuple(asset_names)[0] + is_valid = self._dialog.name_is_valid(asset_name) + self.set_text(asset_name) + else: + for asset_name in asset_names: + is_valid = self._dialog.name_is_valid(asset_name) + if not is_valid: + break + + multiselection_text = self._multiselection_text + if multiselection_text is None: + multiselection_text = "|".join(asset_names) + self.set_text(multiselection_text) + + self._set_is_valid(is_valid) + + def reset_to_origin(self): + """Change to asset names set with last `set_selected_items` call.""" + self.set_selected_items(self._origin_value) + + def confirm_value(self): + self._origin_value = copy.deepcopy(self._selected_items) + self._has_value_changed = False + + +class TasksComboboxProxy(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(TasksComboboxProxy, self).__init__(*args, **kwargs) + self._filter_empty = False + + def set_filter_empty(self, filter_empty): + if self._filter_empty is filter_empty: + return + self._filter_empty = filter_empty + self.invalidate() + + def filterAcceptsRow(self, source_row, parent_index): + if self._filter_empty: + model = self.sourceModel() + source_index = model.index( + source_row, self.filterKeyColumn(), parent_index + ) + if not source_index.data(QtCore.Qt.DisplayRole): + return False + return True + + +class TasksCombobox(QtWidgets.QComboBox): + """Combobox to show tasks for selected instances. + + Combobox gives ability to select only from intersection of task names for + asset names in selected instances. + + If asset names in selected instances does not have same tasks then combobox + will be empty. 
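+
+    Example (hypothetical context, mirroring `TasksModel` above):
+        ```
+        # asset_1 tasks: {"compositing", "animation"}
+        # asset_2 tasks: {"compositing", "editorial"}
+        # -> the combobox offers only "compositing"
+        ```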
+ """ + value_changed = QtCore.Signal() + + def __init__(self, controller, parent): + super(TasksCombobox, self).__init__(parent) + self.setObjectName("TasksCombobox") + + # Set empty delegate to propagate stylesheet to a combobox + delegate = QtWidgets.QStyledItemDelegate() + self.setItemDelegate(delegate) + + model = TasksModel(controller, True) + proxy_model = TasksComboboxProxy() + proxy_model.setSourceModel(model) + self.setModel(proxy_model) + + self.currentIndexChanged.connect(self._on_index_change) + + self._delegate = delegate + self._model = model + self._proxy_model = proxy_model + self._origin_value = [] + self._origin_selection = [] + self._selected_items = [] + self._has_value_changed = False + self._ignore_index_change = False + self._multiselection_text = None + self._is_valid = True + + self._text = None + + # Make sure combobox is extended horizontally + size_policy = self.sizePolicy() + size_policy.setHorizontalPolicy( + QtWidgets.QSizePolicy.MinimumExpanding) + self.setSizePolicy(size_policy) + + def set_invalid_empty_task(self, invalid=True): + self._proxy_model.set_filter_empty(invalid) + if invalid: + self._set_is_valid(False) + self.set_text( + "< One or more products require Task selected >" + ) + else: + self.set_text(None) + + def set_multiselection_text(self, text): + """Change text shown when multiple different tasks are in context.""" + self._multiselection_text = text + + def _on_index_change(self): + if self._ignore_index_change: + return + + self.set_text(None) + text = self.currentText() + idx = self.findText(text) + if idx < 0: + return + + self._set_is_valid(True) + self._selected_items = [text] + self._has_value_changed = ( + self._origin_selection != self._selected_items + ) + + self.value_changed.emit() + + def set_text(self, text): + """Set context shown in combobox without changing selected items.""" + if text == self._text: + return + + self._text = text + self.repaint() + + def paintEvent(self, event): + """Paint custom text without using QLineEdit. + + The easiest way how to draw custom text in combobox and keep combobox + properties and event handling. + """ + painter = QtGui.QPainter(self) + painter.setPen(self.palette().color(QtGui.QPalette.Text)) + opt = QtWidgets.QStyleOptionComboBox() + self.initStyleOption(opt) + if self._text is not None: + opt.currentText = self._text + + style = self.style() + style.drawComplexControl( + QtWidgets.QStyle.CC_ComboBox, opt, painter, self + ) + style.drawControl( + QtWidgets.QStyle.CE_ComboBoxLabel, opt, painter, self + ) + painter.end() + + def is_valid(self): + """Are all selected items valid.""" + return self._is_valid + + def has_value_changed(self): + """Did selection of task changed.""" + return self._has_value_changed + + def _set_is_valid(self, valid): + if valid == self._is_valid: + return + self._is_valid = valid + state = "" + if not valid: + state = "invalid" + self._set_state_property(state) + + def _set_state_property(self, state): + current_value = self.property("state") + if current_value != state: + self.setProperty("state", state) + self.style().polish(self) + + def get_selected_items(self): + """Get selected tasks. + + If value has changed then will return list with single item. + + Returns: + list: Selected tasks. 
+ """ + return list(self._selected_items) + + def set_asset_names(self, asset_names): + """Set asset names for which should show tasks.""" + self._ignore_index_change = True + + self._model.set_asset_names(asset_names) + self._proxy_model.set_filter_empty(False) + self._proxy_model.sort(0) + + self._ignore_index_change = False + + # It is a bug if not exactly one asset got here + if len(asset_names) != 1: + self.set_selected_item("") + self._set_is_valid(False) + return + + asset_name = tuple(asset_names)[0] + + is_valid = False + if self._selected_items: + is_valid = True + + valid_task_names = [] + for task_name in self._selected_items: + _is_valid = self._model.is_task_name_valid(asset_name, task_name) + if _is_valid: + valid_task_names.append(task_name) + else: + is_valid = _is_valid + + self._selected_items = valid_task_names + if len(self._selected_items) == 0: + self.set_selected_item("") + + elif len(self._selected_items) == 1: + self.set_selected_item(self._selected_items[0]) + + else: + multiselection_text = self._multiselection_text + if multiselection_text is None: + multiselection_text = "|".join(self._selected_items) + self.set_selected_item(multiselection_text) + + self._set_is_valid(is_valid) + + def confirm_value(self, asset_names): + new_task_name = self._selected_items[0] + self._origin_value = [ + (asset_name, new_task_name) + for asset_name in asset_names + ] + self._origin_selection = copy.deepcopy(self._selected_items) + self._has_value_changed = False + + def set_selected_items(self, asset_task_combinations=None): + """Set items for selected instances. + + Args: + asset_task_combinations (list): List of tuples. Each item in + the list contain asset name and task name. + """ + self._proxy_model.set_filter_empty(False) + self._proxy_model.sort(0) + + if asset_task_combinations is None: + asset_task_combinations = [] + + task_names = set() + task_names_by_asset_name = collections.defaultdict(set) + for asset_name, task_name in asset_task_combinations: + task_names.add(task_name) + task_names_by_asset_name[asset_name].add(task_name) + asset_names = set(task_names_by_asset_name.keys()) + + self._ignore_index_change = True + + self._model.set_asset_names(asset_names) + + self._has_value_changed = False + + self._origin_value = copy.deepcopy(asset_task_combinations) + + self._origin_selection = list(task_names) + self._selected_items = list(task_names) + # Reset current index + self.setCurrentIndex(-1) + is_valid = True + if not task_names: + self.set_selected_item("") + + elif len(task_names) == 1: + task_name = tuple(task_names)[0] + idx = self.findText(task_name) + is_valid = not idx < 0 + if not is_valid and len(asset_names) > 1: + is_valid = self._validate_task_names_by_asset_names( + task_names_by_asset_name + ) + self.set_selected_item(task_name) + + else: + for task_name in task_names: + idx = self.findText(task_name) + is_valid = not idx < 0 + if not is_valid: + break + + if not is_valid and len(asset_names) > 1: + is_valid = self._validate_task_names_by_asset_names( + task_names_by_asset_name + ) + multiselection_text = self._multiselection_text + if multiselection_text is None: + multiselection_text = "|".join(task_names) + self.set_selected_item(multiselection_text) + + self._set_is_valid(is_valid) + + self._ignore_index_change = False + + self.value_changed.emit() + + def _validate_task_names_by_asset_names(self, task_names_by_asset_name): + for asset_name, task_names in task_names_by_asset_name.items(): + for task_name in task_names: + if not 
self._model.is_task_name_valid(asset_name, task_name): + return False + return True + + def set_selected_item(self, item_name): + """Set task which is set on selected instance. + + Args: + item_name(str): Task name which should be selected. + """ + idx = self.findText(item_name) + # Set current index (must be set to -1 if is invalid) + self.setCurrentIndex(idx) + self.set_text(item_name) + + def reset_to_origin(self): + """Change to task names set with last `set_selected_items` call.""" + self.set_selected_items(self._origin_value) + + +class VariantInputWidget(PlaceholderLineEdit): + """Input widget for variant.""" + value_changed = QtCore.Signal() + + def __init__(self, parent): + super(VariantInputWidget, self).__init__(parent) + + self.setObjectName("VariantInput") + self.setToolTip(VARIANT_TOOLTIP) + + name_pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) + self._name_pattern = name_pattern + self._compiled_name_pattern = re.compile(name_pattern) + + self._origin_value = [] + self._current_value = [] + + self._ignore_value_change = False + self._has_value_changed = False + self._multiselection_text = None + + self._is_valid = True + + self.textChanged.connect(self._on_text_change) + + def is_valid(self): + """Is variant text valid.""" + return self._is_valid + + def has_value_changed(self): + """Value of variant has changed.""" + return self._has_value_changed + + def _set_state_property(self, state): + current_value = self.property("state") + if current_value != state: + self.setProperty("state", state) + self.style().polish(self) + + def set_multiselection_text(self, text): + """Change text of multiselection.""" + self._multiselection_text = text + + def confirm_value(self): + self._origin_value = copy.deepcopy(self._current_value) + self._has_value_changed = False + + def _set_is_valid(self, valid): + if valid == self._is_valid: + return + self._is_valid = valid + state = "" + if not valid: + state = "invalid" + self._set_state_property(state) + + def _on_text_change(self): + if self._ignore_value_change: + return + + is_valid = bool(self._compiled_name_pattern.match(self.text())) + self._set_is_valid(is_valid) + + self._current_value = [self.text()] + self._has_value_changed = self._current_value != self._origin_value + + self.value_changed.emit() + + def reset_to_origin(self): + """Set origin value of selected instances.""" + self.set_value(self._origin_value) + + def get_value(self): + """Get current value. + + Origin value returned if didn't change. + """ + return copy.deepcopy(self._current_value) + + def set_value(self, variants=None): + """Set value of currently selected instances.""" + if variants is None: + variants = [] + + self._ignore_value_change = True + + self._has_value_changed = False + + self._origin_value = list(variants) + self._current_value = list(variants) + + self.setPlaceholderText("") + if not variants: + self.setText("") + + elif len(variants) == 1: + self.setText(self._current_value[0]) + + else: + multiselection_text = self._multiselection_text + if multiselection_text is None: + multiselection_text = "|".join(variants) + self.setText("") + self.setPlaceholderText(multiselection_text) + + self._ignore_value_change = False + + +class MultipleItemWidget(QtWidgets.QWidget): + """Widget for immutable text which can have more than one value. + + Content may be bigger than widget's size and does not have scroll but has + flick widget on top (is possible to move around with clicked mouse). 
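+
+    Used below by `GlobalAttrsWidget` for the immutable "Product type" and
+    "Product name" fields.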
+    """
+
+    def __init__(self, parent):
+        super(MultipleItemWidget, self).__init__(parent)
+
+        model = QtGui.QStandardItemModel()
+
+        view = QtWidgets.QListView(self)
+        view.setObjectName("MultipleItemView")
+        view.setLayoutMode(QtWidgets.QListView.Batched)
+        view.setViewMode(QtWidgets.QListView.IconMode)
+        view.setResizeMode(QtWidgets.QListView.Adjust)
+        view.setWrapping(False)
+        view.setSpacing(2)
+        view.setModel(model)
+        view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+        view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+
+        flick = FlickCharm(parent=view)
+        flick.activateOn(view)
+
+        layout = QtWidgets.QHBoxLayout(self)
+        layout.setContentsMargins(0, 0, 0, 0)
+        layout.addWidget(view)
+
+        model.rowsInserted.connect(self._on_insert)
+
+        self._view = view
+        self._model = model
+
+        self._value = []
+
+    def _on_insert(self):
+        self._update_size()
+
+    def _update_size(self):
+        model = self._view.model()
+        if model.rowCount() == 0:
+            return
+        height = self._view.sizeHintForRow(0)
+        self.setMaximumHeight(height + (2 * self._view.spacing()))
+
+    def showEvent(self, event):
+        super(MultipleItemWidget, self).showEvent(event)
+        tmp_item = None
+        if not self._value:
+            # Add temp item to be able to calculate maximum height of widget
+            tmp_item = QtGui.QStandardItem("tmp")
+            self._model.appendRow(tmp_item)
+            self._update_size()
+
+        if tmp_item is not None:
+            self._model.clear()
+
+    def resizeEvent(self, event):
+        super(MultipleItemWidget, self).resizeEvent(event)
+        self._update_size()
+
+    def set_value(self, value=None):
+        """Set value/s of currently selected instance."""
+        if value is None:
+            value = []
+        self._value = value
+
+        self._model.clear()
+        for item_text in value:
+            item = QtGui.QStandardItem(item_text)
+            item.setEditable(False)
+            item.setSelectable(False)
+            self._model.appendRow(item)
+
+
+class GlobalAttrsWidget(QtWidgets.QWidget):
+    """Global attributes, mainly to define context and subset name of
+    instances.
+
+    The subset name is, or may be, affected by the context. Gives the
+    ability to modify the context and subset name of an instance. Changes
+    are not applied automatically but must be submitted.
+
+    Warning: Until the artist hits `Submit`, changes must not be propagated
+        to instance data.
+ + Global attributes contain these widgets: + Variant: [ text input ] + Asset: [ asset dialog ] + Task: [ combobox ] + Family: [ immutable ] + Subset name: [ immutable ] + [Submit] [Cancel] + """ + instance_context_changed = QtCore.Signal() + + multiselection_text = "< Multiselection >" + unknown_value = "N/A" + + def __init__(self, controller, parent): + super(GlobalAttrsWidget, self).__init__(parent) + + self._controller = controller + self._current_instances = [] + + variant_input = VariantInputWidget(self) + asset_value_widget = AssetsField(controller, self) + task_value_widget = TasksCombobox(controller, self) + family_value_widget = MultipleItemWidget(self) + subset_value_widget = MultipleItemWidget(self) + + variant_input.set_multiselection_text(self.multiselection_text) + asset_value_widget.set_multiselection_text(self.multiselection_text) + task_value_widget.set_multiselection_text(self.multiselection_text) + + variant_input.set_value() + asset_value_widget.set_selected_items() + task_value_widget.set_selected_items() + family_value_widget.set_value() + subset_value_widget.set_value() + + submit_btn = QtWidgets.QPushButton("Confirm", self) + cancel_btn = QtWidgets.QPushButton("Cancel", self) + submit_btn.setEnabled(False) + cancel_btn.setEnabled(False) + + btns_layout = QtWidgets.QHBoxLayout() + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addStretch(1) + btns_layout.setSpacing(5) + btns_layout.addWidget(submit_btn) + btns_layout.addWidget(cancel_btn) + + main_layout = QtWidgets.QFormLayout(self) + main_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING) + main_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING) + main_layout.addRow("Variant", variant_input) + main_layout.addRow("Folder", asset_value_widget) + main_layout.addRow("Task", task_value_widget) + main_layout.addRow("Product type", family_value_widget) + main_layout.addRow("Product name", subset_value_widget) + main_layout.addRow(btns_layout) + + variant_input.value_changed.connect(self._on_variant_change) + asset_value_widget.value_changed.connect(self._on_asset_change) + task_value_widget.value_changed.connect(self._on_task_change) + submit_btn.clicked.connect(self._on_submit) + cancel_btn.clicked.connect(self._on_cancel) + + self.variant_input = variant_input + self.asset_value_widget = asset_value_widget + self.task_value_widget = task_value_widget + self.family_value_widget = family_value_widget + self.subset_value_widget = subset_value_widget + self.submit_btn = submit_btn + self.cancel_btn = cancel_btn + + def _on_submit(self): + """Commit changes for selected instances.""" + + variant_value = None + asset_name = None + task_name = None + if self.variant_input.has_value_changed(): + variant_value = self.variant_input.get_value()[0] + + if self.asset_value_widget.has_value_changed(): + asset_name = self.asset_value_widget.get_selected_items()[0] + + if self.task_value_widget.has_value_changed(): + task_name = self.task_value_widget.get_selected_items()[0] + + subset_names = set() + invalid_tasks = False + asset_names = [] + for instance in self._current_instances: + new_variant_value = instance.get("variant") + new_asset_name = instance.get("folderPath") + new_task_name = instance.get("task") + if variant_value is not None: + new_variant_value = variant_value + + if asset_name is not None: + new_asset_name = asset_name + + if task_name is not None: + new_task_name = task_name + + asset_names.append(new_asset_name) + try: + new_subset_name = self._controller.get_subset_name( + instance.creator_identifier, 
new_variant_value,
+                    new_task_name,
+                    new_asset_name,
+                    instance.id,
+                )
+
+            except TaskNotSetError:
+                invalid_tasks = True
+                instance.set_task_invalid(True)
+                subset_names.add(instance["subset"])
+                continue
+
+            subset_names.add(new_subset_name)
+            if variant_value is not None:
+                instance["variant"] = variant_value
+
+            if asset_name is not None:
+                instance["folderPath"] = asset_name
+                instance.set_asset_invalid(False)
+
+            if task_name is not None:
+                instance["task"] = task_name or None
+                instance.set_task_invalid(False)
+
+            instance["subset"] = new_subset_name
+
+        if invalid_tasks:
+            self.task_value_widget.set_invalid_empty_task()
+
+        self.subset_value_widget.set_value(subset_names)
+
+        self._set_btns_enabled(False)
+        self._set_btns_visible(invalid_tasks)
+
+        if variant_value is not None:
+            self.variant_input.confirm_value()
+
+        if asset_name is not None:
+            self.asset_value_widget.confirm_value()
+
+        if task_name is not None:
+            self.task_value_widget.confirm_value(asset_names)
+
+        self.instance_context_changed.emit()
+
+    def _on_cancel(self):
+        """Cancel changes and set values back to their origin."""
+
+        self.variant_input.reset_to_origin()
+        self.asset_value_widget.reset_to_origin()
+        self.task_value_widget.reset_to_origin()
+        self._set_btns_enabled(False)
+
+    def _on_value_change(self):
+        any_invalid = (
+            not self.variant_input.is_valid()
+            or not self.asset_value_widget.is_valid()
+            or not self.task_value_widget.is_valid()
+        )
+        any_changed = (
+            self.variant_input.has_value_changed()
+            or self.asset_value_widget.has_value_changed()
+            or self.task_value_widget.has_value_changed()
+        )
+        self._set_btns_visible(any_changed or any_invalid)
+        self.cancel_btn.setEnabled(any_changed)
+        self.submit_btn.setEnabled(not any_invalid)
+
+    def _on_variant_change(self):
+        self._on_value_change()
+
+    def _on_asset_change(self):
+        asset_names = self.asset_value_widget.get_selected_items()
+        self.task_value_widget.set_asset_names(asset_names)
+        self._on_value_change()
+
+    def _on_task_change(self):
+        self._on_value_change()
+
+    def _set_btns_visible(self, visible):
+        self.cancel_btn.setVisible(visible)
+        self.submit_btn.setVisible(visible)
+
+    def _set_btns_enabled(self, enabled):
+        self.cancel_btn.setEnabled(enabled)
+        self.submit_btn.setEnabled(enabled)
+
+    def set_current_instances(self, instances):
+        """Set currently selected instances.
+
+        Args:
+            instances(List[CreatedInstance]): List of selected instances.
+                An empty list means that nothing, or only the context, is
+                selected.
+        """
+        self._set_btns_visible(False)
+
+        self._current_instances = instances
+
+        asset_names = set()
+        variants = set()
+        families = set()
+        subset_names = set()
+
+        editable = True
+        if len(instances) == 0:
+            editable = False
+
+        asset_task_combinations = []
+        for instance in instances:
+            # NOTE I'm not sure how this can even happen?
+ if instance.creator_identifier is None: + editable = False + + variants.add(instance.get("variant") or self.unknown_value) + families.add(instance.get("family") or self.unknown_value) + asset_name = instance.get("folderPath") or self.unknown_value + task_name = instance.get("task") or "" + asset_names.add(asset_name) + asset_task_combinations.append((asset_name, task_name)) + subset_names.add(instance.get("subset") or self.unknown_value) + + self.variant_input.set_value(variants) + + # Set context of asset widget + self.asset_value_widget.set_selected_items(asset_names) + # Set context of task widget + self.task_value_widget.set_selected_items(asset_task_combinations) + self.family_value_widget.set_value(families) + self.subset_value_widget.set_value(subset_names) + + self.variant_input.setEnabled(editable) + self.asset_value_widget.setEnabled(editable) + self.task_value_widget.setEnabled(editable) + + +class CreatorAttrsWidget(QtWidgets.QWidget): + """Widget showing creator specific attributes for selected instances. + + Attributes are defined on creator so are dynamic. Their look and type is + based on attribute definitions that are defined in + `~/ayon_core/lib/attribute_definitions.py` and their widget + representation in `~/openpype/tools/attribute_defs/*`. + + Widgets are disabled if context of instance is not valid. + + Definitions are shown for all instance no matter if they are created with + different creators. If creator have same (similar) definitions their + widgets are merged into one (different label does not count). + """ + + def __init__(self, controller, parent): + super(CreatorAttrsWidget, self).__init__(parent) + + scroll_area = QtWidgets.QScrollArea(self) + scroll_area.setWidgetResizable(True) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + main_layout.addWidget(scroll_area, 1) + + self._main_layout = main_layout + + self._controller = controller + self._scroll_area = scroll_area + + self._attr_def_id_to_instances = {} + self._attr_def_id_to_attr_def = {} + + # To store content of scroll area to prevent garbage collection + self._content_widget = None + + def set_instances_valid(self, valid): + """Change valid state of current instances.""" + + if ( + self._content_widget is not None + and self._content_widget.isEnabled() != valid + ): + self._content_widget.setEnabled(valid) + + def set_current_instances(self, instances): + """Set current instances for which are attribute definitions shown.""" + + prev_content_widget = self._scroll_area.widget() + if prev_content_widget: + self._scroll_area.takeWidget() + prev_content_widget.hide() + prev_content_widget.deleteLater() + + self._content_widget = None + self._attr_def_id_to_instances = {} + self._attr_def_id_to_attr_def = {} + + result = self._controller.get_creator_attribute_definitions( + instances + ) + + content_widget = QtWidgets.QWidget(self._scroll_area) + content_layout = QtWidgets.QGridLayout(content_widget) + content_layout.setColumnStretch(0, 0) + content_layout.setColumnStretch(1, 1) + content_layout.setAlignment(QtCore.Qt.AlignTop) + content_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING) + content_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING) + + row = 0 + for attr_def, attr_instances, values in result: + widget = create_widget_for_attr_def(attr_def, content_widget) + if attr_def.is_value_def: + if len(values) == 1: + value = values[0] + if value is not None: + widget.set_value(values[0]) + else: + widget.set_value(values, 
True)
+
+            widget.value_changed.connect(self._input_value_changed)
+            self._attr_def_id_to_instances[attr_def.id] = attr_instances
+            self._attr_def_id_to_attr_def[attr_def.id] = attr_def
+
+            if attr_def.hidden:
+                continue
+
+            expand_cols = 2
+            if attr_def.is_value_def and attr_def.is_label_horizontal:
+                expand_cols = 1
+
+            col_num = 2 - expand_cols
+
+            label = None
+            if attr_def.is_value_def:
+                label = attr_def.label or attr_def.key
+            if label:
+                label_widget = QtWidgets.QLabel(label, self)
+                tooltip = attr_def.tooltip
+                if tooltip:
+                    label_widget.setToolTip(tooltip)
+                if attr_def.is_label_horizontal:
+                    label_widget.setAlignment(
+                        QtCore.Qt.AlignRight
+                        | QtCore.Qt.AlignVCenter
+                    )
+                content_layout.addWidget(
+                    label_widget, row, 0, 1, expand_cols
+                )
+                if not attr_def.is_label_horizontal:
+                    row += 1
+
+            content_layout.addWidget(
+                widget, row, col_num, 1, expand_cols
+            )
+            row += 1
+
+        self._scroll_area.setWidget(content_widget)
+        self._content_widget = content_widget
+
+    def _input_value_changed(self, value, attr_id):
+        instances = self._attr_def_id_to_instances.get(attr_id)
+        attr_def = self._attr_def_id_to_attr_def.get(attr_id)
+        if not instances or not attr_def:
+            return
+
+        for instance in instances:
+            creator_attributes = instance["creator_attributes"]
+            if attr_def.key in creator_attributes:
+                creator_attributes[attr_def.key] = value
+
+
+class PublishPluginAttrsWidget(QtWidgets.QWidget):
+    """Widget showing publish plugin attributes for selected instances.
+
+    Attributes are defined on publish plugins. A publish plugin may define
+    attribute definitions but must inherit `AYONPyblishPluginMixin`
+    (~/ayon_core/pipeline/publish). At the moment it is required to implement
+    the `get_attribute_defs` and `convert_attribute_values` class methods.
+
+    Look and type of attributes is based on attribute definitions that are
+    defined in `~/ayon_core/lib/attribute_definitions.py` and their
+    widget representation in `~/ayon_core/tools/attribute_defs/*`.
+
+    Widgets are disabled if the context of an instance is not valid.
+
+    Definitions are shown for all instances, no matter if they have different
+    families. Similar definitions are merged into one (a different label
+    does not count).
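+
+    Example (an illustrative sketch only; the plugin and its attribute are
+    hypothetical, `BoolDef` is from `ayon_core.lib.attribute_definitions`):
+        ```
+        class CollectExample(pyblish.api.InstancePlugin,
+                             AYONPyblishPluginMixin):
+            @classmethod
+            def get_attribute_defs(cls):
+                return [BoolDef("use_review", label="Review")]
+        ```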
+ """ + + def __init__(self, controller, parent): + super(PublishPluginAttrsWidget, self).__init__(parent) + + scroll_area = QtWidgets.QScrollArea(self) + scroll_area.setWidgetResizable(True) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + main_layout.addWidget(scroll_area, 1) + + self._main_layout = main_layout + + self._controller = controller + self._scroll_area = scroll_area + + self._attr_def_id_to_instances = {} + self._attr_def_id_to_attr_def = {} + self._attr_def_id_to_plugin_name = {} + + # Store content of scroll area to prevent garbage collection + self._content_widget = None + + def set_instances_valid(self, valid): + """Change valid state of current instances.""" + if ( + self._content_widget is not None + and self._content_widget.isEnabled() != valid + ): + self._content_widget.setEnabled(valid) + + def set_current_instances(self, instances, context_selected): + """Set current instances for which are attribute definitions shown.""" + + prev_content_widget = self._scroll_area.widget() + if prev_content_widget: + self._scroll_area.takeWidget() + prev_content_widget.hide() + prev_content_widget.deleteLater() + + self._content_widget = None + + self._attr_def_id_to_instances = {} + self._attr_def_id_to_attr_def = {} + self._attr_def_id_to_plugin_name = {} + + result = self._controller.get_publish_attribute_definitions( + instances, context_selected + ) + + content_widget = QtWidgets.QWidget(self._scroll_area) + attr_def_widget = QtWidgets.QWidget(content_widget) + attr_def_layout = QtWidgets.QGridLayout(attr_def_widget) + attr_def_layout.setColumnStretch(0, 0) + attr_def_layout.setColumnStretch(1, 1) + attr_def_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING) + attr_def_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING) + + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.addWidget(attr_def_widget, 0) + content_layout.addStretch(1) + + row = 0 + for plugin_name, attr_defs, all_plugin_values in result: + plugin_values = all_plugin_values[plugin_name] + + for attr_def in attr_defs: + widget = create_widget_for_attr_def( + attr_def, content_widget + ) + hidden_widget = attr_def.hidden + # Hide unknown values of publish plugins + # - The keys in most of cases does not represent what would + # label represent + if isinstance(attr_def, UnknownDef): + widget.setVisible(False) + hidden_widget = True + + if not hidden_widget: + expand_cols = 2 + if attr_def.is_value_def and attr_def.is_label_horizontal: + expand_cols = 1 + + col_num = 2 - expand_cols + label = None + if attr_def.is_value_def: + label = attr_def.label or attr_def.key + if label: + label_widget = QtWidgets.QLabel(label, content_widget) + tooltip = attr_def.tooltip + if tooltip: + label_widget.setToolTip(tooltip) + if attr_def.is_label_horizontal: + label_widget.setAlignment( + QtCore.Qt.AlignRight + | QtCore.Qt.AlignVCenter + ) + attr_def_layout.addWidget( + label_widget, row, 0, 1, expand_cols + ) + if not attr_def.is_label_horizontal: + row += 1 + attr_def_layout.addWidget( + widget, row, col_num, 1, expand_cols + ) + row += 1 + + if not attr_def.is_value_def: + continue + + widget.value_changed.connect(self._input_value_changed) + + attr_values = plugin_values[attr_def.key] + multivalue = len(attr_values) > 1 + values = [] + instances = [] + for instance, value in attr_values: + values.append(value) + instances.append(instance) + + self._attr_def_id_to_attr_def[attr_def.id] = attr_def + 
self._attr_def_id_to_instances[attr_def.id] = instances
+                self._attr_def_id_to_plugin_name[attr_def.id] = plugin_name
+
+                if multivalue:
+                    widget.set_value(values, multivalue)
+                else:
+                    widget.set_value(values[0])
+
+        self._scroll_area.setWidget(content_widget)
+        self._content_widget = content_widget
+
+    def _input_value_changed(self, value, attr_id):
+        instances = self._attr_def_id_to_instances.get(attr_id)
+        attr_def = self._attr_def_id_to_attr_def.get(attr_id)
+        plugin_name = self._attr_def_id_to_plugin_name.get(attr_id)
+        if not instances or not attr_def or not plugin_name:
+            return
+
+        for instance in instances:
+            plugin_val = instance.publish_attributes[plugin_name]
+            plugin_val[attr_def.key] = value
+
+
+class SubsetAttributesWidget(QtWidgets.QWidget):
+    """Wrapper widget where attributes of instance/s are modified.
+
+    ┌─────────────────┬─────────────┐
+    │     Global      │             │
+    │   attributes    │  Thumbnail  │  TOP
+    │                 │             │
+    ├─────────────┬───┴─────────────┤
+    │   Creator   │     Publish     │
+    │ attributes  │     plugin      │  BOTTOM
+    │             │   attributes    │
+    └─────────────┴─────────────────┘
+    """
+    instance_context_changed = QtCore.Signal()
+    convert_requested = QtCore.Signal()
+
+    def __init__(self, controller, parent):
+        super(SubsetAttributesWidget, self).__init__(parent)
+
+        # TOP PART
+        top_widget = QtWidgets.QWidget(self)
+
+        # Global attributes
+        global_attrs_widget = GlobalAttrsWidget(controller, top_widget)
+        thumbnail_widget = ThumbnailWidget(controller, top_widget)
+
+        top_layout = QtWidgets.QHBoxLayout(top_widget)
+        top_layout.setContentsMargins(0, 0, 0, 0)
+        top_layout.addWidget(global_attrs_widget, 7)
+        top_layout.addWidget(thumbnail_widget, 3)
+
+        # BOTTOM PART
+        bottom_widget = QtWidgets.QWidget(self)
+
+        # Wrap Creator attributes to widget to be able to add convert button
+        creator_widget = QtWidgets.QWidget(bottom_widget)
+
+        # Convert button widget (with layout to handle stretch)
+        convert_widget = QtWidgets.QWidget(creator_widget)
+        convert_label = QtWidgets.QLabel(creator_widget)
+        # Set the label text with 'setText' to apply html
+        convert_label.setText(
+            (
+                "Found old publishable subsets"
+                " incompatible with new publisher."
+                "<br/><br/>
Press the update subsets button" + " to automatically update them" + " to be able to publish again." + ) + ) + convert_label.setWordWrap(True) + convert_label.setAlignment(QtCore.Qt.AlignCenter) + + convert_btn = QtWidgets.QPushButton( + "Update subsets", convert_widget + ) + convert_separator = QtWidgets.QFrame(convert_widget) + convert_separator.setObjectName("Separator") + convert_separator.setMinimumHeight(1) + convert_separator.setMaximumHeight(1) + + convert_layout = QtWidgets.QGridLayout(convert_widget) + convert_layout.setContentsMargins(5, 0, 5, 0) + convert_layout.setVerticalSpacing(10) + convert_layout.addWidget(convert_label, 0, 0, 1, 3) + convert_layout.addWidget(convert_btn, 1, 1) + convert_layout.addWidget(convert_separator, 2, 0, 1, 3) + convert_layout.setColumnStretch(0, 1) + convert_layout.setColumnStretch(1, 0) + convert_layout.setColumnStretch(2, 1) + + # Creator attributes widget + creator_attrs_widget = CreatorAttrsWidget( + controller, creator_widget + ) + creator_layout = QtWidgets.QVBoxLayout(creator_widget) + creator_layout.setContentsMargins(0, 0, 0, 0) + creator_layout.addWidget(convert_widget, 0) + creator_layout.addWidget(creator_attrs_widget, 1) + + publish_attrs_widget = PublishPluginAttrsWidget( + controller, bottom_widget + ) + + bottom_separator = QtWidgets.QWidget(bottom_widget) + bottom_separator.setObjectName("Separator") + bottom_separator.setMinimumWidth(1) + + bottom_layout = QtWidgets.QHBoxLayout(bottom_widget) + bottom_layout.setContentsMargins(0, 0, 0, 0) + bottom_layout.addWidget(creator_widget, 1) + bottom_layout.addWidget(bottom_separator, 0) + bottom_layout.addWidget(publish_attrs_widget, 1) + + top_bottom = QtWidgets.QWidget(self) + top_bottom.setObjectName("Separator") + top_bottom.setMinimumHeight(1) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(top_widget, 0) + layout.addWidget(top_bottom, 0) + layout.addWidget(bottom_widget, 1) + + self._convertor_identifiers = None + self._current_instances = None + self._context_selected = False + self._all_instances_valid = True + + global_attrs_widget.instance_context_changed.connect( + self._on_instance_context_changed + ) + convert_btn.clicked.connect(self._on_convert_click) + thumbnail_widget.thumbnail_created.connect(self._on_thumbnail_create) + thumbnail_widget.thumbnail_cleared.connect(self._on_thumbnail_clear) + + controller.event_system.add_callback( + "instance.thumbnail.changed", self._on_thumbnail_changed + ) + + self._controller = controller + + self._convert_widget = convert_widget + + self.global_attrs_widget = global_attrs_widget + + self.creator_attrs_widget = creator_attrs_widget + self.publish_attrs_widget = publish_attrs_widget + self._thumbnail_widget = thumbnail_widget + + self.top_bottom = top_bottom + self.bottom_separator = bottom_separator + + def _on_instance_context_changed(self): + all_valid = True + for instance in self._current_instances: + if not instance.has_valid_context: + all_valid = False + break + + self._all_instances_valid = all_valid + self.creator_attrs_widget.set_instances_valid(all_valid) + self.publish_attrs_widget.set_instances_valid(all_valid) + + self.instance_context_changed.emit() + + def _on_convert_click(self): + self.convert_requested.emit() + + def set_current_instances( + self, instances, context_selected, convertor_identifiers + ): + """Change currently selected items. + + Args: + instances(List[CreatedInstance]): List of currently selected + instances. + context_selected(bool): Is context selected. 
+ convertor_identifiers(List[str]): Identifiers of convert items. + """ + + all_valid = True + for instance in instances: + if not instance.has_valid_context: + all_valid = False + break + + s_convertor_identifiers = set(convertor_identifiers) + self._convertor_identifiers = s_convertor_identifiers + self._current_instances = instances + self._context_selected = context_selected + self._all_instances_valid = all_valid + + self._convert_widget.setVisible(len(s_convertor_identifiers) > 0) + self.global_attrs_widget.set_current_instances(instances) + self.creator_attrs_widget.set_current_instances(instances) + self.publish_attrs_widget.set_current_instances( + instances, context_selected + ) + self.creator_attrs_widget.set_instances_valid(all_valid) + self.publish_attrs_widget.set_instances_valid(all_valid) + + self._update_thumbnails() + + def _on_thumbnail_create(self, path): + instance_ids = [ + instance.id + for instance in self._current_instances + ] + if self._context_selected: + instance_ids.append(None) + + if not instance_ids: + return + + mapping = {} + if len(instance_ids) == 1: + mapping[instance_ids[0]] = path + + else: + for instance_id in instance_ids: + root = os.path.dirname(path) + ext = os.path.splitext(path)[-1] + dst_path = os.path.join(root, str(uuid.uuid4()) + ext) + shutil.copy(path, dst_path) + mapping[instance_id] = dst_path + + self._controller.set_thumbnail_paths_for_instances(mapping) + + def _on_thumbnail_clear(self): + instance_ids = [ + instance.id + for instance in self._current_instances + ] + if self._context_selected: + instance_ids.append(None) + + if not instance_ids: + return + + mapping = { + instance_id: None + for instance_id in instance_ids + } + self._controller.set_thumbnail_paths_for_instances(mapping) + + def _on_thumbnail_changed(self, event): + self._update_thumbnails() + + def _update_thumbnails(self): + instance_ids = [ + instance.id + for instance in self._current_instances + ] + if self._context_selected: + instance_ids.append(None) + + if not instance_ids: + self._thumbnail_widget.setVisible(False) + self._thumbnail_widget.set_current_thumbnails(None) + return + + mapping = self._controller.get_thumbnail_paths_for_instances( + instance_ids + ) + thumbnail_paths = [] + for instance_id in instance_ids: + path = mapping[instance_id] + if path: + thumbnail_paths.append(path) + + self._thumbnail_widget.setVisible(True) + self._thumbnail_widget.set_current_thumbnails(thumbnail_paths) + + +class CreateNextPageOverlay(QtWidgets.QWidget): + clicked = QtCore.Signal() + + def __init__(self, parent): + super(CreateNextPageOverlay, self).__init__(parent) + self.setCursor(QtCore.Qt.PointingHandCursor) + self._arrow_color = ( + get_objected_colors("font").get_qcolor() + ) + self._bg_color = ( + get_objected_colors("bg-buttons").get_qcolor() + ) + + change_anim = QtCore.QVariantAnimation() + change_anim.setStartValue(0.0) + change_anim.setEndValue(1.0) + change_anim.setDuration(200) + change_anim.setEasingCurve(QtCore.QEasingCurve.OutCubic) + + change_anim.valueChanged.connect(self._on_anim) + + self._change_anim = change_anim + self._is_visible = None + self._anim_value = 0.0 + self._increasing = False + self._under_mouse = None + self._handle_show_on_own = True + self._mouse_pressed = False + self.set_visible(True) + + def set_increasing(self, increasing): + if self._increasing is increasing: + return + self._increasing = increasing + if increasing: + self._change_anim.setDirection(QtCore.QAbstractAnimation.Forward) + else: + 
self._change_anim.setDirection(QtCore.QAbstractAnimation.Backward) + + if self._change_anim.state() != QtCore.QAbstractAnimation.Running: + self._change_anim.start() + + def set_visible(self, visible): + if self._is_visible is visible: + return + + self._is_visible = visible + if not visible: + self.set_increasing(False) + if not self._is_anim_finished(): + return + + self.setVisible(visible) + self._check_anim_timer() + + def _is_anim_finished(self): + if self._increasing: + return self._anim_value == 1.0 + return self._anim_value == 0.0 + + def _on_anim(self, value): + self._check_anim_timer() + + self._anim_value = value + + self.update() + + if not self._is_anim_finished(): + return + + if not self._is_visible: + self.setVisible(False) + + def set_under_mouse(self, under_mouse): + if self._under_mouse is under_mouse: + return + + self._under_mouse = under_mouse + self.set_increasing(under_mouse) + + def _is_under_mouse(self): + mouse_pos = self.mapFromGlobal(QtGui.QCursor.pos()) + under_mouse = self.rect().contains(mouse_pos) + return under_mouse + + def _check_anim_timer(self): + if not self.isVisible(): + return + + self.set_increasing(self._under_mouse) + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self._mouse_pressed = True + super(CreateNextPageOverlay, self).mousePressEvent(event) + + def mouseReleaseEvent(self, event): + if self._mouse_pressed: + self._mouse_pressed = False + if self.rect().contains(event.pos()): + self.clicked.emit() + + super(CreateNextPageOverlay, self).mouseReleaseEvent(event) + + def paintEvent(self, event): + painter = QtGui.QPainter() + painter.begin(self) + if self._anim_value == 0.0: + painter.end() + return + + painter.setClipRect(event.rect()) + painter.setRenderHints( + QtGui.QPainter.Antialiasing + | QtGui.QPainter.SmoothPixmapTransform + ) + + painter.setPen(QtCore.Qt.NoPen) + + rect = QtCore.QRect(self.rect()) + rect_width = rect.width() + rect_height = rect.height() + radius = rect_width * 0.2 + + x_offset = 0 + y_offset = 0 + if self._anim_value != 1.0: + x_offset += rect_width - (rect_width * self._anim_value) + + arrow_height = rect_height * 0.4 + arrow_half_height = arrow_height * 0.5 + arrow_x_start = x_offset + ((rect_width - arrow_half_height) * 0.5) + arrow_x_end = arrow_x_start + arrow_half_height + center_y = rect.center().y() + + painter.setBrush(self._bg_color) + painter.drawRoundedRect( + x_offset, y_offset, + rect_width + radius, rect_height, + radius, radius + ) + + src_arrow_path = QtGui.QPainterPath() + src_arrow_path.moveTo(arrow_x_start, center_y - arrow_half_height) + src_arrow_path.lineTo(arrow_x_end, center_y) + src_arrow_path.lineTo(arrow_x_start, center_y + arrow_half_height) + + arrow_stroker = QtGui.QPainterPathStroker() + arrow_stroker.setWidth(min(4, arrow_half_height * 0.2)) + arrow_path = arrow_stroker.createStroke(src_arrow_path) + + painter.fillPath(arrow_path, self._arrow_color) + + painter.end() diff --git a/client/ayon_core/tools/publisher/window.py b/client/ayon_core/tools/publisher/window.py new file mode 100644 index 0000000000..f4dadf7f67 --- /dev/null +++ b/client/ayon_core/tools/publisher/window.py @@ -0,0 +1,1178 @@ +import os +import json +import time +import collections +import copy +from qtpy import QtWidgets, QtCore, QtGui + +from ayon_core import ( + resources, + style +) +from ayon_core.tools.utils import ( + ErrorMessageBox, + PlaceholderLineEdit, + MessageOverlayObject, + PixmapLabel, +) +from ayon_core.tools.utils.lib import center_window + +from .constants 
import ResetKeySequence +from .publish_report_viewer import PublishReportViewerWidget +from .control import CardMessageTypes +from .control_qt import QtPublisherController +from .widgets import ( + OverviewWidget, + ReportPageWidget, + PublishFrame, + + PublisherTabsWidget, + + SaveBtn, + ResetBtn, + StopBtn, + ValidateBtn, + PublishBtn, + + HelpButton, + HelpDialog, + + CreateNextPageOverlay, +) + + +class PublisherWindow(QtWidgets.QDialog): + """Main window of publisher.""" + default_width = 1300 + default_height = 800 + footer_border = 8 + publish_footer_spacer = 2 + + def __init__(self, parent=None, controller=None, reset_on_show=None): + super(PublisherWindow, self).__init__(parent) + + self.setObjectName("PublishWindow") + + self.setWindowTitle("AYON publisher") + + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + self.setWindowIcon(icon) + + if reset_on_show is None: + reset_on_show = True + + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowMaximizeButtonHint + | QtCore.Qt.WindowMinimizeButtonHint + | QtCore.Qt.WindowCloseButtonHint + ) + + if controller is None: + controller = QtPublisherController() + + help_dialog = HelpDialog(controller, self) + + overlay_object = MessageOverlayObject(self) + + # Header + header_widget = QtWidgets.QWidget(self) + + icon_pixmap = QtGui.QPixmap(resources.get_ayon_icon_filepath()) + icon_label = PixmapLabel(icon_pixmap, header_widget) + icon_label.setObjectName("PublishContextLabel") + + context_label = QtWidgets.QLabel(header_widget) + context_label.setObjectName("PublishContextLabel") + + header_extra_widget = QtWidgets.QWidget(header_widget) + + help_btn = HelpButton(header_widget) + + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(15, 15, 0, 15) + header_layout.setSpacing(15) + header_layout.addWidget(icon_label, 0) + header_layout.addWidget(context_label, 0) + header_layout.addStretch(1) + header_layout.addWidget(header_extra_widget, 0) + header_layout.addWidget(help_btn, 0) + + # Tabs widget under header + tabs_widget = PublisherTabsWidget(self) + create_tab = tabs_widget.add_tab("Create", "create") + tabs_widget.add_tab("Publish", "publish") + tabs_widget.add_tab("Report", "report") + tabs_widget.add_tab("Details", "details") + + # Widget where the publish overlay and the widgets it should cover + # are stacked + under_publish_stack = QtWidgets.QWidget(self) + # Wrap widget that holds all widgets under the overlay + # - needed because the footer is also under the overlay and the top + # part is faked with a floating frame + under_publish_widget = QtWidgets.QWidget(under_publish_stack) + + # Footer + footer_widget = QtWidgets.QWidget(under_publish_widget) + footer_bottom_widget = QtWidgets.QWidget(footer_widget) + + comment_input = PlaceholderLineEdit(footer_widget) + comment_input.setObjectName("PublishCommentInput") + comment_input.setPlaceholderText( + "Attach a comment to your publish" + ) + + save_btn = SaveBtn(footer_widget) + reset_btn = ResetBtn(footer_widget) + stop_btn = StopBtn(footer_widget) + validate_btn = ValidateBtn(footer_widget) + publish_btn = PublishBtn(footer_widget) + + footer_bottom_layout = QtWidgets.QHBoxLayout(footer_bottom_widget) + footer_bottom_layout.setContentsMargins(0, 0, 0, 0) + footer_bottom_layout.addStretch(1) + footer_bottom_layout.addWidget(save_btn, 0) + footer_bottom_layout.addWidget(reset_btn, 0) + footer_bottom_layout.addWidget(stop_btn, 0) + footer_bottom_layout.addWidget(validate_btn, 0) +
footer_bottom_layout.addWidget(publish_btn, 0) + + # Spacer helps keep distance of Publish Frame when comment input + # is hidden - so when it is shrunk it does not overlay the pages + footer_spacer = QtWidgets.QWidget(footer_widget) + footer_spacer.setMinimumHeight(self.publish_footer_spacer) + footer_spacer.setMaximumHeight(self.publish_footer_spacer) + footer_spacer.setVisible(False) + + footer_layout = QtWidgets.QVBoxLayout(footer_widget) + footer_margins = footer_layout.contentsMargins() + + footer_layout.setContentsMargins( + footer_margins.left() + self.footer_border, + footer_margins.top(), + footer_margins.right() + self.footer_border, + footer_margins.bottom() + self.footer_border + ) + + footer_layout.addWidget(comment_input, 0) + footer_layout.addWidget(footer_spacer, 0) + footer_layout.addWidget(footer_bottom_widget, 0) + + # Content + # - wrap stacked widget under one more widget to be able to propagate + # margins (QStackedLayout can't have margins) + content_widget = QtWidgets.QWidget(under_publish_widget) + + content_stacked_widget = QtWidgets.QWidget(content_widget) + + content_layout = QtWidgets.QVBoxLayout(content_widget) + margins = content_layout.contentsMargins() + margins.setLeft(margins.left() * 2) + margins.setRight(margins.right() * 2) + margins.setTop(margins.top() * 2) + margins.setBottom(0) + content_layout.setContentsMargins(margins) + content_layout.addWidget(content_stacked_widget, 1) + + # Overview - create and attributes part + overview_widget = OverviewWidget( + controller, content_stacked_widget + ) + + report_widget = ReportPageWidget(controller, content_stacked_widget) + + # Details - Publish details + publish_details_widget = PublishReportViewerWidget( + content_stacked_widget + ) + + content_stacked_layout = QtWidgets.QStackedLayout( + content_stacked_widget + ) + content_stacked_layout.setContentsMargins(0, 0, 0, 0) + content_stacked_layout.setStackingMode( + QtWidgets.QStackedLayout.StackAll + ) + content_stacked_layout.addWidget(overview_widget) + content_stacked_layout.addWidget(report_widget) + content_stacked_layout.addWidget(publish_details_widget) + content_stacked_layout.setCurrentWidget(overview_widget) + + under_publish_layout = QtWidgets.QVBoxLayout(under_publish_widget) + under_publish_layout.setContentsMargins(0, 0, 0, 0) + under_publish_layout.setSpacing(0) + under_publish_layout.addWidget(content_widget, 1) + under_publish_layout.addWidget(footer_widget, 0) + + # Overlay which covers inputs during publishing + publish_overlay = QtWidgets.QFrame(under_publish_stack) + publish_overlay.setObjectName("OverlayFrame") + + under_publish_stack_layout = QtWidgets.QStackedLayout( + under_publish_stack + ) + under_publish_stack_layout.setContentsMargins(0, 0, 0, 0) + under_publish_stack_layout.setStackingMode( + QtWidgets.QStackedLayout.StackAll + ) + under_publish_stack_layout.addWidget(under_publish_widget) + under_publish_stack_layout.addWidget(publish_overlay) + under_publish_stack_layout.setCurrentWidget(under_publish_widget) + + # Add main frame to this window + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + main_layout.addWidget(header_widget, 0) + main_layout.addWidget(tabs_widget, 0) + main_layout.addWidget(under_publish_stack, 1) + + # Floating publish frame + publish_frame = PublishFrame(controller, self.footer_border, self) + + create_overlay_button = CreateNextPageOverlay(self) + + show_timer = QtCore.QTimer() + show_timer.setInterval(1) +
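The content and overlay wiring above leans on `QStackedLayout.StackAll`: all pages stay instantiated on top of each other and `setCurrentWidget` only decides which one is raised, which is how `publish_overlay` can cover the whole content area while living in the same layout. A minimal sketch of that pattern, with illustrative widget names (only the qtpy calls mirror the patch):

```python
from qtpy import QtWidgets

app = QtWidgets.QApplication([])

container = QtWidgets.QWidget()
content = QtWidgets.QLabel("content page", container)
overlay = QtWidgets.QLabel("blocking overlay", container)

stack = QtWidgets.QStackedLayout(container)
# 'StackAll' keeps every page visible at once; 'setCurrentWidget' only
# decides which page sits on top (and therefore receives input).
stack.setStackingMode(QtWidgets.QStackedLayout.StackAll)
stack.addWidget(content)
stack.addWidget(overlay)
stack.setCurrentWidget(content)  # content raised, overlay behind it

container.show()
app.exec_()
```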
show_timer.timeout.connect(self._on_show_timer) + + errors_dialog_message_timer = QtCore.QTimer() + errors_dialog_message_timer.setInterval(100) + errors_dialog_message_timer.timeout.connect( + self._on_errors_message_timeout + ) + + help_btn.clicked.connect(self._on_help_click) + tabs_widget.tab_changed.connect(self._on_tab_change) + overview_widget.active_changed.connect( + self._on_context_or_active_change + ) + overview_widget.instance_context_changed.connect( + self._on_context_or_active_change + ) + overview_widget.create_requested.connect( + self._on_create_request + ) + overview_widget.convert_requested.connect( + self._on_convert_requested + ) + + save_btn.clicked.connect(self._on_save_clicked) + reset_btn.clicked.connect(self._on_reset_clicked) + stop_btn.clicked.connect(self._on_stop_clicked) + validate_btn.clicked.connect(self._on_validate_clicked) + publish_btn.clicked.connect(self._on_publish_clicked) + + publish_frame.details_page_requested.connect(self._go_to_details_tab) + create_overlay_button.clicked.connect( + self._on_create_overlay_button_click + ) + + controller.event_system.add_callback( + "instances.refresh.finished", self._on_instances_refresh + ) + controller.event_system.add_callback( + "publish.reset.finished", self._on_publish_reset + ) + controller.event_system.add_callback( + "controller.reset.finished", self._on_controller_reset + ) + controller.event_system.add_callback( + "publish.process.started", self._on_publish_start + ) + controller.event_system.add_callback( + "publish.has_validated.changed", self._on_publish_validated_change + ) + controller.event_system.add_callback( + "publish.finished.changed", self._on_publish_finished_change + ) + controller.event_system.add_callback( + "publish.process.stopped", self._on_publish_stop + ) + controller.event_system.add_callback( + "show.card.message", self._on_overlay_message + ) + controller.event_system.add_callback( + "instances.collection.failed", self._on_creator_error + ) + controller.event_system.add_callback( + "instances.save.failed", self._on_creator_error + ) + controller.event_system.add_callback( + "instances.remove.failed", self._on_creator_error + ) + controller.event_system.add_callback( + "instances.create.failed", self._on_creator_error + ) + controller.event_system.add_callback( + "convertors.convert.failed", self._on_convertor_error + ) + controller.event_system.add_callback( + "convertors.find.failed", self._on_convertor_error + ) + controller.event_system.add_callback( + "publish.action.failed", self._on_action_error + ) + controller.event_system.add_callback( + "export_report.request", self._export_report + ) + controller.event_system.add_callback( + "copy_report.request", self._copy_report + ) + + # Store extra header widget for TrayPublisher + # - can be used to add additional widgets to header between context + # label and help button + self._help_dialog = help_dialog + self._help_btn = help_btn + + self._header_extra_widget = header_extra_widget + + self._tabs_widget = tabs_widget + self._create_tab = create_tab + + self._under_publish_stack_layout = under_publish_stack_layout + + self._under_publish_widget = under_publish_widget + self._publish_overlay = publish_overlay + self._publish_frame = publish_frame + + self._content_widget = content_widget + self._content_stacked_layout = content_stacked_layout + + self._overview_widget = overview_widget + self._report_widget = report_widget + self._publish_details_widget = publish_details_widget + + self._context_label = context_label + + 
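The long block of `add_callback` registrations above is the whole UI-to-controller contract: the window never polls the controller, it only reacts to published topics. A rough sketch of the `add_callback`/`emit` mechanics this assumes (the real `QueuedEventSystem` from `ayon_core.lib.events` also queues events and wraps them in event objects; the class below is only illustrative):

```python
import collections


class TinyEventSystem:
    """Illustrative stand-in for the controller's event system."""

    def __init__(self):
        self._callbacks = collections.defaultdict(list)

    def add_callback(self, topic, callback):
        self._callbacks[topic].append(callback)

    def emit(self, topic, data, source):
        # Every callback registered for the topic receives the payload.
        event = {"topic": topic, "source": source, "data": data or {}}
        for callback in list(self._callbacks[topic]):
            callback(event)


events = TinyEventSystem()
events.add_callback(
    "publish.process.started", lambda event: print("started:", event)
)
events.emit("publish.process.started", {}, "controller")
```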
self._comment_input = comment_input + self._footer_spacer = footer_spacer + + self._save_btn = save_btn + self._reset_btn = reset_btn + self._stop_btn = stop_btn + self._validate_btn = validate_btn + self._publish_btn = publish_btn + + self._overlay_object = overlay_object + + self._controller = controller + + self._first_show = True + self._first_reset = True + # This is a little bit confusing but 'reset_on_first_show' is too long + # for init + self._reset_on_first_show = reset_on_show + self._reset_on_show = True + self._publish_frame_visible = None + self._tab_on_reset = None + + self._error_messages_to_show = collections.deque() + self._errors_dialog_message_timer = errors_dialog_message_timer + + self._set_publish_visibility(False) + + self._create_overlay_button = create_overlay_button + self._app_event_listener_installed = False + + self._show_timer = show_timer + self._show_counter = 0 + self._window_is_visible = False + + @property + def controller(self): + return self._controller + + def show_and_publish(self, comment=None): + """Show the window and start publishing. + + The method resets the controller and then starts the publishing. + + Todos: + Move validations from '_on_publish_clicked' and change of + 'comment' value in controller to controller so it can be + simplified. + + Args: + comment (Optional[str]): Comment to be set to publish. + If set to 'None' the comment is not changed at all. + """ + + self._reset_on_show = False + self._reset_on_first_show = False + + if comment is not None: + self.set_comment(comment) + self.make_sure_is_visible() + # Reset controller + self._controller.reset() + # Fake publish click to trigger save validation and propagate + # comment to controller + self._on_publish_clicked() + + def set_comment(self, comment): + """Change comment text. + + Todos: + Be able to set the comment via controller. + + Args: + comment (str): Comment text. + """ + + self._comment_input.setText(comment) + + def make_sure_is_visible(self): + if self._window_is_visible: + self.setWindowState(QtCore.Qt.WindowActive) + + else: + self.show() + + def showEvent(self, event): + self._window_is_visible = True + super(PublisherWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() + + self._show_timer.start() + + def resizeEvent(self, event): + super(PublisherWindow, self).resizeEvent(event) + self._update_publish_frame_rect() + self._update_create_overlay_size() + + def closeEvent(self, event): + self._window_is_visible = False + self._uninstall_app_event_listener() + # TODO capture changes and ask the user if they want to save changes + # on close + if not self._controller.host_context_has_changed: + self._save_changes(False) + self._comment_input.setText("") # clear comment + self._reset_on_show = True + self._controller.clear_thumbnail_temp_dir_path() + # Trigger custom event that should be captured only in UI + # - backend (controller) must not be dependent on this event topic!!!
+ self._controller.event_system.emit("main.window.closed", {}, "window") + super(PublisherWindow, self).closeEvent(event) + + def leaveEvent(self, event): + super(PublisherWindow, self).leaveEvent(event) + self._update_create_overlay_visibility() + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.MouseMove: + self._update_create_overlay_visibility(event.globalPos()) + return super(PublisherWindow, self).eventFilter(obj, event) + + def _install_app_event_listener(self): + if self._app_event_listener_installed: + return + self._app_event_listener_installed = True + app = QtWidgets.QApplication.instance() + app.installEventFilter(self) + + def _uninstall_app_event_listener(self): + if not self._app_event_listener_installed: + return + self._app_event_listener_installed = False + app = QtWidgets.QApplication.instance() + app.removeEventFilter(self) + + def keyPressEvent(self, event): + if event.key() in { + # Ignore escape key so it does not close the window + QtCore.Qt.Key_Escape, + # Ignore enter keyboard event which by default triggers + # first available button in QDialog + QtCore.Qt.Key_Enter, + QtCore.Qt.Key_Return, + }: + event.accept() + return + + save_match = event.matches(QtGui.QKeySequence.Save) + # PySide2 and PySide6 support + if not isinstance(save_match, bool): + save_match = save_match == QtGui.QKeySequence.ExactMatch + + if save_match: + if not self._controller.publish_has_started: + self._save_changes(True) + event.accept() + return + + # PySide6 Support + if hasattr(event, "keyCombination"): + reset_match_result = ResetKeySequence.matches( + QtGui.QKeySequence(event.keyCombination()) + ) + else: + reset_match_result = ResetKeySequence.matches( + QtGui.QKeySequence(event.modifiers() | event.key()) + ) + + if reset_match_result == QtGui.QKeySequence.ExactMatch: + if not self.controller.publish_is_running: + self.reset() + event.accept() + return + + super(PublisherWindow, self).keyPressEvent(event) + + def _on_overlay_message(self, event): + self._overlay_object.add_message( + event["message"], + event.get("message_type") + ) + + def _on_first_show(self): + self.resize(self.default_width, self.default_height) + self.setStyleSheet(style.load_stylesheet()) + center_window(self) + self._reset_on_show = self._reset_on_first_show + + def _on_show_timer(self): + # Add 1 to the counter until it hits 3 + if self._show_counter < 3: + self._show_counter += 1 + return + + # Stop the timer + self._show_timer.stop() + # Reset counter when done for next show event + self._show_counter = 0 + + self._update_create_overlay_size() + self._update_create_overlay_visibility() + if self._is_on_create_tab(): + self._install_app_event_listener() + + # Reset if requested + if self._reset_on_show: + self._reset_on_show = False + self.reset() + + def _checks_before_save(self, explicit_save): + """Saving changes may trigger some issues. + + Check if the context has changed and ask the user to confirm that + the save should really happen. A dialog can be shown during this + method. + + Args: + explicit_save (bool): Method was called because the user + explicitly asked for a save. Value affects the shown message. + + Returns: + bool: Save can happen. + """ + + if not self._controller.host_context_has_changed: + return True + + title = "Host context changed" + if explicit_save: + message = ( + "Context has changed since Publisher window was refreshed last" + " time.\n\nAre you sure you want to save changes?"
+ ) + else: + message = ( + "Your action requires saving changes, but the context has changed" + " since Publisher window was refreshed last time.\n\nAre you" + " sure you want to continue and save changes?" + ) + + result = QtWidgets.QMessageBox.question( + self, + title, + message, + QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Cancel + ) + return result == QtWidgets.QMessageBox.Save + + def _save_changes(self, explicit_save): + """Save changes of the Creation part. + + All possible triggers of saving changes were moved to the main window + (here), so possible save issues can be handled in one place. Checks + are done so the user doesn't accidentally save changes to a different + file or under a different context. + Moving the responsibility here makes it possible to show a dialog and + wait for the user's response without interrupting the action they + wanted to perform. + + Args: + explicit_save (bool): Method was called because the user + explicitly asked for a save. Value affects the shown message. + + Returns: + bool: Save happened successfully. + """ + + if not self._checks_before_save(explicit_save): + return False + return self._controller.save_changes() + + def reset(self): + self._controller.reset() + + def set_context_label(self, label): + self._context_label.setText(label) + + def set_tab_on_reset(self, tab): + """Define tab that will be selected on window show. + + This is a single-use value; when the publisher window is shown, the + value is unset and not used on the next show. + + Args: + tab (Union[int, Literal["create", "publish", "details", "report"]]): + Index or name of the tab which will be selected on show + (after reset). + """ + + self._tab_on_reset = tab + + def _update_publish_details_widget(self, force=False): + if not force and not self._is_on_details_tab(): + return + + report_data = self.controller.get_publish_report() + self._publish_details_widget.set_report_data(report_data) + + def _on_help_click(self): + if self._help_dialog.isVisible(): + return + + self._help_dialog.show() + + window = self.window() + if hasattr(QtWidgets.QApplication, "desktop"): + desktop = QtWidgets.QApplication.desktop() + screen_idx = desktop.screenNumber(window) + screen_geo = desktop.screenGeometry(screen_idx) + else: + screen = window.screen() + screen_geo = screen.geometry() + + window_geo = window.geometry() + dialog_x = window_geo.x() + window_geo.width() + dialog_right = (dialog_x + self._help_dialog.width()) - 1 + diff = dialog_right - screen_geo.right() + if diff > 0: + dialog_x -= diff + + self._help_dialog.setGeometry( + dialog_x, window_geo.y(), + self._help_dialog.width(), self._help_dialog.height() + ) + + def _on_create_overlay_button_click(self): + self._create_overlay_button.set_under_mouse(False) + self._go_to_publish_tab() + + def _on_tab_change(self, old_tab, new_tab): + if old_tab == "details": + self._publish_details_widget.close_details_popup() + + if new_tab == "details": + self._content_stacked_layout.setCurrentWidget( + self._publish_details_widget + ) + self._update_publish_details_widget() + + elif new_tab == "report": + self._content_stacked_layout.setCurrentWidget( + self._report_widget + ) + + old_on_overview = old_tab in ("create", "publish") + if new_tab in ("create", "publish"): + self._content_stacked_layout.setCurrentWidget( + self._overview_widget + ) + # Overview state is animated only when switching between + # 'create' and 'publish' tab + self._overview_widget.set_state(new_tab, old_on_overview) + + elif old_on_overview: + # Make sure animation finished if previous tab was 'create' + # or 'publish'.
That is just for safety to avoid stuck animation + # when user clicks too fast. + self._overview_widget.make_sure_animation_is_finished() + + is_create = new_tab == "create" + if is_create: + self._install_app_event_listener() + else: + self._uninstall_app_event_listener() + self._create_overlay_button.set_visible(is_create) + + def _on_context_or_active_change(self): + self._validate_create_instances() + + def _on_create_request(self): + self._go_to_create_tab() + + def _on_convert_requested(self): + if not self._save_changes(False): + return + convertor_identifiers = ( + self._overview_widget.get_selected_legacy_convertors() + ) + self._controller.trigger_convertor_items(convertor_identifiers) + + def _set_current_tab(self, identifier): + self._tabs_widget.set_current_tab(identifier) + + def set_current_tab(self, tab): + if tab == "create": + self._go_to_create_tab() + elif tab == "publish": + self._go_to_publish_tab() + elif tab == "report": + self._go_to_report_tab() + elif tab == "details": + self._go_to_details_tab() + + if not self._window_is_visible: + self.set_tab_on_reset(tab) + + def _is_current_tab(self, identifier): + return self._tabs_widget.is_current_tab(identifier) + + def _go_to_create_tab(self): + if self._create_tab.isEnabled(): + self._set_current_tab("create") + return + + self._overlay_object.add_message( + "Can't switch to Create tab because publishing is paused.", + message_type="info" + ) + + def _go_to_publish_tab(self): + self._set_current_tab("publish") + + def _go_to_report_tab(self): + self._set_current_tab("report") + + def _go_to_details_tab(self): + self._set_current_tab("details") + + def _is_on_create_tab(self): + return self._is_current_tab("create") + + def _is_on_publish_tab(self): + return self._is_current_tab("publish") + + def _is_on_report_tab(self): + return self._is_current_tab("report") + + def _is_on_details_tab(self): + return self._is_current_tab("details") + + def _set_publish_overlay_visibility(self, visible): + if visible: + widget = self._publish_overlay + else: + widget = self._under_publish_widget + self._under_publish_stack_layout.setCurrentWidget(widget) + + def _set_publish_visibility(self, visible): + if visible is self._publish_frame_visible: + return + self._publish_frame_visible = visible + self._publish_frame.setVisible(visible) + self._update_publish_frame_rect() + + def _on_save_clicked(self): + self._save_changes(True) + + def _on_reset_clicked(self): + self.reset() + + def _on_stop_clicked(self): + self._controller.stop_publish() + + def _set_publish_comment(self): + self._controller.set_comment(self._comment_input.text()) + + def _on_validate_clicked(self): + if self._save_changes(False): + self._set_publish_comment() + self._controller.validate() + + def _on_publish_clicked(self): + if self._save_changes(False): + self._set_publish_comment() + self._controller.publish() + + def _set_footer_enabled(self, enabled): + self._save_btn.setEnabled(True) + self._reset_btn.setEnabled(True) + if enabled: + self._stop_btn.setEnabled(False) + self._validate_btn.setEnabled(True) + self._publish_btn.setEnabled(True) + else: + self._stop_btn.setEnabled(enabled) + self._validate_btn.setEnabled(enabled) + self._publish_btn.setEnabled(enabled) + + def _on_publish_reset(self): + self._create_tab.setEnabled(True) + self._set_comment_input_visibility(True) + self._set_publish_overlay_visibility(False) + self._set_publish_visibility(False) + self._set_footer_enabled(False) + self._update_publish_details_widget() + + def
_on_controller_reset(self): + self._first_reset, first_reset = False, self._first_reset + if self._tab_on_reset is not None: + self._tab_on_reset, new_tab = None, self._tab_on_reset + self._set_current_tab(new_tab) + return + + # On first reset go to the 'create' tab + # - this happens only on first show + if first_reset: + self._go_to_create_tab() + + elif self._is_on_report_tab(): + # Go to 'Publish' tab if currently on the 'Report' tab + # - this can happen when publishing started and was reset; + # at that moment it doesn't make sense to stay on publish + # specific tabs. + self._go_to_publish_tab() + + def _on_publish_start(self): + self._create_tab.setEnabled(False) + + self._reset_btn.setEnabled(False) + self._stop_btn.setEnabled(True) + self._validate_btn.setEnabled(False) + self._publish_btn.setEnabled(False) + + self._set_comment_input_visibility(False) + self._set_publish_visibility(True) + self._set_publish_overlay_visibility(True) + + self._publish_details_widget.close_details_popup() + + if self._is_on_create_tab(): + self._go_to_publish_tab() + + def _on_publish_validated_change(self, event): + if event["value"]: + self._validate_btn.setEnabled(False) + + def _on_publish_finished_change(self, event): + if event["value"]: + # Successful publish, remove comment from UI + self._comment_input.setText("") + + def _on_publish_stop(self): + self._set_publish_overlay_visibility(False) + self._reset_btn.setEnabled(True) + self._stop_btn.setEnabled(False) + publish_has_crashed = self._controller.publish_has_crashed + validate_enabled = not publish_has_crashed + publish_enabled = not publish_has_crashed + if self._is_on_publish_tab(): + self._go_to_report_tab() + + if validate_enabled: + validate_enabled = not self._controller.publish_has_validated + if publish_enabled: + if ( + self._controller.publish_has_validated + and self._controller.publish_has_validation_errors + ): + publish_enabled = False + + else: + publish_enabled = not self._controller.publish_has_finished + + self._validate_btn.setEnabled(validate_enabled) + self._publish_btn.setEnabled(publish_enabled) + + if not publish_enabled: + self._publish_frame.set_shrunk_state(True) + + self._update_publish_details_widget() + + def _validate_create_instances(self): + if not self._controller.host_is_valid: + self._set_footer_enabled(True) + return + + all_valid = None + for instance in self._controller.instances.values(): + if not instance["active"]: + continue + + if not instance.has_valid_context: + all_valid = False + break + + if all_valid is None: + all_valid = True + + self._set_footer_enabled(bool(all_valid)) + + def _on_instances_refresh(self): + self._validate_create_instances() + + context_title = self.controller.get_context_title() + self.set_context_label(context_title) + self._update_publish_details_widget() + + def _set_comment_input_visibility(self, visible): + self._comment_input.setVisible(visible) + self._footer_spacer.setVisible(not visible) + + def _update_publish_frame_rect(self): + if not self._publish_frame_visible: + return + + window_size = self.size() + size_hint = self._publish_frame.minimumSizeHint() + + width = window_size.width() + height = size_hint.height() + + self._publish_frame.resize(width, height) + + self._publish_frame.move( + 0, window_size.height() - height + ) + + def add_error_message_dialog(self, title, failed_info, message_start=None): + self._error_messages_to_show.append( + (title,
failed_info, message_start) + ) + self._errors_dialog_message_timer.start() + + def _on_errors_message_timeout(self): + if not self._error_messages_to_show: + self._errors_dialog_message_timer.stop() + return + + item = self._error_messages_to_show.popleft() + title, failed_info, message_start = item + dialog = ErrorsMessageBox( + title, failed_info, message_start, self + ) + dialog.exec_() + dialog.deleteLater() + + def _on_creator_error(self, event): + new_failed_info = [] + for item in event["failed_info"]: + new_item = copy.deepcopy(item) + new_item["label"] = new_item.pop("creator_label") + new_item["identifier"] = new_item.pop("creator_identifier") + new_failed_info.append(new_item) + self.add_error_message_dialog(event["title"], new_failed_info, "Creator:") + + def _on_convertor_error(self, event): + new_failed_info = [] + for item in event["failed_info"]: + new_item = copy.deepcopy(item) + new_item["identifier"] = new_item.pop("convertor_identifier") + new_failed_info.append(new_item) + self.add_error_message_dialog( + event["title"], new_failed_info, "Convertor:" + ) + + def _on_action_error(self, event): + self.add_error_message_dialog( + event["title"], + [{ + "message": event["message"], + "traceback": event["traceback"], + "label": event["label"], + "identifier": event["identifier"] + }], + "Action:" + ) + + def _update_create_overlay_size(self): + metrics = self._create_overlay_button.fontMetrics() + height = int(metrics.height()) + width = int(height * 0.7) + end_pos_x = self.width() + start_pos_x = end_pos_x - width + + center = self._content_widget.parent().mapTo( + self, + self._content_widget.rect().center() + ) + pos_y = center.y() - (height * 0.5) + + self._create_overlay_button.setGeometry( + start_pos_x, pos_y, + width, height + ) + + def _update_create_overlay_visibility(self, global_pos=None): + if global_pos is None: + global_pos = QtGui.QCursor.pos() + + under_mouse = False + my_pos = self.mapFromGlobal(global_pos) + if self.rect().contains(my_pos): + widget_geo = self._overview_widget.get_subset_views_geo() + widget_x = widget_geo.left() + (widget_geo.width() * 0.5) + under_mouse = widget_x < global_pos.x() + self._create_overlay_button.set_under_mouse(under_mouse) + + def _copy_report(self): + logs = self._controller.get_publish_report() + logs_string = json.dumps(logs, indent=4) + + mime_data = QtCore.QMimeData() + mime_data.setText(logs_string) + QtWidgets.QApplication.instance().clipboard().setMimeData( + mime_data + ) + self._controller.emit_card_message( + "Report added to clipboard", + CardMessageTypes.info) + + def _export_report(self): + default_filename = "publish-report-{}".format( + time.strftime("%y%m%d-%H-%M") + ) + default_filepath = os.path.join( + os.path.expanduser("~"), + default_filename + ) + new_filepath, ext = QtWidgets.QFileDialog.getSaveFileName( + self, "Save report", default_filepath, ".json" + ) + if not ext or not new_filepath: + return + + logs = self._controller.get_publish_report() + full_path = new_filepath + ext + dir_path = os.path.dirname(full_path) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(full_path, "w") as file_stream: + json.dump(logs, file_stream) + + self._controller.emit_card_message( + "Report saved", + CardMessageTypes.info) + + +class ErrorsMessageBox(ErrorMessageBox): + def __init__(self, error_title, failed_info, message_start, parent): + self._failed_info = failed_info + self._message_start = message_start + self._info_with_id = [ + # Id must be string when used in tab widget + 
{"id": str(idx), "info": info} + for idx, info in enumerate(failed_info) + ] + self._widgets_by_id = {} + self._tabs_widget = None + self._stack_layout = None + + super(ErrorsMessageBox, self).__init__(error_title, parent) + + layout = self.layout() + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + + footer_layout = self._footer_widget.layout() + footer_layout.setContentsMargins(5, 5, 5, 5) + + def _create_top_widget(self, parent_widget): + return None + + def _get_report_data(self): + output = [] + for info in self._failed_info: + item_label = info.get("label") + item_identifier = info["identifier"] + if item_label: + report_message = "{} ({})".format( + item_label, item_identifier) + else: + report_message = "{}".format(item_identifier) + + if self._message_start: + report_message = "{} {}".format( + self._message_start, report_message + ) + + report_message += "\n\nError: {}".format(info["message"]) + formatted_traceback = info.get("traceback") + if formatted_traceback: + report_message += "\n\n{}".format(formatted_traceback) + output.append(report_message) + return output + + def _create_content(self, content_layout): + tabs_widget = PublisherTabsWidget(self) + + stack_widget = QtWidgets.QFrame(self._content_widget) + stack_layout = QtWidgets.QStackedLayout(stack_widget) + + first = True + for item in self._info_with_id: + item_id = item["id"] + info = item["info"] + message = info["message"] + formatted_traceback = info.get("traceback") + item_label = info.get("label") + if not item_label: + item_label = info["identifier"] + + msg_widget = QtWidgets.QWidget(stack_widget) + msg_layout = QtWidgets.QVBoxLayout(msg_widget) + + exc_msg_template = "{}" + message_label_widget = QtWidgets.QLabel(msg_widget) + message_label_widget.setText( + exc_msg_template.format(self.convert_text_for_html(message)) + ) + msg_layout.addWidget(message_label_widget, 0) + + if formatted_traceback: + line_widget = self._create_line(msg_widget) + tb_widget = self._create_traceback_widget(formatted_traceback) + msg_layout.addWidget(line_widget, 0) + msg_layout.addWidget(tb_widget, 0) + + msg_layout.addStretch(1) + + tabs_widget.add_tab(item_label, item_id) + stack_layout.addWidget(msg_widget) + if first: + first = False + stack_layout.setCurrentWidget(msg_widget) + + self._widgets_by_id[item_id] = msg_widget + + content_layout.addWidget(tabs_widget, 0) + content_layout.addWidget(stack_widget, 1) + + tabs_widget.tab_changed.connect(self._on_tab_change) + + self._tabs_widget = tabs_widget + self._stack_layout = stack_layout + + def _on_tab_change(self, old_identifier, identifier): + widget = self._widgets_by_id[identifier] + self._stack_layout.setCurrentWidget(widget) diff --git a/openpype/tools/ayon_push_to_project/__init__.py b/client/ayon_core/tools/push_to_project/__init__.py similarity index 100% rename from openpype/tools/ayon_push_to_project/__init__.py rename to client/ayon_core/tools/push_to_project/__init__.py diff --git a/client/ayon_core/tools/push_to_project/control.py b/client/ayon_core/tools/push_to_project/control.py new file mode 100644 index 0000000000..1336721e5a --- /dev/null +++ b/client/ayon_core/tools/push_to_project/control.py @@ -0,0 +1,344 @@ +import threading + +from ayon_core.client import ( + get_asset_by_id, + get_subset_by_id, + get_version_by_id, + get_representations, +) +from ayon_core.settings import get_project_settings +from ayon_core.lib import prepare_template_data +from ayon_core.lib.events import QueuedEventSystem +from ayon_core.pipeline.create import 
get_subset_name_template +from ayon_core.tools.ayon_utils.models import ProjectsModel, HierarchyModel + +from .models import ( + PushToProjectSelectionModel, + UserPublishValuesModel, + IntegrateModel, +) + + +class PushToContextController: + def __init__(self, project_name=None, version_id=None): + self._event_system = self._create_event_system() + + self._projects_model = ProjectsModel(self) + self._hierarchy_model = HierarchyModel(self) + self._integrate_model = IntegrateModel(self) + + self._selection_model = PushToProjectSelectionModel(self) + self._user_values = UserPublishValuesModel(self) + + self._src_project_name = None + self._src_version_id = None + self._src_asset_doc = None + self._src_subset_doc = None + self._src_version_doc = None + self._src_label = None + + self._submission_enabled = False + self._process_thread = None + self._process_item_id = None + + self.set_source(project_name, version_id) + + # Events system + def emit_event(self, topic, data=None, source=None): + """Use implemented event system to trigger event.""" + + if data is None: + data = {} + self._event_system.emit(topic, data, source) + + def register_event_callback(self, topic, callback): + self._event_system.add_callback(topic, callback) + + def set_source(self, project_name, version_id): + """Set source project and version. + + Args: + project_name (Union[str, None]): Source project name. + version_id (Union[str, None]): Source version id. + """ + + if ( + project_name == self._src_project_name + and version_id == self._src_version_id + ): + return + + self._src_project_name = project_name + self._src_version_id = version_id + self._src_label = None + asset_doc = None + subset_doc = None + version_doc = None + if project_name and version_id: + version_doc = get_version_by_id(project_name, version_id) + + if version_doc: + subset_doc = get_subset_by_id(project_name, version_doc["parent"]) + + if subset_doc: + asset_doc = get_asset_by_id(project_name, subset_doc["parent"]) + + self._src_asset_doc = asset_doc + self._src_subset_doc = subset_doc + self._src_version_doc = version_doc + if asset_doc: + self._user_values.set_new_folder_name(asset_doc["name"]) + variant = self._get_src_variant() + if variant: + self._user_values.set_variant(variant) + + comment = version_doc["data"].get("comment") + if comment: + self._user_values.set_comment(comment) + + self._emit_event( + "source.changed", + { + "project_name": project_name, + "version_id": version_id + } + ) + + def get_source_label(self): + """Get source label. + + Returns: + str: Label describing source project and version as path. 
+ """ + + if self._src_label is None: + self._src_label = self._prepare_source_label() + return self._src_label + + def get_project_items(self, sender=None): + return self._projects_model.get_project_items(sender) + + def get_folder_items(self, project_name, sender=None): + return self._hierarchy_model.get_folder_items(project_name, sender) + + def get_task_items(self, project_name, folder_id, sender=None): + return self._hierarchy_model.get_task_items( + project_name, folder_id, sender + ) + + def get_user_values(self): + return self._user_values.get_data() + + def set_user_value_folder_name(self, folder_name): + self._user_values.set_new_folder_name(folder_name) + self._invalidate() + + def set_user_value_variant(self, variant): + self._user_values.set_variant(variant) + self._invalidate() + + def set_user_value_comment(self, comment): + self._user_values.set_comment(comment) + self._invalidate() + + def set_selected_project(self, project_name): + self._selection_model.set_selected_project(project_name) + self._invalidate() + + def set_selected_folder(self, folder_id): + self._selection_model.set_selected_folder(folder_id) + self._invalidate() + + def set_selected_task(self, task_id, task_name): + self._selection_model.set_selected_task(task_id, task_name) + + def get_process_item_status(self, item_id): + return self._integrate_model.get_item_status(item_id) + + # Processing methods + def submit(self, wait=True): + if not self._submission_enabled: + return + + if self._process_thread is not None: + return + + item_id = self._integrate_model.create_process_item( + self._src_project_name, + self._src_version_id, + self._selection_model.get_selected_project_name(), + self._selection_model.get_selected_folder_id(), + self._selection_model.get_selected_task_name(), + self._user_values.variant, + comment=self._user_values.comment, + new_folder_name=self._user_values.new_folder_name, + dst_version=1 + ) + + self._process_item_id = item_id + self._emit_event("submit.started") + if wait: + self._submit_callback() + self._process_item_id = None + return item_id + + thread = threading.Thread(target=self._submit_callback) + self._process_thread = thread + thread.start() + return item_id + + def wait_for_process_thread(self): + if self._process_thread is None: + return + self._process_thread.join() + self._process_thread = None + + def _prepare_source_label(self): + if not self._src_project_name or not self._src_version_id: + return "Source is not defined" + + asset_doc = self._src_asset_doc + if not asset_doc: + return "Source is invalid" + + folder_path_parts = list(asset_doc["data"]["parents"]) + folder_path_parts.append(asset_doc["name"]) + folder_path = "/".join(folder_path_parts) + subset_doc = self._src_subset_doc + version_doc = self._src_version_doc + return "Source: {}/{}/{}/v{:0>3}".format( + self._src_project_name, + folder_path, + subset_doc["name"], + version_doc["name"] + ) + + def _get_task_info_from_repre_docs(self, asset_doc, repre_docs): + asset_tasks = asset_doc["data"].get("tasks") or {} + found_comb = [] + for repre_doc in repre_docs: + context = repre_doc["context"] + task_info = context.get("task") + if task_info is None: + continue + + task_name = None + task_type = None + if isinstance(task_info, str): + task_name = task_info + asset_task_info = asset_tasks.get(task_info) or {} + task_type = asset_task_info.get("type") + + elif isinstance(task_info, dict): + task_name = task_info.get("name") + task_type = task_info.get("type") + + if task_name and task_type: + return 
task_name, task_type + + if task_name: + found_comb.append((task_name, task_type)) + + for task_name, task_type in found_comb: + return task_name, task_type + return None, None + + def _get_src_variant(self): + project_name = self._src_project_name + version_doc = self._src_version_doc + asset_doc = self._src_asset_doc + repre_docs = get_representations( + project_name, version_ids=[version_doc["_id"]] + ) + task_name, task_type = self._get_task_info_from_repre_docs( + asset_doc, repre_docs + ) + + project_settings = get_project_settings(project_name) + subset_doc = self._src_subset_doc + family = subset_doc["data"].get("family") + if not family: + family = subset_doc["data"]["families"][0] + template = get_subset_name_template( + self._src_project_name, + family, + task_name, + task_type, + None, + project_settings=project_settings + ) + template_low = template.lower() + variant_placeholder = "{variant}" + if ( + variant_placeholder not in template_low + or (not task_name and "{task" in template_low) + ): + return "" + + idx = template_low.index(variant_placeholder) + template_s = template[:idx] + template_e = template[idx + len(variant_placeholder):] + fill_data = prepare_template_data({ + "family": family, + "task": task_name + }) + try: + subset_s = template_s.format(**fill_data) + subset_e = template_e.format(**fill_data) + except Exception as exc: + print("Failed format", exc) + return "" + + subset_name = self._src_subset_doc["name"] + if ( + (subset_s and not subset_name.startswith(subset_s)) + or (subset_e and not subset_name.endswith(subset_e)) + ): + return "" + + if subset_s: + subset_name = subset_name[len(subset_s):] + if subset_e: + subset_name = subset_name[:-len(subset_e)] + return subset_name + + def _check_submit_validations(self): + if not self._user_values.is_valid: + return False + + if not self._selection_model.get_selected_project_name(): + return False + + if ( + not self._user_values.new_folder_name + and not self._selection_model.get_selected_folder_id() + ): + return False + return True + + def _invalidate(self): + submission_enabled = self._check_submit_validations() + if submission_enabled == self._submission_enabled: + return + self._submission_enabled = submission_enabled + self._emit_event( + "submission.enabled.changed", + {"enabled": submission_enabled} + ) + + def _submit_callback(self): + process_item_id = self._process_item_id + if process_item_id is None: + return + self._integrate_model.integrate_item(process_item_id) + self._emit_event("submit.finished", {}) + if process_item_id == self._process_item_id: + self._process_item_id = None + + def _emit_event(self, topic, data=None): + if data is None: + data = {} + self.emit_event(topic, data, "controller") + + def _create_event_system(self): + return QueuedEventSystem() diff --git a/openpype/tools/ayon_push_to_project/main.py b/client/ayon_core/tools/push_to_project/main.py similarity index 78% rename from openpype/tools/ayon_push_to_project/main.py rename to client/ayon_core/tools/push_to_project/main.py index e36940e488..a6ff38c16f 100644 --- a/openpype/tools/ayon_push_to_project/main.py +++ b/client/ayon_core/tools/push_to_project/main.py @@ -1,11 +1,11 @@ import click -from openpype.tools.utils import get_openpype_qt_app -from openpype.tools.ayon_push_to_project.ui import PushToContextSelectWindow +from ayon_core.tools.utils import get_ayon_qt_app +from ayon_core.tools.push_to_project.ui import PushToContextSelectWindow def main_show(project_name, version_id): - app = get_openpype_qt_app() + app =
get_ayon_qt_app() window = PushToContextSelectWindow() window.show() diff --git a/openpype/tools/ayon_push_to_project/models/__init__.py b/client/ayon_core/tools/push_to_project/models/__init__.py similarity index 100% rename from openpype/tools/ayon_push_to_project/models/__init__.py rename to client/ayon_core/tools/push_to_project/models/__init__.py diff --git a/client/ayon_core/tools/push_to_project/models/integrate.py b/client/ayon_core/tools/push_to_project/models/integrate.py new file mode 100644 index 0000000000..175716cf10 --- /dev/null +++ b/client/ayon_core/tools/push_to_project/models/integrate.py @@ -0,0 +1,1214 @@ +import os +import re +import copy +import socket +import itertools +import datetime +import sys +import traceback +import uuid + +from bson.objectid import ObjectId + +from ayon_core.client import ( + get_project, + get_assets, + get_asset_by_id, + get_subset_by_id, + get_subset_by_name, + get_version_by_id, + get_last_version_by_subset_id, + get_version_by_name, + get_representations, +) +from ayon_core.client.operations import ( + OperationsSession, + new_asset_document, + new_subset_document, + new_version_doc, + new_representation_doc, + prepare_version_update_data, + prepare_representation_update_data, +) +from ayon_core.addon import AddonsManager +from ayon_core.lib import ( + StringTemplate, + get_ayon_username, + get_formatted_current_time, + source_hash, +) + +from ayon_core.lib.file_transaction import FileTransaction +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import Anatomy +from ayon_core.pipeline.version_start import get_versioning_start +from ayon_core.pipeline.template_data import get_template_data +from ayon_core.pipeline.publish import get_publish_template_name +from ayon_core.pipeline.create import get_subset_name + +UNKNOWN = object() + + +class PushToProjectError(Exception): + pass + + +class FileItem(object): + def __init__(self, path): + self.path = path + + @property + def is_valid_file(self): + return os.path.exists(self.path) and os.path.isfile(self.path) + + +class SourceFile(FileItem): + def __init__(self, path, frame=None, udim=None): + super(SourceFile, self).__init__(path) + self.frame = frame + self.udim = udim + + def __repr__(self): + subparts = [self.__class__.__name__] + if self.frame is not None: + subparts.append("frame: {}".format(self.frame)) + if self.udim is not None: + subparts.append("UDIM: {}".format(self.udim)) + + return "<{}> '{}'".format(" - ".join(subparts), self.path) + + +class ResourceFile(FileItem): + def __init__(self, path, relative_path): + super(ResourceFile, self).__init__(path) + self.relative_path = relative_path + + def __repr__(self): + return "<{}> '{}'".format(self.__class__.__name__, self.relative_path) + + @property + def is_valid_file(self): + if not self.relative_path: + return False + return super(ResourceFile, self).is_valid_file + + +class ProjectPushItem: + def __init__( + self, + src_project_name, + src_version_id, + dst_project_name, + dst_folder_id, + dst_task_name, + variant, + comment, + new_folder_name, + dst_version, + item_id=None, + ): + if not item_id: + item_id = uuid.uuid4().hex + self.src_project_name = src_project_name + self.src_version_id = src_version_id + self.dst_project_name = dst_project_name + self.dst_folder_id = dst_folder_id + self.dst_task_name = dst_task_name + self.dst_version = dst_version + self.variant = variant + self.new_folder_name = new_folder_name + self.comment = comment or "" + self.item_id = item_id + self._repr_value = 
None + + @property + def _repr(self): + if not self._repr_value: + self._repr_value = "|".join([ + self.src_project_name, + self.src_version_id, + self.dst_project_name, + str(self.dst_folder_id), + str(self.new_folder_name), + str(self.dst_task_name), + str(self.dst_version) + ]) + return self._repr_value + + def __repr__(self): + return "<{} - {}>".format(self.__class__.__name__, self._repr) + + def to_data(self): + return { + "src_project_name": self.src_project_name, + "src_version_id": self.src_version_id, + "dst_project_name": self.dst_project_name, + "dst_folder_id": self.dst_folder_id, + "dst_task_name": self.dst_task_name, + "dst_version": self.dst_version, + "variant": self.variant, + "comment": self.comment, + "new_folder_name": self.new_folder_name, + "item_id": self.item_id, + } + + @classmethod + def from_data(cls, data): + return cls(**data) + + +class StatusMessage: + def __init__(self, message, level): + self.message = message + self.level = level + + def __str__(self): + return "{}: {}".format(self.level.upper(), self.message) + + def __repr__(self): + return "<{} - {}> {}".format( + self.__class__.__name__, self.level.upper(), self.message + ) + + +class ProjectPushItemStatus: + def __init__( + self, + started=False, + failed=False, + finished=False, + fail_reason=None, + full_traceback=None + ): + self.started = started + self.failed = failed + self.finished = finished + self.fail_reason = fail_reason + self.full_traceback = full_traceback + + def set_failed(self, fail_reason, exc_info=None): + """Set status as failed. + + Attribute 'fail_reason' can change automatically based on the passed + value. The reason is unset when the status is not failed, and is set + to a default reason when it is failed without an explicit reason. + + Args: + fail_reason (str): Reason why it failed. + exc_info(tuple): Exception info. + """ + + failed = True + if not fail_reason and not exc_info: + failed = False + + full_traceback = None + if exc_info is not None: + full_traceback = "".join(traceback.format_exception(*exc_info)) + if not fail_reason: + fail_reason = "Failed without specified reason" + + self.failed = failed + self.fail_reason = fail_reason or None + self.full_traceback = full_traceback + + def to_data(self): + return { + "started": self.started, + "failed": self.failed, + "finished": self.finished, + "fail_reason": self.fail_reason, + "full_traceback": self.full_traceback, + } + + @classmethod + def from_data(cls, data): + return cls(**data) + + +class ProjectPushRepreItem: + """Representation item. + + Representation item based on representation document and project roots. + + Representation document may reference: + - source files: Files defined with publish template + - resource files: Files that should be in publish directory + but filenames are not template based. + + Args: + repre_doc (Dict[str, Any]): Representation document. + roots (Dict[str, str]): Project roots (based on project anatomy).
+ """ + + def __init__(self, repre_doc, roots): + self._repre_doc = repre_doc + self._roots = roots + self._src_files = None + self._resource_files = None + self._frame = UNKNOWN + + @property + def repre_doc(self): + return self._repre_doc + + @property + def src_files(self): + if self._src_files is None: + self.get_source_files() + return self._src_files + + @property + def resource_files(self): + if self._resource_files is None: + self.get_source_files() + return self._resource_files + + @staticmethod + def _clean_path(path): + new_value = path.replace("\\", "/") + while "//" in new_value: + new_value = new_value.replace("//", "/") + return new_value + + @staticmethod + def _get_relative_path(path, src_dirpath): + dirpath, basename = os.path.split(path) + if not dirpath.lower().startswith(src_dirpath.lower()): + return None + + relative_dir = dirpath[len(src_dirpath):].lstrip("/") + if relative_dir: + relative_path = "/".join([relative_dir, basename]) + else: + relative_path = basename + return relative_path + + @property + def frame(self): + """First frame of representation files. + + This value will be in representation document context if is sequence. + + Returns: + Union[int, None]: First frame in representation files based on + source files or None if frame is not part of filename. + """ + + if self._frame is UNKNOWN: + frame = None + for src_file in self.src_files: + src_frame = src_file.frame + if ( + src_frame is not None + and (frame is None or src_frame < frame) + ): + frame = src_frame + self._frame = frame + return self._frame + + @staticmethod + def validate_source_files(src_files, resource_files): + if not src_files: + raise AssertionError(( + "Couldn't figure out source files from representation." + " Found resource files {}" + ).format(", ".join(str(i) for i in resource_files))) + + invalid_items = [ + item + for item in itertools.chain(src_files, resource_files) + if not item.is_valid_file + ] + if invalid_items: + raise AssertionError(( + "Source files that were not found on disk: {}" + ).format(", ".join(str(i) for i in invalid_items))) + + def get_source_files(self): + if self._src_files is not None: + return self._src_files, self._resource_files + + repre_context = self._repre_doc["context"] + if "frame" in repre_context or "udim" in repre_context: + src_files, resource_files = self._get_source_files_with_frames() + else: + src_files, resource_files = self._get_source_files() + + self.validate_source_files(src_files, resource_files) + + self._src_files = src_files + self._resource_files = resource_files + return self._src_files, self._resource_files + + def _get_source_files_with_frames(self): + frame_placeholder = "__frame__" + udim_placeholder = "__udim__" + src_files = [] + resource_files = [] + template = self._repre_doc["data"]["template"] + # Remove padding from 'udim' and 'frame' formatting keys + # - "{frame:0>4}" -> "{frame}" + for key in ("udim", "frame"): + sub_part = "{" + key + "[^}]*}" + replacement = "{{{}}}".format(key) + template = re.sub(sub_part, replacement, template) + + repre_context = self._repre_doc["context"] + fill_repre_context = copy.deepcopy(repre_context) + if "frame" in fill_repre_context: + fill_repre_context["frame"] = frame_placeholder + + if "udim" in fill_repre_context: + fill_repre_context["udim"] = udim_placeholder + + fill_roots = fill_repre_context["root"] + for root_name in tuple(fill_roots.keys()): + fill_roots[root_name] = "{{root[{}]}}".format(root_name) + repre_path = StringTemplate.format_template( + template, 
fill_repre_context) + repre_path = self._clean_path(repre_path) + src_dirpath, src_basename = os.path.split(repre_path) + src_basename = ( + re.escape(src_basename) + .replace(frame_placeholder, "(?P[0-9]+)") + .replace(udim_placeholder, "(?P[0-9]+)") + ) + src_basename_regex = re.compile("^{}$".format(src_basename)) + for file_info in self._repre_doc["files"]: + filepath_template = self._clean_path(file_info["path"]) + filepath = self._clean_path( + filepath_template.format(root=self._roots) + ) + dirpath, basename = os.path.split(filepath_template) + if ( + dirpath.lower() != src_dirpath.lower() + or not src_basename_regex.match(basename) + ): + relative_path = self._get_relative_path(filepath, src_dirpath) + resource_files.append(ResourceFile(filepath, relative_path)) + continue + + filepath = os.path.join(src_dirpath, basename) + frame = None + udim = None + for item in src_basename_regex.finditer(basename): + group_name = item.lastgroup + value = item.group(group_name) + if group_name == "frame": + frame = int(value) + elif group_name == "udim": + udim = value + + src_files.append(SourceFile(filepath, frame, udim)) + + return src_files, resource_files + + def _get_source_files(self): + src_files = [] + resource_files = [] + template = self._repre_doc["data"]["template"] + repre_context = self._repre_doc["context"] + fill_repre_context = copy.deepcopy(repre_context) + fill_roots = fill_repre_context["root"] + for root_name in tuple(fill_roots.keys()): + fill_roots[root_name] = "{{root[{}]}}".format(root_name) + repre_path = StringTemplate.format_template(template, + fill_repre_context) + repre_path = self._clean_path(repre_path) + src_dirpath = os.path.dirname(repre_path) + for file_info in self._repre_doc["files"]: + filepath_template = self._clean_path(file_info["path"]) + filepath = self._clean_path( + filepath_template.format(root=self._roots)) + + if filepath_template.lower() == repre_path.lower(): + src_files.append( + SourceFile(repre_path.format(root=self._roots)) + ) + else: + relative_path = self._get_relative_path( + filepath_template, src_dirpath + ) + resource_files.append( + ResourceFile(filepath, relative_path) + ) + return src_files, resource_files + + +class ProjectPushItemProcess: + """ + Args: + model (IntegrateModel): Model which is processing item. + item (ProjectPushItem): Item which is being processed. + """ + + # TODO where to get host?!!! 
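+    # A minimal sketch of the expected lifecycle (hypothetical driver code;
+    # argument values are illustrative), driven through 'IntegrateModel'
+    # defined below:
+    #
+    #     item_id = model.create_process_item(
+    #         src_project_name, src_version_id, dst_project_name,
+    #         dst_folder_id, dst_task_name, variant,
+    #         comment=None, new_folder_name=None, dst_version=None,
+    #     )
+    #     model.integrate_item(item_id)
+    #     status = model.get_item_status(item_id)
+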
+    host_name = "republisher"
+
+    def __init__(self, model, item):
+        self._model = model
+        self._item = item
+
+        self._src_asset_doc = None
+        self._src_subset_doc = None
+        self._src_version_doc = None
+        self._src_repre_items = None
+
+        self._project_doc = None
+        self._anatomy = None
+        self._asset_doc = None
+        self._created_asset_doc = None
+        self._task_info = None
+        self._subset_doc = None
+        self._version_doc = None
+
+        self._family = None
+        self._subset_name = None
+
+        self._project_settings = None
+        self._template_name = None
+
+        self._status = ProjectPushItemStatus()
+        self._operations = OperationsSession()
+        self._file_transaction = FileTransaction()
+
+        self._messages = []
+
+    @property
+    def item_id(self):
+        return self._item.item_id
+
+    @property
+    def started(self):
+        return self._status.started
+
+    def get_status_data(self):
+        return self._status.to_data()
+
+    def integrate(self):
+        self._status.started = True
+        try:
+            self._log_info("Process started")
+            self._fill_source_variables()
+            self._log_info("Source entities were found")
+            self._fill_destination_project()
+            self._log_info("Destination project was found")
+            self._fill_or_create_destination_asset()
+            self._log_info("Destination asset was determined")
+            self._determine_family()
+            self._determine_publish_template_name()
+            self._determine_subset_name()
+            self._make_sure_subset_exists()
+            self._make_sure_version_exists()
+            self._log_info("Prerequisites were prepared")
+            self._integrate_representations()
+            self._log_info("Integration finished")
+
+        except PushToProjectError as exc:
+            if not self._status.failed:
+                self._status.set_failed(str(exc))
+
+        except Exception as exc:
+            _exc, _value, _tb = sys.exc_info()
+            self._status.set_failed(
+                "Unhandled error happened: {}".format(str(exc)),
+                (_exc, _value, _tb)
+            )
+
+        finally:
+            self._status.finished = True
+            self._emit_event(
+                "push.finished.changed",
+                {
+                    "finished": True,
+                    "item_id": self.item_id,
+                }
+            )
+
+    def _emit_event(self, topic, data):
+        self._model.emit_event(topic, data)
+
+    # Logging helpers
+    # TODO better logging
+    def _add_message(self, message, level):
+        message_obj = StatusMessage(message, level)
+        self._messages.append(message_obj)
+        self._emit_event(
+            "push.message.added",
+            {
+                "message": message,
+                "level": level,
+                "item_id": self.item_id,
+            }
+        )
+        print(message_obj)
+        return message_obj
+
+    def _log_debug(self, message):
+        return self._add_message(message, "debug")
+
+    def _log_info(self, message):
+        return self._add_message(message, "info")
+
+    def _log_warning(self, message):
+        return self._add_message(message, "warning")
+
+    def _log_error(self, message):
+        return self._add_message(message, "error")
+
+    def _log_critical(self, message):
+        return self._add_message(message, "critical")
+
+    def _fill_source_variables(self):
+        src_project_name = self._item.src_project_name
+        src_version_id = self._item.src_version_id
+
+        project_doc = get_project(src_project_name)
+        if not project_doc:
+            self._status.set_failed(
+                f"Source project \"{src_project_name}\" was not found"
+            )
+
+            self._emit_event(
+                "push.failed.changed",
+                {"item_id": self.item_id}
+            )
+            raise PushToProjectError(self._status.fail_reason)
+
+        self._log_debug(f"Project '{src_project_name}' found")
+
+        version_doc = get_version_by_id(src_project_name, src_version_id)
+        if not version_doc:
+            self._status.set_failed((
+                f"Source version with id \"{src_version_id}\""
+                f" was not found in project \"{src_project_name}\""
+            ))
+            raise PushToProjectError(self._status.fail_reason)
+
+        subset_id = version_doc["parent"]
+        subset_doc = get_subset_by_id(src_project_name, subset_id)
+        if not subset_doc:
+            self._status.set_failed((
+                f"Could not find subset with id \"{subset_id}\""
+                f" in project \"{src_project_name}\""
+            ))
+            raise PushToProjectError(self._status.fail_reason)
+
+        asset_id = subset_doc["parent"]
+        asset_doc = get_asset_by_id(src_project_name, asset_id)
+        if not asset_doc:
+            self._status.set_failed((
+                f"Could not find asset with id \"{asset_id}\""
+                f" in project \"{src_project_name}\""
+            ))
+            raise PushToProjectError(self._status.fail_reason)
+
+        anatomy = Anatomy(src_project_name)
+
+        repre_docs = get_representations(
+            src_project_name,
+            version_ids=[src_version_id]
+        )
+        repre_items = [
+            ProjectPushRepreItem(repre_doc, anatomy.roots)
+            for repre_doc in repre_docs
+        ]
+        self._log_debug((
+            f"Found {len(repre_items)} representations on"
+            f" version {src_version_id} in project '{src_project_name}'"
+        ))
+        if not repre_items:
+            self._status.set_failed(
+                "Source version does not have representations"
+                f" (Version id: {src_version_id})"
+            )
+            raise PushToProjectError(self._status.fail_reason)
+
+        self._src_asset_doc = asset_doc
+        self._src_subset_doc = subset_doc
+        self._src_version_doc = version_doc
+        self._src_repre_items = repre_items
+
+    def _fill_destination_project(self):
+        # --- Destination entities ---
+        dst_project_name = self._item.dst_project_name
+        # Validate project existence
+        dst_project_doc = get_project(dst_project_name)
+        if not dst_project_doc:
+            self._status.set_failed(
+                f"Destination project '{dst_project_name}' was not found"
+            )
+            raise PushToProjectError(self._status.fail_reason)
+
+        self._log_debug(
+            f"Destination project '{dst_project_name}' found"
+        )
+        self._project_doc = dst_project_doc
+        self._anatomy = Anatomy(dst_project_name)
+        self._project_settings = get_project_settings(
+            self._item.dst_project_name
+        )
+
+    def _create_asset(
+        self,
+        src_asset_doc,
+        project_doc,
+        parent_asset_doc,
+        asset_name
+    ):
+        parent_id = None
+        parents = []
+        tools = []
+        if parent_asset_doc:
+            parent_id = parent_asset_doc["_id"]
+            parents = list(parent_asset_doc["data"]["parents"])
+            parents.append(parent_asset_doc["name"])
+            _tools = parent_asset_doc["data"].get("tools_env")
+            if _tools:
+                tools = list(_tools)
+
+        asset_name_low = asset_name.lower()
+        other_asset_docs = get_assets(
+            project_doc["name"], fields=["_id", "name", "data.visualParent"]
+        )
+        for other_asset_doc in other_asset_docs:
+            other_name = other_asset_doc["name"]
+            other_parent_id = other_asset_doc["data"].get("visualParent")
+            if other_name.lower() != asset_name_low:
+                continue
+
+            if other_parent_id != parent_id:
+                self._status.set_failed((
+                    f"Asset with name \"{other_name}\" already"
+                    " exists in different hierarchy."
+                ))
+                raise PushToProjectError(self._status.fail_reason)
+
+            self._log_debug((
+                f"Found already existing asset with name \"{other_name}\""
+                f" which matches requested name \"{asset_name}\""
+            ))
+            return get_asset_by_id(project_doc["name"], other_asset_doc["_id"])
+
+        data_keys = (
+            "clipIn",
+            "clipOut",
+            "frameStart",
+            "frameEnd",
+            "handleStart",
+            "handleEnd",
+            "resolutionWidth",
+            "resolutionHeight",
+            "fps",
+            "pixelAspect",
+        )
+        asset_data = {
+            "visualParent": parent_id,
+            "parents": parents,
+            "tasks": {},
+            "tools_env": tools
+        }
+        src_asset_data = src_asset_doc["data"]
+        for key in data_keys:
+            if key in src_asset_data:
+                asset_data[key] = src_asset_data[key]
+
+        asset_doc = new_asset_document(
+            asset_name,
+            project_doc["_id"],
+            parent_id,
+            parents,
+            data=asset_data
+        )
+        self._operations.create_entity(
+            project_doc["name"],
+            asset_doc["type"],
+            asset_doc
+        )
+        self._log_info(
+            f"Creating new asset with name \"{asset_name}\""
+        )
+        self._created_asset_doc = asset_doc
+        return asset_doc
+
+    def _fill_or_create_destination_asset(self):
+        dst_project_name = self._item.dst_project_name
+        dst_folder_id = self._item.dst_folder_id
+        dst_task_name = self._item.dst_task_name
+        new_folder_name = self._item.new_folder_name
+        if not dst_folder_id and not new_folder_name:
+            self._status.set_failed(
+                "Push item does not have defined destination asset"
+            )
+            raise PushToProjectError(self._status.fail_reason)
+
+        # Get asset document
+        parent_asset_doc = None
+        if dst_folder_id:
+            parent_asset_doc = get_asset_by_id(
+                self._item.dst_project_name, self._item.dst_folder_id
+            )
+            if not parent_asset_doc:
+                self._status.set_failed(
+                    f"Could not find asset with id \"{dst_folder_id}\""
+                    f" in project \"{dst_project_name}\""
+                )
+                raise PushToProjectError(self._status.fail_reason)
+
+        if not new_folder_name:
+            asset_doc = parent_asset_doc
+        else:
+            asset_doc = self._create_asset(
+                self._src_asset_doc,
+                self._project_doc,
+                parent_asset_doc,
+                new_folder_name
+            )
+        self._asset_doc = asset_doc
+        if not dst_task_name:
+            self._task_info = {}
+            return
+
+        asset_path_parts = list(asset_doc["data"]["parents"])
+        asset_path_parts.append(asset_doc["name"])
+        asset_path = "/".join(asset_path_parts)
+        asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
+        task_info = asset_tasks.get(dst_task_name)
+        if not task_info:
+            self._status.set_failed(
+                f"Could not find task with name \"{dst_task_name}\""
+                f" on asset \"{asset_path}\""
+                f" in project \"{dst_project_name}\""
+            )
+            raise PushToProjectError(self._status.fail_reason)
+
+        # Create copy of task info to avoid changing data in asset document
+        task_info = copy.deepcopy(task_info)
+        task_info["name"] = dst_task_name
+        # Fill rest of task information based on task type
+        task_type = task_info["type"]
+        task_type_info = self._project_doc["config"]["tasks"].get(
+            task_type, {})
+        task_info.update(task_type_info)
+        self._task_info = task_info
+
+    def _determine_family(self):
+        subset_doc = self._src_subset_doc
+        family = subset_doc["data"].get("family")
+        families = subset_doc["data"].get("families")
+        if not family and families:
+            family = families[0]
+
+        if not family:
+            self._status.set_failed(
+                "Couldn't figure out family from source subset"
+            )
+            raise PushToProjectError(self._status.fail_reason)
+
+        self._log_debug(
+            f"Publishing family is '{family}' (Based on source subset)"
+        )
+        self._family = family
+
+    def _determine_publish_template_name(self):
+        template_name = get_publish_template_name(
+            self._item.dst_project_name,
+            self.host_name,
+            self._family,
+            self._task_info.get("name"),
+            self._task_info.get("type"),
+            project_settings=self._project_settings
+        )
+        self._log_debug(
+            f"Using template '{template_name}' for integration"
+        )
+        self._template_name = template_name
+
+    def _determine_subset_name(self):
+        family = self._family
+        asset_doc = self._asset_doc
+        task_info = self._task_info
+        subset_name = get_subset_name(
+            family,
+            self._item.variant,
+            task_info.get("name"),
+            asset_doc,
+            project_name=self._item.dst_project_name,
+            host_name=self.host_name,
+            project_settings=self._project_settings
+        )
+        self._log_info(
+            f"Push will be integrating to subset with name '{subset_name}'"
+        )
+        self._subset_name = subset_name
+
+    def _make_sure_subset_exists(self):
+        project_name = self._item.dst_project_name
+        asset_id = self._asset_doc["_id"]
+        subset_name = self._subset_name
+        family = self._family
+        subset_doc = get_subset_by_name(project_name, subset_name, asset_id)
+        if subset_doc:
+            self._subset_doc = subset_doc
+            return subset_doc
+
+        data = {
+            "families": [family]
+        }
+        subset_doc = new_subset_document(
+            subset_name, family, asset_id, data
+        )
+        self._operations.create_entity(project_name, "subset", subset_doc)
+        self._subset_doc = subset_doc
+
+    def _make_sure_version_exists(self):
+        """Make sure version document exists in database."""
+
+        project_name = self._item.dst_project_name
+        version = self._item.dst_version
+        src_version_doc = self._src_version_doc
+        subset_doc = self._subset_doc
+        subset_id = subset_doc["_id"]
+        src_data = src_version_doc["data"]
+        families = subset_doc["data"].get("families")
+        if not families:
+            families = [subset_doc["data"]["family"]]
+
+        version_data = {
+            "families": list(families),
+            "fps": src_data.get("fps"),
+            "source": src_data.get("source"),
+            "machine": socket.gethostname(),
+            "comment": self._item.comment or "",
+            "author": get_ayon_username(),
+            "time": get_formatted_current_time(),
+        }
+        if version is None:
+            last_version_doc = get_last_version_by_subset_id(
+                project_name, subset_id
+            )
+            if last_version_doc:
+                version = int(last_version_doc["name"]) + 1
+            else:
+                version = get_versioning_start(
+                    project_name,
+                    self.host_name,
+                    task_name=self._task_info["name"],
+                    task_type=self._task_info["type"],
+                    family=families[0],
+                    subset=subset_doc["name"]
+                )
+
+        existing_version_doc = get_version_by_name(
+            project_name, version, subset_id
+        )
+        # Update existing version
+        if existing_version_doc:
+            version_doc = new_version_doc(
+                version, subset_id, version_data, existing_version_doc["_id"]
+            )
+            update_data = prepare_version_update_data(
+                existing_version_doc, version_doc
+            )
+            if update_data:
+                self._operations.update_entity(
+                    project_name,
+                    "version",
+                    existing_version_doc["_id"],
+                    update_data
+                )
+            self._version_doc = version_doc
+
+            return
+
+        version_doc = new_version_doc(
+            version, subset_id, version_data
+        )
+        self._operations.create_entity(project_name, "version", version_doc)
+
+        self._version_doc = version_doc
+
+    def _integrate_representations(self):
+        try:
+            self._real_integrate_representations()
+        except Exception:
+            self._operations.clear()
+            self._file_transaction.rollback()
+            raise
+
+    def _real_integrate_representations(self):
+        version_doc = self._version_doc
+        version_id = version_doc["_id"]
+        existing_repres = get_representations(
+            self._item.dst_project_name,
+            version_ids=[version_id]
+        )
+        existing_repres_by_low_name = {
+            repre_doc["name"].lower(): repre_doc
+            for repre_doc in existing_repres
+        }
+        template_name = 
self._template_name + anatomy = self._anatomy + formatting_data = get_template_data( + self._project_doc, + self._asset_doc, + self._task_info.get("name"), + self.host_name + ) + formatting_data.update({ + "subset": self._subset_name, + "family": self._family, + "version": version_doc["name"] + }) + + path_template = anatomy.templates[template_name]["path"].replace( + "\\", "/" + ) + file_template = StringTemplate( + anatomy.templates[template_name]["file"] + ) + self._log_info("Preparing files to transfer") + processed_repre_items = self._prepare_file_transactions( + anatomy, template_name, formatting_data, file_template + ) + self._file_transaction.process() + self._log_info("Preparing database changes") + self._prepare_database_operations( + version_id, + processed_repre_items, + path_template, + existing_repres_by_low_name + ) + self._log_info("Finalization") + self._operations.commit() + self._file_transaction.finalize() + + def _prepare_file_transactions( + self, anatomy, template_name, formatting_data, file_template + ): + processed_repre_items = [] + for repre_item in self._src_repre_items: + repre_doc = repre_item.repre_doc + repre_name = repre_doc["name"] + repre_format_data = copy.deepcopy(formatting_data) + repre_format_data["representation"] = repre_name + for src_file in repre_item.src_files: + ext = os.path.splitext(src_file.path)[-1] + repre_format_data["ext"] = ext[1:] + break + + # Re-use 'output' from source representation + repre_output_name = repre_doc["context"].get("output") + if repre_output_name is not None: + repre_format_data["output"] = repre_output_name + + template_obj = anatomy.templates_obj[template_name]["folder"] + folder_path = template_obj.format_strict(formatting_data) + repre_context = folder_path.used_values + folder_path_rootless = folder_path.rootless + repre_filepaths = [] + published_path = None + for src_file in repre_item.src_files: + file_data = copy.deepcopy(repre_format_data) + frame = src_file.frame + if frame is not None: + file_data["frame"] = frame + + udim = src_file.udim + if udim is not None: + file_data["udim"] = udim + + filename = file_template.format_strict(file_data) + dst_filepath = os.path.normpath( + os.path.join(folder_path, filename) + ) + dst_rootless_path = os.path.normpath( + os.path.join(folder_path_rootless, filename) + ) + if published_path is None or frame == repre_item.frame: + published_path = dst_filepath + repre_context.update(filename.used_values) + + repre_filepaths.append((dst_filepath, dst_rootless_path)) + self._file_transaction.add(src_file.path, dst_filepath) + + for resource_file in repre_item.resource_files: + dst_filepath = os.path.normpath( + os.path.join(folder_path, resource_file.relative_path) + ) + dst_rootless_path = os.path.normpath( + os.path.join( + folder_path_rootless, resource_file.relative_path + ) + ) + repre_filepaths.append((dst_filepath, dst_rootless_path)) + self._file_transaction.add(resource_file.path, dst_filepath) + processed_repre_items.append( + (repre_item, repre_filepaths, repre_context, published_path) + ) + return processed_repre_items + + def _prepare_database_operations( + self, + version_id, + processed_repre_items, + path_template, + existing_repres_by_low_name + ): + addons_manager = AddonsManager() + sync_server_module = addons_manager.get("sync_server") + if sync_server_module is None or not sync_server_module.enabled: + sites = [{ + "name": "studio", + "created_dt": datetime.datetime.now() + }] + else: + sites = sync_server_module.compute_resource_sync_sites( + 
project_name=self._item.dst_project_name + ) + + added_repre_names = set() + for item in processed_repre_items: + (repre_item, repre_filepaths, repre_context, published_path) = item + repre_name = repre_item.repre_doc["name"] + added_repre_names.add(repre_name.lower()) + new_repre_data = { + "path": published_path, + "template": path_template + } + new_repre_files = [] + for (path, rootless_path) in repre_filepaths: + new_repre_files.append({ + "_id": ObjectId(), + "path": rootless_path, + "size": os.path.getsize(path), + "hash": source_hash(path), + "sites": sites + }) + + existing_repre = existing_repres_by_low_name.get( + repre_name.lower() + ) + entity_id = None + if existing_repre: + entity_id = existing_repre["_id"] + new_repre_doc = new_representation_doc( + repre_name, + version_id, + repre_context, + data=new_repre_data, + entity_id=entity_id + ) + new_repre_doc["files"] = new_repre_files + if not existing_repre: + self._operations.create_entity( + self._item.dst_project_name, + new_repre_doc["type"], + new_repre_doc + ) + else: + update_data = prepare_representation_update_data( + existing_repre, new_repre_doc + ) + if update_data: + self._operations.update_entity( + self._item.dst_project_name, + new_repre_doc["type"], + new_repre_doc["_id"], + update_data + ) + + existing_repre_names = set(existing_repres_by_low_name.keys()) + for repre_name in (existing_repre_names - added_repre_names): + repre_doc = existing_repres_by_low_name[repre_name] + self._operations.update_entity( + self._item.dst_project_name, + repre_doc["type"], + repre_doc["_id"], + {"type": "archived_representation"} + ) + + +class IntegrateModel: + def __init__(self, controller): + self._controller = controller + self._process_items = {} + + def reset(self): + self._process_items = {} + + def emit_event(self, topic, data=None, source=None): + self._controller.emit_event(topic, data, source) + + def create_process_item( + self, + src_project_name, + src_version_id, + dst_project_name, + dst_folder_id, + dst_task_name, + variant, + comment, + new_folder_name, + dst_version, + ): + """Create new item for integration. + + Args: + src_project_name (str): Source project name. + src_version_id (str): Source version id. + dst_project_name (str): Destination project name. + dst_folder_id (str): Destination folder id. + dst_task_name (str): Destination task name. + variant (str): Variant name. + comment (Union[str, None]): Comment. + new_folder_name (Union[str, None]): New folder name. + dst_version (int): Destination version number. + + Returns: + str: Item id. The id can be used to trigger integration or get + status information. + """ + + item = ProjectPushItem( + src_project_name, + src_version_id, + dst_project_name, + dst_folder_id, + dst_task_name, + variant, + comment=comment, + new_folder_name=new_folder_name, + dst_version=dst_version + ) + process_item = ProjectPushItemProcess(self, item) + self._process_items[item.item_id] = process_item + return item.item_id + + def integrate_item(self, item_id): + """Start integration of item. + + Args: + item_id (str): Item id which should be integrated. + """ + + item = self._process_items.get(item_id) + if item is None or item.started: + return + item.integrate() + + def get_item_status(self, item_id): + """Status of an item. + + Args: + item_id (str): Item id for which status should be returned. + + Returns: + dict[str, Any]: Status data. 
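+
+        Example:
+            Shape of the returned data (values are illustrative)::
+
+                {
+                    "started": True,
+                    "failed": False,
+                    "finished": True,
+                    "fail_reason": None,
+                    "full_traceback": None,
+                }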
+ """ + + item = self._process_items.get(item_id) + if item is not None: + return item.get_status_data() + return None diff --git a/openpype/tools/ayon_push_to_project/models/selection.py b/client/ayon_core/tools/push_to_project/models/selection.py similarity index 100% rename from openpype/tools/ayon_push_to_project/models/selection.py rename to client/ayon_core/tools/push_to_project/models/selection.py diff --git a/openpype/tools/ayon_push_to_project/models/user_values.py b/client/ayon_core/tools/push_to_project/models/user_values.py similarity index 97% rename from openpype/tools/ayon_push_to_project/models/user_values.py rename to client/ayon_core/tools/push_to_project/models/user_values.py index 2a4faeb136..a12a1513ee 100644 --- a/openpype/tools/ayon_push_to_project/models/user_values.py +++ b/client/ayon_core/tools/push_to_project/models/user_values.py @@ -1,6 +1,6 @@ import re -from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from ayon_core.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class UserPublishValuesModel: diff --git a/openpype/tools/ayon_push_to_project/ui/__init__.py b/client/ayon_core/tools/push_to_project/ui/__init__.py similarity index 100% rename from openpype/tools/ayon_push_to_project/ui/__init__.py rename to client/ayon_core/tools/push_to_project/ui/__init__.py diff --git a/client/ayon_core/tools/push_to_project/ui/window.py b/client/ayon_core/tools/push_to_project/ui/window.py new file mode 100644 index 0000000000..4d39075dc3 --- /dev/null +++ b/client/ayon_core/tools/push_to_project/ui/window.py @@ -0,0 +1,432 @@ +from qtpy import QtWidgets, QtGui, QtCore + +from ayon_core.style import load_stylesheet, get_app_icon_path +from ayon_core.tools.utils import ( + PlaceholderLineEdit, + SeparatorWidget, + set_style_property, +) +from ayon_core.tools.ayon_utils.widgets import ( + ProjectsCombobox, + FoldersWidget, + TasksWidget, +) +from ayon_core.tools.push_to_project.control import ( + PushToContextController, +) + + +class PushToContextSelectWindow(QtWidgets.QWidget): + def __init__(self, controller=None): + super(PushToContextSelectWindow, self).__init__() + if controller is None: + controller = PushToContextController() + self._controller = controller + + self.setWindowTitle("Push to project (select context)") + self.setWindowIcon(QtGui.QIcon(get_app_icon_path())) + + main_context_widget = QtWidgets.QWidget(self) + + header_widget = QtWidgets.QWidget(main_context_widget) + + header_label = QtWidgets.QLabel( + controller.get_source_label(), + header_widget + ) + + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + header_layout.addWidget(header_label) + + main_splitter = QtWidgets.QSplitter( + QtCore.Qt.Horizontal, main_context_widget + ) + + context_widget = QtWidgets.QWidget(main_splitter) + + projects_combobox = ProjectsCombobox(controller, context_widget) + projects_combobox.set_select_item_visible(True) + projects_combobox.set_standard_filter_enabled(True) + + context_splitter = QtWidgets.QSplitter( + QtCore.Qt.Vertical, context_widget + ) + + folders_widget = FoldersWidget(controller, context_splitter) + folders_widget.set_deselectable(True) + tasks_widget = TasksWidget(controller, context_splitter) + + context_splitter.addWidget(folders_widget) + context_splitter.addWidget(tasks_widget) + + context_layout = QtWidgets.QVBoxLayout(context_widget) + context_layout.setContentsMargins(0, 0, 0, 0) + context_layout.addWidget(projects_combobox, 0) + context_layout.addWidget(context_splitter, 1) 
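+        # Right side of the splitter: user inputs for the optional new
+        # folder name, variant and publish comment.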
+
+        # --- Inputs widget ---
+        inputs_widget = QtWidgets.QWidget(main_splitter)
+
+        folder_name_input = PlaceholderLineEdit(inputs_widget)
+        folder_name_input.setPlaceholderText("< Name of new folder >")
+        folder_name_input.setObjectName("ValidatedLineEdit")
+
+        variant_input = PlaceholderLineEdit(inputs_widget)
+        variant_input.setPlaceholderText("< Variant >")
+        variant_input.setObjectName("ValidatedLineEdit")
+
+        comment_input = PlaceholderLineEdit(inputs_widget)
+        comment_input.setPlaceholderText("< Publish comment >")
+
+        inputs_layout = QtWidgets.QFormLayout(inputs_widget)
+        inputs_layout.setContentsMargins(0, 0, 0, 0)
+        inputs_layout.addRow("New folder name", folder_name_input)
+        inputs_layout.addRow("Variant", variant_input)
+        inputs_layout.addRow("Comment", comment_input)
+
+        main_splitter.addWidget(context_widget)
+        main_splitter.addWidget(inputs_widget)
+
+        # --- Buttons widget ---
+        btns_widget = QtWidgets.QWidget(self)
+        cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget)
+        publish_btn = QtWidgets.QPushButton("Publish", btns_widget)
+
+        btns_layout = QtWidgets.QHBoxLayout(btns_widget)
+        btns_layout.setContentsMargins(0, 0, 0, 0)
+        btns_layout.addStretch(1)
+        btns_layout.addWidget(cancel_btn, 0)
+        btns_layout.addWidget(publish_btn, 0)
+
+        sep_1 = SeparatorWidget(parent=main_context_widget)
+        sep_2 = SeparatorWidget(parent=main_context_widget)
+        main_context_layout = QtWidgets.QVBoxLayout(main_context_widget)
+        main_context_layout.addWidget(header_widget, 0)
+        main_context_layout.addWidget(sep_1, 0)
+        main_context_layout.addWidget(main_splitter, 1)
+        main_context_layout.addWidget(sep_2, 0)
+        main_context_layout.addWidget(btns_widget, 0)
+
+        # NOTE This was added in a hurry
+        # - should be reorganized and its styles changed
+        overlay_widget = QtWidgets.QFrame(self)
+        overlay_widget.setObjectName("OverlayFrame")
+
+        overlay_label = QtWidgets.QLabel(overlay_widget)
+        overlay_label.setAlignment(QtCore.Qt.AlignCenter)
+
+        overlay_btns_widget = QtWidgets.QWidget(overlay_widget)
+        overlay_btns_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+        # Add try again button (requires changes in controller)
+        overlay_try_btn = QtWidgets.QPushButton(
+            "Try again", overlay_btns_widget
+        )
+        overlay_close_btn = QtWidgets.QPushButton(
+            "Close", overlay_btns_widget
+        )
+
+        overlay_btns_layout = QtWidgets.QHBoxLayout(overlay_btns_widget)
+        overlay_btns_layout.addStretch(1)
+        overlay_btns_layout.addWidget(overlay_try_btn, 0)
+        overlay_btns_layout.addWidget(overlay_close_btn, 0)
+        overlay_btns_layout.addStretch(1)
+
+        overlay_layout = QtWidgets.QVBoxLayout(overlay_widget)
+        overlay_layout.addWidget(overlay_label, 0)
+        overlay_layout.addWidget(overlay_btns_widget, 0)
+        overlay_layout.setAlignment(QtCore.Qt.AlignCenter)
+
+        main_layout = QtWidgets.QStackedLayout(self)
+        main_layout.setContentsMargins(0, 0, 0, 0)
+        main_layout.addWidget(main_context_widget)
+        main_layout.addWidget(overlay_widget)
+        main_layout.setStackingMode(QtWidgets.QStackedLayout.StackAll)
+        main_layout.setCurrentWidget(main_context_widget)
+
+        show_timer = QtCore.QTimer()
+        show_timer.setInterval(0)
+
+        main_thread_timer = QtCore.QTimer()
+        main_thread_timer.setInterval(10)
+
+        user_input_changed_timer = QtCore.QTimer()
+        user_input_changed_timer.setInterval(200)
+        user_input_changed_timer.setSingleShot(True)
+
+        main_thread_timer.timeout.connect(self._on_main_thread_timer)
+        show_timer.timeout.connect(self._on_show_timer)
+        user_input_changed_timer.timeout.connect(self._on_user_input_timer)
+
+        folder_name_input.textChanged.connect(self._on_new_asset_change)
+        variant_input.textChanged.connect(self._on_variant_change)
+        comment_input.textChanged.connect(self._on_comment_change)
+
+        publish_btn.clicked.connect(self._on_select_click)
+        cancel_btn.clicked.connect(self._on_close_click)
+        overlay_close_btn.clicked.connect(self._on_close_click)
+        overlay_try_btn.clicked.connect(self._on_try_again_click)
+
+        controller.register_event_callback(
+            "new_folder_name.changed",
+            self._on_controller_new_asset_change
+        )
+        controller.register_event_callback(
+            "variant.changed", self._on_controller_variant_change
+        )
+        controller.register_event_callback(
+            "comment.changed", self._on_controller_comment_change
+        )
+        controller.register_event_callback(
+            "submission.enabled.changed", self._on_submission_change
+        )
+        controller.register_event_callback(
+            "source.changed", self._on_controller_source_change
+        )
+        controller.register_event_callback(
+            "submit.started", self._on_controller_submit_start
+        )
+        controller.register_event_callback(
+            "submit.finished", self._on_controller_submit_end
+        )
+        controller.register_event_callback(
+            "push.message.added", self._on_push_message
+        )
+
+        self._main_layout = main_layout
+
+        self._main_context_widget = main_context_widget
+
+        self._header_label = header_label
+        self._main_splitter = main_splitter
+
+        self._projects_combobox = projects_combobox
+        self._folders_widget = folders_widget
+        self._tasks_widget = tasks_widget
+
+        self._variant_input = variant_input
+        self._folder_name_input = folder_name_input
+        self._comment_input = comment_input
+
+        self._publish_btn = publish_btn
+
+        self._overlay_widget = overlay_widget
+        self._overlay_close_btn = overlay_close_btn
+        self._overlay_try_btn = overlay_try_btn
+        self._overlay_label = overlay_label
+
+        self._user_input_changed_timer = user_input_changed_timer
+        # Store current value on input text change
+        # The value is unset when it is passed to controller
+        # The goal is to have control over changes that happen during user
+        # changes in UI and controller auto-changes
+        self._variant_input_text = None
+        self._new_folder_name_input_text = None
+        self._comment_input_text = None
+
+        self._first_show = True
+        self._show_timer = show_timer
+        self._show_counter = 0
+
+        self._main_thread_timer = main_thread_timer
+        self._main_thread_timer_can_stop = True
+        self._last_submit_message = None
+        self._process_item_id = None
+
+        self._variant_is_valid = None
+        self._folder_is_valid = None
+
+        publish_btn.setEnabled(False)
+        overlay_close_btn.setVisible(False)
+        overlay_try_btn.setVisible(False)
+
+    # Support of public API functions of controller
+    def set_source(self, project_name, version_id):
+        """Set source project and version.
+
+        Calls the method on the controller.
+
+        Args:
+            project_name (Union[str, None]): Name of project.
+            version_id (Union[str, None]): Version id.
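+
+        Example:
+            A hypothetical call before showing the window::
+
+                window.set_source("my_project", "<version id>")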
+ """ + + self._controller.set_source(project_name, version_id) + + def showEvent(self, event): + super(PushToContextSelectWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() + + def refresh(self): + user_values = self._controller.get_user_values() + new_folder_name = user_values["new_folder_name"] + variant = user_values["variant"] + self._folder_name_input.setText(new_folder_name or "") + self._variant_input.setText(variant or "") + self._invalidate_variant(user_values["is_variant_valid"]) + self._invalidate_new_folder_name( + new_folder_name, user_values["is_new_folder_name_valid"] + ) + + self._projects_combobox.refresh() + + def _on_first_show(self): + width = 740 + height = 640 + inputs_width = 360 + self.setStyleSheet(load_stylesheet()) + self.resize(width, height) + self._main_splitter.setSizes([width - inputs_width, inputs_width]) + self._show_timer.start() + + def _on_show_timer(self): + if self._show_counter < 3: + self._show_counter += 1 + return + self._show_timer.stop() + + self._show_counter = 0 + + self.refresh() + + def _on_new_asset_change(self, text): + self._new_folder_name_input_text = text + self._user_input_changed_timer.start() + + def _on_variant_change(self, text): + self._variant_input_text = text + self._user_input_changed_timer.start() + + def _on_comment_change(self, text): + self._comment_input_text = text + self._user_input_changed_timer.start() + + def _on_user_input_timer(self): + folder_name = self._new_folder_name_input_text + if folder_name is not None: + self._new_folder_name_input_text = None + self._controller.set_user_value_folder_name(folder_name) + + variant = self._variant_input_text + if variant is not None: + self._variant_input_text = None + self._controller.set_user_value_variant(variant) + + comment = self._comment_input_text + if comment is not None: + self._comment_input_text = None + self._controller.set_user_value_comment(comment) + + def _on_controller_new_asset_change(self, event): + folder_name = event["new_folder_name"] + if ( + self._new_folder_name_input_text is None + and folder_name != self._folder_name_input.text() + ): + self._folder_name_input.setText(folder_name) + + self._invalidate_new_folder_name(folder_name, event["is_valid"]) + + def _on_controller_variant_change(self, event): + is_valid = event["is_valid"] + variant = event["variant"] + if ( + self._variant_input_text is None + and variant != self._variant_input.text() + ): + self._variant_input.setText(variant) + + self._invalidate_variant(is_valid) + + def _on_controller_comment_change(self, event): + comment = event["comment"] + if ( + self._comment_input_text is None + and comment != self._comment_input.text() + ): + self._comment_input.setText(comment) + + def _on_controller_source_change(self): + self._header_label.setText(self._controller.get_source_label()) + + def _invalidate_new_folder_name(self, folder_name, is_valid): + self._tasks_widget.setVisible(not folder_name) + if self._folder_is_valid is is_valid: + return + self._folder_is_valid = is_valid + state = "" + if folder_name: + if is_valid is True: + state = "valid" + elif is_valid is False: + state = "invalid" + set_style_property( + self._folder_name_input, "state", state + ) + + def _invalidate_variant(self, is_valid): + if self._variant_is_valid is is_valid: + return + self._variant_is_valid = is_valid + state = "valid" if is_valid else "invalid" + set_style_property(self._variant_input, "state", state) + + def _on_submission_change(self, event): + 
self._publish_btn.setEnabled(event["enabled"])
+
+    def _on_close_click(self):
+        self.close()
+
+    def _on_select_click(self):
+        self._process_item_id = self._controller.submit(wait=False)
+
+    def _on_try_again_click(self):
+        self._process_item_id = None
+        self._last_submit_message = None
+
+        self._overlay_close_btn.setVisible(False)
+        self._overlay_try_btn.setVisible(False)
+        self._main_layout.setCurrentWidget(self._main_context_widget)
+
+    def _on_main_thread_timer(self):
+        if self._last_submit_message:
+            self._overlay_label.setText(self._last_submit_message)
+            self._last_submit_message = None
+
+        process_status = self._controller.get_process_item_status(
+            self._process_item_id
+        )
+        push_failed = process_status["failed"]
+        fail_traceback = process_status["full_traceback"]
+        if self._main_thread_timer_can_stop:
+            self._main_thread_timer.stop()
+            self._overlay_close_btn.setVisible(True)
+            if push_failed and not fail_traceback:
+                self._overlay_try_btn.setVisible(True)
+
+        if push_failed:
+            message = "Push Failed:\n{}".format(process_status["fail_reason"])
+            if fail_traceback:
+                message += "\n{}".format(fail_traceback)
+            self._overlay_label.setText(message)
+            set_style_property(self._overlay_close_btn, "state", "error")
+
+        if self._main_thread_timer_can_stop:
+            # Join thread in controller
+            self._controller.wait_for_process_thread()
+            # Reset process item to None
+            self._process_item_id = None
+
+    def _on_controller_submit_start(self):
+        self._main_thread_timer_can_stop = False
+        self._main_thread_timer.start()
+        self._main_layout.setCurrentWidget(self._overlay_widget)
+        self._overlay_label.setText("Submission started")
+
+    def _on_controller_submit_end(self):
+        self._main_thread_timer_can_stop = True
+
+    def _on_push_message(self, event):
+        self._last_submit_message = event["message"]
diff --git a/openpype/tools/pyblish_pype/__init__.py b/client/ayon_core/tools/pyblish_pype/__init__.py
similarity index 100%
rename from openpype/tools/pyblish_pype/__init__.py
rename to client/ayon_core/tools/pyblish_pype/__init__.py
diff --git a/openpype/tools/pyblish_pype/__main__.py b/client/ayon_core/tools/pyblish_pype/__main__.py
similarity index 100%
rename from openpype/tools/pyblish_pype/__main__.py
rename to client/ayon_core/tools/pyblish_pype/__main__.py
diff --git a/openpype/tools/pyblish_pype/app.css b/client/ayon_core/tools/pyblish_pype/app.css
similarity index 100%
rename from openpype/tools/pyblish_pype/app.css
rename to client/ayon_core/tools/pyblish_pype/app.css
diff --git a/openpype/tools/pyblish_pype/app.py b/client/ayon_core/tools/pyblish_pype/app.py
similarity index 100%
rename from openpype/tools/pyblish_pype/app.py
rename to client/ayon_core/tools/pyblish_pype/app.py
diff --git a/openpype/tools/pyblish_pype/awesome.py b/client/ayon_core/tools/pyblish_pype/awesome.py
similarity index 100%
rename from openpype/tools/pyblish_pype/awesome.py
rename to client/ayon_core/tools/pyblish_pype/awesome.py
diff --git a/openpype/tools/pyblish_pype/constants.py b/client/ayon_core/tools/pyblish_pype/constants.py
similarity index 100%
rename from openpype/tools/pyblish_pype/constants.py
rename to client/ayon_core/tools/pyblish_pype/constants.py
diff --git a/client/ayon_core/tools/pyblish_pype/control.py b/client/ayon_core/tools/pyblish_pype/control.py
new file mode 100644
index 0000000000..1a3e7a15f0
--- /dev/null
+++ b/client/ayon_core/tools/pyblish_pype/control.py
@@ -0,0 +1,646 @@
+"""The Controller in a Model/View/Controller-based application.
+
+The graphical components of Pyblish Lite use this object to perform
+publishing. It communicates via the Qt Signals/Slots mechanism
+and has no direct connection to any graphics. This is important,
+because this is how unittests are able to run without requiring
+an active window manager, such as on Travis-CI.
+"""
+import os
+import sys
+import inspect
+import logging
+import collections
+
+from qtpy import QtCore
+
+import pyblish.api
+import pyblish.util
+import pyblish.logic
+import pyblish.lib
+import pyblish.version
+
+from . import util
+from .constants import InstanceStates
+
+from ayon_core.settings import get_current_project_settings
+
+
+class IterationBreak(Exception):
+    pass
+
+
+class MainThreadItem:
+    """Callback with args and kwargs."""
+    def __init__(self, callback, *args, **kwargs):
+        self.callback = callback
+        self.args = args
+        self.kwargs = kwargs
+
+    def process(self):
+        self.callback(*self.args, **self.kwargs)
+
+
+class MainThreadProcess(QtCore.QObject):
+    """Qt based main thread process executor.
+
+    Has a timer which periodically checks if there is a new item to process.
+
+    This approach makes it possible to update the UI while a plugin is in
+    progress.
+    """
+    # How many timer ticks to let the QApplication process events
+    # - use 2 as a resize event can trigger a repaint event but not process
+    #   in the same loop
+    count_timeout = 2
+
+    def __init__(self):
+        super(MainThreadProcess, self).__init__()
+        self._items_to_process = collections.deque()
+
+        timer = QtCore.QTimer()
+        timer.setInterval(0)
+
+        timer.timeout.connect(self._execute)
+
+        self._timer = timer
+        self._switch_counter = self.count_timeout
+
+    def process(self, func, *args, **kwargs):
+        item = MainThreadItem(func, *args, **kwargs)
+        self.add_item(item)
+
+    def add_item(self, item):
+        self._items_to_process.append(item)
+
+    def _execute(self):
+        if not self._items_to_process:
+            return
+
+        if self._switch_counter > 0:
+            self._switch_counter -= 1
+            return
+
+        self._switch_counter = self.count_timeout
+
+        item = self._items_to_process.popleft()
+        item.process()
+
+    def start(self):
+        if not self._timer.isActive():
+            self._timer.start()
+
+    def stop(self):
+        if self._timer.isActive():
+            self._timer.stop()
+
+    def clear(self):
+        if self._timer.isActive():
+            self._timer.stop()
+        self._items_to_process = collections.deque()
+
+    def stop_if_empty(self):
+        if self._timer.isActive():
+            item = MainThreadItem(self._stop_if_empty)
+            self.add_item(item)
+
+    def _stop_if_empty(self):
+        if not self._items_to_process:
+            self.stop()
+
+
+class Controller(QtCore.QObject):
+    log = logging.getLogger("PyblishController")
+    # Emitted when the GUI is about to start processing;
+    # e.g. resetting, validating or publishing.
+    about_to_process = QtCore.Signal(object, object)
+
+    # ??? Emitted for each process
+    was_processed = QtCore.Signal(dict)
+
+    # Emitted when reset
+    # - all data are reset (plugins, processing, pair yielder, etc.)
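+    # (emitted from '_reset' after plugins are re-discovered, right before
+    # collection is triggered again)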
+    was_reset = QtCore.Signal()
+
+    # Emitted when previous group changed
+    passed_group = QtCore.Signal(object)
+
+    # Emitted when we want to change the toggle state of instances
+    switch_toggleability = QtCore.Signal(bool)
+
+    # On action finished
+    was_acted = QtCore.Signal(dict)
+
+    # Emitted when processing has stopped
+    was_stopped = QtCore.Signal()
+
+    # Emitted when processing has finished
+    was_finished = QtCore.Signal()
+
+    # Emitted when plugin was skipped
+    was_skipped = QtCore.Signal(object)
+
+    # Store OrderGroups - now it is a singleton
+    order_groups = util.OrderGroups
+
+    # When instance is toggled
+    instance_toggled = QtCore.Signal(object, object, object)
+
+    def __init__(self, parent=None):
+        super(Controller, self).__init__(parent)
+        self.context = None
+        self.plugins = {}
+        self.optional_default = {}
+        self.instance_toggled.connect(self._on_instance_toggled)
+        self._main_thread_processor = MainThreadProcess()
+
+        self._current_state = ""
+
+    def reset_variables(self):
+        self.log.debug("Resetting pyblish context variables")
+
+        # Data internal to the GUI itself
+        self.is_running = False
+        self.stopped = False
+        self.errored = False
+        self._current_state = ""
+
+        # Active producer of pairs
+        self.pair_generator = None
+        # Active pair
+        self.current_pair = None
+
+        # Orders which change the GUI
+        # - passing collectors order disables plugin/instance toggle
+        self.collect_state = 0
+
+        # - passing validators order disables the validate button and makes
+        #   it possible to know when to stop on validate button press
+        self.validators_order = None
+        self.validated = False
+
+        # Get collectors and validators order
+        plugin_groups_keys = list(self.order_groups.groups.keys())
+        self.validators_order = self.order_groups.validation_order
+        next_group_order = None
+        if len(plugin_groups_keys) > 1:
+            next_group_order = plugin_groups_keys[1]
+
+        # This is used to track whether or not to continue
+        # processing when, for example, validation has failed.
+        self.processing = {
+            "stop_on_validation": False,
+            # Used?
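+            # (kept up to date in '_pair_yielder' with the order of the
+            # last processed plugin)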
+            "last_plugin_order": None,
+            "current_group_order": plugin_groups_keys[0],
+            "next_group_order": next_group_order,
+            "nextOrder": None,
+            "ordersWithError": set()
+        }
+        self._set_state_by_order()
+        self.log.debug("Reset of pyblish context variables done")
+
+    @property
+    def current_state(self):
+        return self._current_state
+
+    def presets_by_hosts(self):
+        # Get global filters as base
+        presets = get_current_project_settings()
+        if not presets:
+            return {}
+
+        result = presets.get("global", {}).get("filters", {})
+        hosts = pyblish.api.registered_hosts()
+        for host in hosts:
+            host_presets = presets.get(host, {}).get("filters")
+            if not host_presets:
+                continue
+
+            for key, value in host_presets.items():
+                if value is None:
+                    if key in result:
+                        result.pop(key)
+                    continue
+
+                result[key] = value
+
+        return result
+
+    def reset_context(self):
+        self.log.debug("Resetting pyblish context object")
+
+        comment = None
+        if (
+            self.context is not None and
+            self.context.data.get("comment") and
+            # We only preserve the user typed comment if we are *not*
+            # resetting from a successful publish without errors
+            self._current_state != "Published"
+        ):
+            comment = self.context.data["comment"]
+
+        self.context = pyblish.api.Context()
+
+        self.context._publish_states = InstanceStates.ContextType
+        self.context.optional = False
+
+        self.context.data["publish"] = True
+        self.context.data["name"] = "context"
+
+        self.context.data["host"] = reversed(pyblish.api.registered_hosts())
+        self.context.data["port"] = int(
+            os.environ.get("PYBLISH_CLIENT_PORT", -1)
+        )
+        self.context.data["connectTime"] = pyblish.lib.time()
+        self.context.data["pyblishVersion"] = pyblish.version
+        self.context.data["pythonVersion"] = sys.version
+
+        self.context.data["icon"] = "book"
+
+        self.context.families = ("__context__",)
+
+        if comment:
+            # Preserve comment on reset if user previously had a comment
+            self.context.data["comment"] = comment
+
+        self.log.debug("Reset of pyblish context object done")
+
+    def reset(self):
+        """Discover plug-ins and run collection."""
+        self._main_thread_processor.clear()
+        self._main_thread_processor.process(self._reset)
+        self._main_thread_processor.start()
+
+    def _reset(self):
+        self.reset_context()
+        self.reset_variables()
+
+        self.possible_presets = self.presets_by_hosts()
+
+        # Load plugins and set pair generator
+        self.load_plugins()
+        self.pair_generator = self._pair_yielder(self.plugins)
+
+        self.was_reset.emit()
+
+        # Process collectors and load rest of plugins with collected
+        # instances
+        self.collect()
+
+    def load_plugins(self):
+        self.test = pyblish.logic.registered_test()
+        self.optional_default = {}
+
+        plugins = pyblish.api.discover()
+
+        targets = set(pyblish.logic.registered_targets())
+        targets.add("default")
+        targets = list(targets)
+        plugins_by_targets = pyblish.logic.plugins_by_targets(plugins, targets)
+
+        _plugins = []
+        for plugin in plugins_by_targets:
+            # Skip plugin if it is not optional and not active
+            if (
+                not getattr(plugin, "optional", False)
+                and not getattr(plugin, "active", True)
+            ):
+                continue
+            _plugins.append(plugin)
+        self.plugins = _plugins
+
+    def on_published(self):
+        if self.is_running:
+            self.is_running = False
+        self._current_state = (
+            "Published" if not self.errored else "Published, with errors"
+        )
+        self.was_finished.emit()
+        self._main_thread_processor.stop()
+
+    def stop(self):
+        self.log.debug("Stopping")
+        self.stopped = True
+
+    def act(self, plugin, action):
+        self.is_running = True
+        item = MainThreadItem(self._process_action, plugin, action)
+        self._main_thread_processor.add_item(item)
+        self._main_thread_processor.start()
+        self._main_thread_processor.stop_if_empty()
+
+    def _process_action(self, plugin, action):
+        result = pyblish.plugin.process(
+            plugin, self.context, None, action.id
+        )
+        self.is_running = False
+        self.was_acted.emit(result)
+
+    def emit_(self, signal, kwargs):
+        pyblish.api.emit(signal, **kwargs)
+
+    def _process(self, plugin, instance=None):
+        """Produce `result` from `plugin` and `instance`
+
+        :func:`process` shares state with :func:`_iterator` such that
+        an instance/plugin pair can be fetched and processed in isolation.
+
+        Arguments:
+            plugin (pyblish.api.Plugin): Produce result using plug-in
+            instance (optional, pyblish.api.Instance): Process this instance,
+                if no instance is provided, context is processed.
+        """
+
+        self.processing["nextOrder"] = plugin.order
+
+        try:
+            result = pyblish.plugin.process(plugin, self.context, instance)
+            # Make note of the order at which the
+            # potential error occurred.
+            if result["error"] is not None:
+                self.processing["ordersWithError"].add(plugin.order)
+
+        except Exception as exc:
+            raise Exception("Unknown error({}): {}".format(
+                plugin.__name__, str(exc)
+            ))
+
+        return result
+
+    def _pair_yielder(self, plugins):
+        for plugin in plugins:
+            if (
+                self.processing["current_group_order"] is not None
+                and plugin.order > self.processing["current_group_order"]
+            ):
+                current_group_order = self.processing["current_group_order"]
+
+                new_next_group_order = None
+                new_current_group_order = self.processing["next_group_order"]
+                if new_current_group_order is not None:
+                    current_next_order_found = False
+                    for order in self.order_groups.groups.keys():
+                        if current_next_order_found:
+                            new_next_group_order = order
+                            break
+
+                        if order == new_current_group_order:
+                            current_next_order_found = True
+
+                self.processing["next_group_order"] = new_next_group_order
+                self.processing["current_group_order"] = (
+                    new_current_group_order
+                )
+
+                # Force update to the current state
+                self._set_state_by_order()
+
+                if self.collect_state == 0:
+                    self.collect_state = 1
+                    self._current_state = (
+                        "Ready" if not self.errored else
+                        "Collected, with errors"
+                    )
+                    self.switch_toggleability.emit(True)
+                    self.passed_group.emit(current_group_order)
+                    yield IterationBreak("Collected")
+
+                else:
+                    self.passed_group.emit(current_group_order)
+                    if self.errored:
+                        self._current_state = (
+                            "Stopped, due to errors" if not
+                            self.processing["stop_on_validation"] else
+                            "Validated, with errors"
+                        )
+                        yield IterationBreak("Last group errored")
+
+            if self.collect_state == 1:
+                self.collect_state = 2
+                self.switch_toggleability.emit(False)
+
+            if not self.validated and plugin.order > self.validators_order:
+                self.validated = True
+                if self.processing["stop_on_validation"]:
+                    self._current_state = (
+                        "Validated" if not self.errored else
+                        "Validated, with errors"
+                    )
+                    yield IterationBreak("Validated")
+
+            # Stop if was stopped
+            if self.stopped:
+                self.stopped = False
+                self._current_state = "Paused"
+                yield IterationBreak("Stopped")
+
+            # Check test if it will stop
+            self.processing["nextOrder"] = plugin.order
+            message = self.test(**self.processing)
+            if message:
+                self._current_state = "Paused"
+                yield IterationBreak("Stopped due to \"{}\"".format(message))
+
+            self.processing["last_plugin_order"] = plugin.order
+            if not plugin.active:
+                pyblish.logic.log.debug("%s was inactive, skipping.." % plugin)
+                self.was_skipped.emit(plugin)
+                continue
+
+            in_collect_stage = self.collect_state == 0
+            if plugin.__instanceEnabled__:
+                instances = pyblish.logic.instances_by_plugin(
+                    self.context, plugin
+                )
+                if not instances:
+                    self.was_skipped.emit(plugin)
+                    continue
+
+                for instance in instances:
+                    if (
+                        not in_collect_stage
+                        and instance.data.get("publish") is False
+                    ):
+                        pyblish.logic.log.debug(
+                            "%s was inactive, skipping.." % instance
+                        )
+                        continue
+                    # Stop if was stopped
+                    if self.stopped:
+                        self.stopped = False
+                        self._current_state = "Paused"
+                        yield IterationBreak("Stopped")
+
+                    yield (plugin, instance)
+            else:
+                families = util.collect_families_from_instances(
+                    self.context, only_active=not in_collect_stage
+                )
+                plugins = pyblish.logic.plugins_by_families(
+                    [plugin], families
+                )
+                if not plugins:
+                    self.was_skipped.emit(plugin)
+                    continue
+                yield (plugin, None)
+
+        self.passed_group.emit(self.processing["next_group_order"])
+
+    def iterate_and_process(self, on_finished=None):
+        """Iterate over inserted plugins with the current context.
+
+        Collectors do not contain instances, they are None when collecting!
+        This process does not stop when a single pair fails.
+        """
+        self._main_thread_processor.start()
+
+        def on_next():
+            self.log.debug("Looking for next pair to process")
+            try:
+                self.current_pair = next(self.pair_generator)
+                if isinstance(self.current_pair, IterationBreak):
+                    raise self.current_pair
+
+            except IterationBreak:
+                self.log.debug("Iteration break was raised")
+                self.is_running = False
+                self.was_stopped.emit()
+                self._main_thread_processor.stop()
+                return
+
+            except StopIteration:
+                self.log.debug("Iteration stop was raised")
+                self.is_running = False
+                # All pairs were processed successfully!
+                if on_finished is not None:
+                    self._main_thread_processor.add_item(
+                        MainThreadItem(on_finished)
+                    )
+                self._main_thread_processor.stop_if_empty()
+                return
+
+            except Exception as exc:
+                self.log.warning(
+                    "Unexpected exception during `on_next` happened",
+                    exc_info=True
+                )
+                exc_msg = str(exc)
+                self._main_thread_processor.add_item(
+                    MainThreadItem(on_unexpected_error, error=exc_msg)
+                )
+                return
+
+            self.about_to_process.emit(*self.current_pair)
+            self._main_thread_processor.add_item(
+                MainThreadItem(on_process)
+            )
+
+        def on_process():
+            try:
+                self.log.debug(
+                    "Processing pair: {}".format(str(self.current_pair))
+                )
+                result = self._process(*self.current_pair)
+                if result["error"] is not None:
+                    self.log.debug("Error happened")
+                    self.errored = True
+
+                self.log.debug("Pair processed")
+                self.was_processed.emit(result)
+
+            except Exception as exc:
+                self.log.warning(
+                    "Unexpected exception during `on_process` happened",
+                    exc_info=True
+                )
+                exc_msg = str(exc)
+                self._main_thread_processor.add_item(
+                    MainThreadItem(on_unexpected_error, error=exc_msg)
+                )
+                return
+
+            self._main_thread_processor.add_item(
+                MainThreadItem(on_next)
+            )
+
+        def on_unexpected_error(error):
+            # TODO this should be handled much differently
+            # TODO emit crash signal to show message box with traceback?
+            self.is_running = False
+            self.was_stopped.emit()
+            util.u_print(u"An unexpected error occurred:\n %s" % error)
+            if on_finished is not None:
+                self._main_thread_processor.add_item(
+                    MainThreadItem(on_finished)
+                )
+            self._main_thread_processor.stop_if_empty()
+
+        self.is_running = True
+        self._main_thread_processor.add_item(
+            MainThreadItem(on_next)
+        )
+
+    def _set_state_by_order(self):
+        order = self.processing["current_group_order"]
+        self._current_state = self.order_groups.groups[order]["state"]
+
+    def collect(self):
+        """Iterate and process Collect plugins.
+
+        - 'load_plugins' method is launched again when finished.
+        """
+        self._set_state_by_order()
+        self._main_thread_processor.process(self._start_collect)
+        self._main_thread_processor.start()
+
+    def validate(self):
+        """Process plugins up to the 'validators_order' value."""
+        self._set_state_by_order()
+        self._main_thread_processor.process(self._start_validate)
+        self._main_thread_processor.start()
+
+    def publish(self):
+        """Iterate and process all remaining plugins."""
+        self._set_state_by_order()
+        self._main_thread_processor.process(self._start_publish)
+        self._main_thread_processor.start()
+
+    def _start_collect(self):
+        self.iterate_and_process()
+
+    def _start_validate(self):
+        self.processing["stop_on_validation"] = True
+        self.iterate_and_process()
+
+    def _start_publish(self):
+        self.processing["stop_on_validation"] = False
+        self.iterate_and_process(self.on_published)
+
+    def cleanup(self):
+        """Forcefully delete objects from memory.
+
+        In an ideal world, this shouldn't be necessary. Garbage
+        collection guarantees that anything without reference
+        is automatically removed.
+
+        However, because this application is designed to be run
+        multiple times from the same interpreter process, extra
+        care must be taken to ensure there are no memory leaks.
+
+        Explicitly deleting objects shines a light on where objects
+        may still be referenced in the form of an error. No errors
+        means this was unnecessary, but that's ok.
+        """
+
+        for instance in self.context:
+            del(instance)
+
+        for plugin in self.plugins:
+            del(plugin)
+
+    def _on_instance_toggled(self, instance, old_value, new_value):
+        callbacks = pyblish.api.registered_callbacks().get("instanceToggled")
+        if not callbacks:
+            return
+
+        for callback in callbacks:
+            try:
+                callback(instance, old_value, new_value)
+            except Exception:
+                self.log.warning(
+                    "Callback for `instanceToggled` crashed. 
{}".format( + os.path.abspath(inspect.getfile(callback)) + ), + exc_info=True + ) diff --git a/openpype/tools/pyblish_pype/delegate.py b/client/ayon_core/tools/pyblish_pype/delegate.py similarity index 100% rename from openpype/tools/pyblish_pype/delegate.py rename to client/ayon_core/tools/pyblish_pype/delegate.py diff --git a/openpype/tools/pyblish_pype/font/fontawesome/fontawesome-webfont.ttf b/client/ayon_core/tools/pyblish_pype/font/fontawesome/fontawesome-webfont.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/fontawesome/fontawesome-webfont.ttf rename to client/ayon_core/tools/pyblish_pype/font/fontawesome/fontawesome-webfont.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/LICENSE.txt b/client/ayon_core/tools/pyblish_pype/font/opensans/LICENSE.txt similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/LICENSE.txt rename to client/ayon_core/tools/pyblish_pype/font/opensans/LICENSE.txt diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-Bold.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Bold.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-Bold.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Bold.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-BoldItalic.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-BoldItalic.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-BoldItalic.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-BoldItalic.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-ExtraBold.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-ExtraBold.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-ExtraBold.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-ExtraBold.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-ExtraBoldItalic.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-ExtraBoldItalic.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-ExtraBoldItalic.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-ExtraBoldItalic.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-Italic.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Italic.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-Italic.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Italic.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-Light.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Light.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-Light.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Light.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-LightItalic.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-LightItalic.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-LightItalic.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-LightItalic.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-Regular.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Regular.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-Regular.ttf rename to 
client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Regular.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-Semibold.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Semibold.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-Semibold.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-Semibold.ttf diff --git a/openpype/tools/pyblish_pype/font/opensans/OpenSans-SemiboldItalic.ttf b/client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-SemiboldItalic.ttf similarity index 100% rename from openpype/tools/pyblish_pype/font/opensans/OpenSans-SemiboldItalic.ttf rename to client/ayon_core/tools/pyblish_pype/font/opensans/OpenSans-SemiboldItalic.ttf diff --git a/openpype/tools/pyblish_pype/i18n/pyblish_lite.pro b/client/ayon_core/tools/pyblish_pype/i18n/pyblish_lite.pro similarity index 100% rename from openpype/tools/pyblish_pype/i18n/pyblish_lite.pro rename to client/ayon_core/tools/pyblish_pype/i18n/pyblish_lite.pro diff --git a/openpype/tools/pyblish_pype/i18n/zh_CN.qm b/client/ayon_core/tools/pyblish_pype/i18n/zh_CN.qm similarity index 100% rename from openpype/tools/pyblish_pype/i18n/zh_CN.qm rename to client/ayon_core/tools/pyblish_pype/i18n/zh_CN.qm diff --git a/openpype/tools/pyblish_pype/i18n/zh_CN.ts b/client/ayon_core/tools/pyblish_pype/i18n/zh_CN.ts similarity index 100% rename from openpype/tools/pyblish_pype/i18n/zh_CN.ts rename to client/ayon_core/tools/pyblish_pype/i18n/zh_CN.ts diff --git a/openpype/tools/pyblish_pype/img/down_arrow.png b/client/ayon_core/tools/pyblish_pype/img/down_arrow.png similarity index 100% rename from openpype/tools/pyblish_pype/img/down_arrow.png rename to client/ayon_core/tools/pyblish_pype/img/down_arrow.png diff --git a/openpype/tools/pyblish_pype/img/logo-extrasmall.png b/client/ayon_core/tools/pyblish_pype/img/logo-extrasmall.png similarity index 100% rename from openpype/tools/pyblish_pype/img/logo-extrasmall.png rename to client/ayon_core/tools/pyblish_pype/img/logo-extrasmall.png diff --git a/openpype/tools/pyblish_pype/img/tab-overview.png b/client/ayon_core/tools/pyblish_pype/img/tab-overview.png similarity index 100% rename from openpype/tools/pyblish_pype/img/tab-overview.png rename to client/ayon_core/tools/pyblish_pype/img/tab-overview.png diff --git a/openpype/tools/pyblish_pype/img/tab-terminal.png b/client/ayon_core/tools/pyblish_pype/img/tab-terminal.png similarity index 100% rename from openpype/tools/pyblish_pype/img/tab-terminal.png rename to client/ayon_core/tools/pyblish_pype/img/tab-terminal.png diff --git a/openpype/tools/pyblish_pype/mock.py b/client/ayon_core/tools/pyblish_pype/mock.py similarity index 100% rename from openpype/tools/pyblish_pype/mock.py rename to client/ayon_core/tools/pyblish_pype/mock.py diff --git a/client/ayon_core/tools/pyblish_pype/model.py b/client/ayon_core/tools/pyblish_pype/model.py new file mode 100644 index 0000000000..4c91fb567f --- /dev/null +++ b/client/ayon_core/tools/pyblish_pype/model.py @@ -0,0 +1,1123 @@ +"""Qt models + +Description: + The model contains the original objects from Pyblish, such as + pyblish.api.Instance and pyblish.api.Plugin. The model then + provides an interface for reading and writing to those. + +GUI data: + Aside from original data, such as pyblish.api.Plugin.optional, + the GUI also hosts data internal to itself, such as whether or + not an item has processed such that it may be colored appropriately + in the view. This data is prefixed with two underscores (__). 
+
+    E.g.
+
+    __has_processed
+
+    This is so that the GUI-only data doesn't accidentally overwrite
+    or cause confusion with existing data in plug-ins and instances.
+
+Roles:
+    Data is accessed via standard Qt "roles". You can think of a role
+    as the key of a dictionary, except they can only be integers.
+
+"""
+from __future__ import unicode_literals
+
+import pyblish
+
+from . import settings, util
+from .awesome import tags as awesome
+from qtpy import QtCore, QtGui
+import qtawesome
+from six import text_type
+from .constants import PluginStates, InstanceStates, GroupStates, Roles
+
+from ayon_core.settings import get_system_settings
+
+
+# ItemTypes
+UserType = QtGui.QStandardItem.UserType
+if hasattr(UserType, "value"):
+    UserType = UserType.value
+InstanceType = UserType
+PluginType = UserType + 1
+GroupType = UserType + 2
+TerminalLabelType = UserType + 3
+TerminalDetailType = UserType + 4
+
+
+class QAwesomeTextIconFactory:
+    icons = {}
+
+    @classmethod
+    def icon(cls, icon_name):
+        if icon_name not in cls.icons:
+            cls.icons[icon_name] = awesome.get(icon_name)
+        return cls.icons[icon_name]
+
+
+class QAwesomeIconFactory:
+    icons = {}
+
+    @classmethod
+    def icon(cls, icon_name, icon_color):
+        if icon_name not in cls.icons:
+            cls.icons[icon_name] = {}
+
+        if icon_color not in cls.icons[icon_name]:
+            cls.icons[icon_name][icon_color] = qtawesome.icon(
+                icon_name,
+                color=icon_color
+            )
+        return cls.icons[icon_name][icon_color]
+
+
+class IntentModel(QtGui.QStandardItemModel):
+    """Model for QComboBox with intents.
+
+    It is expected that each inserted item is a dictionary.
+    Key represents #Label and Value represents #Value.
+
+    Example:
+        {
+            "Testing": "test",
+            "Publishing": "publish"
+        }
+
+    First and default value is {"< Not Set >": None}
+    """
+
+    default_empty_label = "< Not set >"
+
+    def __init__(self, parent=None):
+        super(IntentModel, self).__init__(parent)
+        self._item_count = 0
+        self.default_index = 0
+
+    @property
+    def has_items(self):
+        return self._item_count > 0
+
+    def reset(self):
+        self.clear()
+        self._item_count = 0
+        self.default_index = 0
+
+        intent_settings = (
+            get_system_settings()
+            .get("modules", {})
+            .get("ftrack", {})
+            .get("intent", {})
+        )
+
+        items = intent_settings.get("items", {})
+        if not items:
+            return
+
+        allow_empty_intent = intent_settings.get("allow_empty_intent", True)
+        empty_intent_label = (
+            intent_settings.get("empty_intent_label")
+            or self.default_empty_label
+        )
+        listed_items = list(items.items())
+        if allow_empty_intent:
+            listed_items.insert(0, ("", empty_intent_label))
+
+        default = intent_settings.get("default")
+
+        for idx, item in enumerate(listed_items):
+            item_value = item[0]
+            if item_value == default:
+                self.default_index = idx
+                break
+
+        self._add_items(listed_items)
+
+    def _add_items(self, items):
+        for item in items:
+            value, label = item
+            new_item = QtGui.QStandardItem()
+            new_item.setData(label, QtCore.Qt.DisplayRole)
+            new_item.setData(value, Roles.IntentItemValue)
+
+            self.setItem(self._item_count, new_item)
+            self._item_count += 1
+
+
+class PluginItem(QtGui.QStandardItem):
+    """Plugin item implementation."""
+
+    def __init__(self, plugin):
+        super(PluginItem, self).__init__()
+
+        item_text = plugin.__name__
+        if settings.UseLabel:
+            if hasattr(plugin, "label") and plugin.label:
+                item_text = plugin.label
+
+        self.plugin = plugin
+
+        self.setData(item_text, QtCore.Qt.DisplayRole)
+        self.setData(False, Roles.IsEnabledRole)
+        self.setData(0, Roles.PublishFlagsRole)
+        self.setData(0,
Roles.PluginActionProgressRole) + icon_name = "" + if hasattr(plugin, "icon") and plugin.icon: + icon_name = plugin.icon + icon = QAwesomeTextIconFactory.icon(icon_name) + self.setData(icon, QtCore.Qt.DecorationRole) + + actions = [] + if hasattr(plugin, "actions") and plugin.actions: + actions = list(plugin.actions) + plugin.actions = actions + + is_checked = True + is_optional = getattr(plugin, "optional", False) + if is_optional: + is_checked = getattr(plugin, "active", True) + + plugin.active = is_checked + plugin.optional = is_optional + + self.setData( + "{}.{}".format(plugin.__module__, plugin.__name__), + Roles.ObjectUIdRole + ) + + self.setFlags( + QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled + ) + + def type(self): + return PluginType + + def data(self, role=QtCore.Qt.DisplayRole): + if role == Roles.IsOptionalRole: + return self.plugin.optional + + if role == Roles.ObjectIdRole: + return self.plugin.id + + if role == Roles.TypeRole: + return self.type() + + if role == QtCore.Qt.CheckStateRole: + return self.plugin.active + + if role == Roles.PathModuleRole: + return self.plugin.__module__ + + if role == Roles.FamiliesRole: + return self.plugin.families + + if role == Roles.DocstringRole: + return self.plugin.__doc__ + + if role == Roles.PluginActionsVisibleRole: + return self._data_actions_visible() + + if role == Roles.PluginValidActionsRole: + return self._data_valid_actions() + + return super(PluginItem, self).data(role) + + def _data_actions_visible(self): + # Can only run actions on active plug-ins. + if not self.plugin.active or not self.plugin.actions: + return False + + publish_states = self.data(Roles.PublishFlagsRole) + if ( + not publish_states & PluginStates.IsCompatible + or publish_states & PluginStates.WasSkipped + ): + return False + + # Context specific actions + for action in self.plugin.actions: + if action.on == "failed": + if publish_states & PluginStates.HasError: + return True + + elif action.on == "succeeded": + if ( + publish_states & PluginStates.WasProcessed + and not publish_states & PluginStates.HasError + ): + return True + + elif action.on == "processed": + if publish_states & PluginStates.WasProcessed: + return True + + elif action.on == "notProcessed": + if not publish_states & PluginStates.WasProcessed: + return True + return False + + def _data_valid_actions(self): + valid_actions = [] + + # Can only run actions on active plug-ins. 
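+        # (pyblish actions declare an ``on`` attribute -- "failed",
+        # "succeeded", "processed" or "notProcessed" -- which is matched
+        # against the plug-in's publish state below.)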
+        if not self.plugin.active or not self.plugin.actions:
+            return valid_actions
+
+        publish_states = self.data(Roles.PublishFlagsRole)
+        if (
+            not publish_states & PluginStates.IsCompatible
+            or publish_states & PluginStates.WasSkipped
+        ):
+            return valid_actions
+
+        # Context specific actions
+        for action in self.plugin.actions:
+            valid = False
+            if action.on == "failed":
+                if publish_states & PluginStates.HasError:
+                    valid = True
+
+            elif action.on == "succeeded":
+                if (
+                    publish_states & PluginStates.WasProcessed
+                    and not publish_states & PluginStates.HasError
+                ):
+                    valid = True
+
+            elif action.on == "processed":
+                if publish_states & PluginStates.WasProcessed:
+                    valid = True
+
+            elif action.on == "notProcessed":
+                if not publish_states & PluginStates.WasProcessed:
+                    valid = True
+
+            if valid:
+                valid_actions.append(action)
+
+        if not valid_actions:
+            return valid_actions
+
+        actions_len = len(valid_actions)
+        # Discard empty groups
+        indexes_to_remove = []
+        for idx, action in enumerate(valid_actions):
+            if action.__type__ != "category":
+                continue
+
+            next_id = idx + 1
+            if next_id >= actions_len:
+                indexes_to_remove.append(idx)
+                continue
+
+            next_action = valid_actions[next_id]
+            if next_action.__type__ != "action":
+                indexes_to_remove.append(idx)
+
+        for idx in reversed(indexes_to_remove):
+            valid_actions.pop(idx)
+
+        return valid_actions
+
+    def setData(self, value, role=None):
+        if role is None:
+            role = QtCore.Qt.UserRole + 1
+
+        if role == QtCore.Qt.CheckStateRole:
+            if not self.data(Roles.IsEnabledRole):
+                return False
+            self.plugin.active = value
+            self.emitDataChanged()
+            return
+
+        elif role == Roles.PluginActionProgressRole:
+            if isinstance(value, list):
+                _value = self.data(Roles.PluginActionProgressRole)
+                for flag in value:
+                    _value |= flag
+                value = _value
+
+            elif isinstance(value, dict):
+                _value = self.data(Roles.PluginActionProgressRole)
+                for flag, _bool in value.items():
+                    if _bool is True:
+                        _value |= flag
+                    elif _value & flag:
+                        _value ^= flag
+                value = _value
+
+        elif role == Roles.PublishFlagsRole:
+            if isinstance(value, list):
+                _value = self.data(Roles.PublishFlagsRole)
+                for flag in value:
+                    _value |= flag
+                value = _value
+
+            elif isinstance(value, dict):
+                _value = self.data(Roles.PublishFlagsRole)
+                for flag, _bool in value.items():
+                    if _bool is True:
+                        _value |= flag
+                    elif _value & flag:
+                        _value ^= flag
+                value = _value
+
+            if value & PluginStates.HasWarning:
+                if self.parent():
+                    self.parent().setData(
+                        {GroupStates.HasWarning: True},
+                        Roles.PublishFlagsRole
+                    )
+            if value & PluginStates.HasError:
+                if self.parent():
+                    self.parent().setData(
+                        {GroupStates.HasError: True},
+                        Roles.PublishFlagsRole
+                    )
+
+        return super(PluginItem, self).setData(value, role)
+
+
+class GroupItem(QtGui.QStandardItem):
+    def __init__(self, *args, **kwargs):
+        self.order = kwargs.pop("order", None)
+        self.publish_states = 0
+        super(GroupItem, self).__init__(*args, **kwargs)
+
+    def flags(self):
+        return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
+
+    def data(self, role=QtCore.Qt.DisplayRole):
+        if role == Roles.PublishFlagsRole:
+            return self.publish_states
+
+        if role == Roles.TypeRole:
+            return self.type()
+
+        return super(GroupItem, self).data(role)
+
+    def setData(self, value, role=(QtCore.Qt.UserRole + 1)):
+        if role == Roles.PublishFlagsRole:
+            if isinstance(value, list):
+                _value = self.data(Roles.PublishFlagsRole)
+                for flag in value:
+                    _value |= flag
+                value = _value
+
+            elif isinstance(value,
dict): + _value = self.data(Roles.PublishFlagsRole) + for flag, _bool in value.items(): + if _bool is True: + _value |= flag + elif _value & flag: + _value ^= flag + value = _value + self.publish_states = value + self.emitDataChanged() + return True + + return super(GroupItem, self).setData(value, role) + + def type(self): + return GroupType + + +class PluginModel(QtGui.QStandardItemModel): + def __init__(self, controller, *args, **kwargs): + super(PluginModel, self).__init__(*args, **kwargs) + + self.controller = controller + self.checkstates = {} + self.group_items = {} + self.plugin_items = {} + + def reset(self): + self.group_items = {} + self.plugin_items = {} + self.clear() + + def append(self, plugin): + plugin_groups = self.controller.order_groups.groups + label = None + order = None + for _order, item in reversed(plugin_groups.items()): + if _order is None or plugin.order < _order: + label = item["label"] + order = _order + else: + break + + if label is None: + label = "Other" + + group_item = self.group_items.get(label) + if not group_item: + group_item = GroupItem(label, order=order) + self.appendRow(group_item) + self.group_items[label] = group_item + + new_item = PluginItem(plugin) + group_item.appendRow(new_item) + + self.plugin_items[plugin._id] = new_item + + def store_checkstates(self): + self.checkstates.clear() + + for plugin_item in self.plugin_items.values(): + if not plugin_item.plugin.optional: + continue + + uid = plugin_item.data(Roles.ObjectUIdRole) + self.checkstates[uid] = plugin_item.data(QtCore.Qt.CheckStateRole) + + def restore_checkstates(self): + for plugin_item in self.plugin_items.values(): + if not plugin_item.plugin.optional: + continue + + uid = plugin_item.data(Roles.ObjectUIdRole) + state = self.checkstates.get(uid) + if state is not None: + plugin_item.setData(state, QtCore.Qt.CheckStateRole) + + def update_with_result(self, result): + plugin = result["plugin"] + item = self.plugin_items[plugin.id] + + new_flag_states = { + PluginStates.InProgress: False, + PluginStates.WasProcessed: True + } + + publish_states = item.data(Roles.PublishFlagsRole) + + has_warning = publish_states & PluginStates.HasWarning + new_records = result.get("records") or [] + if not has_warning: + for record in new_records: + level_no = record.get("levelno") + if level_no and level_no >= 30: + new_flag_states[PluginStates.HasWarning] = True + break + + if ( + not publish_states & PluginStates.HasError + and not result["success"] + ): + new_flag_states[PluginStates.HasError] = True + + if not publish_states & PluginStates.IsCompatible: + new_flag_states[PluginStates.IsCompatible] = True + + item.setData(new_flag_states, Roles.PublishFlagsRole) + + records = item.data(Roles.LogRecordsRole) or [] + records.extend(new_records) + + item.setData(records, Roles.LogRecordsRole) + + return item + + def update_compatibility(self): + context = self.controller.context + + families = util.collect_families_from_instances(context, True) + for plugin_item in self.plugin_items.values(): + publish_states = plugin_item.data(Roles.PublishFlagsRole) + if ( + publish_states & PluginStates.WasProcessed + or publish_states & PluginStates.WasSkipped + ): + continue + + is_compatible = False + # A plugin should always show if it has processed. 
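+            # ``__instanceEnabled__`` is set by pyblish and tells whether
+            # the plug-in processes per instance or once per context.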
+ if plugin_item.plugin.__instanceEnabled__: + compatible_instances = pyblish.logic.instances_by_plugin( + context, plugin_item.plugin + ) + for instance in context: + if not instance.data.get("publish"): + continue + + if instance in compatible_instances: + is_compatible = True + break + else: + plugins = pyblish.logic.plugins_by_families( + [plugin_item.plugin], families + ) + if plugins: + is_compatible = True + + current_is_compatible = publish_states & PluginStates.IsCompatible + if ( + (is_compatible and not current_is_compatible) + or (not is_compatible and current_is_compatible) + ): + new_flag = { + PluginStates.IsCompatible: is_compatible + } + plugin_item.setData(new_flag, Roles.PublishFlagsRole) + + +class PluginFilterProxy(QtCore.QSortFilterProxyModel): + def filterAcceptsRow(self, source_row, source_parent): + index = self.sourceModel().index(source_row, 0, source_parent) + item_type = index.data(Roles.TypeRole) + if item_type != PluginType: + return True + + publish_states = index.data(Roles.PublishFlagsRole) + if ( + publish_states & PluginStates.WasSkipped + or not publish_states & PluginStates.IsCompatible + ): + return False + return True + + +class InstanceItem(QtGui.QStandardItem): + """Instance item implementation.""" + + def __init__(self, instance): + super(InstanceItem, self).__init__() + + self.instance = instance + self.is_context = False + publish_states = getattr(instance, "_publish_states", 0) + if publish_states & InstanceStates.ContextType: + self.is_context = True + + instance._publish_states = publish_states + instance._logs = [] + instance.optional = getattr(instance, "optional", True) + instance.data["publish"] = instance.data.get("publish", True) + + family = self.data(Roles.FamiliesRole)[0] + self.setData( + "{}.{}".format(family, self.instance.data["name"]), + Roles.ObjectUIdRole + ) + + def flags(self): + return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled + + def type(self): + return InstanceType + + def data(self, role=QtCore.Qt.DisplayRole): + if role == QtCore.Qt.DisplayRole: + label = None + if settings.UseLabel: + label = self.instance.data.get("label") + + if not label: + if self.is_context: + label = "Context" + else: + label = self.instance.data["name"] + return label + + if role == QtCore.Qt.DecorationRole: + icon_name = self.instance.data.get("icon") or "file" + return QAwesomeTextIconFactory.icon(icon_name) + + if role == Roles.TypeRole: + return self.type() + + if role == Roles.ObjectIdRole: + return self.instance.id + + if role == Roles.FamiliesRole: + if self.is_context: + return ["Context"] + + families = [] + family = self.instance.data.get("family") + if family: + families.append(family) + + _families = self.instance.data.get("families") or [] + for _family in _families: + if _family not in families: + families.append(_family) + + return families + + if role == Roles.IsOptionalRole: + return self.instance.optional + + if role == QtCore.Qt.CheckStateRole: + return self.instance.data["publish"] + + if role == Roles.PublishFlagsRole: + return self.instance._publish_states + + if role == Roles.LogRecordsRole: + return self.instance._logs + + return super(InstanceItem, self).data(role) + + def setData(self, value, role=(QtCore.Qt.UserRole + 1)): + if role == QtCore.Qt.CheckStateRole: + if not self.data(Roles.IsEnabledRole): + return + self.instance.data["publish"] = value + self.emitDataChanged() + return + + if role == Roles.IsEnabledRole: + if not self.instance.optional: + return + + if role == Roles.PublishFlagsRole: + if 
isinstance(value, list): + _value = self.instance._publish_states + for flag in value: + _value |= flag + value = _value + + elif isinstance(value, dict): + _value = self.instance._publish_states + for flag, _bool in value.items(): + if _bool is True: + _value |= flag + elif _value & flag: + _value ^= flag + value = _value + + if value & InstanceStates.HasWarning: + if self.parent(): + self.parent().setData( + {GroupStates.HasWarning: True}, + Roles.PublishFlagsRole + ) + if value & InstanceStates.HasError: + if self.parent(): + self.parent().setData( + {GroupStates.HasError: True}, + Roles.PublishFlagsRole + ) + + self.instance._publish_states = value + self.emitDataChanged() + return + + if role == Roles.LogRecordsRole: + self.instance._logs = value + self.emitDataChanged() + return + + return super(InstanceItem, self).setData(value, role) + + +class InstanceModel(QtGui.QStandardItemModel): + + group_created = QtCore.Signal(QtCore.QModelIndex) + + def __init__(self, controller, *args, **kwargs): + super(InstanceModel, self).__init__(*args, **kwargs) + + self.controller = controller + self.checkstates = {} + self.group_items = {} + self.instance_items = {} + + def reset(self): + self.group_items = {} + self.instance_items = {} + self.clear() + + def append(self, instance): + new_item = InstanceItem(instance) + if new_item.is_context: + self.appendRow(new_item) + else: + families = new_item.data(Roles.FamiliesRole) + group_item = self.group_items.get(families[0]) + if not group_item: + group_item = GroupItem(families[0]) + self.appendRow(group_item) + self.group_items[families[0]] = group_item + self.group_created.emit(group_item.index()) + + group_item.appendRow(new_item) + instance_id = instance.id + self.instance_items[instance_id] = new_item + + def remove(self, instance_id): + instance_item = self.instance_items.pop(instance_id) + parent_item = instance_item.parent() + parent_item.removeRow(instance_item.row()) + if parent_item.rowCount(): + return + + self.group_items.pop(parent_item.data(QtCore.Qt.DisplayRole)) + self.removeRow(parent_item.row()) + + def store_checkstates(self): + self.checkstates.clear() + + for instance_item in self.instance_items.values(): + if not instance_item.instance.optional: + continue + + uid = instance_item.data(Roles.ObjectUIdRole) + self.checkstates[uid] = instance_item.data( + QtCore.Qt.CheckStateRole + ) + + def restore_checkstates(self): + for instance_item in self.instance_items.values(): + if not instance_item.instance.optional: + continue + + uid = instance_item.data(Roles.ObjectUIdRole) + state = self.checkstates.get(uid) + if state is not None: + instance_item.setData(state, QtCore.Qt.CheckStateRole) + + def update_with_result(self, result): + instance = result["instance"] + if instance is None: + instance_id = self.controller.context.id + else: + instance_id = instance.id + + item = self.instance_items.get(instance_id) + if not item: + return + + new_flag_states = { + InstanceStates.InProgress: False + } + + publish_states = item.data(Roles.PublishFlagsRole) + has_warning = publish_states & InstanceStates.HasWarning + new_records = result.get("records") or [] + if not has_warning: + for record in new_records: + level_no = record.get("levelno") + if level_no and level_no >= 30: + new_flag_states[InstanceStates.HasWarning] = True + break + + if ( + not publish_states & InstanceStates.HasError + and not result["success"] + ): + new_flag_states[InstanceStates.HasError] = True + + item.setData(new_flag_states, Roles.PublishFlagsRole) + + records = 
item.data(Roles.LogRecordsRole) or []
+        records.extend(new_records)
+
+        item.setData(records, Roles.LogRecordsRole)
+
+        return item
+
+    def update_compatibility(self, context, instances):
+        families = util.collect_families_from_instances(context, True)
+        for plugin_item in self.plugin_items.values():
+            publish_states = plugin_item.data(Roles.PublishFlagsRole)
+            if (
+                publish_states & PluginStates.WasProcessed
+                or publish_states & PluginStates.WasSkipped
+            ):
+                continue
+
+            is_compatible = False
+            # A plugin should always show if it has processed.
+            if plugin_item.plugin.__instanceEnabled__:
+                compatible_instances = pyblish.logic.instances_by_plugin(
+                    context, plugin_item.plugin
+                )
+                for instance in instances:
+                    if not instance.data.get("publish"):
+                        continue
+
+                    if instance in compatible_instances:
+                        is_compatible = True
+                        break
+            else:
+                plugins = pyblish.logic.plugins_by_families(
+                    [plugin_item.plugin], families
+                )
+                if plugins:
+                    is_compatible = True
+
+            current_is_compatible = publish_states & PluginStates.IsCompatible
+            if (
+                (is_compatible and not current_is_compatible)
+                or (not is_compatible and current_is_compatible)
+            ):
+                plugin_item.setData(
+                    {PluginStates.IsCompatible: is_compatible},
+                    Roles.PublishFlagsRole
+                )
+
+
+class InstanceSortProxy(QtCore.QSortFilterProxyModel):
+    def __init__(self, *args, **kwargs):
+        super(InstanceSortProxy, self).__init__(*args, **kwargs)
+        # Do not care about lower/upper case
+        self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
+
+    def lessThan(self, x_index, y_index):
+        x_type = x_index.data(Roles.TypeRole)
+        y_type = y_index.data(Roles.TypeRole)
+        if x_type != y_type:
+            if x_type == GroupType:
+                return False
+            return True
+        return super(InstanceSortProxy, self).lessThan(x_index, y_index)
+
+
+class TerminalDetailItem(QtGui.QStandardItem):
+    key_label_record_map = (
+        ("instance", "Instance"),
+        ("msg", "Message"),
+        ("name", "Plugin"),
+        ("pathname", "Path"),
+        ("lineno", "Line"),
+        ("traceback", "Traceback"),
+        ("levelname", "Level"),
+        ("threadName", "Thread"),
+        ("msecs", "Millis")
+    )
+
+    def __init__(self, record_item):
+        self.record_item = record_item
+        self.msg = None
+        msg = record_item.get("msg")
+        if msg is None:
+            msg = record_item["label"].split("\n")[0]
+
+        super(TerminalDetailItem, self).__init__(msg)
+
+    def data(self, role=QtCore.Qt.DisplayRole):
+        if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
+            if self.msg is None:
+                self.msg = self.compute_detail_text(self.record_item)
+            return self.msg
+        return super(TerminalDetailItem, self).data(role)
+
+    def compute_detail_text(self, item_data):
+        if item_data["type"] == "info":
+            return item_data["label"]
+
+        html_text = ""
+        for key, title in self.key_label_record_map:
+            if key not in item_data:
+                continue
+            value = item_data[key]
+            text = (
+                str(value)
+                .replace("<", "&#60;")
+                .replace(">", "&#62;")
+                .replace('\n', '<br/>')
+                .replace(' ', '&nbsp;')
+            )
+
+            title_tag = (
+                '<span style=\" font-weight:600;'
+                ' color:#fff;\" >{}: </span>'
+            ).format(title)
+
+            html_text += (
+                '<tr><td>{}</td>'
+                '<td>{}</td></tr>'
+            ).format(title_tag, text)
+
+        html_text = '<table>{}</table>
'.format( + html_text + ) + return html_text + + +class TerminalModel(QtGui.QStandardItemModel): + item_icon_name = { + "info": "fa.info", + "record": "fa.circle", + "error": "fa.exclamation-triangle", + } + + item_icon_colors = { + "info": "#ffffff", + "error": "#ff4a4a", + "log_debug": "#ff66e8", + "log_info": "#66abff", + "log_warning": "#ffba66", + "log_error": "#ff4d58", + "log_critical": "#ff4f75", + None: "#333333" + } + + level_to_record = ( + (10, "log_debug"), + (20, "log_info"), + (30, "log_warning"), + (40, "log_error"), + (50, "log_critical") + + ) + + def __init__(self, *args, **kwargs): + super(TerminalModel, self).__init__(*args, **kwargs) + self.reset() + + def reset(self): + self.clear() + + def prepare_records(self, result, suspend_logs): + prepared_records = [] + instance_name = None + instance = result["instance"] + if instance is not None: + instance_name = instance.data["name"] + + if not suspend_logs: + for record in result.get("records") or []: + if isinstance(record, dict): + record_item = record + else: + record_item = { + "label": text_type(record.msg), + "type": "record", + "levelno": record.levelno, + "threadName": record.threadName, + "name": record.name, + "filename": record.filename, + "pathname": record.pathname, + "lineno": record.lineno, + "msg": text_type(record.msg), + "msecs": record.msecs, + "levelname": record.levelname + } + + if instance_name is not None: + record_item["instance"] = instance_name + + prepared_records.append(record_item) + + error = result.get("error") + if error: + fname, line_no, func, exc = error.traceback + error_item = { + "label": str(error), + "type": "error", + "filename": str(fname), + "lineno": str(line_no), + "func": str(func), + "traceback": error.formatted_traceback, + } + + if instance_name is not None: + error_item["instance"] = instance_name + + prepared_records.append(error_item) + + return prepared_records + + def append(self, record_items): + all_record_items = [] + for record_item in record_items: + record_type = record_item["type"] + # Add error message to detail + if record_type == "error": + record_item["msg"] = record_item["label"] + terminal_item_type = None + if record_type == "record": + for level, _type in self.level_to_record: + if level > record_item["levelno"]: + break + terminal_item_type = _type + + else: + terminal_item_type = record_type + + icon_color = self.item_icon_colors.get(terminal_item_type) + icon_name = self.item_icon_name.get(record_type) + + top_item_icon = None + if icon_color and icon_name: + top_item_icon = QAwesomeIconFactory.icon(icon_name, icon_color) + + label = record_item["label"].split("\n")[0] + + top_item = QtGui.QStandardItem() + all_record_items.append(top_item) + + detail_item = TerminalDetailItem(record_item) + top_item.appendRow(detail_item) + + top_item.setData(TerminalLabelType, Roles.TypeRole) + top_item.setData(terminal_item_type, Roles.TerminalItemTypeRole) + top_item.setData(label, QtCore.Qt.DisplayRole) + top_item.setFlags( + QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled + ) + + if top_item_icon: + top_item.setData(top_item_icon, QtCore.Qt.DecorationRole) + + detail_item.setData(TerminalDetailType, Roles.TypeRole) + + self.invisibleRootItem().appendRows(all_record_items) + + def update_with_result(self, result): + self.append(result["records"]) + + +class TerminalProxy(QtCore.QSortFilterProxyModel): + filter_buttons_checks = { + "info": settings.TerminalFilters.get("info", True), + "log_debug": settings.TerminalFilters.get("log_debug", True), + 
"log_info": settings.TerminalFilters.get("log_info", True), + "log_warning": settings.TerminalFilters.get("log_warning", True), + "log_error": settings.TerminalFilters.get("log_error", True), + "log_critical": settings.TerminalFilters.get("log_critical", True), + "error": settings.TerminalFilters.get("error", True) + } + + instances = [] + + def __init__(self, view, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.__class__.instances.append(self) + # Store parent because by own `QSortFilterProxyModel` has `parent` + # method not returning parent QObject in PySide and PyQt4 + self.view = view + + @classmethod + def change_filter(cls, name, value): + cls.filter_buttons_checks[name] = value + + for instance in cls.instances: + try: + instance.invalidate() + if instance.view: + instance.view.updateGeometry() + + except RuntimeError: + # C++ Object was deleted + cls.instances.remove(instance) + + def filterAcceptsRow(self, source_row, source_parent): + index = self.sourceModel().index(source_row, 0, source_parent) + item_type = index.data(Roles.TypeRole) + if not item_type == TerminalLabelType: + return True + terminal_item_type = index.data(Roles.TerminalItemTypeRole) + return self.__class__.filter_buttons_checks.get( + terminal_item_type, True + ) diff --git a/client/ayon_core/tools/pyblish_pype/settings.py b/client/ayon_core/tools/pyblish_pype/settings.py new file mode 100644 index 0000000000..5b69eb6a50 --- /dev/null +++ b/client/ayon_core/tools/pyblish_pype/settings.py @@ -0,0 +1,30 @@ +from .util import env_variable_to_bool + +# Customize the window of the pyblish-lite window. +WindowTitle = "Pyblish" + +# Customize whether to show label names for plugins. +UseLabel = True + +# Customize which tab to start on. Possible choices are: "artist", "overview" +# and "terminal". +InitialTab = "overview" + +# Customize the window size. 
+WindowSize = (430, 600) + +TerminalFilters = { + "info": True, + "log_debug": True, + "log_info": True, + "log_warning": True, + "log_error": True, + "log_critical": True, + "traceback": True, +} + +# Allow animations in GUI +Animated = env_variable_to_bool("AYON_PYBLISH_ANIMATED", True) + +# Print UI info message to console +PrintInfo = env_variable_to_bool("AYON_PYBLISH_PRINT_INFO", True) diff --git a/openpype/tools/pyblish_pype/util.py b/client/ayon_core/tools/pyblish_pype/util.py similarity index 100% rename from openpype/tools/pyblish_pype/util.py rename to client/ayon_core/tools/pyblish_pype/util.py diff --git a/openpype/scripts/slates/slate_base/__init__.py b/client/ayon_core/tools/pyblish_pype/vendor/__init__.py similarity index 100% rename from openpype/scripts/slates/slate_base/__init__.py rename to client/ayon_core/tools/pyblish_pype/vendor/__init__.py diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/__init__.py b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/__init__.py similarity index 100% rename from openpype/tools/pyblish_pype/vendor/qtawesome/__init__.py rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/__init__.py diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/_version.py b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/_version.py similarity index 100% rename from openpype/tools/pyblish_pype/vendor/qtawesome/_version.py rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/_version.py diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/animation.py b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/animation.py similarity index 100% rename from openpype/tools/pyblish_pype/vendor/qtawesome/animation.py rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/animation.py diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont-charmap.json b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont-charmap.json similarity index 100% rename from openpype/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont-charmap.json rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont-charmap.json diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont.ttf b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont.ttf similarity index 100% rename from openpype/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont.ttf rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/elusiveicons-webfont.ttf diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont-charmap.json b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont-charmap.json similarity index 100% rename from openpype/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont-charmap.json rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont-charmap.json diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont.ttf b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont.ttf similarity index 100% rename from openpype/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont.ttf rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/fonts/fontawesome-webfont.ttf diff --git a/openpype/tools/pyblish_pype/vendor/qtawesome/iconic_font.py b/client/ayon_core/tools/pyblish_pype/vendor/qtawesome/iconic_font.py similarity index 100% rename from 
openpype/tools/pyblish_pype/vendor/qtawesome/iconic_font.py rename to client/ayon_core/tools/pyblish_pype/vendor/qtawesome/iconic_font.py diff --git a/openpype/tools/pyblish_pype/version.py b/client/ayon_core/tools/pyblish_pype/version.py similarity index 100% rename from openpype/tools/pyblish_pype/version.py rename to client/ayon_core/tools/pyblish_pype/version.py diff --git a/openpype/tools/pyblish_pype/view.py b/client/ayon_core/tools/pyblish_pype/view.py similarity index 100% rename from openpype/tools/pyblish_pype/view.py rename to client/ayon_core/tools/pyblish_pype/view.py diff --git a/openpype/tools/pyblish_pype/widgets.py b/client/ayon_core/tools/pyblish_pype/widgets.py similarity index 100% rename from openpype/tools/pyblish_pype/widgets.py rename to client/ayon_core/tools/pyblish_pype/widgets.py diff --git a/openpype/tools/pyblish_pype/window.py b/client/ayon_core/tools/pyblish_pype/window.py similarity index 100% rename from openpype/tools/pyblish_pype/window.py rename to client/ayon_core/tools/pyblish_pype/window.py diff --git a/openpype/tools/resources/__init__.py b/client/ayon_core/tools/resources/__init__.py similarity index 100% rename from openpype/tools/resources/__init__.py rename to client/ayon_core/tools/resources/__init__.py diff --git a/openpype/tools/resources/images/delete.png b/client/ayon_core/tools/resources/images/delete.png similarity index 100% rename from openpype/tools/resources/images/delete.png rename to client/ayon_core/tools/resources/images/delete.png diff --git a/openpype/tools/resources/images/file.png b/client/ayon_core/tools/resources/images/file.png similarity index 100% rename from openpype/tools/resources/images/file.png rename to client/ayon_core/tools/resources/images/file.png diff --git a/openpype/tools/resources/images/files.png b/client/ayon_core/tools/resources/images/files.png similarity index 100% rename from openpype/tools/resources/images/files.png rename to client/ayon_core/tools/resources/images/files.png diff --git a/openpype/tools/resources/images/folder.png b/client/ayon_core/tools/resources/images/folder.png similarity index 100% rename from openpype/tools/resources/images/folder.png rename to client/ayon_core/tools/resources/images/folder.png diff --git a/openpype/tools/resources/images/menu.png b/client/ayon_core/tools/resources/images/menu.png similarity index 100% rename from openpype/tools/resources/images/menu.png rename to client/ayon_core/tools/resources/images/menu.png diff --git a/openpype/tools/ayon_sceneinventory/__init__.py b/client/ayon_core/tools/sceneinventory/__init__.py similarity index 100% rename from openpype/tools/ayon_sceneinventory/__init__.py rename to client/ayon_core/tools/sceneinventory/__init__.py diff --git a/client/ayon_core/tools/sceneinventory/control.py b/client/ayon_core/tools/sceneinventory/control.py new file mode 100644 index 0000000000..409f92b506 --- /dev/null +++ b/client/ayon_core/tools/sceneinventory/control.py @@ -0,0 +1,127 @@ +import ayon_api + +from ayon_core.lib.events import QueuedEventSystem +from ayon_core.host import ILoadHost +from ayon_core.pipeline import ( + registered_host, + get_current_context, +) +from ayon_core.tools.ayon_utils.models import HierarchyModel + +from .models import SiteSyncModel + + +class SceneInventoryController: + """This is a temporary controller for AYON. + + Goal of this temporary controller is to provide a way to get current + context instead of using 'AvalonMongoDB' object (or 'legacy_io'). + + Also provides (hopefully) cleaner api for site sync. 
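+
+    Current context, project name and folder id are cached on first
+    access and are cleared again by 'reset'.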
+ """ + + def __init__(self, host=None): + if host is None: + host = registered_host() + self._host = host + self._current_context = None + self._current_project = None + self._current_folder_id = None + self._current_folder_set = False + + self._site_sync_model = SiteSyncModel(self) + # Switch dialog requirements + self._hierarchy_model = HierarchyModel(self) + self._event_system = self._create_event_system() + + def emit_event(self, topic, data=None, source=None): + if data is None: + data = {} + self._event_system.emit(topic, data, source) + + def register_event_callback(self, topic, callback): + self._event_system.add_callback(topic, callback) + + def reset(self): + self._current_context = None + self._current_project = None + self._current_folder_id = None + self._current_folder_set = False + + self._site_sync_model.reset() + self._hierarchy_model.reset() + + def get_current_context(self): + if self._current_context is None: + if hasattr(self._host, "get_current_context"): + self._current_context = self._host.get_current_context() + else: + self._current_context = get_current_context() + return self._current_context + + def get_current_project_name(self): + if self._current_project is None: + self._current_project = self.get_current_context()["project_name"] + return self._current_project + + def get_current_folder_id(self): + if self._current_folder_set: + return self._current_folder_id + + context = self.get_current_context() + project_name = context["project_name"] + folder_name = context.get("asset_name") + folder_id = None + if folder_name: + folder = ayon_api.get_folder_by_path(project_name, folder_name) + if folder: + folder_id = folder["id"] + + self._current_folder_id = folder_id + self._current_folder_set = True + return self._current_folder_id + + def get_containers(self): + host = self._host + if isinstance(host, ILoadHost): + return list(host.get_containers()) + elif hasattr(host, "ls"): + return list(host.ls()) + return [] + + # Site Sync methods + def is_sync_server_enabled(self): + return self._site_sync_model.is_sync_server_enabled() + + def get_sites_information(self): + return self._site_sync_model.get_sites_information() + + def get_site_provider_icons(self): + return self._site_sync_model.get_site_provider_icons() + + def get_representations_site_progress(self, representation_ids): + return self._site_sync_model.get_representations_site_progress( + representation_ids + ) + + def resync_representations(self, representation_ids, site_type): + return self._site_sync_model.resync_representations( + representation_ids, site_type + ) + + # Switch dialog methods + def get_folder_items(self, project_name, sender=None): + return self._hierarchy_model.get_folder_items(project_name, sender) + + def get_folder_label(self, folder_id): + if not folder_id: + return None + project_name = self.get_current_project_name() + folder_item = self._hierarchy_model.get_folder_item( + project_name, folder_id) + if folder_item is None: + return None + return folder_item.label + + def _create_event_system(self): + return QueuedEventSystem() diff --git a/client/ayon_core/tools/sceneinventory/delegates.py b/client/ayon_core/tools/sceneinventory/delegates.py new file mode 100644 index 0000000000..1f8bb81835 --- /dev/null +++ b/client/ayon_core/tools/sceneinventory/delegates.py @@ -0,0 +1,196 @@ +import numbers + +from ayon_core.client import ( + get_versions, + get_hero_versions, +) +from ayon_core.pipeline import HeroVersionType +from ayon_core.tools.utils.models import TreeModel +from 
ayon_core.tools.utils.lib import format_version + +from qtpy import QtWidgets, QtCore, QtGui + + +class VersionDelegate(QtWidgets.QStyledItemDelegate): + """A delegate that display version integer formatted as version string.""" + + version_changed = QtCore.Signal() + first_run = False + lock = False + + def __init__(self, controller, *args, **kwargs): + self._controller = controller + super(VersionDelegate, self).__init__(*args, **kwargs) + + def get_project_name(self): + return self._controller.get_current_project_name() + + def displayText(self, value, locale): + if isinstance(value, HeroVersionType): + return format_version(value, True) + if not isinstance(value, numbers.Integral): + # For cases where no version is resolved like NOT FOUND cases + # where a representation might not exist in current database + return + + return format_version(value) + + def paint(self, painter, option, index): + fg_color = index.data(QtCore.Qt.ForegroundRole) + if fg_color: + if isinstance(fg_color, QtGui.QBrush): + fg_color = fg_color.color() + elif isinstance(fg_color, QtGui.QColor): + pass + else: + fg_color = None + + if not fg_color: + return super(VersionDelegate, self).paint(painter, option, index) + + if option.widget: + style = option.widget.style() + else: + style = QtWidgets.QApplication.style() + + style.drawControl( + QtWidgets.QStyle.CE_ItemViewItem, + option, + painter, + option.widget + ) + + painter.save() + + text = self.displayText( + index.data(QtCore.Qt.DisplayRole), option.locale + ) + pen = painter.pen() + pen.setColor(fg_color) + painter.setPen(pen) + + text_rect = style.subElementRect( + QtWidgets.QStyle.SE_ItemViewItemText, + option + ) + text_margin = style.proxy().pixelMetric( + QtWidgets.QStyle.PM_FocusFrameHMargin, option, option.widget + ) + 1 + + painter.drawText( + text_rect.adjusted(text_margin, 0, - text_margin, 0), + option.displayAlignment, + text + ) + + painter.restore() + + def createEditor(self, parent, option, index): + item = index.data(TreeModel.ItemRole) + if item.get("isGroup") or item.get("isMerged"): + return + + editor = QtWidgets.QComboBox(parent) + + def commit_data(): + if not self.first_run: + self.commitData.emit(editor) # Update model data + self.version_changed.emit() # Display model data + editor.currentIndexChanged.connect(commit_data) + + self.first_run = True + self.lock = False + + return editor + + def setEditorData(self, editor, index): + if self.lock: + # Only set editor data once per delegation + return + + editor.clear() + + # Current value of the index + item = index.data(TreeModel.ItemRole) + value = index.data(QtCore.Qt.DisplayRole) + if item["version_document"]["type"] != "hero_version": + assert isinstance(value, numbers.Integral), ( + "Version is not integer" + ) + + project_name = self.get_project_name() + # Add all available versions to the editor + parent_id = item["version_document"]["parent"] + version_docs = [ + version_doc + for version_doc in sorted( + get_versions(project_name, subset_ids=[parent_id]), + key=lambda item: item["name"] + ) + if version_doc["data"].get("active", True) + ] + + hero_versions = list( + get_hero_versions( + project_name, + subset_ids=[parent_id], + fields=["name", "data.tags", "version_id"] + ) + ) + hero_version_doc = None + if hero_versions: + hero_version_doc = hero_versions[0] + + doc_for_hero_version = None + + selected = None + items = [] + for version_doc in version_docs: + version_tags = version_doc["data"].get("tags") or [] + if "deleted" in version_tags: + continue + + if ( + 
hero_version_doc + and doc_for_hero_version is None + and hero_version_doc["version_id"] == version_doc["_id"] + ): + doc_for_hero_version = version_doc + + label = format_version(version_doc["name"]) + item = QtGui.QStandardItem(label) + item.setData(version_doc, QtCore.Qt.UserRole) + items.append(item) + + if version_doc["name"] == value: + selected = item + + if hero_version_doc and doc_for_hero_version: + version_name = doc_for_hero_version["name"] + label = format_version(version_name, True) + if isinstance(value, HeroVersionType): + index = len(version_docs) + hero_version_doc["name"] = HeroVersionType(version_name) + + item = QtGui.QStandardItem(label) + item.setData(hero_version_doc, QtCore.Qt.UserRole) + items.append(item) + + # Reverse items so latest versions be upper + items = list(reversed(items)) + for item in items: + editor.model().appendRow(item) + + index = 0 + if selected: + index = selected.row() + + # Will trigger index-change signal + editor.setCurrentIndex(index) + self.first_run = False + self.lock = True + + def setModelData(self, editor, model, index): + """Apply the integer version back in the model""" + version = editor.itemData(editor.currentIndex()) + model.setData(index, version["name"]) diff --git a/client/ayon_core/tools/sceneinventory/model.py b/client/ayon_core/tools/sceneinventory/model.py new file mode 100644 index 0000000000..05ecfd442d --- /dev/null +++ b/client/ayon_core/tools/sceneinventory/model.py @@ -0,0 +1,623 @@ +import collections +import re +import logging +import uuid +import copy + +from collections import defaultdict + +from qtpy import QtCore, QtGui +import qtawesome + +from ayon_core.client import ( + get_assets, + get_subsets, + get_versions, + get_last_version_by_subset_id, + get_representations, +) +from ayon_core.pipeline import ( + get_current_project_name, + schema, + HeroVersionType, +) +from ayon_core.style import get_default_entity_icon_color +from ayon_core.tools.utils.models import TreeModel, Item +from ayon_core.tools.ayon_utils.widgets import get_qt_icon + + +def walk_hierarchy(node): + """Recursively yield group node.""" + for child in node.children(): + if child.get("isGroupNode"): + yield child + + for _child in walk_hierarchy(child): + yield _child + + +class InventoryModel(TreeModel): + """The model for the inventory""" + + Columns = [ + "Name", + "version", + "count", + "family", + "group", + "loader", + "objectName", + "active_site", + "remote_site", + ] + active_site_col = Columns.index("active_site") + remote_site_col = Columns.index("remote_site") + + OUTDATED_COLOR = QtGui.QColor(235, 30, 30) + CHILD_OUTDATED_COLOR = QtGui.QColor(200, 160, 30) + GRAYOUT_COLOR = QtGui.QColor(160, 160, 160) + + UniqueRole = QtCore.Qt.UserRole + 2 # unique label role + + def __init__(self, controller, parent=None): + super(InventoryModel, self).__init__(parent) + self.log = logging.getLogger(self.__class__.__name__) + + self._controller = controller + + self._hierarchy_view = False + + self._default_icon_color = get_default_entity_icon_color() + + site_icons = self._controller.get_site_provider_icons() + + self._site_icons = { + provider: get_qt_icon(icon_def) + for provider, icon_def in site_icons.items() + } + + def outdated(self, item): + value = item.get("version") + if isinstance(value, HeroVersionType): + return False + + if item.get("version") == item.get("highest_version"): + return False + return True + + def data(self, index, role): + if not index.isValid(): + return + + item = index.internalPointer() + + if role == 
QtCore.Qt.FontRole: + # Make top-level entries bold + if item.get("isGroupNode") or item.get("isNotSet"): # group-item + font = QtGui.QFont() + font.setBold(True) + return font + + if role == QtCore.Qt.ForegroundRole: + # Set the text color to the OUTDATED_COLOR when the + # collected version is not the same as the highest version + key = self.Columns[index.column()] + if key == "version": # version + if item.get("isGroupNode"): # group-item + if self.outdated(item): + return self.OUTDATED_COLOR + + if self._hierarchy_view: + # If current group is not outdated, check if any + # outdated children. + for _node in walk_hierarchy(item): + if self.outdated(_node): + return self.CHILD_OUTDATED_COLOR + else: + + if self._hierarchy_view: + # Although this is not a group item, we still need + # to distinguish which one contain outdated child. + for _node in walk_hierarchy(item): + if self.outdated(_node): + return self.CHILD_OUTDATED_COLOR.darker(150) + + return self.GRAYOUT_COLOR + + if key == "Name" and not item.get("isGroupNode"): + return self.GRAYOUT_COLOR + + # Add icons + if role == QtCore.Qt.DecorationRole: + if index.column() == 0: + # Override color + color = item.get("color", self._default_icon_color) + if item.get("isGroupNode"): # group-item + return qtawesome.icon("fa.folder", color=color) + if item.get("isNotSet"): + return qtawesome.icon("fa.exclamation-circle", color=color) + + return qtawesome.icon("fa.file-o", color=color) + + if index.column() == 3: + # Family icon + return item.get("familyIcon", None) + + column_name = self.Columns[index.column()] + + if column_name == "group" and item.get("group"): + return qtawesome.icon("fa.object-group", + color=get_default_entity_icon_color()) + + if item.get("isGroupNode"): + if column_name == "active_site": + provider = item.get("active_site_provider") + return self._site_icons.get(provider) + + if column_name == "remote_site": + provider = item.get("remote_site_provider") + return self._site_icons.get(provider) + + if role == QtCore.Qt.DisplayRole and item.get("isGroupNode"): + column_name = self.Columns[index.column()] + progress = None + if column_name == "active_site": + progress = item.get("active_site_progress", 0) + elif column_name == "remote_site": + progress = item.get("remote_site_progress", 0) + if progress is not None: + return "{}%".format(max(progress, 0) * 100) + + if role == self.UniqueRole: + return item["representation"] + item.get("objectName", "") + + return super(InventoryModel, self).data(index, role) + + def set_hierarchy_view(self, state): + """Set whether to display subsets in hierarchy view.""" + state = bool(state) + + if state != self._hierarchy_view: + self._hierarchy_view = state + + def refresh(self, selected=None, containers=None): + """Refresh the model""" + + # for debugging or testing, injecting items from outside + if containers is None: + containers = self._controller.get_containers() + + self.clear() + if not selected or not self._hierarchy_view: + self._add_containers(containers) + return + + # Filter by cherry-picked items + self._add_containers(( + container + for container in containers + if container["objectName"] in selected + )) + + def _add_containers(self, containers, parent=None): + """Add the items to the model. 
+ + The items should be formatted similar to `api.ls()` returns, an item + is then represented as: + {"filename_v001.ma": [full/filename/of/loaded/filename_v001.ma, + full/filename/of/loaded/filename_v001.ma], + "nodetype" : "reference", + "node": "referenceNode1"} + + Note: When performing an additional call to `add_items` it will *not* + group the new items with previously existing item groups of the + same type. + + Args: + containers (generator): Container items. + parent (Item, optional): Set this item as parent for the added + items when provided. Defaults to the root of the model. + + Returns: + node.Item: root node which has children added based on the data + """ + + project_name = get_current_project_name() + + self.beginResetModel() + + # Group by representation + grouped = defaultdict(lambda: {"containers": list()}) + for container in containers: + repre_id = container["representation"] + grouped[repre_id]["containers"].append(container) + + ( + repres_by_id, + versions_by_id, + products_by_id, + folders_by_id, + ) = self._query_entities(project_name, set(grouped.keys())) + # Add to model + not_found = defaultdict(list) + not_found_ids = [] + for repre_id, group_dict in sorted(grouped.items()): + group_containers = group_dict["containers"] + representation = repres_by_id.get(repre_id) + if not representation: + not_found["representation"].extend(group_containers) + not_found_ids.append(repre_id) + continue + + version = versions_by_id.get(representation["parent"]) + if not version: + not_found["version"].extend(group_containers) + not_found_ids.append(repre_id) + continue + + product = products_by_id.get(version["parent"]) + if not product: + not_found["product"].extend(group_containers) + not_found_ids.append(repre_id) + continue + + folder = folders_by_id.get(product["parent"]) + if not folder: + not_found["folder"].extend(group_containers) + not_found_ids.append(repre_id) + continue + + group_dict.update({ + "representation": representation, + "version": version, + "subset": product, + "asset": folder + }) + + for _repre_id in not_found_ids: + grouped.pop(_repre_id) + + for where, group_containers in not_found.items(): + # create the group header + group_node = Item() + name = "< NOT FOUND - {} >".format(where) + group_node["Name"] = name + group_node["representation"] = name + group_node["count"] = len(group_containers) + group_node["isGroupNode"] = False + group_node["isNotSet"] = True + + self.add_child(group_node, parent=parent) + + for container in group_containers: + item_node = Item() + item_node.update(container) + item_node["Name"] = container.get("objectName", "NO NAME") + item_node["isNotFound"] = True + self.add_child(item_node, parent=group_node) + + # TODO Use product icons + family_icon = qtawesome.icon( + "fa.folder", color="#0091B2" + ) + # Prepare site sync specific data + progress_by_id = self._controller.get_representations_site_progress( + set(grouped.keys()) + ) + sites_info = self._controller.get_sites_information() + + for repre_id, group_dict in sorted(grouped.items()): + group_containers = group_dict["containers"] + representation = group_dict["representation"] + version = group_dict["version"] + subset = group_dict["subset"] + asset = group_dict["asset"] + + # Get the primary family + maj_version, _ = schema.get_schema_version(subset["schema"]) + if maj_version < 3: + src_doc = version + else: + src_doc = subset + + prim_family = src_doc["data"].get("family") + if not prim_family: + families = src_doc["data"].get("families") + if families: + 
prim_family = families[0]
+
+ # Store the highest available version so the model can know
+ # whether the current version is up-to-date.
+ highest_version = get_last_version_by_subset_id(
+ project_name, version["parent"]
+ )
+
+ # create the group header
+ group_node = Item()
+ group_node["Name"] = "{}_{}: ({})".format(
+ asset["name"], subset["name"], representation["name"]
+ )
+ group_node["representation"] = repre_id
+ group_node["version"] = version["name"]
+ group_node["highest_version"] = highest_version["name"]
+ group_node["family"] = prim_family or ""
+ group_node["familyIcon"] = family_icon
+ group_node["count"] = len(group_containers)
+ group_node["isGroupNode"] = True
+ group_node["group"] = subset["data"].get("subsetGroup")
+
+ # Site sync specific data
+ progress = progress_by_id[repre_id]
+ group_node.update(sites_info)
+ group_node["active_site_progress"] = progress["active_site"]
+ group_node["remote_site_progress"] = progress["remote_site"]
+
+ self.add_child(group_node, parent=parent)
+
+ for container in group_containers:
+ item_node = Item()
+ item_node.update(container)
+
+ # store the current version on the item
+ item_node["version"] = version["name"]
+
+ # Remap the namespace to the item name.
+ # Note that the name key is a capital "N"; this way the
+ # namespace is shown in the GUI without changing container data.
+ item_node["Name"] = container["namespace"]
+
+ self.add_child(item_node, parent=group_node)
+
+ self.endResetModel()
+
+ return self._root_item
+
+ def _query_entities(self, project_name, repre_ids):
+ """Query entities for representations from containers.
+
+ Returns:
+ tuple[dict, dict, dict, dict]: Representation, version, product
+ and folder documents by id.
+ """
+
+ repres_by_id = {}
+ versions_by_id = {}
+ products_by_id = {}
+ folders_by_id = {}
+ output = (
+ repres_by_id,
+ versions_by_id,
+ products_by_id,
+ folders_by_id,
+ )
+
+ filtered_repre_ids = set()
+ for repre_id in repre_ids:
+ # Filter out invalid representation ids
+ # NOTE: This is needed because scenes from OpenPype could
+ # contain Mongo ObjectIds instead of UUIDs.
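+ # For illustration (hypothetical values): a UUID-style id such as
+ # "0f1c2d3e-4a5b-6c7d-8e9f-0a1b2c3d4e5f" passes the check below,
+ # while a 24-character Mongo ObjectId like
+ # "5f4dccd1e8a3f2b4c8d9e0a1" makes 'uuid.UUID()' raise ValueError
+ # and is skipped.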
+ try:
+ uuid.UUID(repre_id)
+ filtered_repre_ids.add(repre_id)
+ except ValueError:
+ continue
+ if not filtered_repre_ids:
+ return output
+
+ # Query only the ids that passed the UUID check above
+ repre_docs = get_representations(project_name, filtered_repre_ids)
+ repres_by_id.update({
+ repre_doc["_id"]: repre_doc
+ for repre_doc in repre_docs
+ })
+ version_ids = {
+ repre_doc["parent"] for repre_doc in repres_by_id.values()
+ }
+ if not version_ids:
+ return output
+
+ version_docs = get_versions(project_name, version_ids, hero=True)
+ versions_by_id.update({
+ version_doc["_id"]: version_doc
+ for version_doc in version_docs
+ })
+ hero_versions_by_subversion_id = collections.defaultdict(list)
+ for version_doc in versions_by_id.values():
+ if version_doc["type"] != "hero_version":
+ continue
+ subversion = version_doc["version_id"]
+ hero_versions_by_subversion_id[subversion].append(version_doc)
+
+ if hero_versions_by_subversion_id:
+ subversion_ids = set(
+ hero_versions_by_subversion_id.keys()
+ )
+ subversion_docs = get_versions(project_name, subversion_ids)
+ for subversion_doc in subversion_docs:
+ subversion_id = subversion_doc["_id"]
+ subversion_ids.discard(subversion_id)
+ h_version_docs = hero_versions_by_subversion_id[subversion_id]
+ for version_doc in h_version_docs:
+ version_doc["name"] = HeroVersionType(
+ subversion_doc["name"]
+ )
+ version_doc["data"] = copy.deepcopy(
+ subversion_doc["data"]
+ )
+
+ for subversion_id in subversion_ids:
+ h_version_docs = hero_versions_by_subversion_id[subversion_id]
+ for version_doc in h_version_docs:
+ versions_by_id.pop(version_doc["_id"])
+
+ product_ids = {
+ version_doc["parent"]
+ for version_doc in versions_by_id.values()
+ }
+ if not product_ids:
+ return output
+ product_docs = get_subsets(project_name, product_ids)
+ products_by_id.update({
+ product_doc["_id"]: product_doc
+ for product_doc in product_docs
+ })
+ folder_ids = {
+ product_doc["parent"]
+ for product_doc in products_by_id.values()
+ }
+ if not folder_ids:
+ return output
+
+ folder_docs = get_assets(project_name, folder_ids)
+ folders_by_id.update({
+ folder_doc["_id"]: folder_doc
+ for folder_doc in folder_docs
+ })
+ return output
+
+
+class FilterProxyModel(QtCore.QSortFilterProxyModel):
+ """Filter model that accepts rows whose key column matches the filter"""
+
+ def __init__(self, *args, **kwargs):
+ super(FilterProxyModel, self).__init__(*args, **kwargs)
+ self._filter_outdated = False
+ self._hierarchy_view = False
+
+ def filterAcceptsRow(self, row, parent):
+ model = self.sourceModel()
+ source_index = model.index(row, self.filterKeyColumn(), parent)
+
+ # Always allow bottom entries (individual containers); their
+ # parent group is already hidden when it does not pass the filter.
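+ # Qt hides children of rejected parent rows by default, so
+ # accepting leaf rows here cannot reveal rows whose group was
+ # filtered out.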
+ rows = model.rowCount(source_index)
+ if not rows:
+ return True
+
+ # Filter by regex
+ # 'filterRegExp' exists only in Qt5 bindings, Qt6 provides
+ # 'filterRegularExpression' instead.
+ if hasattr(self, "filterRegExp"):
+ regex = self.filterRegExp()
+ else:
+ regex = self.filterRegularExpression()
+ pattern = regex.pattern()
+ if pattern:
+ pattern = re.escape(pattern)
+
+ if not self._matches(row, parent, pattern):
+ return False
+
+ if self._filter_outdated:
+ # When filtering to outdated only, hide the up-to-date entries;
+ # in other words, allow entries only when they are outdated
+ if not self._is_outdated(row, parent):
+ return False
+
+ return True
+
+ def set_filter_outdated(self, state):
+ """Set whether to show the outdated entries only."""
+ state = bool(state)
+
+ if state != self._filter_outdated:
+ self._filter_outdated = bool(state)
+ self.invalidateFilter()
+
+ def set_hierarchy_view(self, state):
+ state = bool(state)
+
+ if state != self._hierarchy_view:
+ self._hierarchy_view = state
+
+ def _is_outdated(self, row, parent):
+ """Return whether row is outdated.
+
+ A row is considered outdated when its internal data contains both
+ "version" and "highest_version" values and they are not equal.
+
+ """
+ def outdated(node):
+ version = node.get("version", None)
+ highest = node.get("highest_version", None)
+
+ # Always allow indices that have no version data at all
+ if version is None and highest is None:
+ return True
+
+ # If either a version or highest is present but not the other,
+ # consider the item invalid.
+ if not self._hierarchy_view:
+ # Skip this check in hierarchy view, otherwise a child item
+ # node would be hidden even when it is actually outdated.
+ if version is None or highest is None:
+ return False
+ return version != highest
+
+ index = self.sourceModel().index(row, self.filterKeyColumn(), parent)
+
+ # The scene contents are grouped by "representation", e.g. the same
+ # "representation" loaded twice is grouped under the same header.
+ # Since the version check filters these parent groups we skip that
+ # check for the individual children.
+ has_parent = index.parent().isValid()
+ if has_parent and not self._hierarchy_view:
+ return True
+
+ # Filter to those that have different version numbers
+ node = index.internalPointer()
+ if outdated(node):
+ return True
+
+ if self._hierarchy_view:
+ for _node in walk_hierarchy(node):
+ if outdated(_node):
+ return True
+
+ return False
+
+ def _matches(self, row, parent, pattern):
+ """Return whether the row matches the filter pattern.
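+
+ For example (hypothetical data): with the escaped pattern "chair",
+ a row whose key is "chair_model_01" is accepted directly, while a
+ group row is accepted when any of its child rows matches.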
+
+ Args:
+ row (int): row number in model
+ parent (QtCore.QModelIndex): parent index
+ pattern (str): escaped pattern to search for in the key column
+
+ Returns:
+ bool
+
+ """
+ model = self.sourceModel()
+ column = self.filterKeyColumn()
+ role = self.filterRole()
+
+ def matches(row, parent, pattern):
+ index = model.index(row, column, parent)
+ key = model.data(index, role)
+ # The role data may be None for some rows
+ if key and re.search(pattern, key, re.IGNORECASE):
+ return True
+
+ if matches(row, parent, pattern):
+ return True
+
+ # Also allow if any of the children match
+ source_index = model.index(row, column, parent)
+ rows = model.rowCount(source_index)
+
+ if any(
+ matches(idx, source_index, pattern)
+ for idx in range(rows)
+ ):
+ return True
+
+ if not self._hierarchy_view:
+ return False
+
+ # Recurse into grandchildren; every child is checked, not only
+ # the first one.
+ for idx in range(rows):
+ child_index = model.index(idx, column, source_index)
+ child_rows = model.rowCount(child_index)
+ if any(
+ self._matches(child_idx, child_index, pattern)
+ for child_idx in range(child_rows)
+ ):
+ return True
+
+ return False
diff --git a/openpype/tools/ayon_sceneinventory/models/__init__.py b/client/ayon_core/tools/sceneinventory/models/__init__.py
similarity index 100%
rename from openpype/tools/ayon_sceneinventory/models/__init__.py
rename to client/ayon_core/tools/sceneinventory/models/__init__.py
diff --git a/client/ayon_core/tools/sceneinventory/models/site_sync.py b/client/ayon_core/tools/sceneinventory/models/site_sync.py
new file mode 100644
index 0000000000..c7bc0b756d
--- /dev/null
+++ b/client/ayon_core/tools/sceneinventory/models/site_sync.py
@@ -0,0 +1,176 @@
+from ayon_core.client import get_representations
+from ayon_core.addon import AddonsManager
+
+NOT_SET = object()
+
+
+class SiteSyncModel:
+ def __init__(self, controller):
+ self._controller = controller
+
+ self._sync_server_module = NOT_SET
+ self._sync_server_enabled = None
+ self._active_site = NOT_SET
+ self._remote_site = NOT_SET
+ self._active_site_provider = NOT_SET
+ self._remote_site_provider = NOT_SET
+
+ def reset(self):
+ self._sync_server_module = NOT_SET
+ self._sync_server_enabled = None
+ self._active_site = NOT_SET
+ self._remote_site = NOT_SET
+ self._active_site_provider = NOT_SET
+ self._remote_site_provider = NOT_SET
+
+ def is_sync_server_enabled(self):
+ """Check whether site sync is enabled.
+
+ Returns:
+ bool: Is enabled or not.
+ """
+
+ self._cache_sync_server_module()
+ return self._sync_server_enabled
+
+ def get_site_provider_icons(self):
+ """Icon paths per provider.
+
+ Returns:
+ dict[str, str]: Path by provider name.
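+
+ Example (hypothetical provider names and paths):
+ {"studio": ".../resources/studio.png",
+ "gdrive": ".../resources/gdrive.png"}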
+ """ + + if not self.is_sync_server_enabled(): + return {} + site_sync_addon = self._get_sync_server_module() + return site_sync_addon.get_site_icons() + + def get_sites_information(self): + return { + "active_site": self._get_active_site(), + "active_site_provider": self._get_active_site_provider(), + "remote_site": self._get_remote_site(), + "remote_site_provider": self._get_remote_site_provider() + } + + def get_representations_site_progress(self, representation_ids): + """Get progress of representations sync.""" + + representation_ids = set(representation_ids) + output = { + repre_id: { + "active_site": 0, + "remote_site": 0, + } + for repre_id in representation_ids + } + if not self.is_sync_server_enabled(): + return output + + project_name = self._controller.get_current_project_name() + site_sync = self._get_sync_server_module() + repre_docs = get_representations(project_name, representation_ids) + active_site = self._get_active_site() + remote_site = self._get_remote_site() + + for repre_doc in repre_docs: + repre_output = output[repre_doc["_id"]] + result = site_sync.get_progress_for_repre( + repre_doc, active_site, remote_site + ) + repre_output["active_site"] = result[active_site] + repre_output["remote_site"] = result[remote_site] + + return output + + def resync_representations(self, representation_ids, site_type): + """ + + Args: + representation_ids (Iterable[str]): Representation ids. + site_type (Literal[active_site, remote_site]): Site type. + """ + + project_name = self._controller.get_current_project_name() + site_sync = self._get_sync_server_module() + active_site = self._get_active_site() + remote_site = self._get_remote_site() + progress = self.get_representations_site_progress( + representation_ids + ) + for repre_id in representation_ids: + repre_progress = progress.get(repre_id) + if not repre_progress: + continue + + if site_type == "active_site": + # check opposite from added site, must be 1 or unable to sync + check_progress = repre_progress["remote_site"] + site = active_site + else: + check_progress = repre_progress["active_site"] + site = remote_site + + if check_progress == 1: + site_sync.add_site( + project_name, repre_id, site, force=True + ) + + def _get_sync_server_module(self): + self._cache_sync_server_module() + return self._sync_server_module + + def _cache_sync_server_module(self): + if self._sync_server_module is not NOT_SET: + return self._sync_server_module + manager = AddonsManager() + site_sync = manager.get("sync_server") + sync_enabled = site_sync is not None and site_sync.enabled + self._sync_server_module = site_sync + self._sync_server_enabled = sync_enabled + + def _get_active_site(self): + if self._active_site is NOT_SET: + self._cache_sites() + return self._active_site + + def _get_remote_site(self): + if self._remote_site is NOT_SET: + self._cache_sites() + return self._remote_site + + def _get_active_site_provider(self): + if self._active_site_provider is NOT_SET: + self._cache_sites() + return self._active_site_provider + + def _get_remote_site_provider(self): + if self._remote_site_provider is NOT_SET: + self._cache_sites() + return self._remote_site_provider + + def _cache_sites(self): + active_site = None + remote_site = None + active_site_provider = None + remote_site_provider = None + if self.is_sync_server_enabled(): + site_sync = self._get_sync_server_module() + project_name = self._controller.get_current_project_name() + active_site = site_sync.get_active_site(project_name) + remote_site = 
site_sync.get_remote_site(project_name) + active_site_provider = "studio" + remote_site_provider = "studio" + if active_site != "studio": + active_site_provider = site_sync.get_provider_for_site( + project_name, active_site + ) + if remote_site != "studio": + remote_site_provider = site_sync.get_provider_for_site( + project_name, remote_site + ) + + self._active_site = active_site + self._remote_site = remote_site + self._active_site_provider = active_site_provider + self._remote_site_provider = remote_site_provider diff --git a/openpype/tools/ayon_sceneinventory/switch_dialog/__init__.py b/client/ayon_core/tools/sceneinventory/switch_dialog/__init__.py similarity index 100% rename from openpype/tools/ayon_sceneinventory/switch_dialog/__init__.py rename to client/ayon_core/tools/sceneinventory/switch_dialog/__init__.py diff --git a/client/ayon_core/tools/sceneinventory/switch_dialog/dialog.py b/client/ayon_core/tools/sceneinventory/switch_dialog/dialog.py new file mode 100644 index 0000000000..89c3b652e1 --- /dev/null +++ b/client/ayon_core/tools/sceneinventory/switch_dialog/dialog.py @@ -0,0 +1,1355 @@ +import collections +import logging + +from qtpy import QtWidgets, QtCore +import qtawesome + +from ayon_core.client import ( + get_assets, + get_subset_by_name, + get_subsets, + get_versions, + get_hero_versions, + get_last_versions, + get_representations, +) +from ayon_core.pipeline.load import ( + discover_loader_plugins, + switch_container, + get_repres_contexts, + loaders_from_repre_context, + LoaderSwitchNotImplementedError, + IncompatibleLoaderError, + LoaderNotFoundError +) + +from .widgets import ( + ButtonWithMenu, + SearchComboBox +) +from .folders_input import FoldersField + +log = logging.getLogger("SwitchAssetDialog") + + +class ValidationState: + def __init__(self): + self.folder_ok = True + self.product_ok = True + self.repre_ok = True + + @property + def all_ok(self): + return ( + self.folder_ok + and self.product_ok + and self.repre_ok + ) + + +class SwitchAssetDialog(QtWidgets.QDialog): + """Widget to support asset switching""" + + MIN_WIDTH = 550 + + switched = QtCore.Signal() + + def __init__(self, controller, parent=None, items=None): + super(SwitchAssetDialog, self).__init__(parent) + + self.setWindowTitle("Switch selected items ...") + + # Force and keep focus dialog + self.setModal(True) + + folders_field = FoldersField(controller, self) + products_combox = SearchComboBox(self) + repres_combobox = SearchComboBox(self) + + products_combox.set_placeholder("") + repres_combobox.set_placeholder("") + + folder_label = QtWidgets.QLabel(self) + product_label = QtWidgets.QLabel(self) + repre_label = QtWidgets.QLabel(self) + + current_folder_btn = QtWidgets.QPushButton("Use current folder", self) + + accept_icon = qtawesome.icon("fa.check", color="white") + accept_btn = ButtonWithMenu(self) + accept_btn.setIcon(accept_icon) + + main_layout = QtWidgets.QGridLayout(self) + # Folder column + main_layout.addWidget(current_folder_btn, 0, 0) + main_layout.addWidget(folders_field, 1, 0) + main_layout.addWidget(folder_label, 2, 0) + # Product column + main_layout.addWidget(products_combox, 1, 1) + main_layout.addWidget(product_label, 2, 1) + # Representation column + main_layout.addWidget(repres_combobox, 1, 2) + main_layout.addWidget(repre_label, 2, 2) + # Btn column + main_layout.addWidget(accept_btn, 1, 3) + main_layout.setColumnStretch(0, 1) + main_layout.setColumnStretch(1, 1) + main_layout.setColumnStretch(2, 1) + main_layout.setColumnStretch(3, 0) + + show_timer = 
QtCore.QTimer()
+ show_timer.setInterval(0)
+ show_timer.setSingleShot(False)
+
+ show_timer.timeout.connect(self._on_show_timer)
+ folders_field.value_changed.connect(
+ self._combobox_value_changed
+ )
+ products_combox.currentIndexChanged.connect(
+ self._combobox_value_changed
+ )
+ repres_combobox.currentIndexChanged.connect(
+ self._combobox_value_changed
+ )
+ accept_btn.clicked.connect(self._on_accept)
+ current_folder_btn.clicked.connect(self._on_current_folder)
+
+ self._show_timer = show_timer
+ self._show_counter = 0
+
+ self._current_folder_btn = current_folder_btn
+
+ self._folders_field = folders_field
+ self._products_combox = products_combox
+ self._representations_box = repres_combobox
+
+ self._folder_label = folder_label
+ self._product_label = product_label
+ self._repre_label = repre_label
+
+ self._accept_btn = accept_btn
+
+ self.setMinimumWidth(self.MIN_WIDTH)
+
+ # Set default focus to the accept button so you don't type directly
+ # into the first field; this also keeps the placeholder value visible.
+ accept_btn.setFocus()
+
+ self._folder_docs_by_id = {}
+ self._product_docs_by_id = {}
+ self._version_docs_by_id = {}
+ self._repre_docs_by_id = {}
+
+ self._missing_folder_ids = set()
+ self._missing_product_ids = set()
+ self._missing_version_ids = set()
+ self._missing_repre_ids = set()
+ self._missing_docs = False
+
+ self._inactive_folder_ids = set()
+ self._inactive_product_ids = set()
+ self._inactive_repre_ids = set()
+
+ self._init_folder_id = None
+ self._init_product_name = None
+ self._init_repre_name = None
+
+ self._fill_check = False
+
+ self._project_name = controller.get_current_project_name()
+ self._folder_id = controller.get_current_folder_id()
+
+ self._current_folder_btn.setEnabled(self._folder_id is not None)
+
+ self._controller = controller
+
+ self._items = items
+ self._prepare_content_data()
+
+ def showEvent(self, event):
+ super(SwitchAssetDialog, self).showEvent(event)
+ self._show_timer.start()
+
+ def refresh(self, init_refresh=False):
+ """Build the needed comboboxes with content."""
+ if not self._fill_check and not init_refresh:
+ return
+
+ self._fill_check = False
+
+ validation_state = ValidationState()
+ self._folders_field.refresh()
+ # Set other comboboxes to empty if any document is missing or
+ # any folder of loaded representations is archived.
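+ # The checks cascade: products are refreshed only when the folder
+ # is valid, and representations only when the product is valid too.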
+ self._is_folder_ok(validation_state) + if validation_state.folder_ok: + product_values = self._get_product_box_values() + self._fill_combobox(product_values, "product") + self._is_product_ok(validation_state) + + if validation_state.folder_ok and validation_state.product_ok: + repre_values = sorted(self._representations_box_values()) + self._fill_combobox(repre_values, "repre") + self._is_repre_ok(validation_state) + + # Fill comboboxes with values + self.set_labels() + + self.apply_validations(validation_state) + + self._build_loaders_menu() + + if init_refresh: + # pre select context if possible + self._folders_field.set_selected_item(self._init_folder_id) + self._products_combox.set_valid_value(self._init_product_name) + self._representations_box.set_valid_value(self._init_repre_name) + + self._fill_check = True + + def set_labels(self): + folder_label = self._folders_field.get_selected_folder_label() + product_label = self._products_combox.get_valid_value() + repre_label = self._representations_box.get_valid_value() + + default = "*No changes" + self._folder_label.setText(folder_label or default) + self._product_label.setText(product_label or default) + self._repre_label.setText(repre_label or default) + + def apply_validations(self, validation_state): + error_msg = "*Please select" + error_sheet = "border: 1px solid red;" + + product_sheet = None + repre_sheet = None + accept_state = "" + if validation_state.folder_ok is False: + self._folder_label.setText(error_msg) + elif validation_state.product_ok is False: + product_sheet = error_sheet + self._product_label.setText(error_msg) + elif validation_state.repre_ok is False: + repre_sheet = error_sheet + self._repre_label.setText(error_msg) + + if validation_state.all_ok: + accept_state = "1" + + self._folders_field.set_valid(validation_state.folder_ok) + self._products_combox.setStyleSheet(product_sheet or "") + self._representations_box.setStyleSheet(repre_sheet or "") + + self._accept_btn.setEnabled(validation_state.all_ok) + self._set_style_property(self._accept_btn, "state", accept_state) + + def find_last_versions(self, product_ids): + project_name = self._project_name + return get_last_versions( + project_name, + subset_ids=product_ids, + fields=["_id", "parent", "type"] + ) + + def _on_show_timer(self): + if self._show_counter == 2: + self._show_timer.stop() + self.refresh(True) + else: + self._show_counter += 1 + + def _prepare_content_data(self): + repre_ids = { + item["representation"] + for item in self._items + } + + project_name = self._project_name + repres = list(get_representations( + project_name, + representation_ids=repre_ids, + archived=True, + )) + repres_by_id = {str(repre["_id"]): repre for repre in repres} + + content_repre_docs_by_id = {} + inactive_repre_ids = set() + missing_repre_ids = set() + version_ids = set() + for repre_id in repre_ids: + repre_doc = repres_by_id.get(repre_id) + if repre_doc is None: + missing_repre_ids.add(repre_id) + elif repres_by_id[repre_id]["type"] == "archived_representation": + inactive_repre_ids.add(repre_id) + version_ids.add(repre_doc["parent"]) + else: + content_repre_docs_by_id[repre_id] = repre_doc + version_ids.add(repre_doc["parent"]) + + version_docs = get_versions( + project_name, + version_ids=version_ids, + hero=True + ) + content_version_docs_by_id = {} + for version_doc in version_docs: + version_id = version_doc["_id"] + content_version_docs_by_id[version_id] = version_doc + + missing_version_ids = set() + product_ids = set() + for version_id in version_ids: + 
version_doc = content_version_docs_by_id.get(version_id) + if version_doc is None: + missing_version_ids.add(version_id) + else: + product_ids.add(version_doc["parent"]) + + product_docs = get_subsets( + project_name, subset_ids=product_ids, archived=True + ) + product_docs_by_id = {sub["_id"]: sub for sub in product_docs} + + folder_ids = set() + inactive_product_ids = set() + missing_product_ids = set() + content_product_docs_by_id = {} + for product_id in product_ids: + product_doc = product_docs_by_id.get(product_id) + if product_doc is None: + missing_product_ids.add(product_id) + elif product_doc["type"] == "archived_subset": + folder_ids.add(product_doc["parent"]) + inactive_product_ids.add(product_id) + else: + folder_ids.add(product_doc["parent"]) + content_product_docs_by_id[product_id] = product_doc + + folder_docs = get_assets( + project_name, asset_ids=folder_ids, archived=True + ) + folder_docs_by_id = { + folder_doc["_id"]: folder_doc + for folder_doc in folder_docs + } + + missing_folder_ids = set() + inactive_folder_ids = set() + content_folder_docs_by_id = {} + for folder_id in folder_ids: + folder_doc = folder_docs_by_id.get(folder_id) + if folder_doc is None: + missing_folder_ids.add(folder_id) + elif folder_doc["type"] == "archived_asset": + inactive_folder_ids.add(folder_id) + else: + content_folder_docs_by_id[folder_id] = folder_doc + + # stash context values, works only for single representation + init_folder_id = None + init_product_name = None + init_repre_name = None + if len(repres) == 1: + init_repre_doc = repres[0] + init_version_doc = content_version_docs_by_id.get( + init_repre_doc["parent"]) + init_product_doc = None + init_folder_doc = None + if init_version_doc: + init_product_doc = content_product_docs_by_id.get( + init_version_doc["parent"] + ) + if init_product_doc: + init_folder_doc = content_folder_docs_by_id.get( + init_product_doc["parent"] + ) + if init_folder_doc: + init_repre_name = init_repre_doc["name"] + init_product_name = init_product_doc["name"] + init_folder_id = init_folder_doc["_id"] + + self._init_folder_id = init_folder_id + self._init_product_name = init_product_name + self._init_repre_name = init_repre_name + + self._folder_docs_by_id = content_folder_docs_by_id + self._product_docs_by_id = content_product_docs_by_id + self._version_docs_by_id = content_version_docs_by_id + self._repre_docs_by_id = content_repre_docs_by_id + + self._missing_folder_ids = missing_folder_ids + self._missing_product_ids = missing_product_ids + self._missing_version_ids = missing_version_ids + self._missing_repre_ids = missing_repre_ids + self._missing_docs = ( + bool(missing_folder_ids) + or bool(missing_version_ids) + or bool(missing_product_ids) + or bool(missing_repre_ids) + ) + + self._inactive_folder_ids = inactive_folder_ids + self._inactive_product_ids = inactive_product_ids + self._inactive_repre_ids = inactive_repre_ids + + def _combobox_value_changed(self, *args, **kwargs): + self.refresh() + + def _build_loaders_menu(self): + repre_ids = self._get_current_output_repre_ids() + loaders = self._get_loaders(repre_ids) + # Get and destroy the action group + self._accept_btn.clear_actions() + + if not loaders: + return + + # Build new action group + group = QtWidgets.QActionGroup(self._accept_btn) + + for loader in loaders: + # Label + label = getattr(loader, "label", None) + if label is None: + label = loader.__name__ + + action = group.addAction(label) + # action = QtWidgets.QAction(label) + action.setData(loader) + + # Support font-awesome icons 
using the `.icon` and `.color`
+ # attributes on plug-ins.
+ icon = getattr(loader, "icon", None)
+ if icon is not None:
+ try:
+ key = "fa.{0}".format(icon)
+ color = getattr(loader, "color", "white")
+ action.setIcon(qtawesome.icon(key, color=color))
+
+ except Exception as exc:
+ print("Unable to set icon for loader {}: {}".format(
+ loader, str(exc)
+ ))
+
+ self._accept_btn.add_action(action)
+
+ group.triggered.connect(self._on_action_clicked)
+
+ def _on_action_clicked(self, action):
+ loader_plugin = action.data()
+ self._trigger_switch(loader_plugin)
+
+ def _get_loaders(self, repre_ids):
+ repre_contexts = None
+ if repre_ids:
+ repre_contexts = get_repres_contexts(repre_ids)
+
+ if not repre_contexts:
+ return list()
+
+ available_loaders = []
+ for loader_plugin in discover_loader_plugins():
+ # Skip loaders without switch method
+ if not hasattr(loader_plugin, "switch"):
+ continue
+
+ # Skip utility loaders
+ if (
+ hasattr(loader_plugin, "is_utility")
+ and loader_plugin.is_utility
+ ):
+ continue
+ available_loaders.append(loader_plugin)
+
+ loaders = None
+ for repre_context in repre_contexts.values():
+ _loaders = set(loaders_from_repre_context(
+ available_loaders, repre_context
+ ))
+ if loaders is None:
+ loaders = _loaders
+ else:
+ loaders = _loaders.intersection(loaders)
+
+ if not loaders:
+ break
+
+ if loaders is None:
+ loaders = []
+ else:
+ loaders = list(loaders)
+
+ return loaders
+
+ def _fill_combobox(self, values, combobox_type):
+ if combobox_type == "product":
+ combobox_widget = self._products_combox
+ elif combobox_type == "repre":
+ combobox_widget = self._representations_box
+ else:
+ return
+ selected_value = combobox_widget.get_valid_value()
+
+ # Fill combobox
+ if values is not None:
+ combobox_widget.populate(list(sorted(values)))
+ if selected_value and selected_value in values:
+ index = None
+ for idx in range(combobox_widget.count()):
+ if selected_value == str(combobox_widget.itemText(idx)):
+ index = idx
+ break
+ if index is not None:
+ combobox_widget.setCurrentIndex(index)
+
+ def _set_style_property(self, widget, name, value):
+ cur_value = widget.property(name)
+ if cur_value == value:
+ return
+ widget.setProperty(name, value)
+ widget.style().polish(widget)
+
+ def _get_current_output_repre_ids(self):
+ # NOTE hero versions are not used because it is expected that a
+ # hero version has the same representations as the latest version
+ selected_folder_id = self._folders_field.get_selected_folder_id()
+ selected_product_name = self._products_combox.currentText()
+ selected_repre = self._representations_box.currentText()
+
+ # Nothing is selected
+ # [ ] [ ] [ ]
+ if (
+ not selected_folder_id
+ and not selected_product_name
+ and not selected_repre
+ ):
+ return list(self._repre_docs_by_id.keys())
+
+ # Everything is selected
+ # [x] [x] [x]
+ if selected_folder_id and selected_product_name and selected_repre:
+ return self._get_current_output_repre_ids_xxx(
+ selected_folder_id, selected_product_name, selected_repre
+ )
+
+ # [x] [x] [ ]
+ # If folder and product are selected
+ if selected_folder_id and selected_product_name:
+ return self._get_current_output_repre_ids_xxo(
+ selected_folder_id, selected_product_name
+ )
+
+ # [x] [ ] [x]
+ # If folder and repre are selected
+ if selected_folder_id and selected_repre:
+ return self._get_current_output_repre_ids_xox(
+ selected_folder_id, selected_repre
+ )
+
+ # [x] [ ] [ ]
+ # If only folder is selected
+ if selected_folder_id:
+ return
self._get_current_output_repre_ids_xoo(selected_folder_id) + + # [ ] [x] [x] + if selected_product_name and selected_repre: + return self._get_current_output_repre_ids_oxx( + selected_product_name, selected_repre + ) + + # [ ] [x] [ ] + if selected_product_name: + return self._get_current_output_repre_ids_oxo( + selected_product_name + ) + + # [ ] [ ] [x] + return self._get_current_output_repre_ids_oox(selected_repre) + + def _get_current_output_repre_ids_xxx( + self, folder_id, selected_product_name, selected_repre + ): + project_name = self._project_name + product_doc = get_subset_by_name( + project_name, + selected_product_name, + folder_id, + fields=["_id"] + ) + + product_id = product_doc["_id"] + last_versions_by_product_id = self.find_last_versions([product_id]) + version_doc = last_versions_by_product_id.get(product_id) + if not version_doc: + return [] + + repre_docs = get_representations( + project_name, + version_ids=[version_doc["_id"]], + representation_names=[selected_repre], + fields=["_id"] + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_xxo(self, folder_id, product_name): + project_name = self._project_name + product_doc = get_subset_by_name( + project_name, + product_name, + folder_id, + fields=["_id"] + ) + if not product_doc: + return [] + + repre_names = set() + for repre_doc in self._repre_docs_by_id.values(): + repre_names.add(repre_doc["name"]) + + # TODO where to take version ids? + version_ids = [] + repre_docs = get_representations( + project_name, + representation_names=repre_names, + version_ids=version_ids, + fields=["_id"] + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_xox(self, folder_id, selected_repre): + product_names = { + product_doc["name"] + for product_doc in self._product_docs_by_id.values() + } + + project_name = self._project_name + product_docs = get_subsets( + project_name, + asset_ids=[folder_id], + subset_names=product_names, + fields=["_id", "name"] + ) + product_name_by_id = { + product_doc["_id"]: product_doc["name"] + for product_doc in product_docs + } + product_ids = list(product_name_by_id.keys()) + last_versions_by_product_id = self.find_last_versions(product_ids) + last_version_id_by_product_name = {} + for product_id, last_version in last_versions_by_product_id.items(): + product_name = product_name_by_id[product_id] + last_version_id_by_product_name[product_name] = ( + last_version["_id"] + ) + + repre_docs = get_representations( + project_name, + version_ids=last_version_id_by_product_name.values(), + representation_names=[selected_repre], + fields=["_id"] + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_xoo(self, folder_id): + project_name = self._project_name + repres_by_product_name = collections.defaultdict(set) + for repre_doc in self._repre_docs_by_id.values(): + version_doc = self._version_docs_by_id[repre_doc["parent"]] + product_doc = self._product_docs_by_id[version_doc["parent"]] + product_name = product_doc["name"] + repres_by_product_name[product_name].add(repre_doc["name"]) + + product_docs = list(get_subsets( + project_name, + asset_ids=[folder_id], + subset_names=repres_by_product_name.keys(), + fields=["_id", "name"] + )) + product_name_by_id = { + product_doc["_id"]: product_doc["name"] + for product_doc in product_docs + } + product_ids = list(product_name_by_id.keys()) + last_versions_by_product_id = self.find_last_versions(product_ids) + 
last_version_id_by_product_name = {} + for product_id, last_version in last_versions_by_product_id.items(): + product_name = product_name_by_id[product_id] + last_version_id_by_product_name[product_name] = ( + last_version["_id"] + ) + + repre_names_by_version_id = {} + for product_name, repre_names in repres_by_product_name.items(): + version_id = last_version_id_by_product_name.get(product_name) + # This should not happen but why to crash? + if version_id is not None: + repre_names_by_version_id[version_id] = list(repre_names) + + repre_docs = get_representations( + project_name, + names_by_version_ids=repre_names_by_version_id, + fields=["_id"] + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_oxx( + self, product_name, selected_repre + ): + project_name = self._project_name + product_docs = get_subsets( + project_name, + asset_ids=self._folder_docs_by_id.keys(), + subset_names=[product_name], + fields=["_id"] + ) + product_ids = [product_doc["_id"] for product_doc in product_docs] + last_versions_by_product_id = self.find_last_versions(product_ids) + last_version_ids = [ + last_version["_id"] + for last_version in last_versions_by_product_id.values() + ] + repre_docs = get_representations( + project_name, + version_ids=last_version_ids, + representation_names=[selected_repre], + fields=["_id"] + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_oxo(self, product_name): + project_name = self._project_name + product_docs = get_subsets( + project_name, + asset_ids=self._folder_docs_by_id.keys(), + subset_names=[product_name], + fields=["_id", "parent"] + ) + product_docs_by_id = { + product_doc["_id"]: product_doc + for product_doc in product_docs + } + if not product_docs: + return list() + + last_versions_by_product_id = self.find_last_versions( + product_docs_by_id.keys() + ) + + product_id_by_version_id = {} + for product_id, last_version in last_versions_by_product_id.items(): + version_id = last_version["_id"] + product_id_by_version_id[version_id] = product_id + + if not product_id_by_version_id: + return list() + + repre_names_by_folder_id = collections.defaultdict(set) + for repre_doc in self._repre_docs_by_id.values(): + version_doc = self._version_docs_by_id[repre_doc["parent"]] + product_doc = self._product_docs_by_id[version_doc["parent"]] + folder_doc = self._folder_docs_by_id[product_doc["parent"]] + folder_id = folder_doc["_id"] + repre_names_by_folder_id[folder_id].add(repre_doc["name"]) + + repre_names_by_version_id = {} + for last_version_id, product_id in product_id_by_version_id.items(): + product_doc = product_docs_by_id[product_id] + folder_id = product_doc["parent"] + repre_names = repre_names_by_folder_id.get(folder_id) + if not repre_names: + continue + repre_names_by_version_id[last_version_id] = repre_names + + repre_docs = get_representations( + project_name, + names_by_version_ids=repre_names_by_version_id, + fields=["_id"] + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_current_output_repre_ids_oox(self, selected_repre): + project_name = self._project_name + repre_docs = get_representations( + project_name, + representation_names=[selected_repre], + version_ids=self._version_docs_by_id.keys(), + fields=["_id"] + ) + return [repre_doc["_id"] for repre_doc in repre_docs] + + def _get_product_box_values(self): + project_name = self._project_name + selected_folder_id = self._folders_field.get_selected_folder_id() + if selected_folder_id: + 
folder_ids = [selected_folder_id] + else: + folder_ids = list(self._folder_docs_by_id.keys()) + + product_docs = get_subsets( + project_name, + asset_ids=folder_ids, + fields=["parent", "name"] + ) + + product_names_by_parent_id = collections.defaultdict(set) + for product_doc in product_docs: + product_names_by_parent_id[product_doc["parent"]].add( + product_doc["name"] + ) + + possible_product_names = None + for product_names in product_names_by_parent_id.values(): + if possible_product_names is None: + possible_product_names = product_names + else: + possible_product_names = possible_product_names.intersection( + product_names) + + if not possible_product_names: + break + + if not possible_product_names: + return [] + return list(possible_product_names) + + def _representations_box_values(self): + # NOTE hero versions are not used because it is expected that + # hero version has same representations as latests + project_name = self._project_name + selected_folder_id = self._folders_field.get_selected_folder_id() + selected_product_name = self._products_combox.currentText() + + # If nothing is selected + # [ ] [ ] [?] + if not selected_folder_id and not selected_product_name: + # Find all representations of selection's products + possible_repres = get_representations( + project_name, + version_ids=self._version_docs_by_id.keys(), + fields=["parent", "name"] + ) + + possible_repres_by_parent = collections.defaultdict(set) + for repre in possible_repres: + possible_repres_by_parent[repre["parent"]].add(repre["name"]) + + output_repres = None + for repre_names in possible_repres_by_parent.values(): + if output_repres is None: + output_repres = repre_names + else: + output_repres = (output_repres & repre_names) + + if not output_repres: + break + + return list(output_repres or list()) + + # [x] [x] [?] + if selected_folder_id and selected_product_name: + product_doc = get_subset_by_name( + project_name, + selected_product_name, + selected_folder_id, + fields=["_id"] + ) + + product_id = product_doc["_id"] + last_versions_by_product_id = self.find_last_versions([product_id]) + version_doc = last_versions_by_product_id.get(product_id) + repre_docs = get_representations( + project_name, + version_ids=[version_doc["_id"]], + fields=["name"] + ) + return [ + repre_doc["name"] + for repre_doc in repre_docs + ] + + # [x] [ ] [?] 
+ # If only folder is selected + if selected_folder_id: + # Filter products by names from content + product_names = { + product_doc["name"] + for product_doc in self._product_docs_by_id.values() + } + + product_docs = get_subsets( + project_name, + asset_ids=[selected_folder_id], + subset_names=product_names, + fields=["_id"] + ) + product_ids = { + product_doc["_id"] + for product_doc in product_docs + } + if not product_ids: + return list() + + last_versions_by_product_id = self.find_last_versions(product_ids) + product_id_by_version_id = {} + for product_id, last_version in ( + last_versions_by_product_id.items() + ): + version_id = last_version["_id"] + product_id_by_version_id[version_id] = product_id + + if not product_id_by_version_id: + return list() + + repre_docs = list(get_representations( + project_name, + version_ids=product_id_by_version_id.keys(), + fields=["name", "parent"] + )) + if not repre_docs: + return list() + + repre_names_by_parent = collections.defaultdict(set) + for repre_doc in repre_docs: + repre_names_by_parent[repre_doc["parent"]].add( + repre_doc["name"] + ) + + available_repres = None + for repre_names in repre_names_by_parent.values(): + if available_repres is None: + available_repres = repre_names + continue + + available_repres = available_repres.intersection(repre_names) + + return list(available_repres) + + # [ ] [x] [?] + product_docs = list(get_subsets( + project_name, + asset_ids=self._folder_docs_by_id.keys(), + subset_names=[selected_product_name], + fields=["_id", "parent"] + )) + if not product_docs: + return list() + + product_docs_by_id = { + product_doc["_id"]: product_doc + for product_doc in product_docs + } + last_versions_by_product_id = self.find_last_versions( + product_docs_by_id.keys() + ) + + product_id_by_version_id = {} + for product_id, last_version in last_versions_by_product_id.items(): + version_id = last_version["_id"] + product_id_by_version_id[version_id] = product_id + + if not product_id_by_version_id: + return list() + + repre_docs = list( + get_representations( + project_name, + version_ids=product_id_by_version_id.keys(), + fields=["name", "parent"] + ) + ) + if not repre_docs: + return list() + + repre_names_by_folder_id = collections.defaultdict(set) + for repre_doc in repre_docs: + product_id = product_id_by_version_id[repre_doc["parent"]] + folder_id = product_docs_by_id[product_id]["parent"] + repre_names_by_folder_id[folder_id].add(repre_doc["name"]) + + available_repres = None + for repre_names in repre_names_by_folder_id.values(): + if available_repres is None: + available_repres = repre_names + continue + + available_repres = available_repres.intersection(repre_names) + + return list(available_repres) + + def _is_folder_ok(self, validation_state): + selected_folder_id = self._folders_field.get_selected_folder_id() + if ( + selected_folder_id is None + and (self._missing_docs or self._inactive_folder_ids) + ): + validation_state.folder_ok = False + + def _is_product_ok(self, validation_state): + selected_folder_id = self._folders_field.get_selected_folder_id() + selected_product_name = self._products_combox.get_valid_value() + + # [?] [x] [?] + # If product is selected then must be ok + if selected_product_name is not None: + return + + # [ ] [ ] [?] + if selected_folder_id is None: + # If there were archived products and folder is not selected + if self._inactive_product_ids: + validation_state.product_ok = False + return + + # [x] [ ] [?] 
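+ # Only a folder is selected: validation passes only when every
+ # loaded product name (e.g. a hypothetical "modelMain") still
+ # exists under the selected folder.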
+ project_name = self._project_name + product_docs = get_subsets( + project_name, asset_ids=[selected_folder_id], fields=["name"] + ) + + product_names = set( + product_doc["name"] + for product_doc in product_docs + ) + + for product_doc in self._product_docs_by_id.values(): + if product_doc["name"] not in product_names: + validation_state.product_ok = False + break + + def _is_repre_ok(self, validation_state): + selected_folder_id = self._folders_field.get_selected_folder_id() + selected_product_name = self._products_combox.get_valid_value() + selected_repre = self._representations_box.get_valid_value() + + # [?] [?] [x] + # If product is selected then must be ok + if selected_repre is not None: + return + + # [ ] [ ] [ ] + if selected_folder_id is None and selected_product_name is None: + if ( + self._inactive_repre_ids + or self._missing_version_ids + or self._missing_repre_ids + ): + validation_state.repre_ok = False + return + + # [x] [x] [ ] + project_name = self._project_name + if ( + selected_folder_id is not None + and selected_product_name is not None + ): + product_doc = get_subset_by_name( + project_name, + selected_product_name, + selected_folder_id, + fields=["_id"] + ) + product_id = product_doc["_id"] + last_versions_by_product_id = self.find_last_versions([product_id]) + last_version = last_versions_by_product_id.get(product_id) + if not last_version: + validation_state.repre_ok = False + return + + repre_docs = get_representations( + project_name, + version_ids=[last_version["_id"]], + fields=["name"] + ) + + repre_names = set( + repre_doc["name"] + for repre_doc in repre_docs + ) + for repre_doc in self._repre_docs_by_id.values(): + if repre_doc["name"] not in repre_names: + validation_state.repre_ok = False + break + return + + # [x] [ ] [ ] + if selected_folder_id is not None: + product_docs = list(get_subsets( + project_name, + asset_ids=[selected_folder_id], + fields=["_id", "name"] + )) + + product_name_by_id = {} + product_ids = set() + for product_doc in product_docs: + product_id = product_doc["_id"] + product_ids.add(product_id) + product_name_by_id[product_id] = product_doc["name"] + + last_versions_by_product_id = self.find_last_versions(product_ids) + + product_id_by_version_id = {} + for product_id, last_version in ( + last_versions_by_product_id.items() + ): + version_id = last_version["_id"] + product_id_by_version_id[version_id] = product_id + + repre_docs = get_representations( + project_name, + version_ids=product_id_by_version_id.keys(), + fields=["name", "parent"] + ) + repres_by_product_name = collections.defaultdict(set) + for repre_doc in repre_docs: + product_id = product_id_by_version_id[repre_doc["parent"]] + product_name = product_name_by_id[product_id] + repres_by_product_name[product_name].add(repre_doc["name"]) + + for repre_doc in self._repre_docs_by_id.values(): + version_doc = self._version_docs_by_id[repre_doc["parent"]] + product_doc = self._product_docs_by_id[version_doc["parent"]] + repre_names = repres_by_product_name[product_doc["name"]] + if repre_doc["name"] not in repre_names: + validation_state.repre_ok = False + break + return + + # [ ] [x] [ ] + # Product documents + product_docs = get_subsets( + project_name, + asset_ids=self._folder_docs_by_id.keys(), + subset_names=[selected_product_name], + fields=["_id", "name", "parent"] + ) + product_docs_by_id = {} + for product_doc in product_docs: + product_docs_by_id[product_doc["_id"]] = product_doc + + last_versions_by_product_id = self.find_last_versions( + 
product_docs_by_id.keys() + ) + product_id_by_version_id = {} + for product_id, last_version in last_versions_by_product_id.items(): + version_id = last_version["_id"] + product_id_by_version_id[version_id] = product_id + + repre_docs = get_representations( + project_name, + version_ids=product_id_by_version_id.keys(), + fields=["name", "parent"] + ) + repres_by_folder_id = collections.defaultdict(set) + for repre_doc in repre_docs: + product_id = product_id_by_version_id[repre_doc["parent"]] + folder_id = product_docs_by_id[product_id]["parent"] + repres_by_folder_id[folder_id].add(repre_doc["name"]) + + for repre_doc in self._repre_docs_by_id.values(): + version_doc = self._version_docs_by_id[repre_doc["parent"]] + product_doc = self._product_docs_by_id[version_doc["parent"]] + folder_id = product_doc["parent"] + repre_names = repres_by_folder_id[folder_id] + if repre_doc["name"] not in repre_names: + validation_state.repre_ok = False + break + + def _on_current_folder(self): + # Set initial folder as current. + folder_id = self._controller.get_current_folder_id() + if not folder_id: + return + + selected_folder_id = self._folders_field.get_selected_folder_id() + if folder_id == selected_folder_id: + return + + self._folders_field.set_selected_item(folder_id) + self._combobox_value_changed() + + def _on_accept(self): + self._trigger_switch() + + def _trigger_switch(self, loader=None): + # Use None when not a valid value or when placeholder value + selected_folder_id = self._folders_field.get_selected_folder_id() + selected_product_name = self._products_combox.get_valid_value() + selected_representation = self._representations_box.get_valid_value() + + project_name = self._project_name + if selected_folder_id: + folder_ids = {selected_folder_id} + else: + folder_ids = set(self._folder_docs_by_id.keys()) + + product_names = None + if selected_product_name: + product_names = [selected_product_name] + + product_docs = list(get_subsets( + project_name, + subset_names=product_names, + asset_ids=folder_ids + )) + product_ids = set() + product_docs_by_parent_and_name = collections.defaultdict(dict) + for product_doc in product_docs: + product_ids.add(product_doc["_id"]) + folder_id = product_doc["parent"] + name = product_doc["name"] + product_docs_by_parent_and_name[folder_id][name] = product_doc + + # versions + _version_docs = get_versions(project_name, subset_ids=product_ids) + version_docs = list(reversed( + sorted(_version_docs, key=lambda item: item["name"]) + )) + + hero_version_docs = list(get_hero_versions( + project_name, subset_ids=product_ids + )) + + version_ids = set() + version_docs_by_parent_id_and_name = collections.defaultdict(dict) + for version_doc in version_docs: + version_ids.add(version_doc["_id"]) + product_id = version_doc["parent"] + name = version_doc["name"] + version_docs_by_parent_id_and_name[product_id][name] = version_doc + + hero_version_docs_by_parent_id = {} + for hero_version_doc in hero_version_docs: + version_ids.add(hero_version_doc["_id"]) + parent_id = hero_version_doc["parent"] + hero_version_docs_by_parent_id[parent_id] = hero_version_doc + + repre_docs = get_representations( + project_name, version_ids=version_ids + ) + repre_docs_by_parent_id_by_name = collections.defaultdict(dict) + for repre_doc in repre_docs: + parent_id = repre_doc["parent"] + name = repre_doc["name"] + repre_docs_by_parent_id_by_name[parent_id][name] = repre_doc + + for container in self._items: + self._switch_container( + container, + loader, + selected_folder_id, + 
selected_product_name,
+ selected_representation,
+ product_docs_by_parent_and_name,
+ version_docs_by_parent_id_and_name,
+ hero_version_docs_by_parent_id,
+ repre_docs_by_parent_id_by_name,
+ )
+
+ self.switched.emit()
+
+ self.close()
+
+ def _switch_container(
+ self,
+ container,
+ loader,
+ selected_folder_id,
+ selected_product_name,
+ selected_representation,
+ product_docs_by_parent_and_name,
+ version_docs_by_parent_id_and_name,
+ hero_version_docs_by_parent_id,
+ repre_docs_by_parent_id_by_name,
+ ):
+ container_repre_id = container["representation"]
+ container_repre = self._repre_docs_by_id[container_repre_id]
+ container_repre_name = container_repre["name"]
+ container_version_id = container_repre["parent"]
+
+ container_version = self._version_docs_by_id[container_version_id]
+
+ container_product_id = container_version["parent"]
+ container_product = self._product_docs_by_id[container_product_id]
+ container_product_name = container_product["name"]
+
+ container_folder_id = container_product["parent"]
+
+ if selected_folder_id:
+ folder_id = selected_folder_id
+ else:
+ folder_id = container_folder_id
+
+ products_by_name = product_docs_by_parent_and_name[folder_id]
+ if selected_product_name:
+ product_doc = products_by_name[selected_product_name]
+ else:
+ product_doc = products_by_name[container_product["name"]]
+
+ repre_doc = None
+ product_id = product_doc["_id"]
+ if container_version["type"] == "hero_version":
+ hero_version = hero_version_docs_by_parent_id.get(
+ product_id
+ )
+ if hero_version:
+ _repres = repre_docs_by_parent_id_by_name.get(
+ hero_version["_id"]
+ )
+ if selected_representation:
+ repre_doc = _repres.get(selected_representation)
+ else:
+ repre_doc = _repres.get(container_repre_name)
+
+ if not repre_doc:
+ version_docs_by_name = (
+ version_docs_by_parent_id_and_name[product_id]
+ )
+ # If a different folder or product is selected for switching,
+ # use the latest version; otherwise try to keep the current
+ # container version.
+ version_name = None
+ if (
+ selected_folder_id in (None, container_folder_id)
+ and selected_product_name in (None, container_product_name)
+ ):
+ version_name = container_version.get("name")
+
+ version_doc = None
+ if version_name is not None:
+ version_doc = version_docs_by_name.get(version_name)
+
+ if version_doc is None:
+ version_name = max(version_docs_by_name)
+ version_doc = version_docs_by_name[version_name]
+
+ version_id = version_doc["_id"]
+ repres_by_name = repre_docs_by_parent_id_by_name[version_id]
+ if selected_representation:
+ repre_doc = repres_by_name[selected_representation]
+ else:
+ repre_doc = repres_by_name[container_repre_name]
+
+ error = None
+ try:
+ switch_container(container, repre_doc, loader)
+ except (
+ LoaderSwitchNotImplementedError,
+ IncompatibleLoaderError,
+ LoaderNotFoundError,
+ ) as exc:
+ error = str(exc)
+ except Exception:
+ error = (
+ "Switch asset failed. "
+ "Search the console log for more details."
+ )
+ if error is not None:
+ log.warning((
+ "Couldn't switch asset. "
+ "See traceback for more information."
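+ # NOTE: 'exc_info=True' can only attach a traceback while an
+ # exception is being handled; since the except block above has
+ # already exited, the traceback may no longer be available here.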
+ ), exc_info=True) + dialog = QtWidgets.QMessageBox(self) + dialog.setWindowTitle("Switch asset failed") + dialog.setText(error) + dialog.exec_() diff --git a/openpype/tools/ayon_sceneinventory/switch_dialog/folders_input.py b/client/ayon_core/tools/sceneinventory/switch_dialog/folders_input.py similarity index 98% rename from openpype/tools/ayon_sceneinventory/switch_dialog/folders_input.py rename to client/ayon_core/tools/sceneinventory/switch_dialog/folders_input.py index 699c62371a..2358a82a7f 100644 --- a/openpype/tools/ayon_sceneinventory/switch_dialog/folders_input.py +++ b/client/ayon_core/tools/sceneinventory/switch_dialog/folders_input.py @@ -1,12 +1,12 @@ from qtpy import QtWidgets, QtCore import qtawesome -from openpype.tools.utils import ( +from ayon_core.tools.utils import ( PlaceholderLineEdit, BaseClickableFrame, set_style_property, ) -from openpype.tools.ayon_utils.widgets import FoldersWidget +from ayon_core.tools.ayon_utils.widgets import FoldersWidget NOT_SET = object() diff --git a/client/ayon_core/tools/sceneinventory/switch_dialog/widgets.py b/client/ayon_core/tools/sceneinventory/switch_dialog/widgets.py new file mode 100644 index 0000000000..6d5c00c199 --- /dev/null +++ b/client/ayon_core/tools/sceneinventory/switch_dialog/widgets.py @@ -0,0 +1,94 @@ +from qtpy import QtWidgets, QtCore + +from ayon_core import style + + +class ButtonWithMenu(QtWidgets.QToolButton): + def __init__(self, parent=None): + super(ButtonWithMenu, self).__init__(parent) + + self.setObjectName("ButtonWithMenu") + + self.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup) + menu = QtWidgets.QMenu(self) + + self.setMenu(menu) + + self._menu = menu + self._actions = [] + + def menu(self): + return self._menu + + def clear_actions(self): + if self._menu is not None: + self._menu.clear() + self._actions = [] + + def add_action(self, action): + self._actions.append(action) + self._menu.addAction(action) + + def _on_action_trigger(self): + action = self.sender() + if action not in self._actions: + return + action.trigger() + + +class SearchComboBox(QtWidgets.QComboBox): + """Searchable ComboBox with empty placeholder value as first value""" + + def __init__(self, parent): + super(SearchComboBox, self).__init__(parent) + + self.setEditable(True) + self.setInsertPolicy(QtWidgets.QComboBox.NoInsert) + + combobox_delegate = QtWidgets.QStyledItemDelegate(self) + self.setItemDelegate(combobox_delegate) + + completer = self.completer() + completer.setCompletionMode( + QtWidgets.QCompleter.PopupCompletion + ) + completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive) + + completer_view = completer.popup() + completer_view.setObjectName("CompleterView") + completer_delegate = QtWidgets.QStyledItemDelegate(completer_view) + completer_view.setItemDelegate(completer_delegate) + completer_view.setStyleSheet(style.load_stylesheet()) + + self._combobox_delegate = combobox_delegate + + self._completer_delegate = completer_delegate + self._completer = completer + + def set_placeholder(self, placeholder): + self.lineEdit().setPlaceholderText(placeholder) + + def populate(self, items): + self.clear() + self.addItems([""]) # ensure first item is placeholder + self.addItems(items) + + def get_valid_value(self): + """Return the current text if it's a valid value else None + + Note: The empty placeholder value is valid and returns as "" + + """ + + text = self.currentText() + lookup = set(self.itemText(i) for i in range(self.count())) + if text not in lookup: + return None + + return text or None + + def 
set_valid_value(self, value): + """Try to locate 'value' and pre-select it in dropdown.""" + index = self.findText(value) + if index > -1: + self.setCurrentIndex(index) diff --git a/client/ayon_core/tools/sceneinventory/view.py b/client/ayon_core/tools/sceneinventory/view.py new file mode 100644 index 0000000000..214be68ae0 --- /dev/null +++ b/client/ayon_core/tools/sceneinventory/view.py @@ -0,0 +1,825 @@ +import uuid +import collections +import logging +import itertools +from functools import partial + +from qtpy import QtWidgets, QtCore +import qtawesome + +from ayon_core.client import ( + get_version_by_id, + get_versions, + get_hero_versions, + get_representation_by_id, + get_representations, +) +from ayon_core import style +from ayon_core.pipeline import ( + HeroVersionType, + update_container, + remove_container, + discover_inventory_actions, +) +from ayon_core.tools.utils.lib import ( + iter_model_rows, + format_version +) + +from .switch_dialog import SwitchAssetDialog +from .model import InventoryModel + + +DEFAULT_COLOR = "#fb9c15" + +log = logging.getLogger("SceneInventory") + + +class SceneInventoryView(QtWidgets.QTreeView): + data_changed = QtCore.Signal() + hierarchy_view_changed = QtCore.Signal(bool) + + def __init__(self, controller, parent): + super(SceneInventoryView, self).__init__(parent=parent) + + # view settings + self.setIndentation(12) + self.setAlternatingRowColors(True) + self.setSortingEnabled(True) + self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) + self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + self.customContextMenuRequested.connect(self._show_right_mouse_menu) + + self._hierarchy_view = False + self._selected = None + + self._controller = controller + + def _set_hierarchy_view(self, enabled): + if enabled == self._hierarchy_view: + return + self._hierarchy_view = enabled + self.hierarchy_view_changed.emit(enabled) + + def _enter_hierarchy(self, items): + self._selected = set(i["objectName"] for i in items) + self._set_hierarchy_view(True) + self.data_changed.emit() + self.expandToDepth(1) + self.setStyleSheet(""" + QTreeView { + border-color: #fb9c15; + } + """) + + def _leave_hierarchy(self): + self._set_hierarchy_view(False) + self.data_changed.emit() + self.setStyleSheet("QTreeView {}") + + def _build_item_menu_for_selection(self, items, menu): + # Exclude items that are "NOT FOUND" since setting versions, updating + # and removal won't work for those items. 
+ items = [item for item in items if not item.get("isNotFound")] + if not items: + return + + # An item might not have a representation, for example when an item + # is listed as "NOT FOUND" + repre_ids = set() + for item in items: + repre_id = item["representation"] + try: + uuid.UUID(repre_id) + repre_ids.add(repre_id) + except ValueError: + pass + + project_name = self._controller.get_current_project_name() + repre_docs = get_representations( + project_name, representation_ids=repre_ids, fields=["parent"] + ) + + version_ids = { + repre_doc["parent"] + for repre_doc in repre_docs + } + + loaded_versions = get_versions( + project_name, version_ids=version_ids, hero=True + ) + + loaded_hero_versions = [] + versions_by_parent_id = collections.defaultdict(list) + subset_ids = set() + for version in loaded_versions: + if version["type"] == "hero_version": + loaded_hero_versions.append(version) + else: + parent_id = version["parent"] + versions_by_parent_id[parent_id].append(version) + subset_ids.add(parent_id) + + all_versions = get_versions( + project_name, subset_ids=subset_ids, hero=True + ) + hero_versions = [] + versions = [] + for version in all_versions: + if version["type"] == "hero_version": + hero_versions.append(version) + else: + versions.append(version) + + has_loaded_hero_versions = len(loaded_hero_versions) > 0 + has_available_hero_version = len(hero_versions) > 0 + has_outdated = False + + for version in versions: + parent_id = version["parent"] + current_versions = versions_by_parent_id[parent_id] + for current_version in current_versions: + if current_version["name"] < version["name"]: + has_outdated = True + break + + if has_outdated: + break + + switch_to_versioned = None + if has_loaded_hero_versions: + def _on_switch_to_versioned(items): + repre_ids = { + item["representation"] + for item in items + } + + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["parent"] + ) + + version_ids = set() + version_id_by_repre_id = {} + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + repre_id = str(repre_doc["_id"]) + version_id_by_repre_id[repre_id] = version_id + version_ids.add(version_id) + + hero_versions = get_hero_versions( + project_name, + version_ids=version_ids, + fields=["version_id"] + ) + + hero_src_version_ids = set() + for hero_version in hero_versions: + version_id = hero_version["version_id"] + hero_src_version_ids.add(version_id) + hero_version_id = hero_version["_id"] + for _repre_id, current_version_id in ( + version_id_by_repre_id.items() + ): + if current_version_id == hero_version_id: + version_id_by_repre_id[_repre_id] = version_id + + version_docs = get_versions( + project_name, + version_ids=hero_src_version_ids, + fields=["name"] + ) + version_name_by_id = {} + for version_doc in version_docs: + version_name_by_id[version_doc["_id"]] = \ + version_doc["name"] + + # Specify version per item to update to + update_items = [] + update_versions = [] + for item in items: + repre_id = item["representation"] + version_id = version_id_by_repre_id.get(repre_id) + version_name = version_name_by_id.get(version_id) + if version_name is not None: + update_items.append(item) + update_versions.append(version_name) + self._update_containers(update_items, update_versions) + + update_icon = qtawesome.icon( + "fa.asterisk", + color=DEFAULT_COLOR + ) + switch_to_versioned = QtWidgets.QAction( + update_icon, + "Switch to versioned", + menu + ) + switch_to_versioned.triggered.connect( + lambda: 
_on_switch_to_versioned(items) + ) + + update_to_latest_action = None + if has_outdated or has_loaded_hero_versions: + update_icon = qtawesome.icon( + "fa.angle-double-up", + color=DEFAULT_COLOR + ) + update_to_latest_action = QtWidgets.QAction( + update_icon, + "Update to latest", + menu + ) + update_to_latest_action.triggered.connect( + lambda: self._update_containers(items, version=-1) + ) + + change_to_hero = None + if has_available_hero_version: + # TODO change icon + change_icon = qtawesome.icon( + "fa.asterisk", + color="#00b359" + ) + change_to_hero = QtWidgets.QAction( + change_icon, + "Change to hero", + menu + ) + change_to_hero.triggered.connect( + lambda: self._update_containers(items, + version=HeroVersionType(-1)) + ) + + # set version + set_version_icon = qtawesome.icon("fa.hashtag", color=DEFAULT_COLOR) + set_version_action = QtWidgets.QAction( + set_version_icon, + "Set version", + menu + ) + set_version_action.triggered.connect( + lambda: self._show_version_dialog(items)) + + # switch folder + switch_folder_icon = qtawesome.icon("fa.sitemap", color=DEFAULT_COLOR) + switch_folder_action = QtWidgets.QAction( + switch_folder_icon, + "Switch Folder", + menu + ) + switch_folder_action.triggered.connect( + lambda: self._show_switch_dialog(items)) + + # remove + remove_icon = qtawesome.icon("fa.remove", color=DEFAULT_COLOR) + remove_action = QtWidgets.QAction(remove_icon, "Remove items", menu) + remove_action.triggered.connect( + lambda: self._show_remove_warning_dialog(items)) + + # add the actions + if switch_to_versioned: + menu.addAction(switch_to_versioned) + + if update_to_latest_action: + menu.addAction(update_to_latest_action) + + if change_to_hero: + menu.addAction(change_to_hero) + + menu.addAction(set_version_action) + menu.addAction(switch_folder_action) + + menu.addSeparator() + + menu.addAction(remove_action) + + self._handle_sync_server(menu, repre_ids) + + def _handle_sync_server(self, menu, repre_ids): + """Adds actions for download/upload when SyncServer is enabled + + Args: + menu (OptionMenu) + repre_ids (list) of object_ids + + Returns: + (OptionMenu) + """ + + if not self._controller.is_sync_server_enabled(): + return + + menu.addSeparator() + + download_icon = qtawesome.icon("fa.download", color=DEFAULT_COLOR) + download_active_action = QtWidgets.QAction( + download_icon, + "Download", + menu + ) + download_active_action.triggered.connect( + lambda: self._add_sites(repre_ids, "active_site")) + + upload_icon = qtawesome.icon("fa.upload", color=DEFAULT_COLOR) + upload_remote_action = QtWidgets.QAction( + upload_icon, + "Upload", + menu + ) + upload_remote_action.triggered.connect( + lambda: self._add_sites(repre_ids, "remote_site")) + + menu.addAction(download_active_action) + menu.addAction(upload_remote_action) + + def _add_sites(self, repre_ids, site_type): + """(Re)sync all 'repre_ids' to specific site. + + It checks if opposite site has fully available content to limit + accidents. (ReSync active when no remote >> losing active content) + + Args: + repre_ids (list) + site_type (Literal[active_site, remote_site]): Site type. 
+        """
+
+        self._controller.resync_representations(repre_ids, site_type)
+
+        self.data_changed.emit()
+
+    def _build_item_menu(self, items=None):
+        """Create menu for the selected items"""
+
+        if not items:
+            items = []
+
+        menu = QtWidgets.QMenu(self)
+
+        # add the actions
+        self._build_item_menu_for_selection(items, menu)
+
+        # These two actions should be able to work without selection
+        # expand all items
+        expandall_action = QtWidgets.QAction(menu, text="Expand all items")
+        expandall_action.triggered.connect(self.expandAll)
+
+        # collapse all items
+        collapse_action = QtWidgets.QAction(menu, text="Collapse all items")
+        collapse_action.triggered.connect(self.collapseAll)
+
+        menu.addAction(expandall_action)
+        menu.addAction(collapse_action)
+
+        custom_actions = self._get_custom_actions(containers=items)
+        if custom_actions:
+            submenu = QtWidgets.QMenu("Actions", self)
+            for action in custom_actions:
+                color = action.color or DEFAULT_COLOR
+                icon = qtawesome.icon("fa.%s" % action.icon, color=color)
+                action_item = QtWidgets.QAction(icon, action.label, submenu)
+                action_item.triggered.connect(
+                    partial(self._process_custom_action, action, items))
+
+                submenu.addAction(action_item)
+
+            menu.addMenu(submenu)
+
+        # go back to flat view
+        back_to_flat_action = None
+        if self._hierarchy_view:
+            back_to_flat_icon = qtawesome.icon("fa.list", color=DEFAULT_COLOR)
+            back_to_flat_action = QtWidgets.QAction(
+                back_to_flat_icon,
+                "Back to Full-View",
+                menu
+            )
+            back_to_flat_action.triggered.connect(self._leave_hierarchy)
+
+        # send items to hierarchy view
+        enter_hierarchy_icon = qtawesome.icon("fa.indent", color="#d8d8d8")
+        enter_hierarchy_action = QtWidgets.QAction(
+            enter_hierarchy_icon,
+            "Cherry-Pick (Hierarchy)",
+            menu
+        )
+        enter_hierarchy_action.triggered.connect(
+            lambda: self._enter_hierarchy(items))
+
+        if items:
+            menu.addAction(enter_hierarchy_action)
+
+        if back_to_flat_action is not None:
+            menu.addAction(back_to_flat_action)
+
+        return menu
+
+    def _get_custom_actions(self, containers):
+        """Get the registered Inventory Actions
+
+        Args:
+            containers(list): collection of containers
+
+        Returns:
+            list: collection of filtered and initialized actions
+        """
+
+        def sorter(Plugin):
+            """Sort based on order attribute of the plugin"""
+            return Plugin.order
+
+        # Feed an empty dict if there is no selection; this ensures the
+        # compatibility lookup always works, so plugins can interact with
+        # the Scene Inventory in reverse.
+        containers = containers or [dict()]
+
+        # Check which actions will be available in the menu
+        Plugins = discover_inventory_actions()
+        compatible = [p() for p in Plugins if
+                      any(p.is_compatible(c) for c in containers)]
+
+        return sorted(compatible, key=sorter)
+
+    def _process_custom_action(self, action, containers):
+        """Run the action and update the view if it returns a truthy result.
+
+        If the result is a list or a dict, view items are selected based on it.
+ + Args: + action (InventoryAction): Inventory Action instance + containers (list): Data of currently selected items + + Returns: + None + """ + + result = action.process(containers) + if result: + self.data_changed.emit() + + if isinstance(result, (list, set)): + self._select_items_by_action(result) + + if isinstance(result, dict): + self._select_items_by_action( + result["objectNames"], result["options"] + ) + + def _select_items_by_action(self, object_names, options=None): + """Select view items by the result of action + + Args: + object_names (list or set): A list/set of container object name + options (dict): GUI operation options. + + Returns: + None + + """ + options = options or dict() + + if options.get("clear", True): + self.clearSelection() + + object_names = set(object_names) + if ( + self._hierarchy_view + and not self._selected.issuperset(object_names) + ): + # If any container not in current cherry-picked view, update + # view before selecting them. + self._selected.update(object_names) + self.data_changed.emit() + + model = self.model() + selection_model = self.selectionModel() + + select_mode = { + "select": QtCore.QItemSelectionModel.Select, + "deselect": QtCore.QItemSelectionModel.Deselect, + "toggle": QtCore.QItemSelectionModel.Toggle, + }[options.get("mode", "select")] + + for index in iter_model_rows(model, 0): + item = index.data(InventoryModel.ItemRole) + if item.get("isGroupNode"): + continue + + name = item.get("objectName") + if name in object_names: + self.scrollTo(index) # Ensure item is visible + flags = select_mode | QtCore.QItemSelectionModel.Rows + selection_model.select(index, flags) + + object_names.remove(name) + + if len(object_names) == 0: + break + + def _show_right_mouse_menu(self, pos): + """Display the menu when at the position of the item clicked""" + + globalpos = self.viewport().mapToGlobal(pos) + + if not self.selectionModel().hasSelection(): + print("No selection") + # Build menu without selection, feed an empty list + menu = self._build_item_menu() + menu.exec_(globalpos) + return + + active = self.currentIndex() # index under mouse + active = active.sibling(active.row(), 0) # get first column + + # move index under mouse + indices = self.get_indices() + if active in indices: + indices.remove(active) + + indices.append(active) + + # Extend to the sub-items + all_indices = self._extend_to_children(indices) + items = [dict(i.data(InventoryModel.ItemRole)) for i in all_indices + if i.parent().isValid()] + + if self._hierarchy_view: + # Ensure no group item + items = [n for n in items if not n.get("isGroupNode")] + + menu = self._build_item_menu(items) + menu.exec_(globalpos) + + def get_indices(self): + """Get the selected rows""" + selection_model = self.selectionModel() + return selection_model.selectedRows() + + def _extend_to_children(self, indices): + """Extend the indices to the children indices. + + Top-level indices are extended to its children indices. Sub-items + are kept as is. + + Args: + indices (list): The indices to extend. 
+ + Returns: + list: The children indices + + """ + def get_children(i): + model = i.model() + rows = model.rowCount(parent=i) + for row in range(rows): + child = model.index(row, 0, parent=i) + yield child + + subitems = set() + for i in indices: + valid_parent = i.parent().isValid() + if valid_parent and i not in subitems: + subitems.add(i) + + if self._hierarchy_view: + # Assume this is a group item + for child in get_children(i): + subitems.add(child) + else: + # is top level item + for child in get_children(i): + subitems.add(child) + + return list(subitems) + + def _show_version_dialog(self, items): + """Create a dialog with the available versions for the selected file + + Args: + items (list): list of items to run the "set_version" for + + Returns: + None + """ + + active = items[-1] + + project_name = self._controller.get_current_project_name() + # Get available versions for active representation + repre_doc = get_representation_by_id( + project_name, + active["representation"], + fields=["parent"] + ) + + repre_version_doc = get_version_by_id( + project_name, + repre_doc["parent"], + fields=["parent"] + ) + + version_docs = list(get_versions( + project_name, + subset_ids=[repre_version_doc["parent"]], + hero=True + )) + hero_version = None + standard_versions = [] + for version_doc in version_docs: + if version_doc["type"] == "hero_version": + hero_version = version_doc + else: + standard_versions.append(version_doc) + versions = list(reversed( + sorted(standard_versions, key=lambda item: item["name"]) + )) + if hero_version: + _version_id = hero_version["version_id"] + for _version in versions: + if _version["_id"] != _version_id: + continue + + hero_version["name"] = HeroVersionType( + _version["name"] + ) + hero_version["data"] = _version["data"] + break + + # Get index among the listed versions + current_item = None + current_version = active["version"] + if isinstance(current_version, HeroVersionType): + current_item = hero_version + else: + for version in versions: + if version["name"] == current_version: + current_item = version + break + + all_versions = [] + if hero_version: + all_versions.append(hero_version) + all_versions.extend(versions) + + if current_item: + index = all_versions.index(current_item) + else: + index = 0 + + versions_by_label = dict() + labels = [] + for version in all_versions: + is_hero = version["type"] == "hero_version" + label = format_version(version["name"], is_hero) + labels.append(label) + versions_by_label[label] = version["name"] + + label, state = QtWidgets.QInputDialog.getItem( + self, + "Set version..", + "Set version number to", + labels, + current=index, + editable=False + ) + if not state: + return + + if label: + version = versions_by_label[label] + self._update_containers(items, version) + + def _show_switch_dialog(self, items): + """Display Switch dialog""" + dialog = SwitchAssetDialog(self._controller, self, items) + dialog.switched.connect(self.data_changed.emit) + dialog.show() + + def _show_remove_warning_dialog(self, items): + """Prompt a dialog to inform the user the action will remove items""" + + accept = QtWidgets.QMessageBox.Ok + buttons = accept | QtWidgets.QMessageBox.Cancel + + state = QtWidgets.QMessageBox.question( + self, + "Are you sure?", + "Are you sure you want to remove {} item(s)".format(len(items)), + buttons=buttons, + defaultButton=accept + ) + + if state != accept: + return + + for item in items: + remove_container(item) + self.data_changed.emit() + + def _show_version_error_dialog(self, version, items): + 
"""Shows QMessageBox when version switch doesn't work + + Args: + version: str or int or None + """ + if version == -1: + version_str = "latest" + elif isinstance(version, HeroVersionType): + version_str = "hero" + elif isinstance(version, int): + version_str = "v{:03d}".format(version) + else: + version_str = version + + dialog = QtWidgets.QMessageBox(self) + dialog.setIcon(QtWidgets.QMessageBox.Warning) + dialog.setStyleSheet(style.load_stylesheet()) + dialog.setWindowTitle("Update failed") + + switch_btn = dialog.addButton( + "Switch Folder", + QtWidgets.QMessageBox.ActionRole + ) + switch_btn.clicked.connect(lambda: self._show_switch_dialog(items)) + + dialog.addButton(QtWidgets.QMessageBox.Cancel) + + msg = ( + "Version update to '{}' failed as representation doesn't exist." + "\n\nPlease update to version with a valid representation" + " OR \n use 'Switch Folder' button to change folder." + ).format(version_str) + dialog.setText(msg) + dialog.exec_() + + def update_all(self): + """Update all items that are currently 'outdated' in the view""" + # Get the source model through the proxy model + model = self.model().sourceModel() + + # Get all items from outdated groups + outdated_items = [] + for index in iter_model_rows(model, + column=0, + include_root=False): + item = index.data(model.ItemRole) + + if not item.get("isGroupNode"): + continue + + # Only the group nodes contain the "highest_version" data and as + # such we find only the groups and take its children. + if not model.outdated(item): + continue + + # Collect all children which we want to update + children = item.children() + outdated_items.extend(children) + + if not outdated_items: + log.info("Nothing to update.") + return + + # Trigger update to latest + self._update_containers(outdated_items, version=-1) + + def _update_containers(self, items, version): + """Helper to update items to given version (or version per item) + + If at least one item is specified this will always try to refresh + the inventory even if errors occurred on any of the items. + + Arguments: + items (list): Items to update + version (int or list): Version to set to. + This can be a list specifying a version for each item. + Like `update_container` version -1 sets the latest version + and HeroTypeVersion instances set the hero version. + + """ + + if isinstance(version, (list, tuple)): + # We allow a unique version to be specified per item. 
In that case + # the length must match with the items + assert len(items) == len(version), ( + "Number of items mismatches number of versions: " + "{} items - {} versions".format(len(items), len(version)) + ) + versions = version + else: + # Repeat the same version infinitely + versions = itertools.repeat(version) + + # Trigger update to latest + try: + for item, item_version in zip(items, versions): + try: + update_container(item, item_version) + except AssertionError: + self._show_version_error_dialog(item_version, [item]) + log.warning("Update failed", exc_info=True) + finally: + # Always update the scene inventory view, even if errors occurred + self.data_changed.emit() diff --git a/client/ayon_core/tools/sceneinventory/window.py b/client/ayon_core/tools/sceneinventory/window.py new file mode 100644 index 0000000000..f1bea26bb9 --- /dev/null +++ b/client/ayon_core/tools/sceneinventory/window.py @@ -0,0 +1,189 @@ +from qtpy import QtWidgets, QtCore, QtGui +import qtawesome + +from ayon_core import style, resources +from ayon_core.tools.utils.lib import ( + preserve_expanded_rows, + preserve_selection, +) +from ayon_core.tools.sceneinventory import SceneInventoryController + +from .delegates import VersionDelegate +from .model import ( + InventoryModel, + FilterProxyModel +) +from .view import SceneInventoryView + + +class SceneInventoryWindow(QtWidgets.QDialog): + """Scene Inventory window""" + + def __init__(self, controller=None, parent=None): + super(SceneInventoryWindow, self).__init__(parent) + + if controller is None: + controller = SceneInventoryController() + + project_name = controller.get_current_project_name() + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + self.setWindowIcon(icon) + self.setWindowTitle("Scene Inventory - {}".format(project_name)) + self.setObjectName("SceneInventory") + + self.resize(1100, 480) + + # region control + + filter_label = QtWidgets.QLabel("Search", self) + text_filter = QtWidgets.QLineEdit(self) + + outdated_only_checkbox = QtWidgets.QCheckBox( + "Filter to outdated", self + ) + outdated_only_checkbox.setToolTip("Show outdated files only") + outdated_only_checkbox.setChecked(False) + + icon = qtawesome.icon("fa.arrow-up", color="white") + update_all_button = QtWidgets.QPushButton(self) + update_all_button.setToolTip("Update all outdated to latest version") + update_all_button.setIcon(icon) + + icon = qtawesome.icon("fa.refresh", color="white") + refresh_button = QtWidgets.QPushButton(self) + refresh_button.setToolTip("Refresh") + refresh_button.setIcon(icon) + + control_layout = QtWidgets.QHBoxLayout() + control_layout.addWidget(filter_label) + control_layout.addWidget(text_filter) + control_layout.addWidget(outdated_only_checkbox) + control_layout.addWidget(update_all_button) + control_layout.addWidget(refresh_button) + + model = InventoryModel(controller) + proxy = FilterProxyModel() + proxy.setSourceModel(model) + proxy.setDynamicSortFilter(True) + proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + + view = SceneInventoryView(controller, self) + view.setModel(proxy) + + sync_enabled = controller.is_sync_server_enabled() + view.setColumnHidden(model.active_site_col, not sync_enabled) + view.setColumnHidden(model.remote_site_col, not sync_enabled) + + # set some nice default widths for the view + view.setColumnWidth(0, 250) # name + view.setColumnWidth(1, 55) # version + view.setColumnWidth(2, 55) # count + view.setColumnWidth(3, 150) # family + view.setColumnWidth(4, 120) # group + view.setColumnWidth(5, 150) # loader + + # 
apply delegates + version_delegate = VersionDelegate(controller, self) + column = model.Columns.index("version") + view.setItemDelegateForColumn(column, version_delegate) + + layout = QtWidgets.QVBoxLayout(self) + layout.addLayout(control_layout) + layout.addWidget(view) + + show_timer = QtCore.QTimer() + show_timer.setInterval(0) + show_timer.setSingleShot(False) + + # signals + show_timer.timeout.connect(self._on_show_timer) + text_filter.textChanged.connect(self._on_text_filter_change) + outdated_only_checkbox.stateChanged.connect( + self._on_outdated_state_change + ) + view.hierarchy_view_changed.connect( + self._on_hierarchy_view_change + ) + view.data_changed.connect(self._on_refresh_request) + refresh_button.clicked.connect(self._on_refresh_request) + update_all_button.clicked.connect(self._on_update_all) + + self._show_timer = show_timer + self._show_counter = 0 + self._controller = controller + self._update_all_button = update_all_button + self._outdated_only_checkbox = outdated_only_checkbox + self._view = view + self._model = model + self._proxy = proxy + self._version_delegate = version_delegate + + self._first_show = True + self._first_refresh = True + + def showEvent(self, event): + super(SceneInventoryWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) + + self._show_counter = 0 + self._show_timer.start() + + def keyPressEvent(self, event): + """Custom keyPressEvent. + + Override keyPressEvent to do nothing so that Maya's panels won't + take focus when pressing "SHIFT" whilst mouse is over viewport or + outliner. This way users don't accidentally perform Maya commands + whilst trying to name an instance. + + """ + + def _on_refresh_request(self): + """Signal callback to trigger 'refresh' without any arguments.""" + + self.refresh() + + def refresh(self, containers=None): + self._first_refresh = False + self._controller.reset() + with preserve_expanded_rows( + tree_view=self._view, + role=self._model.UniqueRole + ): + with preserve_selection( + tree_view=self._view, + role=self._model.UniqueRole, + current_index=False + ): + kwargs = {"containers": containers} + # TODO do not touch view's inner attribute + if self._view._hierarchy_view: + kwargs["selected"] = self._view._selected + self._model.refresh(**kwargs) + + def _on_show_timer(self): + if self._show_counter < 3: + self._show_counter += 1 + return + self._show_timer.stop() + self.refresh() + + def _on_hierarchy_view_change(self, enabled): + self._proxy.set_hierarchy_view(enabled) + self._model.set_hierarchy_view(enabled) + + def _on_text_filter_change(self, text_filter): + if hasattr(self._proxy, "setFilterRegExp"): + self._proxy.setFilterRegExp(text_filter) + else: + self._proxy.setFilterRegularExpression(text_filter) + + def _on_outdated_state_change(self): + self._proxy.set_filter_outdated( + self._outdated_only_checkbox.isChecked() + ) + + def _on_update_all(self): + self._view.update_all() diff --git a/openpype/tests/__init__.py b/client/ayon_core/tools/stdout_broker/__init__.py similarity index 100% rename from openpype/tests/__init__.py rename to client/ayon_core/tools/stdout_broker/__init__.py diff --git a/client/ayon_core/tools/stdout_broker/app.py b/client/ayon_core/tools/stdout_broker/app.py new file mode 100644 index 0000000000..15447b608b --- /dev/null +++ b/client/ayon_core/tools/stdout_broker/app.py @@ -0,0 +1,173 @@ +import os +import sys +import threading +import collections +import websocket +import json +from datetime import 
datetime + +from ayon_core.lib import Logger +from openpype_modules.webserver.host_console_listener import MsgAction + +log = Logger.get_logger(__name__) + + +class StdOutBroker: + """ + Application showing console in Services tray for non python hosts + instead of cmd window. + """ + MAX_LINES = 10000 + TIMER_TIMEOUT = 0.200 + + def __init__(self, host_name): + self.host_name = host_name + self.webserver_client = None + + self.original_stdout_write = None + self.original_stderr_write = None + self.log_queue = collections.deque() + + date_str = datetime.now().strftime("%d%m%Y%H%M%S") + self.host_id = "{}_{}".format(self.host_name, date_str) + + self._std_available = False + self._is_running = False + self._catch_std_outputs() + + self._timer = None + + @property + def send_to_tray(self): + """Checks if connected to tray and have access to logs.""" + return self.webserver_client and self._std_available + + def start(self): + """Start app, create and start timer""" + if not self._std_available or self._is_running: + return + self._is_running = True + self._create_timer() + self._connect_to_tray() + + def stop(self): + """Disconnect from Tray, process last logs""" + if not self._is_running: + return + self._is_running = False + self._process_queue() + self._disconnect_from_tray() + + def host_connected(self): + """Send to Tray console that host is ready - icon change. """ + log.info("Host {} connected".format(self.host_id)) + + payload = { + "host": self.host_id, + "action": MsgAction.INITIALIZED, + "text": "Integration with {}".format( + str.capitalize(self.host_name)) + } + self._send(payload) + + def _create_timer(self): + timer = threading.Timer(self.TIMER_TIMEOUT, self._timer_callback) + timer.start() + self._timer = timer + + def _timer_callback(self): + if not self._is_running: + return + self._process_queue() + self._create_timer() + + def _connect_to_tray(self): + """Connect to Tray webserver to pass console output. """ + if not self._std_available: # not content to log + return + ws = websocket.WebSocket() + webserver_url = os.environ.get("AYON_WEBSERVER_URL") + + if not webserver_url: + print("Unknown webserver url, cannot connect to pass log") + return + + webserver_url = webserver_url.replace("http", "ws") + ws.connect("{}/ws/host_listener".format(webserver_url)) + self.webserver_client = ws + + payload = { + "host": self.host_id, + "action": MsgAction.CONNECTING, + "text": "Integration with {}".format( + str.capitalize(self.host_name)) + } + self._send(payload) + + def _disconnect_from_tray(self): + """Send to Tray that host is closing - remove from Services. 
""" + print("Host {} closing".format(self.host_name)) + if not self.webserver_client: + return + + payload = { + "host": self.host_id, + "action": MsgAction.CLOSE, + "text": "Integration with {}".format( + str.capitalize(self.host_name)) + } + + self._send(payload) + self.webserver_client.close() + + def _catch_std_outputs(self): + """Redirects standard out and error to own functions""" + if sys.stdout: + self.original_stdout_write = sys.stdout.write + sys.stdout.write = self._my_stdout_write + self._std_available = True + + if sys.stderr: + self.original_stderr_write = sys.stderr.write + sys.stderr.write = self._my_stderr_write + self._std_available = True + + def _my_stdout_write(self, text): + """Appends outputted text to queue, keep writing to original stdout""" + if self.original_stdout_write is not None: + self.original_stdout_write(text) + if self.send_to_tray: + self.log_queue.append(text) + + def _my_stderr_write(self, text): + """Appends outputted text to queue, keep writing to original stderr""" + if self.original_stderr_write is not None: + self.original_stderr_write(text) + if self.send_to_tray: + self.log_queue.append(text) + + def _process_queue(self): + """Sends lines and purges queue""" + if not self.send_to_tray: + return + + lines = tuple(self.log_queue) + self.log_queue.clear() + if lines: + payload = { + "host": self.host_id, + "action": MsgAction.ADD, + "text": "\n".join(lines) + } + + self._send(payload) + + def _send(self, payload): + """Worker method to send to existing websocket connection.""" + if not self.send_to_tray: + return + + try: + self.webserver_client.send(json.dumps(payload)) + except ConnectionResetError: # Tray closed + self._connect_to_tray() diff --git a/client/ayon_core/tools/stdout_broker/window.py b/client/ayon_core/tools/stdout_broker/window.py new file mode 100644 index 0000000000..0d5bac5732 --- /dev/null +++ b/client/ayon_core/tools/stdout_broker/window.py @@ -0,0 +1,105 @@ +import re +import collections + +from qtpy import QtWidgets + +from ayon_core import style + + +class ConsoleDialog(QtWidgets.QDialog): + """Qt dialog to show stdout instead of unwieldy cmd window""" + WIDTH = 720 + HEIGHT = 450 + MAX_LINES = 10000 + + sdict = { + r">>> ": + ' >>> ', + r"!!!(?!\sCRI|\sERR)": + ' !!! ', + r"\-\-\- ": + ' --- ', + r"\*\*\*(?!\sWRN)": + ' *** ', + r"\*\*\* WRN": + ' *** WRN', + r" \- ": + ' - ', + r"\[ ": + '[', + r"\]": + ']', + r"{": + '{', + r"}": + r"}", + r"\(": + '(', + r"\)": + r")", + r"^\.\.\. ": + ' ... ', + r"!!! ERR: ": + ' !!! ERR: ', + r"!!! CRI: ": + ' !!! CRI: ', + r"(?i)failed": + ' FAILED ', + r"(?i)error": + ' ERROR ' + } + + def __init__(self, text, parent=None): + super(ConsoleDialog, self).__init__(parent) + layout = QtWidgets.QHBoxLayout(parent) + + plain_text = QtWidgets.QPlainTextEdit(self) + plain_text.setReadOnly(True) + plain_text.resize(self.WIDTH, self.HEIGHT) + plain_text.maximumBlockCount = self.MAX_LINES + + while text: + plain_text.appendPlainText(text.popleft().strip()) + + layout.addWidget(plain_text) + + self.setWindowTitle("Console output") + + self.plain_text = plain_text + + self.setStyleSheet(style.load_stylesheet()) + + self.resize(self.WIDTH, self.HEIGHT) + + def append_text(self, new_text): + if isinstance(new_text, str): + new_text = collections.deque(new_text.split("\n")) + while new_text: + text = new_text.popleft() + if text: + self.plain_text.appendHtml(self.color(text)) + + def _multiple_replace(self, text, adict): + """Replace multiple tokens defined in dict. 
+ + Find and replace all occurrences of strings defined in dict is + supplied string. + + Args: + text (str): string to be searched + adict (dict): dictionary with `{'search': 'replace'}` + + Returns: + str: string with replaced tokens + + """ + for r, v in adict.items(): + text = re.sub(r, v, text) + + return text + + def color(self, message): + """Color message with html tags. """ + message = self._multiple_replace(message, self.sdict) + + return message diff --git a/openpype/tools/subsetmanager/README.md b/client/ayon_core/tools/subsetmanager/README.md similarity index 100% rename from openpype/tools/subsetmanager/README.md rename to client/ayon_core/tools/subsetmanager/README.md diff --git a/openpype/tools/subsetmanager/__init__.py b/client/ayon_core/tools/subsetmanager/__init__.py similarity index 100% rename from openpype/tools/subsetmanager/__init__.py rename to client/ayon_core/tools/subsetmanager/__init__.py diff --git a/client/ayon_core/tools/subsetmanager/model.py b/client/ayon_core/tools/subsetmanager/model.py new file mode 100644 index 0000000000..638d096918 --- /dev/null +++ b/client/ayon_core/tools/subsetmanager/model.py @@ -0,0 +1,52 @@ +import uuid + +from qtpy import QtCore, QtGui + +from ayon_core.pipeline import registered_host + +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 + + +class InstanceModel(QtGui.QStandardItemModel): + def __init__(self, *args, **kwargs): + super(InstanceModel, self).__init__(*args, **kwargs) + self._instances_by_item_id = {} + + def get_instance_by_id(self, item_id): + return self._instances_by_item_id.get(item_id) + + def refresh(self): + self.clear() + + self._instances_by_item_id = {} + + instances = None + host = registered_host() + list_instances = getattr(host, "list_instances", None) + if list_instances: + instances = list_instances() + + if not instances: + return + + items = [] + for instance_data in instances: + item_id = str(uuid.uuid4()) + label = instance_data.get("label") or instance_data["subset"] + item = QtGui.QStandardItem(label) + item.setEnabled(True) + item.setEditable(False) + item.setData(item_id, ITEM_ID_ROLE) + items.append(item) + self._instances_by_item_id[item_id] = instance_data + + if items: + self.invisibleRootItem().appendRows(items) + + def headerData(self, section, orientation, role): + if role == QtCore.Qt.DisplayRole and section == 0: + return "Instance" + + return super(InstanceModel, self).headerData( + section, orientation, role + ) diff --git a/openpype/tools/subsetmanager/widgets.py b/client/ayon_core/tools/subsetmanager/widgets.py similarity index 100% rename from openpype/tools/subsetmanager/widgets.py rename to client/ayon_core/tools/subsetmanager/widgets.py diff --git a/client/ayon_core/tools/subsetmanager/window.py b/client/ayon_core/tools/subsetmanager/window.py new file mode 100644 index 0000000000..97dab1adb2 --- /dev/null +++ b/client/ayon_core/tools/subsetmanager/window.py @@ -0,0 +1,218 @@ +import os +import sys + +from qtpy import QtWidgets, QtCore +import qtawesome + +from ayon_core import style +from ayon_core.pipeline import registered_host +from ayon_core.tools.utils import PlaceholderLineEdit +from ayon_core.tools.utils.lib import ( + iter_model_rows, + qt_app_context +) +from ayon_core.tools.utils.models import RecursiveSortFilterProxyModel +from .model import ( + InstanceModel, + ITEM_ID_ROLE +) +from .widgets import InstanceDetail + + +module = sys.modules[__name__] +module.window = None + + +class SubsetManagerWindow(QtWidgets.QDialog): + def __init__(self, parent=None): + 
super(SubsetManagerWindow, self).__init__(parent=parent) + self.setWindowTitle("Subset Manager 0.1") + self.setObjectName("SubsetManager") + if not parent: + self.setWindowFlags( + self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint + ) + + self.resize(780, 430) + + # Trigger refresh on first called show + self._first_show = True + + left_side_widget = QtWidgets.QWidget(self) + + # Header part + header_widget = QtWidgets.QWidget(left_side_widget) + + # Filter input + filter_input = PlaceholderLineEdit(header_widget) + filter_input.setPlaceholderText("Filter subsets..") + + # Refresh button + icon = qtawesome.icon("fa.refresh", color="white") + refresh_btn = QtWidgets.QPushButton(header_widget) + refresh_btn.setIcon(icon) + + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + header_layout.addWidget(filter_input) + header_layout.addWidget(refresh_btn) + + # Instances view + view = QtWidgets.QTreeView(left_side_widget) + view.setIndentation(0) + view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + model = InstanceModel(view) + proxy = RecursiveSortFilterProxyModel() + proxy.setSourceModel(model) + proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + + view.setModel(proxy) + + left_side_layout = QtWidgets.QVBoxLayout(left_side_widget) + left_side_layout.setContentsMargins(0, 0, 0, 0) + left_side_layout.addWidget(header_widget) + left_side_layout.addWidget(view) + + details_widget = InstanceDetail(self) + + layout = QtWidgets.QHBoxLayout(self) + layout.addWidget(left_side_widget, 0) + layout.addWidget(details_widget, 1) + + filter_input.textChanged.connect(proxy.setFilterFixedString) + refresh_btn.clicked.connect(self._on_refresh_clicked) + view.clicked.connect(self._on_activated) + view.customContextMenuRequested.connect(self.on_context_menu) + details_widget.save_triggered.connect(self._on_save) + + self._model = model + self._proxy = proxy + self._view = view + self._details_widget = details_widget + self._refresh_btn = refresh_btn + + def _on_refresh_clicked(self): + self.refresh() + + def _on_activated(self, index): + container = None + item_id = None + if index.isValid(): + item_id = index.data(ITEM_ID_ROLE) + container = self._model.get_instance_by_id(item_id) + + self._details_widget.set_details(container, item_id) + + def _on_save(self): + host = registered_host() + if not hasattr(host, "save_instances"): + print("BUG: Host does not have \"save_instances\" method") + return + + current_index = self._view.selectionModel().currentIndex() + if not current_index.isValid(): + return + + item_id = current_index.data(ITEM_ID_ROLE) + if item_id != self._details_widget.item_id(): + return + + item_data = self._details_widget.instance_data_from_text() + new_instances = [] + for index in iter_model_rows(self._model, 0): + _item_id = index.data(ITEM_ID_ROLE) + if _item_id == item_id: + instance_data = item_data + else: + instance_data = self._model.get_instance_by_id(item_id) + new_instances.append(instance_data) + + host.save_instances(new_instances) + + def on_context_menu(self, point): + point_index = self._view.indexAt(point) + item_id = point_index.data(ITEM_ID_ROLE) + instance_data = self._model.get_instance_by_id(item_id) + if instance_data is None: + return + + # Prepare menu + menu = QtWidgets.QMenu(self) + actions = [] + host = registered_host() + if hasattr(host, "remove_instance"): + action = QtWidgets.QAction("Remove instance", menu) + action.setData(host.remove_instance) + actions.append(action) + + if hasattr(host, 
"select_instance"): + action = QtWidgets.QAction("Select instance", menu) + action.setData(host.select_instance) + actions.append(action) + + if not actions: + actions.append(QtWidgets.QAction("* Nothing to do", menu)) + + for action in actions: + menu.addAction(action) + + # Show menu under mouse + global_point = self._view.mapToGlobal(point) + action = menu.exec_(global_point) + if not action or not action.data(): + return + + # Process action + # TODO catch exceptions + function = action.data() + function(instance_data) + + # Reset modified data + self.refresh() + + def refresh(self): + self._details_widget.set_details(None, None) + self._model.refresh() + + host = registered_host() + dev_mode = os.environ.get("AVALON_DEVELOP_MODE") or "" + editable = False + if dev_mode.lower() in ("1", "yes", "true", "on"): + editable = hasattr(host, "save_instances") + self._details_widget.set_editable(editable) + + def showEvent(self, *args, **kwargs): + super(SubsetManagerWindow, self).showEvent(*args, **kwargs) + if self._first_show: + self._first_show = False + self.setStyleSheet(style.load_stylesheet()) + self.refresh() + + +def show(root=None, debug=False, parent=None): + """Display Scene Inventory GUI + + Arguments: + debug (bool, optional): Run in debug-mode, + defaults to False + parent (QtCore.QObject, optional): When provided parent the interface + to this QObject. + + """ + + try: + module.window.close() + del module.window + except (RuntimeError, AttributeError): + pass + + with qt_app_context(): + window = SubsetManagerWindow(parent) + window.show() + + module.window = window + + # Pull window to the front. + module.window.raise_() + module.window.activateWindow() diff --git a/client/ayon_core/tools/texture_copy/app.py b/client/ayon_core/tools/texture_copy/app.py new file mode 100644 index 0000000000..064f4e5577 --- /dev/null +++ b/client/ayon_core/tools/texture_copy/app.py @@ -0,0 +1,146 @@ +import os +import re +import click + +import speedcopy + +from ayon_core.client import get_project, get_asset_by_name +from ayon_core.lib import Terminal +from ayon_core.pipeline import legacy_io, Anatomy + + +t = Terminal() + +texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga', + '.psd', '.dpx', '.hdr', '.hdri', '.exr', '.sxr', '.psb'] + + +class TextureCopy: + + def __init__(self): + if not legacy_io.Session: + legacy_io.install() + + def _get_textures(self, path): + textures = [] + for dir, subdir, files in os.walk(path): + textures.extend( + os.path.join(dir, x) for x in files + if os.path.splitext(x)[1].lower() in texture_extensions) + return textures + + def _get_destination_path(self, asset, project): + project_name = project["name"] + hierarchy = "" + parents = asset['data']['parents'] + if parents and len(parents) > 0: + hierarchy = os.path.join(*parents) + + template_data = { + "project": { + "name": project_name, + "code": project['data']['code'] + }, + "asset": asset['name'], + "family": 'texture', + "subset": 'Main', + "hierarchy": hierarchy + } + anatomy = Anatomy(project_name) + template_obj = anatomy.templates_obj["texture"]["path"] + return template_obj.format_strict(template_data) + + def _get_version(self, path): + versions = [0] + dirs = [f.path for f in os.scandir(path) if f.is_dir()] + for d in dirs: + ver = re.search(r'^v(\d+)$', + os.path.basename(d), + flags=re.IGNORECASE) + if ver is not None: + versions.append(int(ver.group(1))) + + return max(versions) + 1 + + def _copy_textures(self, textures, destination): + for tex in textures: + dst = 
os.path.join(destination, + os.path.basename(tex)) + t.echo(" - Copy {} -> {}".format(tex, dst)) + try: + speedcopy.copyfile(tex, dst) + except Exception as e: + t.echo("!!! Copying failed") + t.echo("!!! {}".format(e)) + exit(1) + + def process(self, asset_name, project_name, path): + """ + Process all textures found in path and copy them to asset under + project. + """ + + t.echo(">>> Looking for textures ...") + textures = self._get_textures(path) + if len(textures) < 1: + t.echo("!!! no textures found.") + exit(1) + else: + t.echo(">>> Found {} textures ...".format(len(textures))) + + project = get_project(project_name) + if not project: + t.echo("!!! Project name [ {} ] not found.".format(project_name)) + exit(1) + + asset = get_asset_by_name(project_name, asset_name) + if not asset: + t.echo("!!! Asset [ {} ] not found in project".format(asset_name)) + exit(1) + t.echo((">>> Project [ {} ] and " + "asset [ {} ] seems to be OK ...").format(project['name'], + asset['name'])) + + dst_path = self._get_destination_path(asset, project) + t.echo("--- Using [ {} ] as destination path".format(dst_path)) + if not os.path.exists(dst_path): + try: + os.makedirs(dst_path) + except IOError as e: + t.echo("!!! Unable to create destination directory") + t.echo("!!! {}".format(e)) + exit(1) + version = '%02d' % self._get_version(dst_path) + t.echo("--- Using version [ {} ]".format(version)) + dst_path = os.path.join(dst_path, "v{}".format(version)) + t.echo("--- Final destination path [ {} ]".format(dst_path)) + try: + os.makedirs(dst_path) + except FileExistsError: + t.echo("!!! Somethings wrong, version directory already exists") + exit(1) + except IOError as e: + t.echo("!!! Cannot create version directory") + t.echo("!!! {}".format(e)) + exit(1) + + t.echo(">>> copying textures ...") + self._copy_textures(textures, dst_path) + t.echo(">>> done.") + t.echo("<<< terminating ...") + + +@click.command() +@click.option('--asset', required=True) +@click.option('--project', required=True) +@click.option('--path', required=True) +def texture_copy(asset, project, path): + t.echo("*** Running Texture tool ***") + t.echo(">>> Initializing avalon session ...") + os.environ["AVALON_PROJECT"] = project + os.environ["AVALON_ASSET"] = asset + TextureCopy().process(asset, project, path) + + +if __name__ == '__main__': + texture_copy() diff --git a/client/ayon_core/tools/tray/__init__.py b/client/ayon_core/tools/tray/__init__.py new file mode 100644 index 0000000000..f5e558e0bb --- /dev/null +++ b/client/ayon_core/tools/tray/__init__.py @@ -0,0 +1,5 @@ +from .tray import main + +__all__ = ( + "main", +) diff --git a/client/ayon_core/tools/tray/__main__.py b/client/ayon_core/tools/tray/__main__.py new file mode 100644 index 0000000000..51ae93ae28 --- /dev/null +++ b/client/ayon_core/tools/tray/__main__.py @@ -0,0 +1,7 @@ +try: + from . 
import tray +except ImportError: + import tray + + +tray.main() diff --git a/openpype/tools/tray/images/gifts.png b/client/ayon_core/tools/tray/images/gifts.png similarity index 100% rename from openpype/tools/tray/images/gifts.png rename to client/ayon_core/tools/tray/images/gifts.png diff --git a/client/ayon_core/tools/tray/info_widget.py b/client/ayon_core/tools/tray/info_widget.py new file mode 100644 index 0000000000..597a4fc2c5 --- /dev/null +++ b/client/ayon_core/tools/tray/info_widget.py @@ -0,0 +1,486 @@ +import os +import json +import collections + +import ayon_api +from qtpy import QtCore, QtGui, QtWidgets + +from ayon_core import style +import ayon_core.version +from ayon_core import resources +from ayon_core.lib import get_ayon_launcher_args +from ayon_core.lib.ayon_info import ( + get_all_current_info, + get_workstation_info, + extract_ayon_info_to_file, +) + +IS_MAIN_ROLE = QtCore.Qt.UserRole + + +class EnvironmentValueDelegate(QtWidgets.QStyledItemDelegate): + def createEditor(self, parent, option, index): + edit_widget = QtWidgets.QLineEdit(parent) + edit_widget.setReadOnly(True) + return edit_widget + + +class EnvironmentsView(QtWidgets.QTreeView): + def __init__(self, parent=None): + super(EnvironmentsView, self).__init__(parent) + + self._scroll_enabled = False + + model = QtGui.QStandardItemModel() + + env = os.environ.copy() + keys = [] + values = [] + for key in sorted(env.keys()): + key_item = QtGui.QStandardItem(key) + key_item.setFlags( + QtCore.Qt.ItemIsSelectable + | QtCore.Qt.ItemIsEnabled + ) + key_item.setData(True, IS_MAIN_ROLE) + keys.append(key_item) + + value = env[key] + value_item = QtGui.QStandardItem(value) + value_item.setData(True, IS_MAIN_ROLE) + values.append(value_item) + + value_parts = [ + part + for part in value.split(os.pathsep) if part + ] + if len(value_parts) < 2: + continue + + sub_parts = [] + for part_value in value_parts: + part_item = QtGui.QStandardItem(part_value) + part_item.setData(False, IS_MAIN_ROLE) + sub_parts.append(part_item) + key_item.appendRows(sub_parts) + + model.appendColumn(keys) + model.appendColumn(values) + model.setHorizontalHeaderLabels(["Key", "Value"]) + + self.setModel(model) + # self.setIndentation(0) + delegate = EnvironmentValueDelegate(self) + self.setItemDelegate(delegate) + self.header().setSectionResizeMode( + 0, QtWidgets.QHeaderView.ResizeToContents + ) + self.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection) + + def get_selection_as_dict(self): + indexes = self.selectionModel().selectedIndexes() + + main_mapping = collections.defaultdict(dict) + for index in indexes: + is_main = index.data(IS_MAIN_ROLE) + if not is_main: + continue + row = index.row() + value = index.data(QtCore.Qt.DisplayRole) + if index.column() == 0: + key = "key" + else: + key = "value" + main_mapping[row][key] = value + + result = {} + for item in main_mapping.values(): + result[item["key"]] = item["value"] + return result + + def keyPressEvent(self, event): + if ( + event.type() == QtGui.QKeyEvent.KeyPress + and event.matches(QtGui.QKeySequence.Copy) + ): + selected_data = self.get_selection_as_dict() + selected_str = json.dumps(selected_data, indent=4) + + mime_data = QtCore.QMimeData() + mime_data.setText(selected_str) + QtWidgets.QApplication.instance().clipboard().setMimeData( + mime_data + ) + event.accept() + else: + return super(EnvironmentsView, self).keyPressEvent(event) + + def set_scroll_enabled(self, value): + self._scroll_enabled = value + + def wheelEvent(self, event): + if not self._scroll_enabled: + 
event.ignore() + return + return super(EnvironmentsView, self).wheelEvent(event) + + +class ClickableWidget(QtWidgets.QWidget): + clicked = QtCore.Signal() + + def mouseReleaseEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self.clicked.emit() + super(ClickableWidget, self).mouseReleaseEvent(event) + + +class CollapsibleWidget(QtWidgets.QWidget): + def __init__(self, label, parent): + super(CollapsibleWidget, self).__init__(parent) + + self.content_widget = None + + top_part = ClickableWidget(parent=self) + + button_size = QtCore.QSize(5, 5) + button_toggle = QtWidgets.QToolButton(parent=top_part) + button_toggle.setIconSize(button_size) + button_toggle.setArrowType(QtCore.Qt.RightArrow) + button_toggle.setCheckable(True) + button_toggle.setChecked(False) + + label_widget = QtWidgets.QLabel(label, parent=top_part) + + top_part_layout = QtWidgets.QHBoxLayout(top_part) + top_part_layout.setContentsMargins(0, 0, 0, 5) + top_part_layout.addWidget(button_toggle) + top_part_layout.addWidget(label_widget) + top_part_layout.addStretch(1) + + label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + self.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + self.button_toggle = button_toggle + self.label_widget = label_widget + + top_part.clicked.connect(self._top_part_clicked) + self.button_toggle.clicked.connect(self._btn_clicked) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + main_layout.setAlignment(QtCore.Qt.AlignTop) + main_layout.addWidget(top_part) + + self.main_layout = main_layout + + def set_content_widget(self, content_widget): + content_widget.setVisible(self.button_toggle.isChecked()) + self.main_layout.addWidget(content_widget) + self.content_widget = content_widget + + def _btn_clicked(self): + self.toggle_content(self.button_toggle.isChecked()) + + def _top_part_clicked(self): + self.toggle_content() + + def toggle_content(self, *args): + if len(args) > 0: + checked = args[0] + else: + checked = not self.button_toggle.isChecked() + arrow_type = QtCore.Qt.RightArrow + if checked: + arrow_type = QtCore.Qt.DownArrow + self.button_toggle.setChecked(checked) + self.button_toggle.setArrowType(arrow_type) + if self.content_widget: + self.content_widget.setVisible(checked) + self.parent().updateGeometry() + + def resizeEvent(self, event): + super(CollapsibleWidget, self).resizeEvent(event) + if self.content_widget: + self.content_widget.updateGeometry() + + +class InfoWidget(QtWidgets.QWidget): + _resized = QtCore.Signal() + + def __init__(self, parent=None): + super(InfoWidget, self).__init__(parent) + + self._scroll_at_bottom = False + + self.setStyleSheet(style.load_stylesheet()) + + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + self.setWindowIcon(icon) + self.setWindowTitle("AYON info") + + scroll_area = QtWidgets.QScrollArea(self) + info_widget = InfoSubWidget(scroll_area) + + scroll_area.setWidget(info_widget) + scroll_area.setWidgetResizable(True) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(scroll_area, 1) + main_layout.addWidget(self._create_btns_section(), 0) + + scroll_area.verticalScrollBar().valueChanged.connect( + self._on_area_scroll + ) + self._resized.connect(self._on_resize) + self.resize(740, 540) + + self.scroll_area = scroll_area + self.info_widget = info_widget + + def _on_area_scroll(self, value): + vertical_bar = self.scroll_area.verticalScrollBar() + self._scroll_at_bottom = vertical_bar.maximum() == vertical_bar.value() + 
self.info_widget.set_scroll_enabled(self._scroll_at_bottom) + + def _on_resize(self): + if not self._scroll_at_bottom: + return + vertical_bar = self.scroll_area.verticalScrollBar() + vertical_bar.setValue(vertical_bar.maximum()) + + def resizeEvent(self, event): + super(InfoWidget, self).resizeEvent(event) + self._resized.emit() + self.info_widget.set_content_height( + self.scroll_area.height() + ) + + def showEvent(self, event): + super(InfoWidget, self).showEvent(event) + self.info_widget.set_content_height( + self.scroll_area.height() + ) + + def _create_btns_section(self): + btns_widget = QtWidgets.QWidget(self) + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + + copy_to_clipboard_btn = QtWidgets.QPushButton( + "Copy to clipboard", btns_widget + ) + export_to_file_btn = QtWidgets.QPushButton( + "Export", btns_widget + ) + btns_layout.addWidget(QtWidgets.QWidget(btns_widget), 1) + btns_layout.addWidget(copy_to_clipboard_btn) + btns_layout.addWidget(export_to_file_btn) + + copy_to_clipboard_btn.clicked.connect(self._on_copy_to_clipboard) + export_to_file_btn.clicked.connect(self._on_export_to_file) + + return btns_widget + + def _on_export_to_file(self): + dst_dir_path = QtWidgets.QFileDialog.getExistingDirectory( + self, + "Choose directory", + os.path.expanduser("~"), + QtWidgets.QFileDialog.ShowDirsOnly + ) + if not dst_dir_path or not os.path.exists(dst_dir_path): + return + + filepath = extract_ayon_info_to_file(dst_dir_path) + title = "Extraction done" + message = "Extraction is done. Destination filepath is \"{}\"".format( + filepath.replace("\\", "/") + ) + dialog = QtWidgets.QMessageBox(self) + dialog.setIcon(QtWidgets.QMessageBox.NoIcon) + dialog.setWindowTitle(title) + dialog.setText(message) + dialog.exec_() + + def _on_copy_to_clipboard(self): + all_data = get_all_current_info() + all_data_str = json.dumps(all_data, indent=4) + + mime_data = QtCore.QMimeData() + mime_data.setText(all_data_str) + QtWidgets.QApplication.instance().clipboard().setMimeData( + mime_data + ) + + +class InfoSubWidget(QtWidgets.QWidget): + not_applicable = "N/A" + + def __init__(self, parent=None): + super(InfoSubWidget, self).__init__(parent) + + self.env_view = None + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setAlignment(QtCore.Qt.AlignTop) + main_layout.addWidget(self._create_info_widget(), 0) + main_layout.addWidget(self._create_separator(), 0) + main_layout.addWidget(self._create_workstation_widget(), 0) + main_layout.addWidget(self._create_separator(), 0) + main_layout.addWidget(self._create_environ_widget(), 1) + + def set_content_height(self, height): + if self.env_view: + self.env_view.setMinimumHeight(height) + + def set_scroll_enabled(self, value): + if self.env_view: + self.env_view.set_scroll_enabled(value) + + def _create_separator(self): + separator_widget = QtWidgets.QWidget(self) + separator_widget.setObjectName("Separator") + separator_widget.setMinimumHeight(2) + separator_widget.setMaximumHeight(2) + return separator_widget + + def _create_workstation_widget(self): + key_label_mapping = { + "system_name": "System:", + "local_id": "Local ID:", + "username": "Username:", + "hostname": "Hostname:", + "hostip": "Host IP:" + } + keys_order = [ + "system_name", + "local_id", + "username", + "hostname", + "hostip" + ] + workstation_info = get_workstation_info() + for key in workstation_info.keys(): + if key not in keys_order: + keys_order.append(key) + + wokstation_info_widget 
= CollapsibleWidget("Workstation info", self) + + info_widget = QtWidgets.QWidget(self) + info_layout = QtWidgets.QGridLayout(info_widget) + # Add spacer to 3rd column + info_layout.addWidget(QtWidgets.QWidget(info_widget), 0, 2) + info_layout.setColumnStretch(2, 1) + + for key in keys_order: + if key not in workstation_info: + continue + + label = key_label_mapping.get(key, key) + value = workstation_info[key] + row = info_layout.rowCount() + info_layout.addWidget( + QtWidgets.QLabel(label), row, 0, 1, 1 + ) + value_label = QtWidgets.QLabel(value) + value_label.setTextInteractionFlags( + QtCore.Qt.TextSelectableByMouse + ) + info_layout.addWidget( + value_label, row, 1, 1, 1 + ) + + wokstation_info_widget.set_content_widget(info_widget) + wokstation_info_widget.toggle_content() + + return wokstation_info_widget + + def _create_environ_widget(self): + env_widget = CollapsibleWidget("Environments", self) + + env_view = EnvironmentsView(env_widget) + env_view.setMinimumHeight(300) + env_widget.set_content_widget(env_view) + + self.env_view = env_view + + return env_widget + + def _create_info_widget(self): + """Create widget with information about application.""" + + executable_args = get_ayon_launcher_args() + username = "N/A" + user_info = ayon_api.get_user() + if user_info: + username = user_info.get("name") or username + full_name = user_info.get("attrib", {}).get("fullName") + if full_name: + username = "{} ({})".format(full_name, username) + info_values = { + "executable": executable_args[-1], + "server_url": os.environ["AYON_SERVER_URL"], + "bundle_name": os.environ["AYON_BUNDLE_NAME"], + "username": username + } + key_label_mapping = { + "executable": "AYON Executable:", + "server_url": "AYON Server:", + "bundle_name": "AYON Bundle:", + "username": "AYON Username:" + } + # Prepare keys order + keys_order = [ + "server_url", + "bundle_name", + "username", + "executable", + ] + + for key in info_values.keys(): + if key not in keys_order: + keys_order.append(key) + + # Create widgets + info_widget = QtWidgets.QWidget(self) + info_layout = QtWidgets.QGridLayout(info_widget) + # Add spacer to 3rd column + info_layout.addWidget(QtWidgets.QWidget(info_widget), 0, 2) + info_layout.setColumnStretch(2, 1) + + title_label = QtWidgets.QLabel(info_widget) + title_label.setText("Application information") + title_label.setStyleSheet("font-weight: bold;") + info_layout.addWidget(title_label, 0, 0, 1, 2) + + for key in keys_order: + if key not in info_values: + continue + value = info_values[key] + label = key_label_mapping.get(key, key) + row = info_layout.rowCount() + info_layout.addWidget( + QtWidgets.QLabel(label), row, 0, 1, 1 + ) + value_label = QtWidgets.QLabel(value) + value_label.setTextInteractionFlags( + QtCore.Qt.TextSelectableByMouse + ) + info_layout.addWidget( + value_label, row, 1, 1, 1 + ) + + row = info_layout.rowCount() + info_layout.addWidget( + QtWidgets.QLabel("Core Addon:"), row, 0, 1, 1 + ) + value_label = QtWidgets.QLabel(ayon_core.version.__version__) + value_label.setTextInteractionFlags( + QtCore.Qt.TextSelectableByMouse + ) + info_layout.addWidget( + value_label, row, 1, 1, 1 + ) + return info_widget diff --git a/client/ayon_core/tools/tray/tray.py b/client/ayon_core/tools/tray/tray.py new file mode 100644 index 0000000000..3a70d68466 --- /dev/null +++ b/client/ayon_core/tools/tray/tray.py @@ -0,0 +1,468 @@ +import collections +import os +import sys +import atexit + +import platform + +from qtpy import QtCore, QtGui, QtWidgets + +from ayon_core import resources, style 
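Both info builders in info_widget.py above follow the same recipe: a keys_order list fixes the display order, a key-to-label mapping supplies friendly captions, unmapped keys are appended after the known ones, and every entry becomes a two-QLabel row in a QGridLayout with the value selectable by mouse. A condensed sketch of that recipe; the helper name and arguments are illustrative, not part of the module:

    from qtpy import QtCore, QtWidgets

    def fill_info_grid(layout, info, keys_order, labels):
        # Show unmapped keys too, after the known ones
        for key in info:
            if key not in keys_order:
                keys_order.append(key)

        for key in keys_order:
            if key not in info:
                continue
            row = layout.rowCount()
            layout.addWidget(QtWidgets.QLabel(labels.get(key, key)), row, 0)
            value_label = QtWidgets.QLabel(str(info[key]))
            # Values stay copyable, as in the widgets above
            value_label.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
            layout.addWidget(value_label, row, 1)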
+from ayon_core.lib import ( + Logger, + get_ayon_launcher_args, + run_detached_process, +) +from ayon_core.lib import is_running_from_build +from ayon_core.addon import ( + ITrayAction, + ITrayService, + TrayAddonsManager, +) +from ayon_core.settings import get_system_settings +from ayon_core.tools.utils import ( + WrappedCallbackItem, + get_ayon_qt_app, +) + +from .info_widget import InfoWidget + + +# TODO PixmapLabel should be moved to 'utils' in other future PR so should be +# imported from there +class PixmapLabel(QtWidgets.QLabel): + """Label resizing image to height of font.""" + def __init__(self, pixmap, parent): + super(PixmapLabel, self).__init__(parent) + self._empty_pixmap = QtGui.QPixmap(0, 0) + self._source_pixmap = pixmap + + def set_source_pixmap(self, pixmap): + """Change source image.""" + self._source_pixmap = pixmap + self._set_resized_pix() + + def _get_pix_size(self): + size = self.fontMetrics().height() * 3 + return size, size + + def _set_resized_pix(self): + if self._source_pixmap is None: + self.setPixmap(self._empty_pixmap) + return + width, height = self._get_pix_size() + self.setPixmap( + self._source_pixmap.scaled( + width, + height, + QtCore.Qt.KeepAspectRatio, + QtCore.Qt.SmoothTransformation + ) + ) + + def resizeEvent(self, event): + self._set_resized_pix() + super(PixmapLabel, self).resizeEvent(event) + + +class TrayManager: + """Cares about context of application. + + Load submenus, actions, separators and addons into tray's context. + """ + def __init__(self, tray_widget, main_window): + self.tray_widget = tray_widget + self.main_window = main_window + self._info_widget = None + self._restart_action = None + + self.log = Logger.get_logger(self.__class__.__name__) + + system_settings = get_system_settings() + + version_check_interval = system_settings["general"].get( + "version_check_interval" + ) + if version_check_interval is None: + version_check_interval = 5 + self._version_check_interval = version_check_interval * 60 * 1000 + + self._addons_manager = TrayAddonsManager() + + self.errors = [] + + self.main_thread_timer = None + self._main_thread_callbacks = collections.deque() + self._execution_in_progress = None + + @property + def doubleclick_callback(self): + """Double-click callback for Tray icon.""" + callback_name = self._addons_manager.doubleclick_callback + return self._addons_manager.doubleclick_callbacks.get(callback_name) + + def execute_doubleclick(self): + """Execute double click callback in main thread.""" + callback = self.doubleclick_callback + if callback: + self.execute_in_main_thread(callback) + + def _restart_and_install(self): + self.restart(use_expected_version=True) + + def execute_in_main_thread(self, callback, *args, **kwargs): + if isinstance(callback, WrappedCallbackItem): + item = callback + else: + item = WrappedCallbackItem(callback, *args, **kwargs) + + self._main_thread_callbacks.append(item) + + return item + + def _main_thread_execution(self): + if self._execution_in_progress: + return + self._execution_in_progress = True + for _ in range(len(self._main_thread_callbacks)): + if self._main_thread_callbacks: + item = self._main_thread_callbacks.popleft() + item.execute() + + self._execution_in_progress = False + + def initialize_addons(self): + """Add addons to tray.""" + + self._addons_manager.initialize(self, self.tray_widget.menu) + + admin_submenu = ITrayAction.admin_submenu(self.tray_widget.menu) + self.tray_widget.menu.addMenu(admin_submenu) + + # Add services if they are + services_submenu = 
ITrayService.services_submenu(self.tray_widget.menu) + self.tray_widget.menu.addMenu(services_submenu) + + # Add separator + self.tray_widget.menu.addSeparator() + + self._add_version_item() + + # Add Exit action to menu + exit_action = QtWidgets.QAction("Exit", self.tray_widget) + exit_action.triggered.connect(self.tray_widget.exit) + self.tray_widget.menu.addAction(exit_action) + + # Tell each addon which addons were imported + self._addons_manager.start_addons() + + # Print time report + self._addons_manager.print_report() + + # create timer loop to check callback functions + main_thread_timer = QtCore.QTimer() + main_thread_timer.setInterval(300) + main_thread_timer.timeout.connect(self._main_thread_execution) + main_thread_timer.start() + + self.main_thread_timer = main_thread_timer + + self.execute_in_main_thread(self._startup_validations) + + def _startup_validations(self): + """Run possible startup validations.""" + pass + + def show_tray_message(self, title, message, icon=None, msecs=None): + """Show tray message. + + Args: + title (str): Title of message. + message (str): Content of message. + icon (QSystemTrayIcon.MessageIcon): Message's icon. Default is + Information icon, may differ by Qt version. + msecs (int): Duration of message visibility in milliseconds. + Default is 10000 msecs, may differ by Qt version. + """ + args = [title, message] + kwargs = {} + if icon: + kwargs["icon"] = icon + if msecs: + kwargs["msecs"] = msecs + + self.tray_widget.showMessage(*args, **kwargs) + + def _add_version_item(self): + login_action = QtWidgets.QAction("Login", self.tray_widget) + login_action.triggered.connect(self._on_ayon_login) + self.tray_widget.menu.addAction(login_action) + + version_string = os.getenv("AYON_VERSION", "AYON Info") + + version_action = QtWidgets.QAction(version_string, self.tray_widget) + version_action.triggered.connect(self._on_version_action) + + restart_action = QtWidgets.QAction( + "Restart && Update", self.tray_widget + ) + restart_action.triggered.connect(self._on_restart_action) + restart_action.setVisible(False) + + self.tray_widget.menu.addAction(version_action) + self.tray_widget.menu.addAction(restart_action) + self.tray_widget.menu.addSeparator() + + self._restart_action = restart_action + + def _on_ayon_login(self): + self.execute_in_main_thread(self._show_ayon_login) + + def _show_ayon_login(self): + from ayon_common.connection.credentials import change_user_ui + + result = change_user_ui() + if result.shutdown: + self.exit() + + elif result.restart or result.token_changed: + # Remove environment variables from current connection + # - keep develop, staging, headless values + for key in { + "AYON_SERVER_URL", + "AYON_API_KEY", + "AYON_BUNDLE_NAME", + }: + os.environ.pop(key, None) + self.restart() + + def _on_restart_action(self): + self.restart(use_expected_version=True) + + def restart(self, use_expected_version=False, reset_version=False): + """Restart Tray tool. + + First creates a new process with the same arguments, then closes + the current tray. + + Args: + use_expected_version (bool): AYON version is set to the + expected version. + reset_version (bool): AYON version is cleaned up so the + launcher logic will decide which version will be used.
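+ + Example: + A minimal illustrative sketch (not part of the original code); + 'manager' is assumed to be an initialized TrayManager instance. + + >>> # Relaunch and let the launcher logic pick the expected version + >>> manager.restart(use_expected_version=True)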
+ """ + args = get_ayon_launcher_args() + envs = dict(os.environ.items()) + + # Create a copy of sys.argv + additional_args = list(sys.argv) + # Remove first argument from 'sys.argv' + # - when running from code the first argument is 'start.py' + # - when running from build the first argument is executable + additional_args.pop(0) + + cleanup_additional_args = False + if use_expected_version: + cleanup_additional_args = True + reset_version = True + + # Pop OPENPYPE_VERSION + if reset_version: + cleanup_additional_args = True + envs.pop("OPENPYPE_VERSION", None) + + if cleanup_additional_args: + _additional_args = [] + for arg in additional_args: + if arg == "--use-staging" or arg.startswith("--use-version"): + continue + _additional_args.append(arg) + additional_args = _additional_args + + args.extend(additional_args) + run_detached_process(args, env=envs) + self.exit() + + def exit(self): + self.tray_widget.exit() + + def on_exit(self): + self._addons_manager.on_exit() + + def _on_version_action(self): + if self._info_widget is None: + self._info_widget = InfoWidget() + + self._info_widget.show() + self._info_widget.raise_() + self._info_widget.activateWindow() + + +class SystemTrayIcon(QtWidgets.QSystemTrayIcon): + """Tray widget. + + :param parent: Main widget that cares about all GUIs + :type parent: QtWidgets.QMainWindow + """ + + doubleclick_time_ms = 100 + + def __init__(self, parent): + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + + super(SystemTrayIcon, self).__init__(icon, parent) + + self._exited = False + + # Store parent - QtWidgets.QMainWindow() + self.parent = parent + + # Setup menu in Tray + self.menu = QtWidgets.QMenu() + self.menu.setStyleSheet(style.load_stylesheet()) + + # Set addons + self.tray_man = TrayManager(self, self.parent) + + # Add menu to Context of SystemTrayIcon + self.setContextMenu(self.menu) + + atexit.register(self.exit) + + # Catch activate event for left click if not on MacOS + # - MacOS has this ability by design and is harder to modify this + # behavior + if platform.system().lower() == "darwin": + return + + self.activated.connect(self.on_systray_activated) + + click_timer = QtCore.QTimer() + click_timer.setInterval(self.doubleclick_time_ms) + click_timer.timeout.connect(self._click_timer_timeout) + + self._click_timer = click_timer + self._doubleclick = False + self._click_pos = None + + self._initializing_addons = False + + @property + def initializing_addons(self): + return self._initializing_addons + + def initialize_addons(self): + self._initializing_addons = True + self.tray_man.initialize_addons() + self._initializing_addons = False + + def _click_timer_timeout(self): + self._click_timer.stop() + doubleclick = self._doubleclick + # Reset bool value + self._doubleclick = False + if doubleclick: + self.tray_man.execute_doubleclick() + else: + self._show_context_menu() + + def _show_context_menu(self): + pos = self._click_pos + self._click_pos = None + if pos is None: + pos = QtGui.QCursor().pos() + self.contextMenu().popup(pos) + + def on_systray_activated(self, reason): + # show contextMenu if left click + if reason == QtWidgets.QSystemTrayIcon.Trigger: + if self.tray_man.doubleclick_callback: + self._click_pos = QtGui.QCursor().pos() + self._click_timer.start() + else: + self._show_context_menu() + + elif reason == QtWidgets.QSystemTrayIcon.DoubleClick: + self._doubleclick = True + + def exit(self): + """ Exit whole application. + + - Icon won't stay in tray after exit. 
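+ + Example (illustrative only; 'tray_icon' is a hypothetical, already + shown instance): + + >>> tray_icon.exit() # hides icon, stops addons, quits the Qt app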
+ """ + if self._exited: + return + self._exited = True + + self.hide() + self.tray_man.on_exit() + QtCore.QCoreApplication.exit() + + +class TrayStarter(QtCore.QObject): + def __init__(self, app): + super(TrayStarter, self).__init__() + app.setQuitOnLastWindowClosed(False) + self._app = app + self._splash = None + + main_window = QtWidgets.QMainWindow() + tray_widget = SystemTrayIcon(main_window) + + start_timer = QtCore.QTimer() + start_timer.setInterval(100) + start_timer.start() + + start_timer.timeout.connect(self._on_start_timer) + + self._main_window = main_window + self._tray_widget = tray_widget + self._timer_counter = 0 + self._start_timer = start_timer + + def _on_start_timer(self): + if self._timer_counter == 0: + self._timer_counter += 1 + splash = self._get_splash() + splash.show() + self._tray_widget.show() + # Make sure tray and splash are painted out + QtWidgets.QApplication.processEvents() + + elif self._timer_counter == 1: + # Second processing of events to make sure splash is painted + QtWidgets.QApplication.processEvents() + self._timer_counter += 1 + self._tray_widget.initialize_addons() + + elif not self._tray_widget.initializing_addons: + splash = self._get_splash() + splash.hide() + self._start_timer.stop() + + def _get_splash(self): + if self._splash is None: + self._splash = self._create_splash() + return self._splash + + def _create_splash(self): + splash_pix = QtGui.QPixmap(resources.get_ayon_splash_filepath()) + splash = QtWidgets.QSplashScreen(splash_pix) + splash.setMask(splash_pix.mask()) + splash.setEnabled(False) + splash.setWindowFlags( + QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint + ) + return splash + + +def main(): + app = get_ayon_qt_app() + + starter = TrayStarter(app) + + if not is_running_from_build() and os.name == "nt": + import ctypes + ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID( + u"ayon_tray" + ) + + sys.exit(app.exec_()) diff --git a/openpype/tools/traypublisher/__init__.py b/client/ayon_core/tools/traypublisher/__init__.py similarity index 100% rename from openpype/tools/traypublisher/__init__.py rename to client/ayon_core/tools/traypublisher/__init__.py diff --git a/client/ayon_core/tools/traypublisher/window.py b/client/ayon_core/tools/traypublisher/window.py new file mode 100644 index 0000000000..dad314e510 --- /dev/null +++ b/client/ayon_core/tools/traypublisher/window.py @@ -0,0 +1,276 @@ +"""Tray publisher is extending publisher tool. + +Adds ability to select project using overlay widget with list of projects. + +Tray publisher can be considered as host implementation with creators and +publishing plugins. +""" + +import platform + +from qtpy import QtWidgets, QtCore +import qtawesome +import appdirs + +from ayon_core.lib import JSONSettingRegistry +from ayon_core.pipeline import install_host +from ayon_core.hosts.traypublisher.api import TrayPublisherHost +from ayon_core.tools.publisher.control_qt import QtPublisherController +from ayon_core.tools.publisher.window import PublisherWindow +from ayon_core.tools.utils import PlaceholderLineEdit, get_ayon_qt_app +from ayon_core.tools.utils.constants import PROJECT_NAME_ROLE +from ayon_core.tools.utils.models import ( + ProjectModel, + ProjectSortFilterProxy +) + + +class TrayPublisherController(QtPublisherController): + @property + def host(self): + return self._host + + def reset_project_data_cache(self): + self._asset_docs_cache.reset() + + +class TrayPublisherRegistry(JSONSettingRegistry): + """Class handling tray publisher settings registry.
+ + Attributes: + vendor (str): Name used for path construction. + product (str): Additional name used for path construction. + + """ + + def __init__(self): + self.vendor = "pypeclub" + self.product = "openpype" + name = "tray_publisher" + path = appdirs.user_data_dir(self.product, self.vendor) + super(TrayPublisherRegistry, self).__init__(name, path) + + +class StandaloneOverlayWidget(QtWidgets.QFrame): + project_selected = QtCore.Signal(str) + + def __init__(self, publisher_window): + super(StandaloneOverlayWidget, self).__init__(publisher_window) + self.setObjectName("OverlayFrame") + + middle_frame = QtWidgets.QFrame(self) + middle_frame.setObjectName("ChooseProjectFrame") + + content_widget = QtWidgets.QWidget(middle_frame) + + header_label = QtWidgets.QLabel("Choose project", content_widget) + header_label.setObjectName("ChooseProjectLabel") + # Create project models and view + projects_model = ProjectModel() + projects_proxy = ProjectSortFilterProxy() + projects_proxy.setSourceModel(projects_model) + projects_proxy.setFilterKeyColumn(0) + + projects_view = QtWidgets.QListView(content_widget) + projects_view.setObjectName("ChooseProjectView") + projects_view.setModel(projects_proxy) + projects_view.setEditTriggers( + QtWidgets.QAbstractItemView.NoEditTriggers + ) + + confirm_btn = QtWidgets.QPushButton("Confirm", content_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", content_widget) + cancel_btn.setVisible(False) + btns_layout = QtWidgets.QHBoxLayout() + btns_layout.addStretch(1) + btns_layout.addWidget(cancel_btn, 0) + btns_layout.addWidget(confirm_btn, 0) + + txt_filter = PlaceholderLineEdit(content_widget) + txt_filter.setPlaceholderText("Quick filter projects..") + txt_filter.setClearButtonEnabled(True) + txt_filter.addAction(qtawesome.icon("fa.filter", color="gray"), + QtWidgets.QLineEdit.LeadingPosition) + + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + content_layout.setSpacing(20) + content_layout.addWidget(header_label, 0) + content_layout.addWidget(txt_filter, 0) + content_layout.addWidget(projects_view, 1) + content_layout.addLayout(btns_layout, 0) + + middle_layout = QtWidgets.QHBoxLayout(middle_frame) + middle_layout.setContentsMargins(30, 30, 10, 10) + middle_layout.addWidget(content_widget) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(10, 10, 10, 10) + main_layout.addStretch(1) + main_layout.addWidget(middle_frame, 2) + main_layout.addStretch(1) + + projects_view.doubleClicked.connect(self._on_double_click) + confirm_btn.clicked.connect(self._on_confirm_click) + cancel_btn.clicked.connect(self._on_cancel_click) + txt_filter.textChanged.connect(self._on_text_changed) + + self._projects_view = projects_view + self._projects_model = projects_model + self._projects_proxy = projects_proxy + self._cancel_btn = cancel_btn + self._confirm_btn = confirm_btn + self._txt_filter = txt_filter + + self._publisher_window = publisher_window + self._project_name = None + + def showEvent(self, event): + self._projects_model.refresh() + # Sort projects after refresh + self._projects_proxy.sort(0) + + setting_registry = TrayPublisherRegistry() + try: + project_name = setting_registry.get_item("project_name") + except ValueError: + project_name = None + + if project_name: + index = None + src_index = self._projects_model.find_project(project_name) + if src_index is not None: + index = self._projects_proxy.mapFromSource(src_index) + + if index is not None: + selection_model = 
self._projects_view.selectionModel() + selection_model.select( + index, + QtCore.QItemSelectionModel.SelectCurrent + ) + self._projects_view.setCurrentIndex(index) + + self._cancel_btn.setVisible(self._project_name is not None) + super(StandaloneOverlayWidget, self).showEvent(event) + + def _on_double_click(self): + self.set_selected_project() + + def _on_confirm_click(self): + self.set_selected_project() + + def _on_cancel_click(self): + self._set_project(self._project_name) + + def _on_text_changed(self): + self._projects_proxy.setFilterRegularExpression( + self._txt_filter.text()) + + def set_selected_project(self): + index = self._projects_view.currentIndex() + + project_name = index.data(PROJECT_NAME_ROLE) + if project_name: + self._set_project(project_name) + + @property + def host(self): + return self._publisher_window.controller.host + + def _set_project(self, project_name): + self._project_name = project_name + self.host.set_project_name(project_name) + self.setVisible(False) + self.project_selected.emit(project_name) + + setting_registry = TrayPublisherRegistry() + setting_registry.set_item("project_name", project_name) + + +class TrayPublishWindow(PublisherWindow): + def __init__(self, *args, **kwargs): + controller = TrayPublisherController() + super(TrayPublishWindow, self).__init__( + controller=controller, reset_on_show=False + ) + + flags = self.windowFlags() + # Disable always on top hint + if flags & QtCore.Qt.WindowStaysOnTopHint: + flags ^= QtCore.Qt.WindowStaysOnTopHint + + self.setWindowFlags(flags) + + overlay_widget = StandaloneOverlayWidget(self) + + btns_widget = self._header_extra_widget + + back_to_overlay_btn = QtWidgets.QPushButton( + "Change project", btns_widget + ) + save_btn = QtWidgets.QPushButton("Save", btns_widget) + # TODO implement save mechanism of tray publisher + save_btn.setVisible(False) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + + btns_layout.addWidget(save_btn, 0) + btns_layout.addWidget(back_to_overlay_btn, 0) + + overlay_widget.project_selected.connect(self._on_project_select) + back_to_overlay_btn.clicked.connect(self._on_back_to_overlay) + save_btn.clicked.connect(self._on_tray_publish_save) + + self._back_to_overlay_btn = back_to_overlay_btn + self._overlay_widget = overlay_widget + + def _set_publish_frame_visible(self, publish_frame_visible): + super(TrayPublishWindow, self)._set_publish_frame_visible( + publish_frame_visible + ) + self._back_to_overlay_btn.setVisible(not publish_frame_visible) + + def _on_back_to_overlay(self): + self._overlay_widget.setVisible(True) + self._resize_overlay() + + def _resize_overlay(self): + self._overlay_widget.resize( + self.width(), + self.height() + ) + + def resizeEvent(self, event): + super(TrayPublishWindow, self).resizeEvent(event) + self._resize_overlay() + + def _on_project_select(self, project_name): + # TODO register project specific plugin paths + self._controller.save_changes(False) + self._controller.reset_project_data_cache() + + self.reset() + if not self._controller.instances: + self._go_to_create_tab() + + def _on_tray_publish_save(self): + self._controller.save_changes() + print("NOT YET IMPLEMENTED") + + +def main(): + host = TrayPublisherHost() + install_host(host) + + app_instance = get_ayon_qt_app() + + if platform.system().lower() == "windows": + import ctypes + ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID( + u"traypublisher" + ) + + window = TrayPublishWindow() + window.show() + app_instance.exec_() diff 
--git a/client/ayon_core/tools/utils/__init__.py b/client/ayon_core/tools/utils/__init__.py new file mode 100644 index 0000000000..7be0ea5e9f --- /dev/null +++ b/client/ayon_core/tools/utils/__init__.py @@ -0,0 +1,124 @@ +from .layouts import FlowLayout +from .widgets import ( + FocusSpinBox, + FocusDoubleSpinBox, + ComboBox, + CustomTextComboBox, + PlaceholderLineEdit, + ExpandingTextEdit, + BaseClickableFrame, + ClickableFrame, + ClickableLabel, + ExpandBtn, + ClassicExpandBtn, + PixmapLabel, + IconButton, + PixmapButton, + SeparatorWidget, + PressHoverButton, + + VerticalExpandButton, + SquareButton, + RefreshButton, + GoToCurrentButton, +) +from .views import ( + DeselectableTreeView, + TreeView, +) +from .error_dialog import ErrorMessageBox +from .lib import ( + WrappedCallbackItem, + paint_image_with_color, + get_warning_pixmap, + set_style_property, + DynamicQThread, + qt_app_context, + get_qt_app, + get_ayon_qt_app, + get_openpype_qt_app, + get_asset_icon, + get_asset_icon_by_name, + get_asset_icon_name_from_doc, + get_asset_icon_color_from_doc, +) + +from .models import ( + RecursiveSortFilterProxyModel, +) +from .overlay_messages import ( + MessageOverlayObject, +) +from .multiselection_combobox import MultiSelectionComboBox +from .thumbnail_paint_widget import ThumbnailPainterWidget +from .sliders import NiceSlider +from .nice_checkbox import NiceCheckbox +from .dialogs import ( + show_message_dialog, + ScrollMessageBox, + SimplePopup, + PopupUpdateKeys, +) + + +__all__ = ( + "FlowLayout", + + "FocusSpinBox", + "FocusDoubleSpinBox", + "ComboBox", + "CustomTextComboBox", + "PlaceholderLineEdit", + "ExpandingTextEdit", + "BaseClickableFrame", + "ClickableFrame", + "ClickableLabel", + "ExpandBtn", + "ClassicExpandBtn", + "PixmapLabel", + "IconButton", + "PixmapButton", + "SeparatorWidget", + "PressHoverButton", + + "VerticalExpandButton", + "SquareButton", + "RefreshButton", + "GoToCurrentButton", + + "DeselectableTreeView", + "TreeView", + + "ErrorMessageBox", + + "WrappedCallbackItem", + "paint_image_with_color", + "get_warning_pixmap", + "set_style_property", + "DynamicQThread", + "qt_app_context", + "get_qt_app", + "get_ayon_qt_app", + "get_openpype_qt_app", + "get_asset_icon", + "get_asset_icon_by_name", + "get_asset_icon_name_from_doc", + "get_asset_icon_color_from_doc", + + "RecursiveSortFilterProxyModel", + + "MessageOverlayObject", + + "MultiSelectionComboBox", + + "ThumbnailPainterWidget", + + "NiceSlider", + + "NiceCheckbox", + + "show_message_dialog", + "ScrollMessageBox", + "SimplePopup", + "PopupUpdateKeys", +) diff --git a/client/ayon_core/tools/utils/assets_widget.py b/client/ayon_core/tools/utils/assets_widget.py new file mode 100644 index 0000000000..c05f3de850 --- /dev/null +++ b/client/ayon_core/tools/utils/assets_widget.py @@ -0,0 +1,643 @@ +import time +import collections + +from qtpy import QtWidgets, QtCore, QtGui +import qtawesome + +from ayon_core.client import ( + get_project, + get_assets, +) +from ayon_core.style import ( + get_default_tools_icon_color, +) +from ayon_core.tools.flickcharm import FlickCharm + +from .views import ( + TreeViewSpinner, + DeselectableTreeView +) +from .widgets import PlaceholderLineEdit +from .models import RecursiveSortFilterProxyModel +from .lib import ( + DynamicQThread, + get_asset_icon +) + +ASSET_ID_ROLE = QtCore.Qt.UserRole + 1 +ASSET_NAME_ROLE = QtCore.Qt.UserRole + 2 +ASSET_LABEL_ROLE = QtCore.Qt.UserRole + 3 +ASSET_UNDERLINE_COLORS_ROLE = QtCore.Qt.UserRole + 4 +ASSET_PATH_ROLE = QtCore.Qt.UserRole + 5 + + +class 
_AssetsView(TreeViewSpinner, DeselectableTreeView): + """Asset items view. + + Adds abilities to deselect, show loading spinner and add flick charm + (scroll by mouse/touchpad click and move). + """ + + def __init__(self, parent=None): + super(_AssetsView, self).__init__(parent) + self.setIndentation(15) + self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setHeaderHidden(True) + + self._flick_charm_activated = False + self._flick_charm = FlickCharm(parent=self) + self._before_flick_scroll_mode = None + + def activate_flick_charm(self): + if self._flick_charm_activated: + return + self._flick_charm_activated = True + self._before_flick_scroll_mode = self.verticalScrollMode() + self._flick_charm.activateOn(self) + self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel) + + def deactivate_flick_charm(self): + if not self._flick_charm_activated: + return + self._flick_charm_activated = False + self._flick_charm.deactivateFrom(self) + if self._before_flick_scroll_mode is not None: + self.setVerticalScrollMode(self._before_flick_scroll_mode) + + def mousePressEvent(self, event): + index = self.indexAt(event.pos()) + if not index.isValid(): + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers == QtCore.Qt.ShiftModifier: + return + elif modifiers == QtCore.Qt.ControlModifier: + return + + super(_AssetsView, self).mousePressEvent(event) + + def set_loading_state(self, loading, empty): + """Change loading state. + + TODO: Separate into 2 individual methods. + + Args: + loading(bool): Is loading. + empty(bool): Is model empty. + """ + if self.is_loading != loading: + if loading: + self.spinner.repaintNeeded.connect( + self.viewport().update + ) + else: + self.spinner.repaintNeeded.disconnect() + self.viewport().update() + + self.is_loading = loading + self.is_empty = empty + + +class _AssetModel(QtGui.QStandardItemModel): + """A model listing assets in the active project. + + The assets are displayed in a treeview, they are visually parented by + a `visualParent` field in the database containing an `_id` to a parent + asset. + + Asset document may have defined label, icon or icon color. + + Loading of data for model happens in thread which means that refresh + is not sequential. When refresh is triggered it is required to listen for + 'refreshed' signal. + + Args: + dbcon (AvalonMongoDB): Ready to use connection to mongo with. + parent (QObject): Parent Qt object. 
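+ + Example: + A minimal sketch, not from the original code; assumes 'dbcon' is a + prepared AvalonMongoDB connection object. + + >>> model = _AssetModel(dbcon) + >>> model.refreshed.connect(lambda has_items: print(has_items)) + >>> model.refresh() # returns immediately; wait for 'refreshed'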
+ """ + + _doc_fetched = QtCore.Signal() + refreshed = QtCore.Signal(bool) + + # Asset document projection + _asset_projection = { + "name": 1, + "parent": 1, + "data.visualParent": 1, + "data.label": 1, + "data.icon": 1, + "data.color": 1 + } + + def __init__(self, dbcon, parent=None): + super(_AssetModel, self).__init__(parent=parent) + self.dbcon = dbcon + + self._refreshing = False + self._doc_fetching_thread = None + self._doc_fetching_stop = False + self._doc_payload = [] + + self._doc_fetched.connect(self._on_docs_fetched) + + self._item_ids_with_color = set() + self._items_by_asset_id = {} + + self._last_project_name = None + + @property + def refreshing(self): + return self._refreshing + + def get_index_by_asset_id(self, asset_id): + item = self._items_by_asset_id.get(asset_id) + if item is not None: + return item.index() + return QtCore.QModelIndex() + + def get_indexes_by_asset_ids(self, asset_ids): + return [ + self.get_index_by_asset_id(asset_id) + for asset_id in asset_ids + ] + + def get_index_by_asset_name(self, asset_name): + indexes = self.get_indexes_by_asset_names([asset_name]) + for index in indexes: + if index.isValid(): + return index + return indexes[0] + + def get_indexes_by_asset_names(self, asset_names): + asset_ids_by_name = { + asset_name: None + for asset_name in asset_names + } + + for asset_id, item in self._items_by_asset_id.items(): + asset_name = item.data(ASSET_NAME_ROLE) + if asset_name in asset_ids_by_name: + asset_ids_by_name[asset_name] = asset_id + + asset_ids = [ + asset_ids_by_name[asset_name] + for asset_name in asset_names + ] + + return self.get_indexes_by_asset_ids(asset_ids) + + def refresh(self, force=False): + """Refresh the data for the model. + + Args: + force (bool): Stop the currently running refresh and start + a new one.
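+ + Example (illustrative only): + + >>> model.refresh() # starts threaded fetch + >>> model.refresh(force=True) # stops running fetch, starts again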
+ """ + # Skip fetch if there is already other thread fetching documents + if self._refreshing: + if not force: + return + self.stop_refresh() + + project_name = self.dbcon.Session.get("AVALON_PROJECT") + clear_model = False + if project_name != self._last_project_name: + clear_model = True + self._last_project_name = project_name + + if clear_model: + self._clear_items() + + # Fetch documents from mongo + # Restart payload + self._refreshing = True + self._doc_payload = [] + self._doc_fetching_thread = DynamicQThread(self._threaded_fetch) + self._doc_fetching_thread.start() + + def stop_refresh(self): + self._stop_fetch_thread() + + def clear_underlines(self): + for asset_id in set(self._item_ids_with_color): + self._item_ids_with_color.remove(asset_id) + item = self._items_by_asset_id.get(asset_id) + if item is not None: + item.setData(None, ASSET_UNDERLINE_COLORS_ROLE) + + def set_underline_colors(self, colors_by_asset_id): + self.clear_underlines() + + for asset_id, colors in colors_by_asset_id.items(): + item = self._items_by_asset_id.get(asset_id) + if item is None: + continue + item.setData(colors, ASSET_UNDERLINE_COLORS_ROLE) + self._item_ids_with_color.add(asset_id) + + def _clear_items(self): + root_item = self.invisibleRootItem() + root_item.removeRows(0, root_item.rowCount()) + self._items_by_asset_id = {} + self._item_ids_with_color = set() + + def _on_docs_fetched(self): + # Make sure refreshing did not change + # - since this line is refreshing sequential and + # triggering of new refresh will happen when this method is done + if not self._refreshing: + self._clear_items() + return + + self._fill_assets(self._doc_payload) + + self.refreshed.emit(bool(self._items_by_asset_id)) + + self._stop_fetch_thread() + + def _fill_assets(self, asset_docs): + # Collect asset documents as needed + asset_ids = set() + asset_docs_by_id = {} + asset_ids_by_parents = collections.defaultdict(set) + for asset_doc in asset_docs: + asset_id = asset_doc["_id"] + asset_data = asset_doc.get("data") or {} + parent_id = asset_data.get("visualParent") + asset_ids.add(asset_id) + asset_docs_by_id[asset_id] = asset_doc + asset_ids_by_parents[parent_id].add(asset_id) + + # Prepare removed asset ids + removed_asset_ids = ( + set(self._items_by_asset_id.keys()) - set(asset_docs_by_id.keys()) + ) + + # Prepare queue for adding new items + asset_items_queue = collections.deque() + + # Queue starts with root item and 'visualParent' None + root_item = self.invisibleRootItem() + asset_items_queue.append((None, root_item)) + + while asset_items_queue: + # Get item from queue + parent_id, parent_item = asset_items_queue.popleft() + # Skip if there are no children + children_ids = asset_ids_by_parents[parent_id] + + # Go through current children of parent item + # - find out items that were deleted and skip creation of already + # existing items + for row in reversed(range(parent_item.rowCount())): + child_item = parent_item.child(row, 0) + asset_id = child_item.data(ASSET_ID_ROLE) + # Remove item that is not available + if asset_id not in children_ids: + if asset_id in removed_asset_ids: + # Remove and destroy row + parent_item.removeRow(row) + else: + # Just take the row from parent without destroying + parent_item.takeRow(row) + continue + + # Remove asset id from `children_ids` set + # - is used as set for creation of "new items" + children_ids.remove(asset_id) + # Add existing children to queue + asset_items_queue.append((asset_id, child_item)) + + new_items = [] + for asset_id in children_ids: + # Look for 
item in cache (maybe parent changed) + item = self._items_by_asset_id.get(asset_id) + # Create new item if was not found + if item is None: + item = QtGui.QStandardItem() + item.setEditable(False) + item.setData(asset_id, ASSET_ID_ROLE) + self._items_by_asset_id[asset_id] = item + new_items.append(item) + # Add item to queue + asset_items_queue.append((asset_id, item)) + + if new_items: + parent_item.appendRows(new_items) + + # Remove cache of removed items + for asset_id in removed_asset_ids: + self._items_by_asset_id.pop(asset_id) + + # Refresh data + # - all items refresh all data except id + for asset_id, item in self._items_by_asset_id.items(): + asset_doc = asset_docs_by_id[asset_id] + + asset_name = asset_doc["name"] + if item.data(ASSET_NAME_ROLE) != asset_name: + item.setData(asset_name, ASSET_NAME_ROLE) + + asset_data = asset_doc.get("data") or {} + asset_label = asset_data.get("label") or asset_name + if item.data(ASSET_LABEL_ROLE) != asset_label: + item.setData(asset_label, QtCore.Qt.DisplayRole) + item.setData(asset_label, ASSET_LABEL_ROLE) + + has_children = item.rowCount() > 0 + icon = get_asset_icon(asset_doc, has_children) + item.setData(icon, QtCore.Qt.DecorationRole) + + def _threaded_fetch(self): + asset_docs = self._fetch_asset_docs() + if not self._refreshing: + return + + self._doc_payload = asset_docs + + # Emit doc fetched only if was not stopped + self._doc_fetched.emit() + + def _fetch_asset_docs(self): + project_name = self.dbcon.current_project() + if not project_name: + return [] + + project_doc = get_project(project_name, fields=["_id"]) + if not project_doc: + return [] + + # Get all assets sorted by name + return list( + get_assets(project_name, fields=self._asset_projection.keys()) + ) + + def _stop_fetch_thread(self): + self._refreshing = False + if self._doc_fetching_thread is not None: + while self._doc_fetching_thread.isRunning(): + time.sleep(0.01) + self._doc_fetching_thread = None + + +class _AssetsWidget(QtWidgets.QWidget): + """Base widget to display a tree of assets with filter. + + Assets have only one column and are sorted by name. + + Refreshing of assets happens in thread so calling 'refresh' method + is not sequential. To capture moment when refreshing is finished listen + to 'refreshed' signal. + + To capture selection changes listen to 'selection_changed' signal. It won't + send any information about new selection as it may be different based on + inheritance changes. + + Args: + dbcon (AvalonMongoDB): Connection to avalon mongo db. + parent (QWidget): Parent Qt widget. 
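+ + Example: + Hypothetical wiring sketch; '_on_selection' is an assumed callback + defined by the owning tool. + + >>> widget = SingleSelectAssetsWidget(dbcon, parent=None) + >>> widget.selection_changed.connect(_on_selection) + >>> widget.refresh() # non-blocking; listen to 'refreshed'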
+ """ + + # on model refresh + refresh_triggered = QtCore.Signal() + refreshed = QtCore.Signal() + # on view selection change + selection_changed = QtCore.Signal() + # It was double clicked on view + double_clicked = QtCore.Signal() + + def __init__(self, dbcon, parent=None): + super(_AssetsWidget, self).__init__(parent=parent) + + self.dbcon = dbcon + + # Tree View + model = self._create_source_model() + proxy = self._create_proxy_model(model) + + view = _AssetsView(self) + view.setModel(proxy) + + header_widget = QtWidgets.QWidget(self) + + current_asset_icon = qtawesome.icon( + "fa.arrow-down", color=get_default_tools_icon_color() + ) + current_asset_btn = QtWidgets.QPushButton(header_widget) + current_asset_btn.setIcon(current_asset_icon) + current_asset_btn.setToolTip("Go to Asset from current Session") + # Hide by default + current_asset_btn.setVisible(False) + + refresh_icon = qtawesome.icon( + "fa.refresh", color=get_default_tools_icon_color() + ) + refresh_btn = QtWidgets.QPushButton(header_widget) + refresh_btn.setIcon(refresh_icon) + refresh_btn.setToolTip("Refresh items") + + filter_input = PlaceholderLineEdit(header_widget) + filter_input.setPlaceholderText("Filter folders..") + + # Header + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + header_layout.addWidget(filter_input) + header_layout.addWidget(current_asset_btn) + header_layout.addWidget(refresh_btn) + + # Make header widgets expand vertically if there is a place + for widget in ( + current_asset_btn, + refresh_btn, + filter_input, + ): + size_policy = widget.sizePolicy() + size_policy.setVerticalPolicy( + QtWidgets.QSizePolicy.MinimumExpanding) + widget.setSizePolicy(size_policy) + + # Layout + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(header_widget, 0) + layout.addWidget(view, 1) + + # Signals/Slots + filter_input.textChanged.connect(self._on_filter_text_change) + + selection_model = view.selectionModel() + selection_model.selectionChanged.connect(self._on_selection_change) + refresh_btn.clicked.connect(self.refresh) + current_asset_btn.clicked.connect(self._on_current_asset_click) + view.doubleClicked.connect(self.double_clicked) + + self._header_widget = header_widget + self._filter_input = filter_input + self._refresh_btn = refresh_btn + self._current_asset_btn = current_asset_btn + self._model = model + self._proxy = proxy + self._view = view + self._last_project_name = None + + self._last_btns_height = None + + self.model_selection = {} + + @property + def header_widget(self): + return self._header_widget + + def _create_source_model(self): + model = _AssetModel(dbcon=self.dbcon, parent=self) + model.refreshed.connect(self._on_model_refresh) + return model + + def _create_proxy_model(self, source_model): + proxy = RecursiveSortFilterProxyModel() + proxy.setSourceModel(source_model) + proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) + proxy.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + return proxy + + @property + def refreshing(self): + return self._model.refreshing + + def refresh(self): + self._refresh_model() + + def stop_refresh(self): + self._model.stop_refresh() + + def _get_current_session_asset(self): + return self.dbcon.Session.get("AVALON_ASSET") + + def _on_current_asset_click(self): + """Trigger change of asset to current context asset. + This separation gives the ability to override this method and use it + in a different way.
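+ + Example: + A hedged override sketch (the subclass name is hypothetical): + + >>> class MyAssetsWidget(SingleSelectAssetsWidget): + ... def _on_current_asset_click(self): + ... print("Going to current context asset") + ... self.set_current_session_asset()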
+ """ + + self.set_current_session_asset() + + def set_current_session_asset(self): + asset_name = self._get_current_session_asset() + if asset_name: + self.select_asset_by_name(asset_name) + + def set_refresh_btn_visibility(self, visible=None): + """Set the refresh button visibility. + Some tools may have their own global refresh button or do not + support refresh at all. + """ + + if visible is None: + visible = not self._refresh_btn.isVisible() + self._refresh_btn.setVisible(visible) + + def set_current_asset_btn_visibility(self, visible=None): + """Set the current asset button visibility. + + Not all tools support use of the current context asset. + """ + + if visible is None: + visible = not self._current_asset_btn.isVisible() + self._current_asset_btn.setVisible(visible) + + def select_asset(self, asset_id): + index = self._model.get_index_by_asset_id(asset_id) + new_index = self._proxy.mapFromSource(index) + self._select_indexes([new_index]) + + def select_asset_by_name(self, asset_name): + index = self._model.get_index_by_asset_name(asset_name) + new_index = self._proxy.mapFromSource(index) + self._select_indexes([new_index]) + + def activate_flick_charm(self): + self._view.activate_flick_charm() + + def deactivate_flick_charm(self): + self._view.deactivate_flick_charm() + + def _on_selection_change(self): + self.selection_changed.emit() + + def _on_filter_text_change(self, new_text): + self._proxy.setFilterFixedString(new_text) + + def _on_model_refresh(self, has_item): + """This method should be triggered on model refresh. + + The default implementation registers this callback in + '_create_source_model', so if you're modifying the model keep in + mind that this method should be called when refresh is done. + """ + + self._proxy.sort(0) + self._set_loading_state(loading=False, empty=not has_item) + self.refreshed.emit() + + def _refresh_model(self): + # Store selection + self._set_loading_state(loading=True, empty=True) + + # Trigger signal before refresh is called + self.refresh_triggered.emit() + # Refresh model + self._model.refresh() + + def _set_loading_state(self, loading, empty): + self._view.set_loading_state(loading, empty) + + def _clear_selection(self): + selection_model = self._view.selectionModel() + selection_model.clearSelection() + + def _select_indexes(self, indexes): + valid_indexes = [ + index + for index in indexes + if index.isValid() + ] + if not valid_indexes: + return + + selection_model = self._view.selectionModel() + selection_model.clearSelection() + + mode = ( + QtCore.QItemSelectionModel.Select + | QtCore.QItemSelectionModel.Rows + ) + for index in valid_indexes: + self._view.expand(self._proxy.parent(index)) + selection_model.select(index, mode) + self._view.setCurrentIndex(valid_indexes[0]) + + +class SingleSelectAssetsWidget(_AssetsWidget): + """Single selection asset widget. + + Contains single selection specific API methods. + + Deprecated: + This widget will be removed soon. Please do not use it in new code.
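+ + Example (for existing legacy code only; illustrative): + + >>> asset_id = widget.get_selected_asset_id() + >>> asset_name = widget.get_selected_asset_name() + >>> if asset_id is None: + ... print("Nothing is selected")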
+ """ + + def get_selected_asset_id(self): + """Currently selected asset id.""" + selection_model = self._view.selectionModel() + indexes = selection_model.selectedRows() + for index in indexes: + return index.data(ASSET_ID_ROLE) + return None + + def get_selected_asset_name(self): + """Currently selected asset name.""" + selection_model = self._view.selectionModel() + indexes = selection_model.selectedRows() + for index in indexes: + return index.data(ASSET_NAME_ROLE) + return None diff --git a/openpype/widgets/color_widgets/__init__.py b/client/ayon_core/tools/utils/color_widgets/__init__.py similarity index 100% rename from openpype/widgets/color_widgets/__init__.py rename to client/ayon_core/tools/utils/color_widgets/__init__.py diff --git a/openpype/widgets/color_widgets/color_inputs.py b/client/ayon_core/tools/utils/color_widgets/color_inputs.py similarity index 100% rename from openpype/widgets/color_widgets/color_inputs.py rename to client/ayon_core/tools/utils/color_widgets/color_inputs.py diff --git a/openpype/widgets/color_widgets/color_picker_widget.py b/client/ayon_core/tools/utils/color_widgets/color_picker_widget.py similarity index 100% rename from openpype/widgets/color_widgets/color_picker_widget.py rename to client/ayon_core/tools/utils/color_widgets/color_picker_widget.py diff --git a/openpype/widgets/color_widgets/color_screen_pick.py b/client/ayon_core/tools/utils/color_widgets/color_screen_pick.py similarity index 100% rename from openpype/widgets/color_widgets/color_screen_pick.py rename to client/ayon_core/tools/utils/color_widgets/color_screen_pick.py diff --git a/openpype/widgets/color_widgets/color_triangle.py b/client/ayon_core/tools/utils/color_widgets/color_triangle.py similarity index 100% rename from openpype/widgets/color_widgets/color_triangle.py rename to client/ayon_core/tools/utils/color_widgets/color_triangle.py diff --git a/openpype/widgets/color_widgets/color_view.py b/client/ayon_core/tools/utils/color_widgets/color_view.py similarity index 100% rename from openpype/widgets/color_widgets/color_view.py rename to client/ayon_core/tools/utils/color_widgets/color_view.py diff --git a/openpype/widgets/color_widgets/eyedropper.png b/client/ayon_core/tools/utils/color_widgets/eyedropper.png similarity index 100% rename from openpype/widgets/color_widgets/eyedropper.png rename to client/ayon_core/tools/utils/color_widgets/eyedropper.png diff --git a/client/ayon_core/tools/utils/constants.py b/client/ayon_core/tools/utils/constants.py new file mode 100644 index 0000000000..0c92e3ccc8 --- /dev/null +++ b/client/ayon_core/tools/utils/constants.py @@ -0,0 +1,16 @@ +from qtpy import QtCore + + +UNCHECKED_INT = getattr(QtCore.Qt.Unchecked, "value", 0) +PARTIALLY_CHECKED_INT = getattr(QtCore.Qt.PartiallyChecked, "value", 1) +CHECKED_INT = getattr(QtCore.Qt.Checked, "value", 2) + +# Checkbox state +try: + ITEM_IS_USER_TRISTATE = QtCore.Qt.ItemIsUserTristate +except AttributeError: + ITEM_IS_USER_TRISTATE = QtCore.Qt.ItemIsTristate + +DEFAULT_PROJECT_LABEL = "< Default >" +PROJECT_NAME_ROLE = QtCore.Qt.UserRole + 101 +PROJECT_IS_ACTIVE_ROLE = QtCore.Qt.UserRole + 102 diff --git a/client/ayon_core/tools/utils/delegates.py b/client/ayon_core/tools/utils/delegates.py new file mode 100644 index 0000000000..1147074b77 --- /dev/null +++ b/client/ayon_core/tools/utils/delegates.py @@ -0,0 +1,108 @@ +import time +from datetime import datetime +import logging + +from qtpy import QtWidgets + +log = logging.getLogger(__name__) + + +def pretty_date(t, now=None, strftime="%b %d 
%Y %H:%M"): + """Parse datetime to a readable timestamp. + + Within the first ten seconds: + - "just now" + Within the first minute: + - "%S seconds ago" + Within the first hour: + - "%M minutes ago" + Within the first day: + - "%H:%M hours ago" + Otherwise the date is formatted using 'strftime': + - "%b %d %Y %H:%M" by default + + """ + + assert isinstance(t, datetime) + if now is None: + now = datetime.now() + assert isinstance(now, datetime) + diff = now - t + + second_diff = diff.seconds + day_diff = diff.days + + # future (consider as just now) + if day_diff < 0: + return "just now" + + # history + if day_diff == 0: + if second_diff < 10: + return "just now" + if second_diff < 60: + return str(second_diff) + " seconds ago" + if second_diff < 120: + return "a minute ago" + if second_diff < 3600: + return str(second_diff // 60) + " minutes ago" + if second_diff < 86400: + minutes = (second_diff % 3600) // 60 + hours = second_diff // 3600 + return "{0}:{1:02d} hours ago".format(hours, minutes) + + return t.strftime(strftime) + + +def pretty_timestamp(t, now=None): + """Parse timestamp to user readable format + + >>> pretty_timestamp("20170614T151122Z", now="20170614T151123Z") + 'just now' + + >>> pretty_timestamp("20170614T151122Z", now="20170614T171222Z") + '2:01 hours ago' + + Args: + t (str): The time string to parse. + now (str, optional) + + Returns: + str: human readable "recent" date. + + """ + + if now is not None: + try: + now = time.strptime(now, "%Y%m%dT%H%M%SZ") + now = datetime.fromtimestamp(time.mktime(now)) + except ValueError as e: + log.warning("Can't parse 'now' time format: {0} {1}".format(now, e)) + return None + + if isinstance(t, float): + dt = datetime.fromtimestamp(t) + else: + # Parse the time format as if it is `str` result from + # `pyblish.lib.time()` which usually is stored in Avalon database. + try: + t = time.strptime(t, "%Y%m%dT%H%M%SZ") + except ValueError as e: + log.warning("Can't parse time format: {0} {1}".format(t, e)) + return None + dt = datetime.fromtimestamp(time.mktime(t)) + + # prettify + return pretty_date(dt, now=now) + + +class PrettyTimeDelegate(QtWidgets.QStyledItemDelegate): + """A delegate that displays a timestamp as a pretty date. + + This displays dates like `pretty_date`. + + """ + + def displayText(self, value, locale): + if value is not None: + return pretty_timestamp(value) diff --git a/client/ayon_core/tools/utils/dialogs.py b/client/ayon_core/tools/utils/dialogs.py new file mode 100644 index 0000000000..5dd0ddd54e --- /dev/null +++ b/client/ayon_core/tools/utils/dialogs.py @@ -0,0 +1,253 @@ +import logging +from qtpy import QtWidgets, QtCore + +log = logging.getLogger(__name__) + + +def show_message_dialog(title, message, level=None, parent=None): + """ + + Args: + title (str): Title of dialog. + message (str): Message to display. + level (Literal["info", "warning", "critical"]): Level of dialog. + parent (Optional[QtCore.QObject]): Parent widget. + + """ + if level is None: + level = "info" + + if level == "info": + function = QtWidgets.QMessageBox.information + elif level == "warning": + function = QtWidgets.QMessageBox.warning + elif level == "critical": + function = QtWidgets.QMessageBox.critical + else: + raise ValueError(f"Invalid level: {level}") + function(parent, title, message) + + +class ScrollMessageBox(QtWidgets.QDialog): + """Basic version of scrollable QMessageBox. + + No other existing dialog implementation is scrollable. + + Args: + icon (QtWidgets.QMessageBox.Icon): Icon to display. + title (str): Window title. + messages (list[str]): List of messages.
+ cancelable (Optional[bool]): True if Cancel button should be added. + + """ + def __init__(self, icon, title, messages, cancelable=False): + super(ScrollMessageBox, self).__init__() + self.setWindowTitle(title) + self.icon = icon + + self._messages = messages + + self.setWindowFlags(QtCore.Qt.WindowTitleHint) + + layout = QtWidgets.QVBoxLayout(self) + + scroll_widget = QtWidgets.QScrollArea(self) + scroll_widget.setWidgetResizable(True) + content_widget = QtWidgets.QWidget(self) + scroll_widget.setWidget(content_widget) + + message_len = 0 + content_layout = QtWidgets.QVBoxLayout(content_widget) + for message in messages: + label_widget = QtWidgets.QLabel(message, content_widget) + content_layout.addWidget(label_widget) + message_len = max(message_len, len(message)) + + # guess size of scrollable area + # WARNING: 'desktop' method probably won't work in PySide6 + desktop = QtWidgets.QApplication.desktop() + max_width = desktop.availableGeometry().width() + scroll_widget.setMinimumWidth( + min(max_width, message_len * 6) + ) + layout.addWidget(scroll_widget) + + buttons = QtWidgets.QDialogButtonBox.Ok + if cancelable: + buttons |= QtWidgets.QDialogButtonBox.Cancel + + btn_box = QtWidgets.QDialogButtonBox(buttons) + btn_box.accepted.connect(self.accept) + + if cancelable: + btn_box.rejected.connect(self.reject) + + btn = QtWidgets.QPushButton("Copy to clipboard") + btn.clicked.connect(self._on_copy_click) + btn_box.addButton(btn, QtWidgets.QDialogButtonBox.NoRole) + + layout.addWidget(btn_box) + + def _on_copy_click(self): + clipboard = QtWidgets.QApplication.clipboard() + clipboard.setText("\n".join(self._messages)) + + +class SimplePopup(QtWidgets.QDialog): + """A Popup that moves itself to bottom right of screen on show event. + + The UI contains a message label and a red highlighted button to "show" + or perform another custom action from this pop-up. + + """ + + on_clicked = QtCore.Signal() + + def __init__(self, parent=None, *args, **kwargs): + super(SimplePopup, self).__init__(parent=parent, *args, **kwargs) + + # Set default title + self.setWindowTitle("Popup") + + self.setContentsMargins(0, 0, 0, 0) + + message_label = QtWidgets.QLabel("", self) + message_label.setStyleSheet(""" + QLabel { + font-size: 12px; + } + """) + confirm_btn = QtWidgets.QPushButton("Show", self) + confirm_btn.setSizePolicy( + QtWidgets.QSizePolicy.Maximum, + QtWidgets.QSizePolicy.Maximum + ) + confirm_btn.setStyleSheet( + """QPushButton { background-color: #BB0000 }""" + ) + + # Layout + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(10, 5, 10, 10) + + # Increase spacing slightly for readability + layout.setSpacing(10) + layout.addWidget(message_label) + layout.addWidget(confirm_btn) + + # Signals + confirm_btn.clicked.connect(self._on_clicked) + + # Default size + self.resize(400, 40) + + self._message_label = message_label + self._confirm_btn = confirm_btn + + def set_message(self, message): + self._message_label.setText(message) + + def set_button_text(self, text): + self._confirm_btn.setText(text) + + def setMessage(self, message): + self.set_message(message) + + def setButtonText(self, text): + self.set_button_text(text) + + def showEvent(self, event): + # Position popup based on contents on show event + geo = self._calculate_window_geometry() + self.setGeometry(geo) + + return super(SimplePopup, self).showEvent(event) + + def _on_clicked(self): + """Callback for when the 'show' button is clicked.
+ + Raises the parent (if any) + + """ + + parent = self.parent() + self.close() + + # Trigger the signal + self.on_clicked.emit() + + if parent: + parent.raise_() + + def _calculate_window_geometry(self): + """Respond to status changes + + On creation, align window with screen bottom right. + + """ + + window = self + + width = window.width() + width = max(width, window.minimumWidth()) + + height = window.height() + height = max(height, window.sizeHint().height()) + + try: + screen = window.screen() + desktop_geometry = screen.availableGeometry() + except AttributeError: + # Backwards compatibility for older Qt versions + # PySide6 removed QDesktopWidget + desktop_geometry = QtWidgets.QDesktopWidget().availableGeometry() + + window_geometry = window.geometry() + + screen_width = window_geometry.width() + screen_height = window_geometry.height() + + # Calculate width and height of system tray + systray_width = window_geometry.width() - desktop_geometry.width() + systray_height = window_geometry.height() - desktop_geometry.height() + + padding = 10 + + x = screen_width - width + y = screen_height - height + + x -= systray_width + padding + y -= systray_height + padding + + return QtCore.QRect(x, y, width, height) + + +class PopupUpdateKeys(SimplePopup): + """Simple popup with checkbox.""" + + on_clicked_state = QtCore.Signal(bool) + + def __init__(self, parent=None, *args, **kwargs): + super(PopupUpdateKeys, self).__init__( + parent=parent, *args, **kwargs + ) + + layout = self.layout() + + # Insert toggle for Update keys + toggle = QtWidgets.QCheckBox("Update Keys", self) + layout.insertWidget(1, toggle) + + self.on_clicked.connect(self.emit_click_with_state) + + layout.insertStretch(1, 1) + + self._toggle_checkbox = toggle + + def is_toggle_checked(self): + return self._toggle_checkbox.isChecked() + + def emit_click_with_state(self): + """Emit the on_clicked_state signal with the toggled state""" + checked = self._toggle_checkbox.isChecked() + self.on_clicked_state.emit(checked) diff --git a/openpype/tools/utils/error_dialog.py b/client/ayon_core/tools/utils/error_dialog.py similarity index 100% rename from openpype/tools/utils/error_dialog.py rename to client/ayon_core/tools/utils/error_dialog.py diff --git a/openpype/tools/utils/host_tools.py b/client/ayon_core/tools/utils/host_tools.py similarity index 79% rename from openpype/tools/utils/host_tools.py rename to client/ayon_core/tools/utils/host_tools.py index cc20774349..8841a377cf 100644 --- a/openpype/tools/utils/host_tools.py +++ b/client/ayon_core/tools/utils/host_tools.py @@ -7,10 +7,9 @@ import pyblish.api -from openpype import AYON_SERVER_ENABLED -from openpype.host import IWorkfileHost, ILoadHost -from openpype.lib import Logger -from openpype.pipeline import ( +from ayon_core.host import IWorkfileHost, ILoadHost +from ayon_core.lib import Logger +from ayon_core.pipeline import ( registered_host, get_current_asset_name, ) @@ -39,7 +38,6 @@ def __init__(self, parent=None): self._publisher_tool = None self._subset_manager_tool = None self._scene_inventory_tool = None - self._library_loader_tool = None self._experimental_tools_dialog = None @property @@ -48,29 +46,13 @@ def log(self): self._log = Logger.get_logger(self.__class__.__name__) return self._log - def _init_ayon_workfiles_tool(self, parent): - from openpype.tools.ayon_workfiles.widgets import WorkfilesToolWindow - - workfiles_window = WorkfilesToolWindow(parent=parent) - self._workfiles_tool = workfiles_window - - def _init_openpype_workfiles_tool(self, parent): - from 
openpype.tools.workfiles.app import Window - - # Host validation - host = registered_host() - IWorkfileHost.validate_workfile_methods(host) - - workfiles_window = Window(parent=parent) - self._workfiles_tool = workfiles_window - def get_workfiles_tool(self, parent): """Create, cache and return workfiles tool window.""" if self._workfiles_tool is None: - if AYON_SERVER_ENABLED: - self._init_ayon_workfiles_tool(parent) - else: - self._init_openpype_workfiles_tool(parent) + from ayon_core.tools.workfiles.widgets import WorkfilesToolWindow + + workfiles_window = WorkfilesToolWindow(parent=parent) + self._workfiles_tool = workfiles_window return self._workfiles_tool @@ -86,22 +68,18 @@ def show_workfiles( def get_loader_tool(self, parent): """Create, cache and return loader tool window.""" if self._loader_tool is None: + from ayon_core.tools.loader.ui import LoaderWindow + from ayon_core.tools.loader import LoaderController + host = registered_host() ILoadHost.validate_load_methods(host) - if AYON_SERVER_ENABLED: - from openpype.tools.ayon_loader.ui import LoaderWindow - from openpype.tools.ayon_loader import LoaderController - - controller = LoaderController(host=host) - loader_window = LoaderWindow( - controller=controller, - parent=parent or self._parent - ) - else: - from openpype.tools.loader import LoaderWindow + controller = LoaderController(host=host) + loader_window = LoaderWindow( + controller=controller, + parent=parent or self._parent + ) - loader_window = LoaderWindow(parent=parent or self._parent) self._loader_tool = loader_window return self._loader_tool @@ -119,16 +97,12 @@ def show_loader(self, parent=None, use_context=None): if use_context is None: use_context = False - if not AYON_SERVER_ENABLED and use_context: - context = {"asset": get_current_asset_name()} - loader_tool.set_context(context, refresh=True) - else: - loader_tool.refresh() + loader_tool.refresh() def get_creator_tool(self, parent): """Create, cache and return creator tool window.""" if self._creator_tool is None: - from openpype.tools.creator import CreatorWindow + from ayon_core.tools.creator import CreatorWindow creator_window = CreatorWindow(parent=parent or self._parent) self._creator_tool = creator_window @@ -149,7 +123,7 @@ def show_creator(self, parent=None): def get_subset_manager_tool(self, parent): """Create, cache and return subset manager tool window.""" if self._subset_manager_tool is None: - from openpype.tools.subsetmanager import SubsetManagerWindow + from ayon_core.tools.subsetmanager import SubsetManagerWindow subset_manager_window = SubsetManagerWindow( parent=parent or self._parent @@ -174,20 +148,12 @@ def get_scene_inventory_tool(self, parent): host = registered_host() ILoadHost.validate_load_methods(host) - if AYON_SERVER_ENABLED: - from openpype.tools.ayon_sceneinventory.window import ( - SceneInventoryWindow) - - scene_inventory_window = SceneInventoryWindow( - parent=parent or self._parent - ) - - else: - from openpype.tools.sceneinventory import SceneInventoryWindow + from ayon_core.tools.sceneinventory.window import ( + SceneInventoryWindow) - scene_inventory_window = SceneInventoryWindow( - parent=parent or self._parent - ) + scene_inventory_window = SceneInventoryWindow( + parent=parent or self._parent + ) self._scene_inventory_tool = scene_inventory_window return self._scene_inventory_tool @@ -206,31 +172,11 @@ def show_scene_inventory(self, parent=None): def get_library_loader_tool(self, parent): """Create, cache and return library loader tool window.""" - if 
AYON_SERVER_ENABLED:
-            return self.get_loader_tool(parent)
-
-        if self._library_loader_tool is None:
-            from openpype.tools.libraryloader import LibraryLoaderWindow
-
-            library_window = LibraryLoaderWindow(
-                parent=parent or self._parent
-            )
-            self._library_loader_tool = library_window
-
-        return self._library_loader_tool
+        return self.get_loader_tool(parent)
 
     def show_library_loader(self, parent=None):
         """Loader tool for loading representations from library project."""
-        if AYON_SERVER_ENABLED:
-            return self.show_loader(parent)
-
-        with qt_app_context():
-            library_loader_tool = self.get_library_loader_tool(parent)
-            library_loader_tool.show()
-            library_loader_tool.raise_()
-            library_loader_tool.activateWindow()
-            library_loader_tool.showNormal()
-            library_loader_tool.refresh()
+        return self.show_loader(parent)
 
     def show_publish(self, parent=None):
         """Try showing the most desirable publish GUI
@@ -269,7 +215,7 @@ def get_experimental_tools_dialog(self, parent=None):
         experimental tools.
         """
         if self._experimental_tools_dialog is None:
-            from openpype.tools.experimental_tools import (
+            from ayon_core.tools.experimental_tools import (
                 ExperimentalToolsDialog
             )
@@ -290,7 +236,7 @@ def get_publisher_tool(self, parent=None, controller=None):
         """Create, cache and return publisher window."""
 
         if self._publisher_tool is None:
-            from openpype.tools.publisher.window import PublisherWindow
+            from ayon_core.tools.publisher.window import PublisherWindow
 
             host = registered_host()
             ILoadHost.validate_load_methods(host)
diff --git a/openpype/tools/utils/images/__init__.py b/client/ayon_core/tools/utils/images/__init__.py
similarity index 100%
rename from openpype/tools/utils/images/__init__.py
rename to client/ayon_core/tools/utils/images/__init__.py
diff --git a/openpype/tools/utils/images/thumbnail.png b/client/ayon_core/tools/utils/images/thumbnail.png
similarity index 100%
rename from openpype/tools/utils/images/thumbnail.png
rename to client/ayon_core/tools/utils/images/thumbnail.png
diff --git a/openpype/tools/utils/layouts.py b/client/ayon_core/tools/utils/layouts.py
similarity index 100%
rename from openpype/tools/utils/layouts.py
rename to client/ayon_core/tools/utils/layouts.py
diff --git a/client/ayon_core/tools/utils/lib.py b/client/ayon_core/tools/utils/lib.py
new file mode 100644
index 0000000000..b7edd6be71
--- /dev/null
+++ b/client/ayon_core/tools/utils/lib.py
@@ -0,0 +1,543 @@
+import os
+import sys
+import contextlib
+
+from qtpy import QtWidgets, QtCore, QtGui
+import qtawesome
+
+from ayon_core.style import (
+    get_default_entity_icon_color,
+    get_objected_colors,
+    get_app_icon_path,
+)
+from ayon_core.resources import get_image_path
+from ayon_core.lib import Logger
+
+from .constants import CHECKED_INT, UNCHECKED_INT
+
+log = Logger.get_logger(__name__)
+
+
+def checkstate_int_to_enum(state):
+    if not isinstance(state, int):
+        return state
+    if state == CHECKED_INT:
+        return QtCore.Qt.Checked
+
+    if state == UNCHECKED_INT:
+        return QtCore.Qt.Unchecked
+    return QtCore.Qt.PartiallyChecked
+
+
+def checkstate_enum_to_int(state):
+    # Inverse of 'checkstate_int_to_enum': 'Checked' must map back to
+    # 'CHECKED_INT' and 'Unchecked' to 'UNCHECKED_INT' so a round-trip
+    # through both functions keeps the same value.
+    if isinstance(state, int):
+        return state
+    if state == QtCore.Qt.Checked:
+        return CHECKED_INT
+    if state == QtCore.Qt.PartiallyChecked:
+        return 1
+    return UNCHECKED_INT
+
+
+def center_window(window):
+    """Move window to center of its screen."""
+
+    if hasattr(QtWidgets.QApplication, "desktop"):
+        desktop = QtWidgets.QApplication.desktop()
+        screen_idx = desktop.screenNumber(window)
+        screen_geo = desktop.screenGeometry(screen_idx)
+    else:
+        screen = window.screen()
+        screen_geo = screen.geometry()
+
+    geo = window.frameGeometry()
+    geo.moveCenter(screen_geo.center())
+    if geo.y() < screen_geo.y():
+        geo.setY(screen_geo.y())
+    window.move(geo.topLeft())
+
+
+def html_escape(text):
+    """Basic escape of html syntax symbols in text."""
+
+    return (
+        text
+        .replace("&", "&amp;")
+        .replace("<", "&lt;")
+        .replace(">", "&gt;")
+        .replace('"', "&quot;")
+        .replace("'", "&#039;")
+    )
+
+
+def set_style_property(widget, property_name, property_value):
+    """Set widget's property that may affect style.
+
+    If the current property value differs, the widget's style is polished
+    to apply the change.
+    """
+    cur_value = widget.property(property_name)
+    if cur_value == property_value:
+        return
+    widget.setProperty(property_name, property_value)
+    style = widget.style()
+    style.polish(widget)
+
+
+def paint_image_with_color(image, color):
+    """Redraw image with single color using its alpha.
+
+    It is expected that the input image is a single-color image with alpha.
+
+    Args:
+        image (QImage): Loaded image with alpha.
+        color (QColor): Color that will be used to paint image.
+    """
+    width = image.width()
+    height = image.height()
+
+    alpha_mask = image.createAlphaMask()
+    alpha_region = QtGui.QRegion(QtGui.QBitmap.fromImage(alpha_mask))
+
+    pixmap = QtGui.QPixmap(width, height)
+    pixmap.fill(QtCore.Qt.transparent)
+
+    painter = QtGui.QPainter(pixmap)
+    render_hints = (
+        QtGui.QPainter.Antialiasing
+        | QtGui.QPainter.SmoothPixmapTransform
+    )
+    # 'HighQualityAntialiasing' is deprecated since Qt 5.14
+    if hasattr(QtGui.QPainter, "HighQualityAntialiasing"):
+        render_hints |= QtGui.QPainter.HighQualityAntialiasing
+    painter.setRenderHints(render_hints)
+
+    painter.setClipRegion(alpha_region)
+    painter.setPen(QtCore.Qt.NoPen)
+    painter.setBrush(color)
+    painter.drawRect(QtCore.QRect(0, 0, width, height))
+    painter.end()
+
+    return pixmap
+
+
+def format_version(value, hero_version=False):
+    """Formats integer to displayable version name"""
+    label = "v{0:03d}".format(value)
+    if not hero_version:
+        return label
+    return "[{}]".format(label)
+
+
+@contextlib.contextmanager
+def qt_app_context():
+    app = QtWidgets.QApplication.instance()
+
+    if not app:
+        print("Starting new QApplication..")
+        app = QtWidgets.QApplication(sys.argv)
+        yield app
+        app.exec_()
+    else:
+        print("Using existing QApplication..")
+        yield app
+
+
+def get_qt_app():
+    """Get Qt application.
+
+    The function initializes a new Qt application if it is not already
+    initialized. It also sets some attributes on the application to
+    ensure that it will work properly on high DPI displays.
+
+    Returns:
+        QtWidgets.QApplication: Current Qt application.
+    """
+
+    app = QtWidgets.QApplication.instance()
+    if app is None:
+        for attr_name in (
+            "AA_EnableHighDpiScaling",
+            "AA_UseHighDpiPixmaps",
+        ):
+            attr = getattr(QtCore.Qt, attr_name, None)
+            if attr is not None:
+                QtWidgets.QApplication.setAttribute(attr)
+
+        policy = os.getenv("QT_SCALE_FACTOR_ROUNDING_POLICY")
+        if (
+            hasattr(
+                QtWidgets.QApplication, "setHighDpiScaleFactorRoundingPolicy"
+            )
+            and not policy
+        ):
+            QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy(
+                QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough
+            )
+
+        app = QtWidgets.QApplication(sys.argv)
+
+    return app
+
+
+def get_ayon_qt_app():
+    """Main Qt application initialized for AYON processes.
+
+    This function should be used only inside AYON-launcher process
+    and never inside other processes.
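+
+    Example (illustrative; a minimal sketch of a tool entry point where
+    the plain 'QtWidgets.QWidget' stands in for a real tool window):
+        >>> app = get_ayon_qt_app()
+        >>> window = QtWidgets.QWidget()
+        >>> window.show()
+        >>> app.exec_()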
+ """ + + app = get_qt_app() + app.setWindowIcon(QtGui.QIcon(get_app_icon_path())) + return app + + +def get_openpype_qt_app(): + return get_ayon_qt_app() + + +class _Cache: + icons = {} + + +def get_qta_icon_by_name_and_color(icon_name, icon_color): + if not icon_name or not icon_color: + return None + + full_icon_name = "{0}-{1}".format(icon_name, icon_color) + if full_icon_name in _Cache.icons: + return _Cache.icons[full_icon_name] + + variants = [icon_name] + qta_instance = qtawesome._instance() + for key in qta_instance.charmap.keys(): + variants.append("{0}.{1}".format(key, icon_name)) + + icon = None + used_variant = None + for variant in variants: + try: + icon = qtawesome.icon(variant, color=icon_color) + used_variant = variant + break + except Exception: + pass + + if used_variant is None: + log.info("Didn't find icon \"{}\"".format(icon_name)) + + elif used_variant != icon_name: + log.debug("Icon \"{}\" was not found \"{}\" is used instead".format( + icon_name, used_variant + )) + + _Cache.icons[full_icon_name] = icon + return icon + + +def get_asset_icon_name(asset_doc, has_children=True): + icon_name = get_asset_icon_name_from_doc(asset_doc) + if icon_name: + return icon_name + return get_default_asset_icon_name(has_children) + + +def get_asset_icon_color(asset_doc): + icon_color = get_asset_icon_color_from_doc(asset_doc) + if icon_color: + return icon_color + return get_default_entity_icon_color() + + +def get_default_asset_icon_name(has_children): + if has_children: + return "fa.folder" + return "fa.folder-o" + + +def get_asset_icon_name_from_doc(asset_doc): + if asset_doc: + return asset_doc["data"].get("icon") + return None + + +def get_asset_icon_color_from_doc(asset_doc): + if asset_doc: + return asset_doc["data"].get("color") + return None + + +def get_asset_icon_by_name(icon_name, icon_color, has_children=False): + if not icon_name: + icon_name = get_default_asset_icon_name(has_children) + + if icon_color: + icon_color = QtGui.QColor(icon_color) + else: + icon_color = get_default_entity_icon_color() + icon = get_qta_icon_by_name_and_color(icon_name, icon_color) + if icon is not None: + return icon + return get_qta_icon_by_name_and_color( + get_default_asset_icon_name(has_children), + icon_color + ) + + +def get_asset_icon(asset_doc, has_children=False): + icon_name = get_asset_icon_name(asset_doc, has_children) + icon_color = get_asset_icon_color(asset_doc) + + return get_qta_icon_by_name_and_color(icon_name, icon_color) + + +def get_default_task_icon(color=None): + if color is None: + color = get_default_entity_icon_color() + return get_qta_icon_by_name_and_color("fa.male", color) + + +def get_task_icon(project_doc, asset_doc, task_name): + """Get icon for a task. + + Icon should be defined by task type which is stored on project. 
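+
+    Args:
+        project_doc (dict): Project document with task type definitions
+            under 'config.tasks'.
+        asset_doc (dict): Asset document with 'data.tasks' information.
+        task_name (str): Name of the task for which to resolve the icon.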
+    """
+
+    color = get_default_entity_icon_color()
+
+    tasks_info = asset_doc.get("data", {}).get("tasks") or {}
+    task_info = tasks_info.get(task_name) or {}
+    task_icon = task_info.get("icon")
+    if task_icon:
+        icon = get_qta_icon_by_name_and_color(task_icon, color)
+        if icon is not None:
+            return icon
+
+    task_type = task_info.get("type")
+    task_types = project_doc["config"]["tasks"]
+
+    task_type_info = task_types.get(task_type) or {}
+    task_type_icon = task_type_info.get("icon")
+    if task_type_icon:
+        # Fall back to the task type icon when the task itself
+        # did not provide a usable icon
+        icon = get_qta_icon_by_name_and_color(task_type_icon, color)
+        if icon is not None:
+            return icon
+    return get_default_task_icon(color)
+
+
+def iter_model_rows(model, column, include_root=False):
+    """Iterate over all row indices in a model"""
+    indices = [QtCore.QModelIndex()]  # start iteration at root
+
+    for index in indices:
+        # Add children to the iterations
+        child_rows = model.rowCount(index)
+        for child_row in range(child_rows):
+            child_index = model.index(child_row, column, index)
+            indices.append(child_index)
+
+        if not include_root and not index.isValid():
+            continue
+
+        yield index
+
+
+@contextlib.contextmanager
+def preserve_expanded_rows(tree_view, column=0, role=None):
+    """Preserves expanded row in QTreeView by column's data role.
+
+    This function is created to maintain the expand vs collapse status of
+    the model items. When refresh is triggered the items which are expanded
+    will stay expanded and vice versa.
+
+    Arguments:
+        tree_view (QWidgets.QTreeView): the tree view which is
+            nested in the application
+        column (int): the column to retrieve the data from
+        role (int): the role which dictates what will be returned
+
+    Returns:
+        None
+
+    """
+    if role is None:
+        role = QtCore.Qt.DisplayRole
+    model = tree_view.model()
+
+    expanded = set()
+
+    for index in iter_model_rows(model, column=column, include_root=False):
+        if tree_view.isExpanded(index):
+            value = index.data(role)
+            expanded.add(value)
+
+    try:
+        yield
+    finally:
+        if not expanded:
+            return
+
+        for index in iter_model_rows(model, column=column, include_root=False):
+            value = index.data(role)
+            state = value in expanded
+            if state:
+                tree_view.expand(index)
+            else:
+                tree_view.collapse(index)
+
+
+@contextlib.contextmanager
+def preserve_selection(tree_view, column=0, role=None, current_index=True):
+    """Preserves row selection in QTreeView by column's data role.
+
+    This function is created to maintain the selection status of
+    the model items. When refresh is triggered the items which were
+    selected will stay selected and vice versa.
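+
+    Arguments: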
+ + tree_view (QWidgets.QTreeView): the tree view nested in the application + column (int): the column to retrieve the data from + role (int): the role which dictates what will be returned + + Returns: + None + + """ + if role is None: + role = QtCore.Qt.DisplayRole + model = tree_view.model() + selection_model = tree_view.selectionModel() + flags = ( + QtCore.QItemSelectionModel.Select + | QtCore.QItemSelectionModel.Rows + ) + + if current_index: + current_index_value = tree_view.currentIndex().data(role) + else: + current_index_value = None + + selected_rows = selection_model.selectedRows() + if not selected_rows: + yield + return + + selected = set(row.data(role) for row in selected_rows) + try: + yield + finally: + if not selected: + return + + # Go through all indices, select the ones with similar data + for index in iter_model_rows(model, column=column, include_root=False): + value = index.data(role) + state = value in selected + if state: + tree_view.scrollTo(index) # Ensure item is visible + selection_model.select(index, flags) + + if current_index_value and value == current_index_value: + selection_model.setCurrentIndex( + index, selection_model.NoUpdate + ) + + +class DynamicQThread(QtCore.QThread): + """QThread which can run any function with argument and kwargs. + + Args: + func (function): Function which will be called. + args (tuple): Arguments which will be passed to function. + kwargs (tuple): Keyword arguments which will be passed to function. + parent (QObject): Parent of thread. + """ + def __init__(self, func, args=None, kwargs=None, parent=None): + super(DynamicQThread, self).__init__(parent) + if args is None: + args = tuple() + if kwargs is None: + kwargs = {} + self._func = func + self._args = args + self._kwargs = kwargs + + def run(self): + """Execute the function with arguments.""" + self._func(*self._args, **self._kwargs) + + +class WrappedCallbackItem: + """Structure to store information about callback and args/kwargs for it. + + Item can be used to execute callback in main thread which may be needed + for execution of Qt objects. + + Item store callback (callable variable), arguments and keyword arguments + for the callback. Item hold information about it's process. + """ + not_set = object() + _log = None + + def __init__(self, callback, *args, **kwargs): + self._done = False + self._exception = self.not_set + self._result = self.not_set + self._callback = callback + self._args = args + self._kwargs = kwargs + + def __call__(self): + self.execute() + + @property + def log(self): + cls = self.__class__ + if cls._log is None: + cls._log = Logger.get_logger(cls.__name__) + return cls._log + + @property + def done(self): + return self._done + + @property + def exception(self): + return self._exception + + @property + def result(self): + return self._result + + def execute(self): + """Execute callback and store its result. + + Method must be called from main thread. Item is marked as `done` + when callback execution finished. Store output of callback of exception + information when callback raises one. + """ + if self.done: + self.log.warning("- item is already processed") + return + + try: + result = self._callback(*self._args, **self._kwargs) + self._result = result + + except Exception as exc: + self._exception = exc + + finally: + self._done = True + + +def get_warning_pixmap(color=None): + """Warning icon as QPixmap. + + Args: + color(QtGui.QColor): Color that will be used to paint warning icon. 
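+
+    Returns:
+        QtGui.QPixmap: Warning image tinted with the given color.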
+ """ + src_image_path = get_image_path("warning.png") + src_image = QtGui.QImage(src_image_path) + if color is None: + color = get_objected_colors("delete-btn-bg").get_qcolor() + + return paint_image_with_color(src_image, color) diff --git a/client/ayon_core/tools/utils/models.py b/client/ayon_core/tools/utils/models.py new file mode 100644 index 0000000000..e60d85b4e4 --- /dev/null +++ b/client/ayon_core/tools/utils/models.py @@ -0,0 +1,402 @@ +import re +import logging + +import qtpy +from qtpy import QtCore, QtGui +from ayon_core.client import get_projects +from .constants import ( + PROJECT_IS_ACTIVE_ROLE, + PROJECT_NAME_ROLE, + DEFAULT_PROJECT_LABEL +) + +log = logging.getLogger(__name__) + + +class TreeModel(QtCore.QAbstractItemModel): + + Columns = list() + ItemRole = QtCore.Qt.UserRole + 1 + item_class = None + + def __init__(self, parent=None): + super(TreeModel, self).__init__(parent) + self._root_item = self.ItemClass() + + @property + def ItemClass(self): + if self.item_class is not None: + return self.item_class + return Item + + def rowCount(self, parent=None): + if parent is None or not parent.isValid(): + parent_item = self._root_item + else: + parent_item = parent.internalPointer() + return parent_item.childCount() + + def columnCount(self, parent): + return len(self.Columns) + + def data(self, index, role): + if not index.isValid(): + return None + + if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole: + item = index.internalPointer() + column = index.column() + + key = self.Columns[column] + return item.get(key, None) + + if role == self.ItemRole: + return index.internalPointer() + + def setData(self, index, value, role=QtCore.Qt.EditRole): + """Change the data on the items. + + Returns: + bool: Whether the edit was successful + """ + + if index.isValid(): + if role == QtCore.Qt.EditRole: + + item = index.internalPointer() + column = index.column() + key = self.Columns[column] + item[key] = value + + # passing `list()` for PyQt5 (see PYSIDE-462) + if qtpy.API in ("pyqt4", "pyside"): + self.dataChanged.emit(index, index) + else: + self.dataChanged.emit(index, index, [role]) + + # must return true if successful + return True + + return False + + def setColumns(self, keys): + assert isinstance(keys, (list, tuple)) + self.Columns = keys + + def headerData(self, section, orientation, role): + + if role == QtCore.Qt.DisplayRole: + if section < len(self.Columns): + return self.Columns[section] + + super(TreeModel, self).headerData(section, orientation, role) + + def flags(self, index): + flags = QtCore.Qt.ItemIsEnabled + + item = index.internalPointer() + if item.get("enabled", True): + flags |= QtCore.Qt.ItemIsSelectable + + return flags + + def parent(self, index): + + item = index.internalPointer() + parent_item = item.parent() + + # If it has no parents we return invalid + if parent_item == self._root_item or not parent_item: + return QtCore.QModelIndex() + + return self.createIndex(parent_item.row(), 0, parent_item) + + def index(self, row, column, parent=None): + """Return index for row/column under parent""" + + if parent is None or not parent.isValid(): + parent_item = self._root_item + else: + parent_item = parent.internalPointer() + + child_item = parent_item.child(row) + if child_item: + return self.createIndex(row, column, child_item) + else: + return QtCore.QModelIndex() + + def add_child(self, item, parent=None): + if parent is None: + parent = self._root_item + + parent.add_child(item) + + def column_name(self, column): + """Return column key by 
index""" + + if column < len(self.Columns): + return self.Columns[column] + + def clear(self): + self.beginResetModel() + self._root_item = self.ItemClass() + self.endResetModel() + + +class Item(dict): + """An item that can be represented in a tree view using `TreeModel`. + + The item can store data just like a regular dictionary. + + >>> data = {"name": "John", "score": 10} + >>> item = Item(data) + >>> assert item["name"] == "John" + + """ + + def __init__(self, data=None): + super(Item, self).__init__() + + self._children = list() + self._parent = None + + if data is not None: + assert isinstance(data, dict) + self.update(data) + + def childCount(self): + return len(self._children) + + def child(self, row): + + if row >= len(self._children): + log.warning("Invalid row as child: {0}".format(row)) + return + + return self._children[row] + + def children(self): + return self._children + + def parent(self): + return self._parent + + def row(self): + """ + Returns: + int: Index of this item under parent""" + if self._parent is not None: + siblings = self.parent().children() + return siblings.index(self) + return -1 + + def add_child(self, child): + """Add a child to this item""" + child._parent = self + self._children.append(child) + + +class RecursiveSortFilterProxyModel(QtCore.QSortFilterProxyModel): + """Recursive proxy model. + Item is not filtered if any children match the filter. + Use case: Filtering by string - parent won't be filtered if does not match + the filter string but first checks if any children does. + """ + + def __init__(self, *args, **kwargs): + super(RecursiveSortFilterProxyModel, self).__init__(*args, **kwargs) + recursive_enabled = False + if hasattr(self, "setRecursiveFilteringEnabled"): + self.setRecursiveFilteringEnabled(True) + recursive_enabled = True + self._recursive_enabled = recursive_enabled + + def filterAcceptsRow(self, row, parent_index): + if hasattr(self, "filterRegExp"): + regex = self.filterRegExp() + else: + regex = self.filterRegularExpression() + + pattern = regex.pattern() + if pattern: + model = self.sourceModel() + source_index = model.index( + row, self.filterKeyColumn(), parent_index + ) + if source_index.isValid(): + pattern = regex.pattern() + + # Check current index itself + value = model.data(source_index, self.filterRole()) + matched = bool(re.search(pattern, value, re.IGNORECASE)) + if matched or self._recursive_enabled: + return matched + + rows = model.rowCount(source_index) + for idx in range(rows): + if self.filterAcceptsRow(idx, source_index): + return True + + # Otherwise filter it + return False + + return super(RecursiveSortFilterProxyModel, self).filterAcceptsRow( + row, parent_index + ) + + +# TODO remove 'ProjectModel' and 'ProjectSortFilterProxy' classes +# - replace their usage with current 'ayon_utils' models +class ProjectModel(QtGui.QStandardItemModel): + def __init__( + self, only_active=True, add_default_project=False, *args, **kwargs + ): + super(ProjectModel, self).__init__(*args, **kwargs) + + self._only_active = only_active + self._add_default_project = add_default_project + + self._default_item = None + self._items_by_name = {} + self._refreshed = False + + def set_default_project_available(self, available=True): + if available is None: + available = not self._add_default_project + + if self._add_default_project == available: + return + + self._add_default_project = available + if not available and self._default_item is not None: + root_item = self.invisibleRootItem() + 
root_item.removeRow(self._default_item.row()) + self._default_item = None + + def set_only_active(self, only_active=True): + if only_active is None: + only_active = not self._only_active + + if self._only_active == only_active: + return + + self._only_active = only_active + + if self._refreshed: + self.refresh() + + def project_name_is_available(self, project_name): + """Check availability of project name in current items.""" + return project_name in self._items_by_name + + def refresh(self): + # Change '_refreshed' state + self._refreshed = True + new_items = [] + # Add default item to model if should + if self._add_default_project and self._default_item is None: + item = QtGui.QStandardItem(DEFAULT_PROJECT_LABEL) + item.setData(None, PROJECT_NAME_ROLE) + item.setData(True, PROJECT_IS_ACTIVE_ROLE) + new_items.append(item) + self._default_item = item + + project_names = set() + project_docs = get_projects( + inactive=not self._only_active, + fields=["name", "data.active"] + ) + for project_doc in project_docs: + project_name = project_doc["name"] + project_names.add(project_name) + if project_name in self._items_by_name: + item = self._items_by_name[project_name] + else: + item = QtGui.QStandardItem(project_name) + + self._items_by_name[project_name] = item + new_items.append(item) + + is_active = project_doc.get("data", {}).get("active", True) + item.setData(project_name, PROJECT_NAME_ROLE) + item.setData(is_active, PROJECT_IS_ACTIVE_ROLE) + + if not is_active: + font = item.font() + font.setItalic(True) + item.setFont(font) + + root_item = self.invisibleRootItem() + for project_name in tuple(self._items_by_name.keys()): + if project_name not in project_names: + item = self._items_by_name.pop(project_name) + root_item.removeRow(item.row()) + + if new_items: + root_item.appendRows(new_items) + + def find_project(self, project_name): + """ + Get index of 'project_name' value. 
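+        Returns None when the project is not available in the model.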
+ + Args: + project_name (str): + Returns: + (QModelIndex) + """ + val = self._items_by_name.get(project_name) + if val: + return self.indexFromItem(val) + + +class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(ProjectSortFilterProxy, self).__init__(*args, **kwargs) + self._filter_enabled = True + # Disable case sensitivity + self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + def lessThan(self, left_index, right_index): + if left_index.data(PROJECT_NAME_ROLE) is None: + return True + + if right_index.data(PROJECT_NAME_ROLE) is None: + return False + + left_is_active = left_index.data(PROJECT_IS_ACTIVE_ROLE) + right_is_active = right_index.data(PROJECT_IS_ACTIVE_ROLE) + if right_is_active == left_is_active: + return super(ProjectSortFilterProxy, self).lessThan( + left_index, right_index + ) + + if left_is_active: + return True + return False + + def filterAcceptsRow(self, source_row, source_parent): + index = self.sourceModel().index(source_row, 0, source_parent) + string_pattern = self.filterRegularExpression().pattern() + if self._filter_enabled: + result = self._custom_index_filter(index) + if result is not None: + project_name = index.data(PROJECT_NAME_ROLE) + if project_name is None: + return result + return string_pattern.lower() in project_name.lower() + + return super(ProjectSortFilterProxy, self).filterAcceptsRow( + source_row, source_parent + ) + + def _custom_index_filter(self, index): + is_active = bool(index.data(PROJECT_IS_ACTIVE_ROLE)) + + return is_active + + def is_filter_enabled(self): + return self._filter_enabled + + def set_filter_enabled(self, value): + self._filter_enabled = value + self.invalidateFilter() diff --git a/openpype/tools/utils/multiselection_combobox.py b/client/ayon_core/tools/utils/multiselection_combobox.py similarity index 100% rename from openpype/tools/utils/multiselection_combobox.py rename to client/ayon_core/tools/utils/multiselection_combobox.py diff --git a/openpype/widgets/nice_checkbox.py b/client/ayon_core/tools/utils/nice_checkbox.py similarity index 99% rename from openpype/widgets/nice_checkbox.py rename to client/ayon_core/tools/utils/nice_checkbox.py index 651187a8ab..06845c397a 100644 --- a/openpype/widgets/nice_checkbox.py +++ b/client/ayon_core/tools/utils/nice_checkbox.py @@ -1,7 +1,7 @@ from math import floor, sqrt, ceil from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import get_objected_colors +from ayon_core.style import get_objected_colors class NiceCheckbox(QtWidgets.QFrame): diff --git a/openpype/tools/utils/overlay_messages.py b/client/ayon_core/tools/utils/overlay_messages.py similarity index 99% rename from openpype/tools/utils/overlay_messages.py rename to client/ayon_core/tools/utils/overlay_messages.py index 4da266bcf7..ef4ac4cf11 100644 --- a/openpype/tools/utils/overlay_messages.py +++ b/client/ayon_core/tools/utils/overlay_messages.py @@ -2,7 +2,7 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import get_objected_colors +from ayon_core.style import get_objected_colors from .lib import set_style_property diff --git a/openpype/widgets/sliders.py b/client/ayon_core/tools/utils/sliders.py similarity index 100% rename from openpype/widgets/sliders.py rename to client/ayon_core/tools/utils/sliders.py diff --git a/client/ayon_core/tools/utils/tasks_widget.py b/client/ayon_core/tools/utils/tasks_widget.py new file mode 100644 index 0000000000..12e074f910 --- /dev/null +++ b/client/ayon_core/tools/utils/tasks_widget.py @@ -0,0 
+1,303 @@ +from qtpy import QtWidgets, QtCore, QtGui +import qtawesome + +from ayon_core.client import ( + get_project, + get_asset_by_id, +) +from ayon_core.style import get_disabled_entity_icon_color +from ayon_core.tools.utils.lib import get_task_icon + +from .views import DeselectableTreeView + + +TASK_NAME_ROLE = QtCore.Qt.UserRole + 1 +TASK_TYPE_ROLE = QtCore.Qt.UserRole + 2 +TASK_ORDER_ROLE = QtCore.Qt.UserRole + 3 +TASK_ASSIGNEE_ROLE = QtCore.Qt.UserRole + 4 + + +class _TasksModel(QtGui.QStandardItemModel): + """A model listing the tasks combined for a list of assets""" + + def __init__(self, dbcon, parent=None): + super(_TasksModel, self).__init__(parent=parent) + self.dbcon = dbcon + self.setHeaderData( + 0, QtCore.Qt.Horizontal, "Tasks", QtCore.Qt.DisplayRole + ) + + self._no_tasks_icon = qtawesome.icon( + "fa.exclamation-circle", + color=get_disabled_entity_icon_color() + ) + self._cached_icons = {} + self._project_doc = {} + + self._empty_tasks_item = None + self._last_asset_id = None + self._loaded_project_name = None + + def _context_is_valid(self): + if self._get_current_project(): + return True + return False + + def refresh(self): + self._refresh_project_doc() + self.set_asset_id(self._last_asset_id) + + def _refresh_project_doc(self): + # Get the project configured icons from database + project_doc = {} + if self._context_is_valid(): + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + self._loaded_project_name = self._get_current_project() + self._project_doc = project_doc + + def headerData(self, section, orientation, role=None): + if role is None: + role = QtCore.Qt.EditRole + # Show nice labels in the header + if section == 0: + if ( + role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole) + and orientation == QtCore.Qt.Horizontal + ): + return "Tasks" + + return super(_TasksModel, self).headerData(section, orientation, role) + + def _get_current_project(self): + return self.dbcon.Session.get("AVALON_PROJECT") + + def set_asset_id(self, asset_id): + asset_doc = None + if asset_id and self._context_is_valid(): + project_name = self._get_current_project() + asset_doc = get_asset_by_id( + project_name, asset_id, fields=["data.tasks"] + ) + self._set_asset(asset_doc) + + def _get_empty_task_item(self): + if self._empty_tasks_item is None: + item = QtGui.QStandardItem("No task") + item.setData(self._no_tasks_icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + self._empty_tasks_item = item + return self._empty_tasks_item + + def _set_asset(self, asset_doc): + """Set assets to track by their database id + + Arguments: + asset_doc (dict): Asset document from MongoDB. 
+ """ + if self._loaded_project_name != self._get_current_project(): + self._refresh_project_doc() + + asset_tasks = {} + self._last_asset_id = None + if asset_doc: + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + self._last_asset_id = asset_doc["_id"] + + root_item = self.invisibleRootItem() + root_item.removeRows(0, root_item.rowCount()) + + items = [] + + for task_name, task_info in asset_tasks.items(): + task_type = task_info.get("type") + task_order = task_info.get("order") + icon = get_task_icon(self._project_doc, asset_doc, task_name) + + task_assignees = set() + assignees_data = task_info.get("assignees") or [] + for assignee in assignees_data: + username = assignee.get("username") + if username: + task_assignees.add(username) + + label = "{} ({})".format(task_name, task_type or "type N/A") + item = QtGui.QStandardItem(label) + item.setData(task_name, TASK_NAME_ROLE) + item.setData(task_type, TASK_TYPE_ROLE) + item.setData(task_order, TASK_ORDER_ROLE) + item.setData(task_assignees, TASK_ASSIGNEE_ROLE) + item.setData(icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) + items.append(item) + + if not items: + item = QtGui.QStandardItem("No task") + item.setData(self._no_tasks_icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + items.append(item) + + root_item.appendRows(items) + + +class _TasksProxyModel(QtCore.QSortFilterProxyModel): + def lessThan(self, x_index, y_index): + x_order = x_index.data(TASK_ORDER_ROLE) + y_order = y_index.data(TASK_ORDER_ROLE) + if x_order is not None and y_order is not None: + if x_order < y_order: + return True + if x_order > y_order: + return False + + elif x_order is None and y_order is not None: + return True + + elif y_order is None and x_order is not None: + return False + + x_name = x_index.data(QtCore.Qt.DisplayRole) + y_name = y_index.data(QtCore.Qt.DisplayRole) + if x_name == y_name: + return True + + if x_name == tuple(sorted((x_name, y_name)))[0]: + return True + return False + + +class TasksWidget(QtWidgets.QWidget): + """Widget showing active Tasks + + Deprecated: + This widget will be removed soon. Please do not use it in new code. + """ + + task_changed = QtCore.Signal() + + def __init__(self, dbcon, parent=None): + self._dbcon = dbcon + + super(TasksWidget, self).__init__(parent) + + tasks_view = DeselectableTreeView(self) + tasks_view.setIndentation(0) + tasks_view.setSortingEnabled(True) + tasks_view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) + + header_view = tasks_view.header() + header_view.setSortIndicator(0, QtCore.Qt.AscendingOrder) + + tasks_model = self._create_source_model() + tasks_proxy = self._create_proxy_model(tasks_model) + tasks_view.setModel(tasks_proxy) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(tasks_view) + + selection_model = tasks_view.selectionModel() + selection_model.selectionChanged.connect(self._on_task_change) + + self._tasks_model = tasks_model + self._tasks_proxy = tasks_proxy + self._tasks_view = tasks_view + + self._last_selected_task_name = None + + def _create_source_model(self): + """Create source model of tasks widget. + + Model must have available 'refresh' method and 'set_asset_id' to change + context of asset. 
+ """ + return _TasksModel(self._dbcon) + + def _create_proxy_model(self, source_model): + proxy = _TasksProxyModel() + proxy.setSourceModel(source_model) + return proxy + + def refresh(self): + self._tasks_model.refresh() + + def set_asset_id(self, asset_id): + # Try and preserve the last selected task and reselect it + # after switching assets. If there's no currently selected + # asset keep whatever the "last selected" was prior to it. + current = self.get_selected_task_name() + if current: + self._last_selected_task_name = current + + self._tasks_model.set_asset_id(asset_id) + + if self._last_selected_task_name: + self.select_task_name(self._last_selected_task_name) + + # Force a task changed emit. + self.task_changed.emit() + + def _clear_selection(self): + selection_model = self._tasks_view.selectionModel() + selection_model.clearSelection() + + def select_task_name(self, task_name): + """Select a task by name. + + If the task does not exist in the current model then selection is only + cleared. + + Args: + task (str): Name of the task to select. + + """ + task_view_model = self._tasks_view.model() + if not task_view_model: + return + + # Clear selection + selection_model = self._tasks_view.selectionModel() + selection_model.clearSelection() + + # Select the task + mode = ( + QtCore.QItemSelectionModel.Select + | QtCore.QItemSelectionModel.Rows + ) + for row in range(task_view_model.rowCount()): + index = task_view_model.index(row, 0) + name = index.data(TASK_NAME_ROLE) + if name == task_name: + selection_model.select(index, mode) + + # Set the currently active index + self._tasks_view.setCurrentIndex(index) + break + + last_selected_task_name = self.get_selected_task_name() + if last_selected_task_name: + self._last_selected_task_name = last_selected_task_name + + def get_selected_task_name(self): + """Return name of task at current index (selected) + + Returns: + str: Name of the current task. 
+ + """ + index = self._tasks_view.currentIndex() + selection_model = self._tasks_view.selectionModel() + if index.isValid() and selection_model.isSelected(index): + return index.data(TASK_NAME_ROLE) + return None + + def get_selected_task_type(self): + index = self._tasks_view.currentIndex() + selection_model = self._tasks_view.selectionModel() + if index.isValid() and selection_model.isSelected(index): + return index.data(TASK_TYPE_ROLE) + return None + + def _on_task_change(self): + self.task_changed.emit() diff --git a/openpype/tools/utils/thumbnail_paint_widget.py b/client/ayon_core/tools/utils/thumbnail_paint_widget.py similarity index 99% rename from openpype/tools/utils/thumbnail_paint_widget.py rename to client/ayon_core/tools/utils/thumbnail_paint_widget.py index 130942aaf0..9dbc2bcdd0 100644 --- a/openpype/tools/utils/thumbnail_paint_widget.py +++ b/client/ayon_core/tools/utils/thumbnail_paint_widget.py @@ -1,6 +1,6 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import get_objected_colors +from ayon_core.style import get_objected_colors from .lib import paint_image_with_color from .images import get_image diff --git a/openpype/tools/utils/views.py b/client/ayon_core/tools/utils/views.py similarity index 97% rename from openpype/tools/utils/views.py rename to client/ayon_core/tools/utils/views.py index 596a47ede9..b501f1ff11 100644 --- a/openpype/tools/utils/views.py +++ b/client/ayon_core/tools/utils/views.py @@ -1,5 +1,5 @@ -from openpype.resources import get_image_path -from openpype.tools.flickcharm import FlickCharm +from ayon_core.resources import get_image_path +from ayon_core.tools.flickcharm import FlickCharm from qtpy import QtWidgets, QtCore, QtGui, QtSvg diff --git a/client/ayon_core/tools/utils/widgets.py b/client/ayon_core/tools/utils/widgets.py new file mode 100644 index 0000000000..1d4f85246f --- /dev/null +++ b/client/ayon_core/tools/utils/widgets.py @@ -0,0 +1,891 @@ +import logging + +from qtpy import QtWidgets, QtCore, QtGui +import qargparse +import qtawesome + +from ayon_core.style import ( + get_objected_colors, + get_style_image_path, + get_default_tools_icon_color, +) +from ayon_core.lib.attribute_definitions import AbstractAttrDef + +from .lib import get_qta_icon_by_name_and_color + +log = logging.getLogger(__name__) + + +class FocusSpinBox(QtWidgets.QSpinBox): + """QSpinBox which allow scroll wheel changes only in active state.""" + + def __init__(self, *args, **kwargs): + super(FocusSpinBox, self).__init__(*args, **kwargs) + self.setFocusPolicy(QtCore.Qt.StrongFocus) + + def wheelEvent(self, event): + if not self.hasFocus(): + event.ignore() + else: + super(FocusSpinBox, self).wheelEvent(event) + + +class FocusDoubleSpinBox(QtWidgets.QDoubleSpinBox): + """QDoubleSpinBox which allow scroll wheel changes only in active state.""" + + def __init__(self, *args, **kwargs): + super(FocusDoubleSpinBox, self).__init__(*args, **kwargs) + self.setFocusPolicy(QtCore.Qt.StrongFocus) + + def wheelEvent(self, event): + if not self.hasFocus(): + event.ignore() + else: + super(FocusDoubleSpinBox, self).wheelEvent(event) + + +class ComboBox(QtWidgets.QComboBox): + """Base of combobox with pre-implement changes used in tools. + + Combobox is using styled delegate by default so stylesheets are propagated. + + Items are not changed on scroll until the combobox is in focus. 
+ """ + + def __init__(self, *args, **kwargs): + super(ComboBox, self).__init__(*args, **kwargs) + delegate = QtWidgets.QStyledItemDelegate() + self.setItemDelegate(delegate) + self.setFocusPolicy(QtCore.Qt.StrongFocus) + + self._delegate = delegate + + def wheelEvent(self, event): + if self.hasFocus(): + return super(ComboBox, self).wheelEvent(event) + + +class CustomTextComboBox(ComboBox): + """Combobox which can have different text showed.""" + + def __init__(self, *args, **kwargs): + self._custom_text = None + super(CustomTextComboBox, self).__init__(*args, **kwargs) + + def set_custom_text(self, text=None): + if self._custom_text != text: + self._custom_text = text + self.repaint() + + def paintEvent(self, event): + painter = QtWidgets.QStylePainter(self) + option = QtWidgets.QStyleOptionComboBox() + self.initStyleOption(option) + if self._custom_text is not None: + option.currentText = self._custom_text + painter.drawComplexControl(QtWidgets.QStyle.CC_ComboBox, option) + painter.drawControl(QtWidgets.QStyle.CE_ComboBoxLabel, option) + + +class PlaceholderLineEdit(QtWidgets.QLineEdit): + """Set placeholder color of QLineEdit in Qt 5.12 and higher.""" + def __init__(self, *args, **kwargs): + super(PlaceholderLineEdit, self).__init__(*args, **kwargs) + # Change placeholder palette color + if hasattr(QtGui.QPalette, "PlaceholderText"): + filter_palette = self.palette() + color_obj = get_objected_colors("font") + color = color_obj.get_qcolor() + color.setAlpha(67) + filter_palette.setColor( + QtGui.QPalette.PlaceholderText, + color + ) + self.setPalette(filter_palette) + + +class ExpandingTextEdit(QtWidgets.QTextEdit): + """QTextEdit which does not have sroll area but expands height.""" + + def __init__(self, parent=None): + super(ExpandingTextEdit, self).__init__(parent) + + size_policy = self.sizePolicy() + size_policy.setHeightForWidth(True) + size_policy.setVerticalPolicy(QtWidgets.QSizePolicy.Preferred) + self.setSizePolicy(size_policy) + + self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) + self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) + + doc = self.document() + doc.contentsChanged.connect(self._on_doc_change) + + def _on_doc_change(self): + self.updateGeometry() + + def hasHeightForWidth(self): + return True + + def heightForWidth(self, width): + margins = self.contentsMargins() + + document_width = 0 + if width >= margins.left() + margins.right(): + document_width = width - margins.left() - margins.right() + + document = self.document().clone() + document.setTextWidth(document_width) + + return margins.top() + document.size().height() + margins.bottom() + + def sizeHint(self): + width = super(ExpandingTextEdit, self).sizeHint().width() + return QtCore.QSize(width, self.heightForWidth(width)) + + +class BaseClickableFrame(QtWidgets.QFrame): + """Widget that catch left mouse click and can trigger a callback. + + Callback is defined by overriding `_mouse_release_callback`. 
+ """ + def __init__(self, parent): + super(BaseClickableFrame, self).__init__(parent) + + self._mouse_pressed = False + + def _mouse_release_callback(self): + pass + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self._mouse_pressed = True + super(BaseClickableFrame, self).mousePressEvent(event) + + def mouseReleaseEvent(self, event): + if self._mouse_pressed: + self._mouse_pressed = False + if self.rect().contains(event.pos()): + self._mouse_release_callback() + + super(BaseClickableFrame, self).mouseReleaseEvent(event) + + +class ClickableFrame(BaseClickableFrame): + """Extended clickable frame which triggers 'clicked' signal.""" + clicked = QtCore.Signal() + + def _mouse_release_callback(self): + self.clicked.emit() + + +class ClickableLabel(QtWidgets.QLabel): + """Label that catch left mouse click and can trigger 'clicked' signal.""" + clicked = QtCore.Signal() + + def __init__(self, parent): + super(ClickableLabel, self).__init__(parent) + + self._mouse_pressed = False + + def mousePressEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self._mouse_pressed = True + super(ClickableLabel, self).mousePressEvent(event) + + def mouseReleaseEvent(self, event): + if self._mouse_pressed: + self._mouse_pressed = False + if self.rect().contains(event.pos()): + self.clicked.emit() + + super(ClickableLabel, self).mouseReleaseEvent(event) + + +class ExpandBtnLabel(QtWidgets.QLabel): + """Label showing expand icon meant for ExpandBtn.""" + state_changed = QtCore.Signal() + + + def __init__(self, parent): + super(ExpandBtnLabel, self).__init__(parent) + self._source_collapsed_pix = self._create_collapsed_pixmap() + self._source_expanded_pix = self._create_expanded_pixmap() + + self._current_image = self._source_collapsed_pix + self._collapsed = True + + def _create_collapsed_pixmap(self): + return QtGui.QPixmap( + get_style_image_path("branch_closed") + ) + + def _create_expanded_pixmap(self): + return QtGui.QPixmap( + get_style_image_path("branch_open") + ) + + @property + def collapsed(self): + return self._collapsed + + def set_collapsed(self, collapsed=None): + if collapsed is None: + collapsed = not self._collapsed + if self._collapsed == collapsed: + return + self._collapsed = collapsed + if collapsed: + self._current_image = self._source_collapsed_pix + else: + self._current_image = self._source_expanded_pix + self._set_resized_pix() + self.state_changed.emit() + + def resizeEvent(self, event): + self._set_resized_pix() + super(ExpandBtnLabel, self).resizeEvent(event) + + def _set_resized_pix(self): + size = int(self.fontMetrics().height() / 2) + if size < 1: + size = 1 + size += size % 2 + self.setPixmap( + self._current_image.scaled( + size, + size, + QtCore.Qt.KeepAspectRatio, + QtCore.Qt.SmoothTransformation + ) + ) + + +class ExpandBtn(ClickableFrame): + state_changed = QtCore.Signal() + + def __init__(self, parent=None): + super(ExpandBtn, self).__init__(parent) + + pixmap_label = self._create_pix_widget(self) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(pixmap_label) + + pixmap_label.state_changed.connect(self.state_changed) + + self._pixmap_label = pixmap_label + + def _create_pix_widget(self, parent=None): + if parent is None: + parent = self + return ExpandBtnLabel(parent) + + @property + def collapsed(self): + return self._pixmap_label.collapsed + + def set_collapsed(self, collapsed=None): + self._pixmap_label.set_collapsed(collapsed) + + +class 
ClassicExpandBtnLabel(ExpandBtnLabel): + def _create_collapsed_pixmap(self): + return QtGui.QPixmap( + get_style_image_path("right_arrow") + ) + + def _create_expanded_pixmap(self): + return QtGui.QPixmap( + get_style_image_path("down_arrow") + ) + + +class ClassicExpandBtn(ExpandBtn): + """Same as 'ExpandBtn' but with arrow images.""" + + def _create_pix_widget(self, parent=None): + if parent is None: + parent = self + return ClassicExpandBtnLabel(parent) + + +class ImageButton(QtWidgets.QPushButton): + """PushButton with icon and size of font. + + Using font metrics height as icon size reference. + + TODO: + - handle changes of screen (different resolution) + """ + + def __init__(self, *args, **kwargs): + super(ImageButton, self).__init__(*args, **kwargs) + self.setObjectName("ImageButton") + + def _change_size(self): + font_height = self.fontMetrics().height() + self.setIconSize(QtCore.QSize(font_height, font_height)) + + def showEvent(self, event): + super(ImageButton, self).showEvent(event) + + self._change_size() + + def sizeHint(self): + return self.iconSize() + + +class IconButton(QtWidgets.QPushButton): + """PushButton with icon and size of font. + + Using font metrics height as icon size reference. + """ + + def __init__(self, *args, **kwargs): + super(IconButton, self).__init__(*args, **kwargs) + self.setObjectName("IconButton") + + def sizeHint(self): + result = super(IconButton, self).sizeHint() + icon_h = self.iconSize().height() + font_height = self.fontMetrics().height() + text_set = bool(self.text()) + if not text_set and icon_h < font_height: + new_size = result.height() - icon_h + font_height + result.setHeight(new_size) + result.setWidth(new_size) + + return result + + +class PixmapLabel(QtWidgets.QLabel): + """Label resizing image to height of font.""" + def __init__(self, pixmap, parent): + super(PixmapLabel, self).__init__(parent) + self._empty_pixmap = QtGui.QPixmap(0, 0) + self._source_pixmap = pixmap + + self._last_width = 0 + self._last_height = 0 + + def set_source_pixmap(self, pixmap): + """Change source image.""" + self._source_pixmap = pixmap + self._set_resized_pix() + + def _get_pix_size(self): + size = self.fontMetrics().height() + size += size % 2 + return size, size + + def minimumSizeHint(self): + width, height = self._get_pix_size() + if width != self._last_width or height != self._last_height: + self._set_resized_pix() + return QtCore.QSize(width, height) + + def _set_resized_pix(self): + if self._source_pixmap is None: + self.setPixmap(self._empty_pixmap) + return + width, height = self._get_pix_size() + self.setPixmap( + self._source_pixmap.scaled( + width, + height, + QtCore.Qt.KeepAspectRatio, + QtCore.Qt.SmoothTransformation + ) + ) + self._last_width = width + self._last_height = height + + def resizeEvent(self, event): + self._set_resized_pix() + super(PixmapLabel, self).resizeEvent(event) + + +class PixmapButtonPainter(QtWidgets.QWidget): + def __init__(self, pixmap, parent): + super(PixmapButtonPainter, self).__init__(parent) + + self._pixmap = pixmap + self._cached_pixmap = None + self._disabled = False + + def resizeEvent(self, event): + super(PixmapButtonPainter, self).resizeEvent(event) + self._cached_pixmap = None + self.repaint() + + def set_enabled(self, enabled): + if self._disabled != enabled: + return + self._disabled = not enabled + self.repaint() + + def set_pixmap(self, pixmap): + self._pixmap = pixmap + self._cached_pixmap = None + + self.repaint() + + def _cache_pixmap(self): + size = self.size() + self._cached_pixmap = 
self._pixmap.scaled( + size.width(), + size.height(), + QtCore.Qt.KeepAspectRatio, + QtCore.Qt.SmoothTransformation + ) + + def paintEvent(self, event): + painter = QtGui.QPainter() + painter.begin(self) + if self._pixmap is None: + painter.end() + return + + render_hints = ( + QtGui.QPainter.Antialiasing + | QtGui.QPainter.SmoothPixmapTransform + ) + if hasattr(QtGui.QPainter, "HighQualityAntialiasing"): + render_hints |= QtGui.QPainter.HighQualityAntialiasing + + painter.setRenderHints(render_hints) + if self._cached_pixmap is None: + self._cache_pixmap() + + if self._disabled: + painter.setOpacity(0.5) + painter.drawPixmap(0, 0, self._cached_pixmap) + + painter.end() + + +class PixmapButton(ClickableFrame): + def __init__(self, pixmap=None, parent=None): + super(PixmapButton, self).__init__(parent) + + button_painter = PixmapButtonPainter(pixmap, self) + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(2, 2, 2, 2) + + self._button_painter = button_painter + + def setContentsMargins(self, *args): + layout = self.layout() + layout.setContentsMargins(*args) + self._update_painter_geo() + + def setEnabled(self, enabled): + self._button_painter.set_enabled(enabled) + super(PixmapButton, self).setEnabled(enabled) + + def set_pixmap(self, pixmap): + self._button_painter.set_pixmap(pixmap) + + def sizeHint(self): + font_height = self.fontMetrics().height() + return QtCore.QSize(font_height, font_height) + + def resizeEvent(self, event): + super(PixmapButton, self).resizeEvent(event) + self._update_painter_geo() + + def showEvent(self, event): + super(PixmapButton, self).showEvent(event) + self._update_painter_geo() + + def _update_painter_geo(self): + size = self.size() + layout = self.layout() + left, top, right, bottom = layout.getContentsMargins() + self._button_painter.setGeometry( + left, + top, + size.width() - (left + right), + size.height() - (top + bottom) + ) + + +class OptionalMenu(QtWidgets.QMenu): + """A subclass of `QtWidgets.QMenu` to work with `OptionalAction` + + This menu has reimplemented `mouseReleaseEvent`, `mouseMoveEvent` and + `leaveEvent` to provide better action highlighting and triggering for + actions that were instances of `QtWidgets.QWidgetAction`. + + """ + def mouseReleaseEvent(self, event): + """Emit option clicked signal if mouse released on it""" + active = self.actionAt(event.pos()) + if active and active.use_option: + option = active.widget.option + if option.is_hovered(event.globalPos()): + option.clicked.emit() + super(OptionalMenu, self).mouseReleaseEvent(event) + + def mouseMoveEvent(self, event): + """Add highlight to active action""" + active = self.actionAt(event.pos()) + for action in self.actions(): + action.set_highlight(action is active, event.globalPos()) + super(OptionalMenu, self).mouseMoveEvent(event) + + def leaveEvent(self, event): + """Remove highlight from all actions""" + for action in self.actions(): + action.set_highlight(False) + super(OptionalMenu, self).leaveEvent(event) + + +class OptionalAction(QtWidgets.QWidgetAction): + """Menu action with option box + + A menu action like Maya's menu item with option box, implemented by + subclassing `QtWidgets.QWidgetAction`. 
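+
+    Args:
+        label (str): Text shown as the action label.
+        icon (QtGui.QIcon): Icon shown next to the label.
+        use_option (bool): Whether the option box is shown.
+        parent (QtWidgets.QWidget): Parent object of the action.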
+ + """ + + def __init__(self, label, icon, use_option, parent): + super(OptionalAction, self).__init__(parent) + self.label = label + self.icon = icon + self.use_option = use_option + self.option_tip = "" + self.optioned = False + self.widget = None + + def createWidget(self, parent): + widget = OptionalActionWidget(self.label, parent) + self.widget = widget + + if self.icon: + widget.setIcon(self.icon) + + if self.use_option: + widget.option.clicked.connect(self.on_option) + widget.option.setToolTip(self.option_tip) + else: + widget.option.setVisible(False) + + return widget + + def set_option_tip(self, options): + sep = "\n\n" + if not options or not isinstance(options[0], AbstractAttrDef): + mak = (lambda opt: opt["name"] + " :\n " + opt["help"]) + self.option_tip = sep.join(mak(opt) for opt in options) + return + + option_items = [] + for option in options: + option_lines = [] + if option.label: + option_lines.append( + "{} ({}) :".format(option.label, option.key) + ) + else: + option_lines.append("{} :".format(option.key)) + + if option.tooltip: + option_lines.append(" - {}".format(option.tooltip)) + option_items.append("\n".join(option_lines)) + + self.option_tip = sep.join(option_items) + + def on_option(self): + self.optioned = True + + def set_highlight(self, state, global_pos=None): + option_state = False + if self.use_option: + option_state = self.widget.option.is_hovered(global_pos) + self.widget.set_hover_properties(state, option_state) + + +class OptionalActionWidget(QtWidgets.QWidget): + """Main widget class for `OptionalAction`""" + + def __init__(self, label, parent=None): + super(OptionalActionWidget, self).__init__(parent) + + body_widget = QtWidgets.QWidget(self) + body_widget.setObjectName("OptionalActionBody") + + icon = QtWidgets.QLabel(body_widget) + label = QtWidgets.QLabel(label, body_widget) + # (NOTE) For removing ugly QLable shadow FX when highlighted in Nuke. 
+ # See https://stackoverflow.com/q/52838690/4145300 + label.setStyle(QtWidgets.QStyleFactory.create("Plastique")) + option = OptionBox(body_widget) + option.setObjectName("OptionalActionOption") + + icon.setFixedSize(24, 16) + option.setFixedSize(30, 30) + + body_layout = QtWidgets.QHBoxLayout(body_widget) + body_layout.setContentsMargins(4, 0, 4, 0) + body_layout.setSpacing(2) + body_layout.addWidget(icon) + body_layout.addWidget(label) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(2, 1, 2, 1) + layout.setSpacing(0) + layout.addWidget(body_widget) + layout.addWidget(option) + + body_widget.setMouseTracking(True) + label.setMouseTracking(True) + option.setMouseTracking(True) + self.setMouseTracking(True) + self.setFixedHeight(32) + + self.icon = icon + self.label = label + self.option = option + self.body = body_widget + + def set_hover_properties(self, hovered, option_hovered): + body_state = "" + option_state = "" + if hovered: + body_state = "hover" + + if option_hovered: + option_state = "hover" + + if self.body.property("state") != body_state: + self.body.setProperty("state", body_state) + self.body.style().polish(self.body) + + if self.option.property("state") != option_state: + self.option.setProperty("state", option_state) + self.option.style().polish(self.option) + + def setIcon(self, icon): + pixmap = icon.pixmap(16, 16) + self.icon.setPixmap(pixmap) + + +class OptionBox(QtWidgets.QLabel): + """Option box widget class for `OptionalActionWidget`""" + + clicked = QtCore.Signal() + + def __init__(self, parent): + super(OptionBox, self).__init__(parent) + + self.setAlignment(QtCore.Qt.AlignCenter) + + icon = qtawesome.icon("fa.sticky-note-o", color="#c6c6c6") + pixmap = icon.pixmap(18, 18) + self.setPixmap(pixmap) + + def is_hovered(self, global_pos): + if global_pos is None: + return False + pos = self.mapFromGlobal(global_pos) + return self.rect().contains(pos) + + +class OptionDialog(QtWidgets.QDialog): + """Option dialog shown by option box""" + + def __init__(self, parent=None): + super(OptionDialog, self).__init__(parent) + self.setModal(True) + self._options = dict() + + def create(self, options): + parser = qargparse.QArgumentParser(arguments=options) + + decision_widget = QtWidgets.QWidget(self) + accept_btn = QtWidgets.QPushButton("Accept", decision_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", decision_widget) + + decision_layout = QtWidgets.QHBoxLayout(decision_widget) + decision_layout.addWidget(accept_btn) + decision_layout.addWidget(cancel_btn) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(parser) + layout.addWidget(decision_widget) + + accept_btn.clicked.connect(self.accept) + cancel_btn.clicked.connect(self.reject) + parser.changed.connect(self.on_changed) + + def on_changed(self, argument): + self._options[argument["name"]] = argument.read() + + def parse(self): + return self._options.copy() + + +class SeparatorWidget(QtWidgets.QFrame): + """Prepared widget that can be used as separator with predefined color. + + Args: + size (int): Size of separator (width or height). + orientation (Qt.Horizontal|Qt.Vertical): Orintation of widget. + parent (QtWidgets.QWidget): Parent widget. 
+    """
+
+    def __init__(self, size=2, orientation=QtCore.Qt.Horizontal, parent=None):
+        super(SeparatorWidget, self).__init__(parent)
+
+        self.setObjectName("Separator")
+
+        maximum_width = self.maximumWidth()
+        maximum_height = self.maximumHeight()
+
+        self._size = None
+        self._orientation = orientation
+        self._maximum_width = maximum_width
+        self._maximum_height = maximum_height
+        self.set_size(size)
+
+    def set_size(self, size):
+        if size != self._size:
+            self._set_size(size)
+
+    def _set_size(self, size):
+        if self._orientation == QtCore.Qt.Vertical:
+            self.setMinimumWidth(size)
+            self.setMaximumWidth(size)
+        else:
+            self.setMinimumHeight(size)
+            self.setMaximumHeight(size)
+
+        self._size = size
+
+    def set_orientation(self, orientation):
+        if self._orientation == orientation:
+            return
+
+        # Reset min/max sizes in opposite direction
+        if self._orientation == QtCore.Qt.Vertical:
+            self.setMinimumHeight(0)
+            self.setMaximumHeight(self._maximum_height)
+        else:
+            self.setMinimumWidth(0)
+            self.setMaximumWidth(self._maximum_width)
+
+        self._orientation = orientation
+
+        self._set_size(self._size)
+
+
+class PressHoverButton(QtWidgets.QPushButton):
+    _mouse_pressed = False
+    _mouse_hovered = False
+    change_state = QtCore.Signal(bool)
+
+    @property
+    def mouse_pressed(self):
+        return self._mouse_pressed
+
+    @property
+    def mouse_hovered(self):
+        return self._mouse_hovered
+
+    def mousePressEvent(self, event):
+        self._mouse_pressed = True
+        self._mouse_hovered = True
+        self.change_state.emit(self._mouse_hovered)
+        super(PressHoverButton, self).mousePressEvent(event)
+
+    def mouseReleaseEvent(self, event):
+        self._mouse_pressed = False
+        self._mouse_hovered = False
+        self.change_state.emit(self._mouse_hovered)
+        super(PressHoverButton, self).mouseReleaseEvent(event)
+
+    def mouseMoveEvent(self, event):
+        mouse_pos = self.mapFromGlobal(QtGui.QCursor.pos())
+        under_mouse = self.rect().contains(mouse_pos)
+        if under_mouse != self._mouse_hovered:
+            self._mouse_hovered = under_mouse
+            self.change_state.emit(self._mouse_hovered)
+
+        super(PressHoverButton, self).mouseMoveEvent(event)
+
+
+def get_refresh_icon():
+    return get_qta_icon_by_name_and_color(
+        "fa.refresh", get_default_tools_icon_color()
+    )
+
+
+def get_go_to_current_icon():
+    return get_qta_icon_by_name_and_color(
+        "fa.arrow-down", get_default_tools_icon_color()
+    )
+
+
+class VerticalExpandButton(QtWidgets.QPushButton):
+    """Button which expands vertically.
+
+    By default, a button is a little bit smaller than other widgets like
+    QLineEdit. This button expands vertically to match the size of
+    other widgets next to it.
+    """
+
+    def __init__(self, parent=None):
+        super(VerticalExpandButton, self).__init__(parent)
+
+        sp = self.sizePolicy()
+        sp.setVerticalPolicy(QtWidgets.QSizePolicy.Minimum)
+        self.setSizePolicy(sp)
+
+
+class SquareButton(QtWidgets.QPushButton):
+    """Make button a square shape.
+
+    Changes width to match height on resize.
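+
+    Example:
+        Illustrative sketch; the label is arbitrary::
+
+            btn = SquareButton("+")
+            btn.resize(btn.sizeHint())  # width follows height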
+ """ + + def __init__(self, *args, **kwargs): + super(SquareButton, self).__init__(*args, **kwargs) + + sp = self.sizePolicy() + sp.setVerticalPolicy(QtWidgets.QSizePolicy.Minimum) + sp.setHorizontalPolicy(QtWidgets.QSizePolicy.Minimum) + self.setSizePolicy(sp) + self._ideal_width = None + + def showEvent(self, event): + super(SquareButton, self).showEvent(event) + self._ideal_width = self.height() + self.updateGeometry() + + def resizeEvent(self, event): + super(SquareButton, self).resizeEvent(event) + self._ideal_width = self.height() + self.updateGeometry() + + def sizeHint(self): + sh = super(SquareButton, self).sizeHint() + ideal_width = self._ideal_width + if ideal_width is None: + ideal_width = sh.height() + sh.setWidth(ideal_width) + return sh + + +class RefreshButton(VerticalExpandButton): + def __init__(self, parent=None): + super(RefreshButton, self).__init__(parent) + self.setIcon(get_refresh_icon()) + + +class GoToCurrentButton(VerticalExpandButton): + def __init__(self, parent=None): + super(GoToCurrentButton, self).__init__(parent) + self.setIcon(get_go_to_current_icon()) diff --git a/openpype/tools/workfile_template_build/__init__.py b/client/ayon_core/tools/workfile_template_build/__init__.py similarity index 100% rename from openpype/tools/workfile_template_build/__init__.py rename to client/ayon_core/tools/workfile_template_build/__init__.py diff --git a/client/ayon_core/tools/workfile_template_build/window.py b/client/ayon_core/tools/workfile_template_build/window.py new file mode 100644 index 0000000000..7f95bac60a --- /dev/null +++ b/client/ayon_core/tools/workfile_template_build/window.py @@ -0,0 +1,241 @@ +from qtpy import QtWidgets + +from ayon_core import style +from ayon_core.lib import Logger +from ayon_core.pipeline import legacy_io +from ayon_core.tools.attribute_defs import AttributeDefinitionsWidget + + +class WorkfileBuildPlaceholderDialog(QtWidgets.QDialog): + def __init__(self, host, builder, parent=None): + super(WorkfileBuildPlaceholderDialog, self).__init__(parent) + self.setWindowTitle("Workfile Placeholder Manager") + + self._log = None + + self._first_show = True + self._first_refreshed = False + + self._builder = builder + self._host = host + # Mode can be 0 (create) or 1 (update) + # TODO write it a little bit better + self._mode = 0 + self._update_item = None + self._last_selected_plugin = None + + host_name = getattr(self._host, "name", None) + if not host_name: + host_name = legacy_io.Session.get("AVALON_APP") or "NA" + self._host_name = host_name + + plugins_combo = QtWidgets.QComboBox(self) + + content_widget = QtWidgets.QWidget(self) + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + + btns_widget = QtWidgets.QWidget(self) + create_btn = QtWidgets.QPushButton("Create", btns_widget) + save_btn = QtWidgets.QPushButton("Save", btns_widget) + close_btn = QtWidgets.QPushButton("Close", btns_widget) + + create_btn.setVisible(False) + save_btn.setVisible(False) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.addStretch(1) + btns_layout.addWidget(create_btn, 0) + btns_layout.addWidget(save_btn, 0) + btns_layout.addWidget(close_btn, 0) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(plugins_combo, 0) + main_layout.addWidget(content_widget, 1) + main_layout.addWidget(btns_widget, 0) + + create_btn.clicked.connect(self._on_create_click) + save_btn.clicked.connect(self._on_save_click) + close_btn.clicked.connect(self._on_close_click) + 
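+        # Rebuild option widgets whenever a different plugin is selected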
+        plugins_combo.currentIndexChanged.connect(self._on_plugin_change)
+
+        self._attr_defs_widget = None
+        self._plugins_combo = plugins_combo
+
+        self._content_widget = content_widget
+        self._content_layout = content_layout
+
+        self._create_btn = create_btn
+        self._save_btn = save_btn
+        self._close_btn = close_btn
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(self.__class__.__name__)
+        return self._log
+
+    def _clear_content_widget(self):
+        while self._content_layout.count() > 0:
+            item = self._content_layout.takeAt(0)
+            widget = item.widget()
+            if widget:
+                widget.setVisible(False)
+                widget.deleteLater()
+
+    def _add_message_to_content(self, message):
+        msg_label = QtWidgets.QLabel(message, self._content_widget)
+        self._content_layout.addWidget(msg_label, 0)
+        self._content_layout.addStretch(1)
+
+    def refresh(self):
+        self._first_refreshed = True
+
+        self._clear_content_widget()
+
+        if not self._builder:
+            self._add_message_to_content((
+                "Host \"{}\" does not have implemented logic"
+                " for template workfile build."
+            ).format(self._host_name))
+            self._update_ui_visibility()
+            return
+
+        placeholder_plugins = self._builder.placeholder_plugins
+
+        if self._mode == 1:
+            plugin = self._builder.placeholder_plugins.get(
+                self._last_selected_plugin
+            )
+            self._create_option_widgets(
+                plugin, self._update_item.to_dict()
+            )
+            self._update_ui_visibility()
+            return
+
+        if not placeholder_plugins:
+            self._add_message_to_content((
+                "Host \"{}\" does not have implemented plugins"
+                " for template workfile build."
+            ).format(self._host_name))
+            self._update_ui_visibility()
+            return
+
+        last_selected_plugin = self._last_selected_plugin
+        self._last_selected_plugin = None
+        self._plugins_combo.clear()
+        for identifier, plugin in placeholder_plugins.items():
+            label = plugin.label or identifier
+            self._plugins_combo.addItem(label, identifier)
+
+        index = self._plugins_combo.findData(last_selected_plugin)
+        if index < 0:
+            index = 0
+        self._plugins_combo.setCurrentIndex(index)
+        self._on_plugin_change()
+
+        self._update_ui_visibility()
+
+    def set_create_mode(self):
+        if self._mode == 0:
+            return
+
+        self._mode = 0
+        self._update_item = None
+        self.refresh()
+
+    def set_update_mode(self, update_item):
+        if self._mode == 1:
+            return
+
+        self._mode = 1
+        self._update_item = update_item
+        if update_item:
+            self._last_selected_plugin = update_item.plugin.identifier
+            self.refresh()
+            return
+
+        self._clear_content_widget()
+        self._add_message_to_content((
+            "Nothing to update."
+            " (You may not have a placeholder selected.)"
+        ))
+        self._update_ui_visibility()
+
+    def _create_option_widgets(self, plugin, options=None):
+        self._clear_content_widget()
+        attr_defs = plugin.get_placeholder_options(options)
+        widget = AttributeDefinitionsWidget(attr_defs, self._content_widget)
+        self._content_layout.addWidget(widget, 0)
+        self._content_layout.addStretch(1)
+        self._attr_defs_widget = widget
+        self._last_selected_plugin = plugin.identifier
+
+    def _update_ui_visibility(self):
+        create_mode = self._mode == 0
+        self._plugins_combo.setVisible(create_mode)
+
+        if not self._builder:
+            self._save_btn.setVisible(False)
+            self._create_btn.setVisible(False)
+            return
+
+        save_enabled = not create_mode
+        if save_enabled:
+            save_enabled = self._update_item is not None
+        self._save_btn.setVisible(save_enabled)
+        self._create_btn.setVisible(create_mode)
+
+    def _on_plugin_change(self):
+        index = self._plugins_combo.currentIndex()
+        plugin_identifier = self._plugins_combo.itemData(index)
+        if plugin_identifier == self._last_selected_plugin:
+            return
+
+        plugin = self._builder.placeholder_plugins.get(plugin_identifier)
+        self._create_option_widgets(plugin)
+
+    def _on_save_click(self):
+        options = self._attr_defs_widget.current_value()
+        plugin = self._builder.placeholder_plugins.get(
+            self._last_selected_plugin
+        )
+        # TODO much better error handling
+        try:
+            plugin.update_placeholder(self._update_item, options)
+            self.accept()
+        except Exception:
+            self.log.warning("Something went wrong", exc_info=True)
+            dialog = QtWidgets.QMessageBox(self)
+            dialog.setWindowTitle("Something went wrong")
+            dialog.setText("Something went wrong")
+            dialog.exec_()
+
+    def _on_create_click(self):
+        options = self._attr_defs_widget.current_value()
+        plugin = self._builder.placeholder_plugins.get(
+            self._last_selected_plugin
+        )
+        # TODO much better error handling
+        try:
+            plugin.create_placeholder(options)
+        except Exception:
+            self.log.warning("Something went wrong", exc_info=True)
+            dialog = QtWidgets.QMessageBox(self)
+            dialog.setWindowTitle("Something went wrong")
+            dialog.setText("Something went wrong")
+            dialog.exec_()
+
+    def _on_close_click(self):
+        self.reject()
+
+    def showEvent(self, event):
+        super(WorkfileBuildPlaceholderDialog, self).showEvent(event)
+        if not self._first_refreshed:
+            self.refresh()
+
+        if self._first_show:
+            self._first_show = False
+            self.setStyleSheet(style.load_stylesheet())
+            self.resize(390, 450)
diff --git a/openpype/tools/__init__.py b/client/ayon_core/tools/workfiles/__init__.py
similarity index 100%
rename from openpype/tools/__init__.py
rename to client/ayon_core/tools/workfiles/__init__.py
diff --git a/client/ayon_core/tools/workfiles/abstract.py b/client/ayon_core/tools/workfiles/abstract.py
new file mode 100644
index 0000000000..c9eb9004e3
--- /dev/null
+++ b/client/ayon_core/tools/workfiles/abstract.py
@@ -0,0 +1,1010 @@
+import os
+from abc import ABCMeta, abstractmethod
+
+import six
+from ayon_core.style import get_default_entity_icon_color
+
+
+class WorkfileInfo:
+    """Information about workarea file with possible additional data
+    from database.
+
+    Args:
+        folder_id (str): Folder id.
+        task_id (str): Task id.
+        filepath (str): Filepath.
+        filesize (int): File size.
+        creation_time (int): Creation time (timestamp).
+        modification_time (int): Modification time (timestamp).
+        note (str): Note.
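+
+    Example:
+        Illustrative sketch only; ids, path and timestamps are made up::
+
+            info = WorkfileInfo(
+                folder_id="f1",
+                task_id="t1",
+                filepath="/work/sh010/animation/sh010_anim_v001.ma",
+                filesize=1024,
+                creation_time=1700000000,
+                modification_time=1700000000,
+                note="First version",
+            )
+            assert WorkfileInfo.from_data(info.to_data()).note == info.note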
+    """
+
+    def __init__(
+        self,
+        folder_id,
+        task_id,
+        filepath,
+        filesize,
+        creation_time,
+        modification_time,
+        note,
+    ):
+        self.folder_id = folder_id
+        self.task_id = task_id
+        self.filepath = filepath
+        self.filesize = filesize
+        self.creation_time = creation_time
+        self.modification_time = modification_time
+        self.note = note
+
+    def to_data(self):
+        """Converts WorkfileInfo item to data.
+
+        Returns:
+            dict[str, Any]: Workfile info item data.
+        """
+
+        return {
+            "folder_id": self.folder_id,
+            "task_id": self.task_id,
+            "filepath": self.filepath,
+            "filesize": self.filesize,
+            "creation_time": self.creation_time,
+            "modification_time": self.modification_time,
+            "note": self.note,
+        }
+
+    @classmethod
+    def from_data(cls, data):
+        """Re-creates WorkfileInfo item from data.
+
+        Args:
+            data (dict[str, Any]): Workfile info item data.
+
+        Returns:
+            WorkfileInfo: Workfile info item.
+        """
+
+        return cls(**data)
+
+
+class FolderItem:
+    """Item representing folder entity on a server.
+
+    Folder can be a child of another folder or a project.
+
+    Args:
+        entity_id (str): Folder id.
+        parent_id (Union[str, None]): Parent folder id. If 'None' then project
+            is parent.
+        name (str): Name of folder.
+        label (str): Folder label.
+        icon_name (str): Name of icon from font awesome.
+        icon_color (str): Hex color string that will be used for icon.
+    """
+
+    def __init__(
+        self, entity_id, parent_id, name, label, icon_name, icon_color
+    ):
+        self.entity_id = entity_id
+        self.parent_id = parent_id
+        self.name = name
+        self.icon_name = icon_name or "fa.folder"
+        self.icon_color = icon_color or get_default_entity_icon_color()
+        self.label = label or name
+
+    def to_data(self):
+        """Converts folder item to data.
+
+        Returns:
+            dict[str, Any]: Folder item data.
+        """
+
+        return {
+            "entity_id": self.entity_id,
+            "parent_id": self.parent_id,
+            "name": self.name,
+            "label": self.label,
+            "icon_name": self.icon_name,
+            "icon_color": self.icon_color,
+        }
+
+    @classmethod
+    def from_data(cls, data):
+        """Re-creates folder item from data.
+
+        Args:
+            data (dict[str, Any]): Folder item data.
+
+        Returns:
+            FolderItem: Folder item.
+        """
+
+        return cls(**data)
+
+
+class TaskItem:
+    """Task item representing task entity on a server.
+
+    Task is child of a folder.
+
+    Task item has label that is used for display in UI. The label is by
+    default using task name and type.
+
+    Args:
+        task_id (str): Task id.
+        name (str): Name of task.
+        task_type (str): Type of task.
+        parent_id (str): Parent folder id.
+        icon_name (str): Name of icon from font awesome.
+        icon_color (str): Hex color string that will be used for icon.
+    """
+
+    def __init__(
+        self, task_id, name, task_type, parent_id, icon_name, icon_color
+    ):
+        self.task_id = task_id
+        self.name = name
+        self.task_type = task_type
+        self.parent_id = parent_id
+        self.icon_name = icon_name or "fa.male"
+        self.icon_color = icon_color or get_default_entity_icon_color()
+        self._label = None
+
+    @property
+    def id(self):
+        """Alias for task_id.
+
+        Returns:
+            str: Task id.
+        """
+
+        return self.task_id
+
+    @property
+    def label(self):
+        """Label of task item for UI.
+
+        Returns:
+            str: Label of task item.
+        """
+
+        if self._label is None:
+            self._label = "{} ({})".format(self.name, self.task_type)
+        return self._label
+
+    def to_data(self):
+        """Converts task item to data.
+
+        Returns:
+            dict[str, Any]: Task item data.
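+
+        Example:
+            Illustrative output; the values are made up::
+
+                {
+                    "task_id": "t1",
+                    "name": "animation",
+                    "parent_id": "f1",
+                    "task_type": "Animation",
+                    "icon_name": "fa.male",
+                    "icon_color": "#eeeeee",
+                }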
+ """ + + return { + "task_id": self.task_id, + "name": self.name, + "parent_id": self.parent_id, + "task_type": self.task_type, + "icon_name": self.icon_name, + "icon_color": self.icon_color, + } + + @classmethod + def from_data(cls, data): + """Re-create task item from data. + + Args: + data (dict[str, Any]): Task item data. + + Returns: + TaskItem: Task item. + """ + + return cls(**data) + + +class FileItem: + """File item that represents a file. + + Can be used for both Workarea and Published workfile. Workarea file + will always exist on disk which is not the case for Published workfile. + + Args: + dirpath (str): Directory path of file. + filename (str): Filename. + modified (float): Modified timestamp. + representation_id (Optional[str]): Representation id of published + workfile. + filepath (Optional[str]): Prepared filepath. + exists (Optional[bool]): If file exists on disk. + """ + + def __init__( + self, + dirpath, + filename, + modified, + representation_id=None, + filepath=None, + exists=None + ): + self.filename = filename + self.dirpath = dirpath + self.modified = modified + self.representation_id = representation_id + self._filepath = filepath + self._exists = exists + + @property + def filepath(self): + """Filepath of file. + + Returns: + str: Full path to a file. + """ + + if self._filepath is None: + self._filepath = os.path.join(self.dirpath, self.filename) + return self._filepath + + @property + def exists(self): + """File is available. + + Returns: + bool: If file exists on disk. + """ + + if self._exists is None: + self._exists = os.path.exists(self.filepath) + return self._exists + + def to_data(self): + """Converts file item to data. + + Returns: + dict[str, Any]: File item data. + """ + + return { + "filename": self.filename, + "dirpath": self.dirpath, + "modified": self.modified, + "representation_id": self.representation_id, + "filepath": self.filepath, + "exists": self.exists, + } + + @classmethod + def from_data(cls, data): + """Re-creates file item from data. + + Args: + data (dict[str, Any]): File item data. + + Returns: + FileItem: File item. + """ + + required_keys = { + "filename", + "dirpath", + "modified", + "representation_id" + } + missing_keys = required_keys - set(data.keys()) + if missing_keys: + raise KeyError("Missing keys: {}".format(missing_keys)) + + return cls(**{ + key: data[key] + for key in required_keys + }) + + +class WorkareaFilepathResult: + """Result of workarea file formatting. + + Args: + root (str): Root path of workarea. + filename (str): Filename. + exists (bool): True if file exists. + filepath (str): Filepath. If not provided it will be constructed + from root and filename. + """ + + def __init__(self, root, filename, exists, filepath=None): + if not filepath and root and filename: + filepath = os.path.join(root, filename) + self.root = root + self.filename = filename + self.exists = exists + self.filepath = filepath + + +@six.add_metaclass(ABCMeta) +class AbstractWorkfilesCommon(object): + @abstractmethod + def is_host_valid(self): + """Host is valid for workfiles tool work. + + Returns: + bool: True if host is valid. + """ + + pass + + @abstractmethod + def get_workfile_extensions(self): + """Get possible workfile extensions. + + Defined by host implementation. + + Returns: + Iterable[str]: List of extensions. + """ + + pass + + @abstractmethod + def is_save_enabled(self): + """Is workfile save enabled. + + Returns: + bool: True if save is enabled. 
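+
+        Example:
+            Illustrative sketch; 'controller' is any implementation of
+            this interface::
+
+                if controller.is_save_enabled():
+                    ...  # e.g. enable the "Save As" button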
+        """
+
+        pass
+
+    @abstractmethod
+    def set_save_enabled(self, enabled):
+        """Enable or disable workfile save.
+
+        Args:
+            enabled (bool): Enable save workfile when True.
+        """
+
+        pass
+
+
+class AbstractWorkfilesBackend(AbstractWorkfilesCommon):
+    # Current context
+    @abstractmethod
+    def get_host_name(self):
+        """Name of host.
+
+        Returns:
+            str: Name of host.
+        """
+        pass
+
+    @abstractmethod
+    def get_current_project_name(self):
+        """Project name from current context of host.
+
+        Returns:
+            str: Name of project.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_current_folder_id(self):
+        """Folder id from current context of host.
+
+        Returns:
+            Union[str, None]: Folder id or None if host does not have
+                any context.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_current_task_name(self):
+        """Task name from current context of host.
+
+        Returns:
+            Union[str, None]: Task name or None if host does not have
+                any context.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_current_workfile(self):
+        """Current workfile from current context of host.
+
+        Returns:
+            Union[str, None]: Path to workfile or None if host does
+                not have a specific file opened.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def project_anatomy(self):
+        """Project anatomy for current project.
+
+        Returns:
+            Anatomy: Project anatomy.
+        """
+
+        pass
+
+    @property
+    @abstractmethod
+    def project_settings(self):
+        """Project settings for current project.
+
+        Returns:
+            dict[str, Any]: Project settings.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_project_entity(self, project_name):
+        """Get project entity by name.
+
+        Args:
+            project_name (str): Project name.
+
+        Returns:
+            dict[str, Any]: Project entity data.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_folder_entity(self, project_name, folder_id):
+        """Get folder entity by id.
+
+        Args:
+            project_name (str): Project name.
+            folder_id (str): Folder id.
+
+        Returns:
+            dict[str, Any]: Folder entity data.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_task_entity(self, project_name, task_id):
+        """Get task entity by id.
+
+        Args:
+            project_name (str): Project name.
+            task_id (str): Task id.
+
+        Returns:
+            dict[str, Any]: Task entity data.
+        """
+
+        pass
+
+    def emit_event(self, topic, data=None, source=None):
+        """Emit event.
+
+        Args:
+            topic (str): Event topic used for callbacks filtering.
+            data (Optional[dict[str, Any]]): Event data.
+            source (Optional[str]): Event source.
+        """
+
+        pass
+
+
+class AbstractWorkfilesFrontend(AbstractWorkfilesCommon):
+    """UI controller abstraction that is used for workfiles tool frontend.
+
+    Abstraction to provide data for UI and to handle UI events.
+
+    Provides access to abstract backend data, like folders and tasks. Takes
+    care of selection handling, keeps information about the current UI
+    selection and can tell what selection the UI should show.
+
+    Selection is separated into 2 parts, first is what UI elements tell
+    about selection, and second is what UI should show as selected.
+    """
+
+    @abstractmethod
+    def register_event_callback(self, topic, callback):
+        """Register event callback.
+
+        Listen for events with given topic.
+
+        Args:
+            topic (str): Name of topic.
+            callback (Callable): Callback that will be called when event
+                is triggered.
+        """
+
+        pass
+
+    # Host information
+    @abstractmethod
+    def get_workfile_extensions(self):
+        """Each host can define extensions that can be used for workfile.
+
+        Returns:
+            List[str]: File extensions that can be used as workfile for
+                current host.
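+
+        Example:
+            Illustrative return value for a hypothetical Maya-like host::
+
+                [".ma", ".mb"]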
+        """
+
+        pass
+
+    # Selection information
+    @abstractmethod
+    def get_selected_folder_id(self):
+        """Currently selected folder id.
+
+        Returns:
+            Union[str, None]: Folder id or None if no folder is selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def set_selected_folder(self, folder_id):
+        """Change selected folder.
+
+        This deselects currently selected task.
+
+        Args:
+            folder_id (Union[str, None]): Folder id or None if no folder
+                is selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_selected_task_id(self):
+        """Currently selected task id.
+
+        Returns:
+            Union[str, None]: Task id or None if no task is selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_selected_task_name(self):
+        """Currently selected task name.
+
+        Returns:
+            Union[str, None]: Task name or None if no task is selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def set_selected_task(self, task_id, task_name):
+        """Change selected task.
+
+        Args:
+            task_id (Union[str, None]): Task id or None if no task
+                is selected.
+            task_name (Union[str, None]): Task name or None if no task
+                is selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_selected_workfile_path(self):
+        """Currently selected workarea workfile.
+
+        Returns:
+            Union[str, None]: Selected workfile path.
+        """
+
+        pass
+
+    @abstractmethod
+    def set_selected_workfile_path(self, path):
+        """Change selected workfile path.
+
+        Args:
+            path (Union[str, None]): Selected workfile path.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_selected_representation_id(self):
+        """Currently selected workfile representation id.
+
+        Returns:
+            Union[str, None]: Representation id or None if no representation
+                is selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def set_selected_representation_id(self, representation_id):
+        """Change selected representation.
+
+        Args:
+            representation_id (Union[str, None]): Selected workfile
+                representation id.
+        """
+
+        pass
+
+    def get_selected_context(self):
+        """Obtain selected context.
+
+        Returns:
+            dict[str, Union[str, None]]: Selected context.
+        """
+
+        return {
+            "folder_id": self.get_selected_folder_id(),
+            "task_id": self.get_selected_task_id(),
+            "task_name": self.get_selected_task_name(),
+            "workfile_path": self.get_selected_workfile_path(),
+            "representation_id": self.get_selected_representation_id(),
+        }
+
+    # Expected selection
+    # - expected selection is used to restore selection after refresh
+    #   or when current context should be used
+    @abstractmethod
+    def set_expected_selection(
+        self,
+        folder_id,
+        task_name,
+        workfile_name=None,
+        representation_id=None
+    ):
+        """Define what should be selected in UI.
+
+        Expected selection provides a way to define/change selection of
+        sequential UI elements. For example, if folder and task should be
+        selected, a task element should wait until folder element has
+        selected folder.
+
+        Triggers 'expected_selection.changed' event.
+
+        Args:
+            folder_id (str): Folder id.
+            task_name (str): Task name.
+            workfile_name (Optional[str]): Workfile name. Used for workarea
+                files UI element.
+            representation_id (Optional[str]): Representation id. Used for
+                published files UI element.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_expected_selection_data(self):
+        """Data of expected selection.
+
+        TODOs:
+            Return defined object instead of dict.
+
+        Returns:
+            dict[str, Any]: Expected selection data.
+        """
+
+        pass
+
+    @abstractmethod
+    def expected_folder_selected(self, folder_id):
+        """Expected folder was selected in UI.
+
+        Args:
+            folder_id (str): Folder id which was selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def expected_task_selected(self, folder_id, task_name):
+        """Expected task was selected in UI.
+
+        Args:
+            folder_id (str): Folder id under which task is.
+            task_name (str): Task name which was selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def expected_representation_selected(
+        self, folder_id, task_name, representation_id
+    ):
+        """Expected representation was selected in UI.
+
+        Args:
+            folder_id (str): Folder id under which representation is.
+            task_name (str): Task name under which representation is.
+            representation_id (str): Representation id which was selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def expected_workfile_selected(self, folder_id, task_name, workfile_name):
+        """Expected workfile was selected in UI.
+
+        Args:
+            folder_id (str): Folder id under which workfile is.
+            task_name (str): Task name under which workfile is.
+            workfile_name (str): Workfile filename which was selected.
+        """
+
+        pass
+
+    @abstractmethod
+    def go_to_current_context(self):
+        """Set expected selection to current context."""
+
+        pass
+
+    # Model functions
+    @abstractmethod
+    def get_folder_items(self, project_name, sender):
+        """Folder items to visualize project hierarchy.
+
+        This function may trigger events 'folders.refresh.started' and
+        'folders.refresh.finished' which will contain 'sender' value in data.
+        That may help to avoid re-refresh of folder items in UI elements.
+
+        Args:
+            project_name (str): Project name for which folders are requested.
+            sender (str): Who requested folder items.
+
+        Returns:
+            list[FolderItem]: Minimum possible information needed
+                for visualisation of folder hierarchy.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_task_items(self, project_name, folder_id, sender):
+        """Task items.
+
+        This function may trigger events 'tasks.refresh.started' and
+        'tasks.refresh.finished' which will contain 'sender' value in data.
+        That may help to avoid re-refresh of task items in UI elements.
+
+        Args:
+            project_name (str): Project name for which tasks are requested.
+            folder_id (str): Folder ID for which tasks are requested.
+            sender (str): Who requested task items.
+
+        Returns:
+            list[TaskItem]: Minimum possible information needed
+                for visualisation of tasks.
+        """
+
+        pass
+
+    @abstractmethod
+    def has_unsaved_changes(self):
+        """Host has unsaved changes in the currently running session.
+
+        Returns:
+            bool: Has unsaved changes.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_workarea_dir_by_context(self, folder_id, task_id):
+        """Get workarea directory by context.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+
+        Returns:
+            str: Workarea directory.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_workarea_file_items(self, folder_id, task_id):
+        """Get workarea file items.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+
+        Returns:
+            list[FileItem]: List of workarea file items.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_workarea_save_as_data(self, folder_id, task_id):
+        """Prepare data for Save As operation.
+
+        Todos:
+            Return defined object instead of dict.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+
+        Returns:
+            dict[str, Any]: Data for Save As operation.
+        """
+
+        pass
+
+    @abstractmethod
+    def fill_workarea_filepath(
+        self,
+        folder_id,
+        task_id,
+        extension,
+        use_last_version,
+        version,
+        comment,
+    ):
+        """Calculate workfile path for passed context.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+            extension (str): File extension.
+            use_last_version (bool): Use last version.
+            version (int): Version used if 'use_last_version' is 'False'.
+            comment (str): User's comment (subversion).
+
+        Returns:
+            WorkareaFilepathResult: Result of the operation.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_published_file_items(self, folder_id, task_id):
+        """Get published file items.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (Union[str, None]): Task id.
+
+        Returns:
+            list[FileItem]: List of published file items.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_workfile_info(self, folder_id, task_id, filepath):
+        """Workfile info from database.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+            filepath (str): Workfile path.
+
+        Returns:
+            Union[WorkfileInfo, None]: Workfile info or None if was passed
+                invalid context.
+        """
+
+        pass
+
+    @abstractmethod
+    def save_workfile_info(self, folder_id, task_id, filepath, note):
+        """Save workfile info to database.
+
+        At this moment the only information which can be saved about
+        workfile is 'note'.
+
+        When 'note' is 'None' it is only validated that workfile info exists,
+        and if not then one is created with an empty note.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+            filepath (str): Workfile path.
+            note (Union[str, None]): Note.
+        """
+
+        pass
+
+    # General commands
+    @abstractmethod
+    def reset(self):
+        """Reset everything, models, ui etc.
+
+        Triggers 'controller.reset.started' event at the beginning and
+        'controller.reset.finished' at the end.
+        """
+
+        pass
+
+    # Controller actions
+    @abstractmethod
+    def open_workfile(self, folder_id, task_id, filepath):
+        """Open a workfile for context.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+            filepath (str): Workfile path.
+        """
+
+        pass
+
+    @abstractmethod
+    def save_current_workfile(self):
+        """Save state of current workfile."""
+
+        pass
+
+    @abstractmethod
+    def save_as_workfile(
+        self,
+        folder_id,
+        task_id,
+        workdir,
+        filename,
+        template_key,
+    ):
+        """Save current state of workfile to workarea.
+
+        Args:
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+            workdir (str): Workarea directory.
+            filename (str): Workarea filename.
+            template_key (str): Template key used to get the workdir
+                and filename.
+        """
+
+        pass
+
+    @abstractmethod
+    def copy_workfile_representation(
+        self,
+        representation_id,
+        representation_filepath,
+        folder_id,
+        task_id,
+        workdir,
+        filename,
+        template_key,
+    ):
+        """Action to copy published workfile representation to workarea.
+
+        Triggers 'copy_representation.started' event on start and
+        'copy_representation.finished' event with '{"failed": bool}'.
+
+        Args:
+            representation_id (str): Representation id.
+            representation_filepath (str): Path to representation file.
+            folder_id (str): Folder id.
+            task_id (str): Task id.
+            workdir (str): Workarea directory.
+            filename (str): Workarea filename.
+            template_key (str): Template key.
+        """
+
+        pass
+
+    @abstractmethod
+    def duplicate_workfile(self, src_filepath, workdir, filename):
+        """Duplicate workfile.
+
+        Workfile is not opened when done.
+
+        Args:
+            src_filepath (str): Source workfile path.
+            workdir (str): Destination workdir.
+            filename (str): Destination filename.
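+
+        Example:
+            Illustrative sketch; the paths and names are hypothetical::
+
+                controller.duplicate_workfile(
+                    "/work/sh010/sh010_v001.ma",
+                    "/work/sh010",
+                    "sh010_v002.ma",
+                )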
+        """
+
+        pass
diff --git a/client/ayon_core/tools/workfiles/control.py b/client/ayon_core/tools/workfiles/control.py
new file mode 100644
index 0000000000..c1e513d12c
--- /dev/null
+++ b/client/ayon_core/tools/workfiles/control.py
@@ -0,0 +1,744 @@
+import os
+import shutil
+
+import ayon_api
+
+from ayon_core.client import get_asset_by_id
+from ayon_core.host import IWorkfileHost
+from ayon_core.lib import Logger, emit_event
+from ayon_core.lib.events import QueuedEventSystem
+from ayon_core.settings import get_project_settings
+from ayon_core.pipeline import Anatomy, registered_host
+from ayon_core.pipeline.context_tools import (
+    change_current_context,
+    get_current_host_name,
+    get_global_context,
+)
+from ayon_core.pipeline.workfile import create_workdir_extra_folders
+
+from ayon_core.tools.ayon_utils.models import (
+    HierarchyModel,
+    HierarchyExpectedSelection,
+    ProjectsModel,
+)
+
+from .abstract import (
+    AbstractWorkfilesFrontend,
+    AbstractWorkfilesBackend,
+)
+from .models import SelectionModel, WorkfilesModel
+
+
+class WorkfilesToolExpectedSelection(HierarchyExpectedSelection):
+    def __init__(self, controller):
+        super(WorkfilesToolExpectedSelection, self).__init__(
+            controller,
+            handle_project=False,
+            handle_folder=True,
+            handle_task=True,
+        )
+
+        self._workfile_name = None
+        self._representation_id = None
+
+        self._workfile_selected = True
+        self._representation_selected = True
+
+    def set_expected_selection(
+        self,
+        project_name=None,
+        folder_id=None,
+        task_name=None,
+        workfile_name=None,
+        representation_id=None,
+    ):
+        self._workfile_name = workfile_name
+        self._representation_id = representation_id
+
+        self._workfile_selected = False
+        self._representation_selected = False
+
+        super(WorkfilesToolExpectedSelection, self).set_expected_selection(
+            project_name,
+            folder_id,
+            task_name,
+        )
+
+    def get_expected_selection_data(self):
+        data = super(
+            WorkfilesToolExpectedSelection, self
+        ).get_expected_selection_data()
+
+        _is_current = (
+            self._project_selected
+            and self._folder_selected
+            and self._task_selected
+        )
+        workfile_is_current = False
+        repre_is_current = False
+        if _is_current:
+            workfile_is_current = not self._workfile_selected
+            repre_is_current = not self._representation_selected
+
+        data["workfile"] = {
+            "name": self._workfile_name,
+            "current": workfile_is_current,
+            "selected": self._workfile_selected,
+        }
+        data["representation"] = {
+            "id": self._representation_id,
+            "current": repre_is_current,
+            "selected": self._representation_selected,
+        }
+        return data
+
+    def is_expected_workfile_selected(self, workfile_name):
+        return (
+            workfile_name == self._workfile_name
+            and self._workfile_selected
+        )
+
+    def is_expected_representation_selected(self, representation_id):
+        return (
+            representation_id == self._representation_id
+            and self._representation_selected
+        )
+
+    def expected_workfile_selected(self, folder_id, task_name, workfile_name):
+        if folder_id != self._folder_id:
+            return False
+
+        if task_name != self._task_name:
+            return False
+
+        if workfile_name != self._workfile_name:
+            return False
+        self._workfile_selected = True
+        self._emit_change()
+        return True
+
+    def expected_representation_selected(
+        self, folder_id, task_name, representation_id
+    ):
+        if folder_id != self._folder_id:
+            return False
+
+        if task_name != self._task_name:
+            return False
+
+        if representation_id != self._representation_id:
+            return False
+        self._representation_selected = True
+        self._emit_change()
+        return True
+
+
+class BaseWorkfileController(
+    AbstractWorkfilesFrontend, AbstractWorkfilesBackend
+):
+    def __init__(self, host=None):
+        if host is None:
+            host = registered_host()
+
+        host_is_valid = False
+        if host is not None:
+            missing_methods = (
+                IWorkfileHost.get_missing_workfile_methods(host)
+            )
+            host_is_valid = len(missing_methods) == 0
+
+        self._host = host
+        self._host_is_valid = host_is_valid
+
+        self._project_anatomy = None
+        self._project_settings = None
+        self._event_system = None
+        self._log = None
+
+        self._current_project_name = None
+        self._current_folder_name = None
+        self._current_folder_id = None
+        self._current_task_name = None
+        self._save_is_enabled = True
+
+        # Expected selected folder and task
+        self._expected_selection = self._create_expected_selection_obj()
+        self._selection_model = self._create_selection_model()
+        self._projects_model = self._create_projects_model()
+        self._hierarchy_model = self._create_hierarchy_model()
+        self._workfiles_model = self._create_workfiles_model()
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger("WorkfilesUI")
+        return self._log
+
+    def is_host_valid(self):
+        return self._host_is_valid
+
+    def _create_expected_selection_obj(self):
+        return WorkfilesToolExpectedSelection(self)
+
+    def _create_projects_model(self):
+        return ProjectsModel(self)
+
+    def _create_selection_model(self):
+        return SelectionModel(self)
+
+    def _create_hierarchy_model(self):
+        return HierarchyModel(self)
+
+    def _create_workfiles_model(self):
+        return WorkfilesModel(self)
+
+    @property
+    def event_system(self):
+        """Inner event system for workfiles tool controller.
+
+        Is used for communication with UI. Event system is created on demand.
+
+        Returns:
+            QueuedEventSystem: Event system which can trigger callbacks
+                for topics.
+        """
+
+        if self._event_system is None:
+            self._event_system = QueuedEventSystem()
+        return self._event_system
+
+    # ----------------------------------------------------
+    # Implementation of methods required for backend logic
+    # ----------------------------------------------------
+    @property
+    def project_settings(self):
+        if self._project_settings is None:
+            self._project_settings = get_project_settings(
+                self.get_current_project_name())
+        return self._project_settings
+
+    @property
+    def project_anatomy(self):
+        if self._project_anatomy is None:
+            self._project_anatomy = Anatomy(self.get_current_project_name())
+        return self._project_anatomy
+
+    def get_project_entity(self, project_name):
+        return self._projects_model.get_project_entity(
+            project_name)
+
+    def get_folder_entity(self, project_name, folder_id):
+        return self._hierarchy_model.get_folder_entity(
+            project_name, folder_id)
+
+    def get_task_entity(self, project_name, task_id):
+        return self._hierarchy_model.get_task_entity(
+            project_name, task_id)
+
+    # ---------------------------------
+    # Implementation of abstract methods
+    # ---------------------------------
+    def emit_event(self, topic, data=None, source=None):
+        """Use implemented event system to trigger event."""
+
+        if data is None:
+            data = {}
+        self.event_system.emit(topic, data, source)
+
+    def register_event_callback(self, topic, callback):
+        self.event_system.add_callback(topic, callback)
+
+    def is_save_enabled(self):
+        """Is workfile save enabled.
+
+        Returns:
+            bool: True if save is enabled.
+        """
+
+        return self._save_is_enabled
+
+    def set_save_enabled(self, enabled):
+        """Enable or disable workfile save.
+
+        Args:
+            enabled (bool): Enable save workfile when True.
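+
+        Example:
+            Illustrative sketch; toggling emits the
+            'workfile_save_enable.changed' event::
+
+                controller.set_save_enabled(False)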
+ """ + + if self._save_is_enabled == enabled: + return + + self._save_is_enabled = enabled + self._emit_event( + "workfile_save_enable.changed", + {"enabled": enabled} + ) + + # Host information + def get_workfile_extensions(self): + host = self._host + if isinstance(host, IWorkfileHost): + return host.get_workfile_extensions() + return host.file_extensions() + + def has_unsaved_changes(self): + host = self._host + if isinstance(host, IWorkfileHost): + return host.workfile_has_unsaved_changes() + return host.has_unsaved_changes() + + # Current context + def get_host_name(self): + host = self._host + if isinstance(host, IWorkfileHost): + return host.name + return get_current_host_name() + + def _get_host_current_context(self): + if hasattr(self._host, "get_current_context"): + return self._host.get_current_context() + return get_global_context() + + def get_current_project_name(self): + return self._current_project_name + + def get_current_folder_id(self): + return self._current_folder_id + + def get_current_task_name(self): + return self._current_task_name + + def get_current_workfile(self): + host = self._host + if isinstance(host, IWorkfileHost): + return host.get_current_workfile() + return host.current_file() + + # Selection information + def get_selected_folder_id(self): + return self._selection_model.get_selected_folder_id() + + def set_selected_folder(self, folder_id): + self._selection_model.set_selected_folder(folder_id) + + def get_selected_task_id(self): + return self._selection_model.get_selected_task_id() + + def get_selected_task_name(self): + return self._selection_model.get_selected_task_name() + + def set_selected_task(self, task_id, task_name): + return self._selection_model.set_selected_task(task_id, task_name) + + def get_selected_workfile_path(self): + return self._selection_model.get_selected_workfile_path() + + def set_selected_workfile_path(self, path): + self._selection_model.set_selected_workfile_path(path) + + def get_selected_representation_id(self): + return self._selection_model.get_selected_representation_id() + + def set_selected_representation_id(self, representation_id): + self._selection_model.set_selected_representation_id( + representation_id) + + def set_expected_selection( + self, + folder_id, + task_name, + workfile_name=None, + representation_id=None + ): + self._expected_selection.set_expected_selection( + self.get_current_project_name(), + folder_id, + task_name, + workfile_name, + representation_id + ) + self._trigger_expected_selection_changed() + + def expected_folder_selected(self, folder_id): + if self._expected_selection.expected_folder_selected(folder_id): + self._trigger_expected_selection_changed() + + def expected_task_selected(self, folder_id, task_name): + if self._expected_selection.expected_task_selected( + folder_id, task_name + ): + self._trigger_expected_selection_changed() + + def expected_workfile_selected(self, folder_id, task_name, workfile_name): + if self._expected_selection.expected_workfile_selected( + folder_id, task_name, workfile_name + ): + self._trigger_expected_selection_changed() + + def expected_representation_selected( + self, folder_id, task_name, representation_id + ): + if self._expected_selection.expected_representation_selected( + folder_id, task_name, representation_id + ): + self._trigger_expected_selection_changed() + + def get_expected_selection_data(self): + return self._expected_selection.get_expected_selection_data() + + def go_to_current_context(self): + self.set_expected_selection( + 
self._current_folder_id, self._current_task_name + ) + + # Model functions + def get_folder_items(self, project_name, sender=None): + return self._hierarchy_model.get_folder_items(project_name, sender) + + def get_task_items(self, project_name, folder_id, sender=None): + return self._hierarchy_model.get_task_items( + project_name, folder_id, sender + ) + + def get_workarea_dir_by_context(self, folder_id, task_id): + return self._workfiles_model.get_workarea_dir_by_context( + folder_id, task_id) + + def get_workarea_file_items(self, folder_id, task_id): + return self._workfiles_model.get_workarea_file_items( + folder_id, task_id) + + def get_workarea_save_as_data(self, folder_id, task_id): + return self._workfiles_model.get_workarea_save_as_data( + folder_id, task_id) + + def fill_workarea_filepath( + self, + folder_id, + task_id, + extension, + use_last_version, + version, + comment, + ): + return self._workfiles_model.fill_workarea_filepath( + folder_id, + task_id, + extension, + use_last_version, + version, + comment, + ) + + def get_published_file_items(self, folder_id, task_id): + task_name = None + if task_id: + task = self.get_task_entity( + self.get_current_project_name(), task_id + ) + task_name = task.get("name") + + return self._workfiles_model.get_published_file_items( + folder_id, task_name) + + def get_workfile_info(self, folder_id, task_id, filepath): + return self._workfiles_model.get_workfile_info( + folder_id, task_id, filepath + ) + + def save_workfile_info(self, folder_id, task_id, filepath, note): + self._workfiles_model.save_workfile_info( + folder_id, task_id, filepath, note + ) + + def reset(self): + if not self._host_is_valid: + self._emit_event("controller.reset.started") + self._emit_event("controller.reset.finished") + return + expected_folder_id = self.get_selected_folder_id() + expected_task_name = self.get_selected_task_name() + expected_work_path = self.get_selected_workfile_path() + expected_repre_id = self.get_selected_representation_id() + expected_work_name = None + if expected_work_path: + expected_work_name = os.path.basename(expected_work_path) + + self._emit_event("controller.reset.started") + + context = self._get_host_current_context() + + project_name = context["project_name"] + folder_name = context["asset_name"] + task_name = context["task_name"] + current_file = self.get_current_workfile() + folder_id = None + if folder_name: + folder = ayon_api.get_folder_by_path(project_name, folder_name) + if folder: + folder_id = folder["id"] + + self._project_settings = None + self._project_anatomy = None + + self._current_project_name = project_name + self._current_folder_name = folder_name + self._current_folder_id = folder_id + self._current_task_name = task_name + + self._projects_model.reset() + self._hierarchy_model.reset() + + if not expected_folder_id: + expected_folder_id = folder_id + expected_task_name = task_name + if current_file: + expected_work_name = os.path.basename(current_file) + + self._emit_event("controller.reset.finished") + + self._expected_selection.set_expected_selection( + project_name, + expected_folder_id, + expected_task_name, + expected_work_name, + expected_repre_id, + ) + + # Controller actions + def open_workfile(self, folder_id, task_id, filepath): + self._emit_event("open_workfile.started") + + failed = False + try: + self._open_workfile(folder_id, task_id, filepath) + + except Exception: + failed = True + self.log.warning("Open of workfile failed", exc_info=True) + + self._emit_event( + "open_workfile.finished", + 
+            {"failed": failed},
+        )
+
+    def save_current_workfile(self):
+        current_file = self.get_current_workfile()
+        self._host_save_workfile(current_file)
+
+    def save_as_workfile(
+        self,
+        folder_id,
+        task_id,
+        workdir,
+        filename,
+        template_key,
+    ):
+        self._emit_event("save_as.started")
+
+        failed = False
+        try:
+            self._save_as_workfile(
+                folder_id,
+                task_id,
+                workdir,
+                filename,
+                template_key,
+            )
+        except Exception:
+            failed = True
+            self.log.warning("Save as failed", exc_info=True)
+
+        self._emit_event(
+            "save_as.finished",
+            {"failed": failed},
+        )
+
+    def copy_workfile_representation(
+        self,
+        representation_id,
+        representation_filepath,
+        folder_id,
+        task_id,
+        workdir,
+        filename,
+        template_key,
+    ):
+        self._emit_event("copy_representation.started")
+
+        failed = False
+        try:
+            self._save_as_workfile(
+                folder_id,
+                task_id,
+                workdir,
+                filename,
+                template_key,
+                src_filepath=representation_filepath,
+            )
+        except Exception:
+            failed = True
+            self.log.warning(
+                "Copy of workfile representation failed", exc_info=True
+            )
+
+        self._emit_event(
+            "copy_representation.finished",
+            {"failed": failed},
+        )
+
+    def duplicate_workfile(self, src_filepath, workdir, filename):
+        self._emit_event("workfile_duplicate.started")
+
+        failed = False
+        try:
+            dst_filepath = os.path.join(workdir, filename)
+            shutil.copy(src_filepath, dst_filepath)
+        except Exception:
+            failed = True
+            self.log.warning("Duplication of workfile failed", exc_info=True)
+
+        self._emit_event(
+            "workfile_duplicate.finished",
+            {"failed": failed},
+        )
+
+    # Helper host methods that resolve 'IWorkfileHost' interface
+    def _host_open_workfile(self, filepath):
+        host = self._host
+        if isinstance(host, IWorkfileHost):
+            host.open_workfile(filepath)
+        else:
+            host.open_file(filepath)
+
+    def _host_save_workfile(self, filepath):
+        host = self._host
+        if isinstance(host, IWorkfileHost):
+            host.save_workfile(filepath)
+        else:
+            host.save_file(filepath)
+
+    def _emit_event(self, topic, data=None):
+        self.emit_event(topic, data, "controller")
+
+    # Expected selection
+    # - expected selection is used to restore selection after refresh
+    #   or when current context should be used
+    def _trigger_expected_selection_changed(self):
+        self._emit_event(
+            "expected_selection_changed",
+            self._expected_selection.get_expected_selection_data(),
+        )
+
+    def _get_event_context_data(
+        self, project_name, folder_id, task_id, folder=None, task=None
+    ):
+        if folder is None:
+            folder = self.get_folder_entity(project_name, folder_id)
+        if task is None:
+            task = self.get_task_entity(project_name, task_id)
+        # NOTE keys should be OpenPype compatible
+        return {
+            "project_name": project_name,
+            "folder_id": folder_id,
+            "asset_id": folder_id,
+            "asset_name": folder["name"],
+            "task_id": task_id,
+            "task_name": task["name"],
+            "host_name": self.get_host_name(),
+        }
+
+    def _open_workfile(self, folder_id, task_id, filepath):
+        project_name = self.get_current_project_name()
+        event_data = self._get_event_context_data(
+            project_name, folder_id, task_id
+        )
+        event_data["filepath"] = filepath
+
+        emit_event("workfile.open.before", event_data, source="workfiles.tool")
+
+        # Change context
+        task_name = event_data["task_name"]
+        if (
+            folder_id != self.get_current_folder_id()
+            or task_name != self.get_current_task_name()
+        ):
+            # Use OpenPype asset-like object
+            asset_doc = get_asset_by_id(
+                event_data["project_name"],
+                event_data["folder_id"],
+            )
+            change_current_context(
+                asset_doc,
+                event_data["task_name"]
+            )
+
+        self._host_open_workfile(filepath)
+
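+        # Notify listeners that the workfile was opened in the host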
emit_event("workfile.open.after", event_data, source="workfiles.tool") + + def _save_as_workfile( + self, + folder_id, + task_id, + workdir, + filename, + template_key, + src_filepath=None, + ): + # Trigger before save event + project_name = self.get_current_project_name() + folder = self.get_folder_entity(project_name, folder_id) + task = self.get_task_entity(project_name, task_id) + task_name = task["name"] + + # QUESTION should the data be different for 'before' and 'after'? + event_data = self._get_event_context_data( + project_name, folder_id, task_id, folder, task + ) + event_data.update({ + "filename": filename, + "workdir_path": workdir, + }) + + emit_event("workfile.save.before", event_data, source="workfiles.tool") + + # Create workfiles root folder + if not os.path.exists(workdir): + self.log.debug("Initializing work directory: %s", workdir) + os.makedirs(workdir) + + # Change context + if ( + folder_id != self.get_current_folder_id() + or task_name != self.get_current_task_name() + ): + # Use OpenPype asset-like object + asset_doc = get_asset_by_id(project_name, folder["id"]) + change_current_context( + asset_doc, + task["name"], + template_key=template_key + ) + + # Save workfile + dst_filepath = os.path.join(workdir, filename) + if src_filepath: + shutil.copyfile(src_filepath, dst_filepath) + self._host_open_workfile(dst_filepath) + else: + self._host_save_workfile(dst_filepath) + + # Make sure workfile info exists + self.save_workfile_info(folder_id, task_id, dst_filepath, None) + + # Create extra folders + create_workdir_extra_folders( + workdir, + self.get_host_name(), + task["taskType"], + task_name, + project_name + ) + + # Trigger after save events + emit_event("workfile.save.after", event_data, source="workfiles.tool") + self.reset() diff --git a/openpype/tools/workfiles/lock_dialog.py b/client/ayon_core/tools/workfiles/lock_dialog.py similarity index 91% rename from openpype/tools/workfiles/lock_dialog.py rename to client/ayon_core/tools/workfiles/lock_dialog.py index 29e0d3bd9b..5e2fa95526 100644 --- a/openpype/tools/workfiles/lock_dialog.py +++ b/client/ayon_core/tools/workfiles/lock_dialog.py @@ -1,7 +1,7 @@ from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import load_stylesheet, get_app_icon_path -from openpype.pipeline.workfile.lock_workfile import get_workfile_lock_data +from ayon_core.style import load_stylesheet, get_app_icon_path +from ayon_core.pipeline.workfile.lock_workfile import get_workfile_lock_data class WorkfileLockDialog(QtWidgets.QDialog): diff --git a/openpype/tools/ayon_workfiles/models/__init__.py b/client/ayon_core/tools/workfiles/models/__init__.py similarity index 100% rename from openpype/tools/ayon_workfiles/models/__init__.py rename to client/ayon_core/tools/workfiles/models/__init__.py diff --git a/openpype/tools/ayon_workfiles/models/selection.py b/client/ayon_core/tools/workfiles/models/selection.py similarity index 100% rename from openpype/tools/ayon_workfiles/models/selection.py rename to client/ayon_core/tools/workfiles/models/selection.py diff --git a/openpype/tools/ayon_workfiles/models/workfiles.py b/client/ayon_core/tools/workfiles/models/workfiles.py similarity index 98% rename from openpype/tools/ayon_workfiles/models/workfiles.py rename to client/ayon_core/tools/workfiles/models/workfiles.py index f9f910ac8a..55653e34d4 100644 --- a/openpype/tools/ayon_workfiles/models/workfiles.py +++ b/client/ayon_core/tools/workfiles/models/workfiles.py @@ -6,20 +6,20 @@ import ayon_api from ayon_api.operations import 
OperationsSession -from openpype.client import get_project -from openpype.client.operations import ( +from ayon_core.client import get_project +from ayon_core.client.operations import ( prepare_workfile_info_update_data, ) -from openpype.pipeline.template_data import ( +from ayon_core.pipeline.template_data import ( get_template_data, ) -from openpype.pipeline.workfile import ( +from ayon_core.pipeline.workfile import ( get_workdir_with_workdir_data, get_workfile_template_key, get_last_workfile_with_version, ) -from openpype.pipeline.version_start import get_versioning_start -from openpype.tools.ayon_workfiles.abstract import ( +from ayon_core.pipeline.version_start import get_versioning_start +from ayon_core.tools.workfiles.abstract import ( WorkareaFilepathResult, FileItem, WorkfileInfo, diff --git a/openpype/tools/ayon_workfiles/widgets/__init__.py b/client/ayon_core/tools/workfiles/widgets/__init__.py similarity index 100% rename from openpype/tools/ayon_workfiles/widgets/__init__.py rename to client/ayon_core/tools/workfiles/widgets/__init__.py diff --git a/openpype/tools/ayon_workfiles/widgets/constants.py b/client/ayon_core/tools/workfiles/widgets/constants.py similarity index 100% rename from openpype/tools/ayon_workfiles/widgets/constants.py rename to client/ayon_core/tools/workfiles/widgets/constants.py diff --git a/openpype/tools/ayon_workfiles/widgets/files_widget.py b/client/ayon_core/tools/workfiles/widgets/files_widget.py similarity index 100% rename from openpype/tools/ayon_workfiles/widgets/files_widget.py rename to client/ayon_core/tools/workfiles/widgets/files_widget.py diff --git a/openpype/tools/ayon_workfiles/widgets/files_widget_published.py b/client/ayon_core/tools/workfiles/widgets/files_widget_published.py similarity index 98% rename from openpype/tools/ayon_workfiles/widgets/files_widget_published.py rename to client/ayon_core/tools/workfiles/widgets/files_widget_published.py index 704f7b2f39..bf36d790e9 100644 --- a/openpype/tools/ayon_workfiles/widgets/files_widget_published.py +++ b/client/ayon_core/tools/workfiles/widgets/files_widget_published.py @@ -1,12 +1,12 @@ import qtawesome from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import ( +from ayon_core.style import ( get_default_entity_icon_color, get_disabled_entity_icon_color, ) -from openpype.tools.utils import TreeView -from openpype.tools.utils.delegates import PrettyTimeDelegate +from ayon_core.tools.utils import TreeView +from ayon_core.tools.utils.delegates import PrettyTimeDelegate from .utils import BaseOverlayFrame diff --git a/openpype/tools/ayon_workfiles/widgets/files_widget_workarea.py b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py similarity index 98% rename from openpype/tools/ayon_workfiles/widgets/files_widget_workarea.py rename to client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py index 8eefd3cf81..6a1572deb2 100644 --- a/openpype/tools/ayon_workfiles/widgets/files_widget_workarea.py +++ b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py @@ -1,12 +1,12 @@ import qtawesome from qtpy import QtWidgets, QtCore, QtGui -from openpype.style import ( +from ayon_core.style import ( get_default_entity_icon_color, get_disabled_entity_icon_color, ) -from openpype.tools.utils import TreeView -from openpype.tools.utils.delegates import PrettyTimeDelegate +from ayon_core.tools.utils import TreeView +from ayon_core.tools.utils.delegates import PrettyTimeDelegate FILENAME_ROLE = QtCore.Qt.UserRole + 1 FILEPATH_ROLE = QtCore.Qt.UserRole + 
2 diff --git a/client/ayon_core/tools/workfiles/widgets/save_as_dialog.py b/client/ayon_core/tools/workfiles/widgets/save_as_dialog.py new file mode 100644 index 0000000000..2ed2dd0659 --- /dev/null +++ b/client/ayon_core/tools/workfiles/widgets/save_as_dialog.py @@ -0,0 +1,351 @@ +from qtpy import QtWidgets, QtCore + +from ayon_core.tools.utils import PlaceholderLineEdit + + +class SubversionLineEdit(QtWidgets.QWidget): + """QLineEdit with QPushButton for drop down selection of list of strings""" + + text_changed = QtCore.Signal(str) + + def __init__(self, *args, **kwargs): + super(SubversionLineEdit, self).__init__(*args, **kwargs) + + input_field = PlaceholderLineEdit(self) + menu_btn = QtWidgets.QPushButton(self) + menu_btn.setFixedWidth(18) + + menu = QtWidgets.QMenu(self) + menu_btn.setMenu(menu) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(3) + + layout.addWidget(input_field, 1) + layout.addWidget(menu_btn, 0) + + input_field.textChanged.connect(self.text_changed) + + self.setFocusProxy(input_field) + + self._input_field = input_field + self._menu_btn = menu_btn + self._menu = menu + + def set_placeholder(self, placeholder): + self._input_field.setPlaceholderText(placeholder) + + def set_text(self, text): + self._input_field.setText(text) + + def set_values(self, values): + self._update(values) + + def _on_button_clicked(self): + self._menu.exec_() + + def _on_action_clicked(self, action): + self._input_field.setText(action.text()) + + def _update(self, values): + """Create optional predefined subset names + + Args: + default_names(list): all predefined names + + Returns: + None + """ + + menu = self._menu + button = self._menu_btn + + state = any(values) + button.setEnabled(state) + if state is False: + return + + # Include an empty string + values = [""] + sorted(values) + + # Get and destroy the action group + group = button.findChild(QtWidgets.QActionGroup) + if group: + group.deleteLater() + + # Build new action group + group = QtWidgets.QActionGroup(button) + for name in values: + action = group.addAction(name) + menu.addAction(action) + + group.triggered.connect(self._on_action_clicked) + + +class SaveAsDialog(QtWidgets.QDialog): + """Save as dialog to define a unique filename inside workdir. + + The filename is calculated in controller where UI sends values from + dialog inputs. + + Args: + controller (AbstractWorkfilesFrontend): The control object. 
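# A minimal usage sketch for the SubversionLineEdit defined above: a plain
# line edit paired with a button whose QMenu offers predefined values. The
# widget API (set_placeholder, set_values, text_changed) and the module
# path are taken from this diff; the hint values below are hypothetical.
import sys

from qtpy import QtWidgets

from ayon_core.tools.workfiles.widgets.save_as_dialog import (
    SubversionLineEdit,
)

app = QtWidgets.QApplication(sys.argv)

field = SubversionLineEdit()
field.set_placeholder("Will be part of filename.")
field.set_values(["main", "anim", "lighting"])  # hypothetical comment hints
field.text_changed.connect(lambda text: print("comment:", text))
field.show()

app.exec_()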
+ """ + + def __init__(self, controller, parent): + super(SaveAsDialog, self).__init__(parent=parent) + self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) + + self._controller = controller + + self._folder_id = None + self._task_id = None + self._last_version = None + self._template_key = None + self._comment_value = None + self._version_value = None + self._ext_value = None + self._filename = None + self._workdir = None + + self._result = None + + # Btns widget + btns_widget = QtWidgets.QWidget(self) + + btn_ok = QtWidgets.QPushButton("Ok", btns_widget) + btn_cancel = QtWidgets.QPushButton("Cancel", btns_widget) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.addWidget(btn_ok) + btns_layout.addWidget(btn_cancel) + + # Inputs widget + inputs_widget = QtWidgets.QWidget(self) + + # Version widget + version_widget = QtWidgets.QWidget(inputs_widget) + + # Version number input + version_input = QtWidgets.QSpinBox(version_widget) + version_input.setMinimum(1) + version_input.setMaximum(9999) + + # Last version checkbox + last_version_check = QtWidgets.QCheckBox( + "Next Available Version", version_widget + ) + last_version_check.setChecked(True) + + version_layout = QtWidgets.QHBoxLayout(version_widget) + version_layout.setContentsMargins(0, 0, 0, 0) + version_layout.addWidget(version_input) + version_layout.addWidget(last_version_check) + + # Preview widget + preview_widget = QtWidgets.QLabel("Preview filename", inputs_widget) + preview_widget.setWordWrap(True) + + # Subversion input + subversion_input = SubversionLineEdit(inputs_widget) + subversion_input.set_placeholder("Will be part of filename.") + + # Extensions combobox + extension_combobox = QtWidgets.QComboBox(inputs_widget) + # Add styled delegate to use stylesheets + extension_delegate = QtWidgets.QStyledItemDelegate() + extension_combobox.setItemDelegate(extension_delegate) + + version_label = QtWidgets.QLabel("Version:", inputs_widget) + subversion_label = QtWidgets.QLabel("Subversion:", inputs_widget) + extension_label = QtWidgets.QLabel("Extension:", inputs_widget) + preview_label = QtWidgets.QLabel("Preview:", inputs_widget) + + # Build inputs + inputs_layout = QtWidgets.QGridLayout(inputs_widget) + inputs_layout.addWidget(version_label, 0, 0) + inputs_layout.addWidget(version_widget, 0, 1) + inputs_layout.addWidget(subversion_label, 1, 0) + inputs_layout.addWidget(subversion_input, 1, 1) + inputs_layout.addWidget(extension_label, 2, 0) + inputs_layout.addWidget(extension_combobox, 2, 1) + inputs_layout.addWidget(preview_label, 3, 0) + inputs_layout.addWidget(preview_widget, 3, 1) + + # Build layout + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(inputs_widget) + main_layout.addWidget(btns_widget) + + # Signal callback registration + version_input.valueChanged.connect(self._on_version_spinbox_change) + last_version_check.stateChanged.connect( + self._on_version_checkbox_change + ) + + subversion_input.text_changed.connect(self._on_comment_change) + extension_combobox.currentIndexChanged.connect( + self._on_extension_change) + + btn_ok.pressed.connect(self._on_ok_pressed) + btn_cancel.pressed.connect(self._on_cancel_pressed) + + # Store objects + self._inputs_layout = inputs_layout + + self._btn_ok = btn_ok + self._btn_cancel = btn_cancel + + self._version_widget = version_widget + + self._version_input = version_input + self._last_version_check = last_version_check + + self._extension_delegate = extension_delegate + self._extension_combobox = extension_combobox + 
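# SaveAsDialog is controller-driven: update_context() (further down in this
# file) pulls its initial state from get_workarea_save_as_data(), and every
# input change ends in fill_workarea_filepath(), whose result provides the
# preview filename and an "exists" flag that disables the Ok button. A
# hypothetical stub showing only the contract the dialog relies on; key
# names and call signatures are taken from this file, the sample values
# are invented.
import types


class _StubWorkfilesController:
    def get_selected_context(self):
        return {"folder_id": "f-001", "task_id": "t-001"}

    def get_workarea_save_as_data(self, folder_id, task_id):
        return {
            "workdir": "/projects/demo/shots/sh010/work",
            "ext": ".ma",
            "extensions": [".ma", ".mb"],
            "template_key": "work",
            "last_version": 3,
            "comment": "",
            "comment_hints": ["anim", "layout"],
            "template_has_version": True,
            "template_has_comment": True,
        }

    def fill_workarea_filepath(
        self, folder_id, task_id, ext, use_last_version, version, comment
    ):
        # Stands in for WorkareaFilepathResult; the dialog only reads the
        # "filename" and "exists" attributes of the returned object.
        filename = "sh010_v{:0>3}{}".format(version or 3, ext)
        return types.SimpleNamespace(filename=filename, exists=False)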
self._subversion_input = subversion_input + self._preview_widget = preview_widget + + self._version_label = version_label + self._subversion_label = subversion_label + self._extension_label = extension_label + self._preview_label = preview_label + + # Post init setup + + # Allow "Enter" key to accept the save. + btn_ok.setDefault(True) + + # Disable version input if last version is checked + version_input.setEnabled(not last_version_check.isChecked()) + + # Force default focus to comment, some hosts didn't automatically + # apply focus to this line edit (e.g. Houdini) + subversion_input.setFocus() + + def get_result(self): + return self._result + + def update_context(self): + # Add version only if template contains version key + # - since the version can be padded with "{version:0>4}" we only search + # for "{version". + selected_context = self._controller.get_selected_context() + folder_id = selected_context["folder_id"] + task_id = selected_context["task_id"] + data = self._controller.get_workarea_save_as_data(folder_id, task_id) + last_version = data["last_version"] + comment = data["comment"] + comment_hints = data["comment_hints"] + + template_has_version = data["template_has_version"] + template_has_comment = data["template_has_comment"] + + self._folder_id = folder_id + self._task_id = task_id + self._workdir = data["workdir"] + self._comment_value = data["comment"] + self._ext_value = data["ext"] + self._template_key = data["template_key"] + self._last_version = data["last_version"] + + self._extension_combobox.clear() + self._extension_combobox.addItems(data["extensions"]) + + self._version_input.setValue(last_version) + + vw_idx = self._inputs_layout.indexOf(self._version_widget) + self._version_label.setVisible(template_has_version) + self._version_widget.setVisible(template_has_version) + if template_has_version: + if vw_idx == -1: + self._inputs_layout.addWidget(self._version_label, 0, 0) + self._inputs_layout.addWidget(self._version_widget, 0, 1) + elif vw_idx != -1: + self._inputs_layout.takeAt(vw_idx) + self._inputs_layout.takeAt( + self._inputs_layout.indexOf(self._version_label) + ) + + cw_idx = self._inputs_layout.indexOf(self._subversion_input) + self._subversion_label.setVisible(template_has_comment) + self._subversion_input.setVisible(template_has_comment) + if template_has_comment: + if cw_idx == -1: + self._inputs_layout.addWidget(self._subversion_label, 1, 0) + self._inputs_layout.addWidget(self._subversion_input, 1, 1) + elif cw_idx != -1: + self._inputs_layout.takeAt(cw_idx) + self._inputs_layout.takeAt( + self._inputs_layout.indexOf(self._subversion_label) + ) + + if template_has_comment: + self._subversion_input.set_text(comment or "") + self._subversion_input.set_values(comment_hints) + self._update_filename() + + def _on_version_spinbox_change(self, value): + if value == self._version_value: + return + self._version_value = value + if not self._last_version_check.isChecked(): + self._update_filename() + + def _on_version_checkbox_change(self): + use_last_version = self._last_version_check.isChecked() + self._version_input.setEnabled(not use_last_version) + if use_last_version: + self._version_input.blockSignals(True) + self._version_input.setValue(self._last_version) + self._version_input.blockSignals(False) + self._update_filename() + + def _on_comment_change(self, text): + if self._comment_value == text: + return + self._comment_value = text + self._update_filename() + + def _on_extension_change(self): + ext = self._extension_combobox.currentText() + if ext 
== self._ext_value: + return + self._ext_value = ext + self._update_filename() + + def _on_ok_pressed(self): + self._result = { + "filename": self._filename, + "workdir": self._workdir, + "folder_id": self._folder_id, + "task_id": self._task_id, + "template_key": self._template_key, + } + self.close() + + def _on_cancel_pressed(self): + self.close() + + def _update_filename(self): + result = self._controller.fill_workarea_filepath( + self._folder_id, + self._task_id, + self._ext_value, + self._last_version_check.isChecked(), + self._version_value, + self._comment_value, + ) + self._filename = result.filename + self._btn_ok.setEnabled(not result.exists) + + if result.exists: + self._preview_widget.setText(( + "Cannot create \"{}\" because file exists!" + "" + ).format(result.filename)) + else: + self._preview_widget.setText( + "{}".format(result.filename) + ) diff --git a/openpype/tools/ayon_workfiles/widgets/side_panel.py b/client/ayon_core/tools/workfiles/widgets/side_panel.py similarity index 100% rename from openpype/tools/ayon_workfiles/widgets/side_panel.py rename to client/ayon_core/tools/workfiles/widgets/side_panel.py diff --git a/openpype/tools/ayon_workfiles/widgets/utils.py b/client/ayon_core/tools/workfiles/widgets/utils.py similarity index 100% rename from openpype/tools/ayon_workfiles/widgets/utils.py rename to client/ayon_core/tools/workfiles/widgets/utils.py diff --git a/client/ayon_core/tools/workfiles/widgets/window.py b/client/ayon_core/tools/workfiles/widgets/window.py new file mode 100644 index 0000000000..86a84b6195 --- /dev/null +++ b/client/ayon_core/tools/workfiles/widgets/window.py @@ -0,0 +1,383 @@ +from qtpy import QtCore, QtWidgets, QtGui + +from ayon_core import style, resources +from ayon_core.tools.utils import ( + PlaceholderLineEdit, + MessageOverlayObject, +) + +from ayon_core.tools.ayon_utils.widgets import FoldersWidget, TasksWidget +from ayon_core.tools.workfiles.control import BaseWorkfileController +from ayon_core.tools.utils import GoToCurrentButton, RefreshButton + +from .side_panel import SidePanelWidget +from .files_widget import FilesWidget +from .utils import BaseOverlayFrame + + +class InvalidHostOverlay(BaseOverlayFrame): + def __init__(self, parent): + super(InvalidHostOverlay, self).__init__(parent) + + label_widget = QtWidgets.QLabel( + ( + "Workfiles tool is not supported in this host/DCCs." + "
<br/><br/>
This may be caused by a bug." + " Please contact your TD for more information." + ), + self + ) + label_widget.setAlignment(QtCore.Qt.AlignCenter) + label_widget.setObjectName("OverlayFrameLabel") + + layout = QtWidgets.QVBoxLayout(self) + layout.addStretch(2) + layout.addWidget(label_widget, 0, QtCore.Qt.AlignCenter) + layout.addStretch(3) + + label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + +class WorkfilesToolWindow(QtWidgets.QWidget): + """WorkFiles Window. + + Main windows of workfiles tool. + + Args: + controller (AbstractWorkfilesFrontend): Frontend controller. + parent (Optional[QtWidgets.QWidget]): Parent widget. + """ + + title = "Work Files" + + def __init__(self, controller=None, parent=None): + super(WorkfilesToolWindow, self).__init__(parent=parent) + + if controller is None: + controller = BaseWorkfileController() + + self.setWindowTitle(self.title) + icon = QtGui.QIcon(resources.get_ayon_icon_filepath()) + self.setWindowIcon(icon) + flags = self.windowFlags() | QtCore.Qt.Window + self.setWindowFlags(flags) + + self._default_window_flags = flags + + self._folders_widget = None + self._folder_filter_input = None + + self._files_widget = None + + self._first_show = True + self._controller_refreshed = False + self._context_to_set = None + # Host validation should happen only once + self._host_is_valid = None + + self._controller = controller + + # Create pages widget and set it as central widget + pages_widget = QtWidgets.QStackedWidget(self) + + home_page_widget = QtWidgets.QWidget(pages_widget) + home_body_widget = QtWidgets.QWidget(home_page_widget) + + col_1_widget = self._create_col_1_widget(controller, parent) + tasks_widget = TasksWidget( + controller, home_body_widget, handle_expected_selection=True + ) + col_3_widget = self._create_col_3_widget(controller, home_body_widget) + side_panel = SidePanelWidget(controller, home_body_widget) + + pages_widget.addWidget(home_page_widget) + + # Build home + home_page_layout = QtWidgets.QVBoxLayout(home_page_widget) + home_page_layout.addWidget(home_body_widget) + + # Build home - body + body_layout = QtWidgets.QVBoxLayout(home_body_widget) + split_widget = QtWidgets.QSplitter(home_body_widget) + split_widget.addWidget(col_1_widget) + split_widget.addWidget(tasks_widget) + split_widget.addWidget(col_3_widget) + split_widget.addWidget(side_panel) + split_widget.setSizes([255, 160, 455, 175]) + + body_layout.addWidget(split_widget) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.addWidget(pages_widget, 1) + + overlay_messages_widget = MessageOverlayObject(self) + overlay_invalid_host = InvalidHostOverlay(self) + overlay_invalid_host.setVisible(False) + + first_show_timer = QtCore.QTimer() + first_show_timer.setSingleShot(True) + first_show_timer.setInterval(50) + + first_show_timer.timeout.connect(self._on_first_show) + + controller.register_event_callback( + "save_as.finished", + self._on_save_as_finished, + ) + controller.register_event_callback( + "copy_representation.finished", + self._on_copy_representation_finished, + ) + controller.register_event_callback( + "workfile_duplicate.finished", + self._on_duplicate_finished + ) + controller.register_event_callback( + "open_workfile.finished", + self._on_open_finished + ) + controller.register_event_callback( + "controller.reset.started", + self._on_controller_refresh_started, + ) + controller.register_event_callback( + "controller.reset.finished", + self._on_controller_refresh_finished, + ) + + self._overlay_messages_widget = overlay_messages_widget + 
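# The window subscribes to controller events by topic name, and each handler
# receives a mapping-like event where event["failed"] reports the outcome
# (see the _on_*_finished handlers below). A self-contained sketch of that
# pattern with a hypothetical minimal event hub; the topic and payload shape
# are taken from this file.
class _TinyEventHub:
    def __init__(self):
        self._callbacks = {}

    def register_event_callback(self, topic, callback):
        self._callbacks.setdefault(topic, []).append(callback)

    def emit(self, topic, event):
        for callback in self._callbacks.get(topic, []):
            callback(event)


def _on_save_as_finished(event):
    if event["failed"]:
        print("Failed to save workfile")
    else:
        print("Workfile saved")


hub = _TinyEventHub()
hub.register_event_callback("save_as.finished", _on_save_as_finished)
hub.emit("save_as.finished", {"failed": False})  # -> "Workfile saved"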
self._overlay_invalid_host = overlay_invalid_host + self._home_page_widget = home_page_widget + self._pages_widget = pages_widget + self._home_body_widget = home_body_widget + self._split_widget = split_widget + + self._tasks_widget = tasks_widget + self._side_panel = side_panel + + self._first_show_timer = first_show_timer + + self._post_init() + + def _post_init(self): + self._on_published_checkbox_changed() + + # Force focus on the open button by default, required for Houdini. + self._files_widget.setFocus() + + self.resize(1200, 600) + + def _create_col_1_widget(self, controller, parent): + col_widget = QtWidgets.QWidget(parent) + header_widget = QtWidgets.QWidget(col_widget) + + folder_filter_input = PlaceholderLineEdit(header_widget) + folder_filter_input.setPlaceholderText("Filter folders..") + + go_to_current_btn = GoToCurrentButton(header_widget) + refresh_btn = RefreshButton(header_widget) + + folder_widget = FoldersWidget( + controller, col_widget, handle_expected_selection=True + ) + + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + header_layout.addWidget(folder_filter_input, 1) + header_layout.addWidget(go_to_current_btn, 0) + header_layout.addWidget(refresh_btn, 0) + + col_layout = QtWidgets.QVBoxLayout(col_widget) + col_layout.setContentsMargins(0, 0, 0, 0) + col_layout.addWidget(header_widget, 0) + col_layout.addWidget(folder_widget, 1) + + folder_filter_input.textChanged.connect(self._on_folder_filter_change) + go_to_current_btn.clicked.connect(self._on_go_to_current_clicked) + refresh_btn.clicked.connect(self._on_refresh_clicked) + + self._folder_filter_input = folder_filter_input + self._folders_widget = folder_widget + + return col_widget + + def _create_col_3_widget(self, controller, parent): + col_widget = QtWidgets.QWidget(parent) + + header_widget = QtWidgets.QWidget(col_widget) + + files_filter_input = PlaceholderLineEdit(header_widget) + files_filter_input.setPlaceholderText("Filter files..") + + published_checkbox = QtWidgets.QCheckBox("Published", header_widget) + published_checkbox.setToolTip("Show published workfiles") + + header_layout = QtWidgets.QHBoxLayout(header_widget) + header_layout.setContentsMargins(0, 0, 0, 0) + header_layout.addWidget(files_filter_input, 1) + header_layout.addWidget(published_checkbox, 0) + + files_widget = FilesWidget(controller, col_widget) + + col_layout = QtWidgets.QVBoxLayout(col_widget) + col_layout.setContentsMargins(0, 0, 0, 0) + col_layout.addWidget(header_widget, 0) + col_layout.addWidget(files_widget, 1) + + files_filter_input.textChanged.connect( + self._on_file_text_filter_change) + published_checkbox.stateChanged.connect( + self._on_published_checkbox_changed + ) + + self._files_filter_input = files_filter_input + self._published_checkbox = published_checkbox + + self._files_widget = files_widget + + return col_widget + + def set_window_on_top(self, on_top): + """Set window on top of other windows. + + Args: + on_top (bool): Show on top of other windows. + """ + + flags = self._default_window_flags + if on_top: + flags |= QtCore.Qt.WindowStaysOnTopHint + if self.windowFlags() != flags: + self.setWindowFlags(flags) + + def ensure_visible(self, use_context=True, save=True, on_top=False): + """Ensure the window is visible. + + This method expects arguments for compatibility with previous variant + of Workfiles tool. + + Args: + use_context (Optional[bool]): DEPRECATED: This argument is + ignored. + save (Optional[bool]): Allow to save workfiles. 
+ on_top (Optional[bool]): Show on top of other windows. + """ + + save = True if save is None else save + on_top = False if on_top is None else on_top + + is_visible = self.isVisible() + self._controller.set_save_enabled(save) + self.set_window_on_top(on_top) + + self.show() + self.raise_() + self.activateWindow() + if is_visible: + self.refresh() + + def refresh(self): + """Trigger refresh of workfiles tool controller.""" + + self._controller.reset() + + def showEvent(self, event): + super(WorkfilesToolWindow, self).showEvent(event) + if self._first_show: + self._first_show = False + self._first_show_timer.start() + self.setStyleSheet(style.load_stylesheet()) + + def keyPressEvent(self, event): + """Custom keyPressEvent. + + Override keyPressEvent to do nothing so that Maya's panels won't + take focus when pressing "SHIFT" whilst mouse is over viewport or + outliner. This way users don't accidentally perform Maya commands + whilst trying to name an instance. + """ + + pass + + def _on_first_show(self): + if not self._controller_refreshed: + self.refresh() + + def _on_file_text_filter_change(self, text): + self._files_widget.set_text_filter(text) + + def _on_published_checkbox_changed(self): + """Publish mode changed. + + Tell children widgets about it so they can handle the mode. + """ + + published_mode = self._published_checkbox.isChecked() + self._files_widget.set_published_mode(published_mode) + self._side_panel.set_published_mode(published_mode) + + def _on_folder_filter_change(self, text): + self._folders_widget.set_name_filter(text) + + def _on_go_to_current_clicked(self): + self._controller.go_to_current_context() + + def _on_refresh_clicked(self): + self.refresh() + + def _on_controller_refresh_started(self): + self._controller_refreshed = True + + def _on_controller_refresh_finished(self): + if self._host_is_valid is None: + self._host_is_valid = self._controller.is_host_valid() + self._overlay_invalid_host.setVisible(not self._host_is_valid) + + if not self._host_is_valid: + return + + self._folders_widget.set_project_name( + self._controller.get_current_project_name() + ) + + def _on_save_as_finished(self, event): + if event["failed"]: + self._overlay_messages_widget.add_message( + "Failed to save workfile", + "error", + ) + else: + self._overlay_messages_widget.add_message( + "Workfile saved" + ) + + def _on_copy_representation_finished(self, event): + if event["failed"]: + self._overlay_messages_widget.add_message( + "Failed to copy published workfile", + "error", + ) + else: + self._overlay_messages_widget.add_message( + "Publish workfile saved" + ) + + def _on_duplicate_finished(self, event): + if event["failed"]: + self._overlay_messages_widget.add_message( + "Failed to duplicate workfile", + "error", + ) + else: + self._overlay_messages_widget.add_message( + "Workfile duplicated" + ) + + def _on_open_finished(self, event): + if event["failed"]: + self._overlay_messages_widget.add_message( + "Failed to open workfile", + "error", + ) + else: + self.close() diff --git a/openpype/tools/assetlinks/__init__.py b/client/ayon_core/vendor/__init__.py similarity index 100% rename from openpype/tools/assetlinks/__init__.py rename to client/ayon_core/vendor/__init__.py diff --git a/openpype/vendor/python/common/README.md b/client/ayon_core/vendor/python/common/README.md similarity index 100% rename from openpype/vendor/python/common/README.md rename to client/ayon_core/vendor/python/common/README.md diff --git a/openpype/vendor/python/common/capture.py 
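# A usage sketch for the window defined above, as a host integration might
# drive it: constructing WorkfilesToolWindow without a controller falls back
# to BaseWorkfileController, and ensure_visible() shows, raises and
# refreshes the tool. Illustrative only; outside a configured AYON host the
# controller reset is not expected to succeed.
import sys

from qtpy import QtWidgets

from ayon_core.tools.workfiles.widgets.window import WorkfilesToolWindow

app = QtWidgets.QApplication(sys.argv)

window = WorkfilesToolWindow()  # defaults to BaseWorkfileController
window.ensure_visible(save=True, on_top=True)  # keep above host windows

app.exec_()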
b/client/ayon_core/vendor/python/common/capture.py similarity index 100% rename from openpype/vendor/python/common/capture.py rename to client/ayon_core/vendor/python/common/capture.py diff --git a/openpype/vendor/python/common/pysync.py b/client/ayon_core/vendor/python/common/pysync.py similarity index 100% rename from openpype/vendor/python/common/pysync.py rename to client/ayon_core/vendor/python/common/pysync.py diff --git a/openpype/vendor/python/common/qargparse.py b/client/ayon_core/vendor/python/common/qargparse.py similarity index 100% rename from openpype/vendor/python/common/qargparse.py rename to client/ayon_core/vendor/python/common/qargparse.py diff --git a/openpype/vendor/python/common/scriptsmenu/__init__.py b/client/ayon_core/vendor/python/common/scriptsmenu/__init__.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/__init__.py rename to client/ayon_core/vendor/python/common/scriptsmenu/__init__.py diff --git a/openpype/vendor/python/common/scriptsmenu/action.py b/client/ayon_core/vendor/python/common/scriptsmenu/action.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/action.py rename to client/ayon_core/vendor/python/common/scriptsmenu/action.py diff --git a/openpype/vendor/python/common/scriptsmenu/launchformari.py b/client/ayon_core/vendor/python/common/scriptsmenu/launchformari.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/launchformari.py rename to client/ayon_core/vendor/python/common/scriptsmenu/launchformari.py diff --git a/openpype/vendor/python/common/scriptsmenu/launchformaya.py b/client/ayon_core/vendor/python/common/scriptsmenu/launchformaya.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/launchformaya.py rename to client/ayon_core/vendor/python/common/scriptsmenu/launchformaya.py diff --git a/openpype/vendor/python/common/scriptsmenu/launchfornuke.py b/client/ayon_core/vendor/python/common/scriptsmenu/launchfornuke.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/launchfornuke.py rename to client/ayon_core/vendor/python/common/scriptsmenu/launchfornuke.py diff --git a/openpype/vendor/python/common/scriptsmenu/scriptsmenu.py b/client/ayon_core/vendor/python/common/scriptsmenu/scriptsmenu.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/scriptsmenu.py rename to client/ayon_core/vendor/python/common/scriptsmenu/scriptsmenu.py diff --git a/openpype/vendor/python/common/scriptsmenu/version.py b/client/ayon_core/vendor/python/common/scriptsmenu/version.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/version.py rename to client/ayon_core/vendor/python/common/scriptsmenu/version.py diff --git a/openpype/vendor/python/python_2/README.md b/client/ayon_core/vendor/python/python_2/README.md similarity index 100% rename from openpype/vendor/python/python_2/README.md rename to client/ayon_core/vendor/python/python_2/README.md diff --git a/openpype/vendor/python/python_2/arrow/__init__.py b/client/ayon_core/vendor/python/python_2/arrow/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/__init__.py rename to client/ayon_core/vendor/python/python_2/arrow/__init__.py diff --git a/openpype/vendor/python/python_2/arrow/_version.py b/client/ayon_core/vendor/python/python_2/arrow/_version.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/_version.py rename to 
client/ayon_core/vendor/python/python_2/arrow/_version.py diff --git a/openpype/vendor/python/python_2/arrow/api.py b/client/ayon_core/vendor/python/python_2/arrow/api.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/api.py rename to client/ayon_core/vendor/python/python_2/arrow/api.py diff --git a/openpype/vendor/python/python_2/arrow/arrow.py b/client/ayon_core/vendor/python/python_2/arrow/arrow.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/arrow.py rename to client/ayon_core/vendor/python/python_2/arrow/arrow.py diff --git a/openpype/vendor/python/python_2/arrow/constants.py b/client/ayon_core/vendor/python/python_2/arrow/constants.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/constants.py rename to client/ayon_core/vendor/python/python_2/arrow/constants.py diff --git a/openpype/vendor/python/python_2/arrow/factory.py b/client/ayon_core/vendor/python/python_2/arrow/factory.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/factory.py rename to client/ayon_core/vendor/python/python_2/arrow/factory.py diff --git a/openpype/vendor/python/python_2/arrow/formatter.py b/client/ayon_core/vendor/python/python_2/arrow/formatter.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/formatter.py rename to client/ayon_core/vendor/python/python_2/arrow/formatter.py diff --git a/openpype/vendor/python/python_2/arrow/locales.py b/client/ayon_core/vendor/python/python_2/arrow/locales.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/locales.py rename to client/ayon_core/vendor/python/python_2/arrow/locales.py diff --git a/openpype/vendor/python/python_2/arrow/parser.py b/client/ayon_core/vendor/python/python_2/arrow/parser.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/parser.py rename to client/ayon_core/vendor/python/python_2/arrow/parser.py diff --git a/openpype/vendor/python/python_2/arrow/util.py b/client/ayon_core/vendor/python/python_2/arrow/util.py similarity index 100% rename from openpype/vendor/python/python_2/arrow/util.py rename to client/ayon_core/vendor/python/python_2/arrow/util.py diff --git a/openpype/vendor/python/python_2/attr/__init__.py b/client/ayon_core/vendor/python/python_2/attr/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/attr/__init__.py rename to client/ayon_core/vendor/python/python_2/attr/__init__.py diff --git a/openpype/vendor/python/python_2/attr/__init__.pyi b/client/ayon_core/vendor/python/python_2/attr/__init__.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/__init__.pyi rename to client/ayon_core/vendor/python/python_2/attr/__init__.pyi diff --git a/openpype/vendor/python/python_2/attr/_cmp.py b/client/ayon_core/vendor/python/python_2/attr/_cmp.py similarity index 100% rename from openpype/vendor/python/python_2/attr/_cmp.py rename to client/ayon_core/vendor/python/python_2/attr/_cmp.py diff --git a/openpype/vendor/python/python_2/attr/_cmp.pyi b/client/ayon_core/vendor/python/python_2/attr/_cmp.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/_cmp.pyi rename to client/ayon_core/vendor/python/python_2/attr/_cmp.pyi diff --git a/openpype/vendor/python/python_2/attr/_compat.py b/client/ayon_core/vendor/python/python_2/attr/_compat.py similarity index 100% rename from openpype/vendor/python/python_2/attr/_compat.py rename to client/ayon_core/vendor/python/python_2/attr/_compat.py diff --git 
a/openpype/vendor/python/python_2/attr/_config.py b/client/ayon_core/vendor/python/python_2/attr/_config.py similarity index 100% rename from openpype/vendor/python/python_2/attr/_config.py rename to client/ayon_core/vendor/python/python_2/attr/_config.py diff --git a/openpype/vendor/python/python_2/attr/_funcs.py b/client/ayon_core/vendor/python/python_2/attr/_funcs.py similarity index 100% rename from openpype/vendor/python/python_2/attr/_funcs.py rename to client/ayon_core/vendor/python/python_2/attr/_funcs.py diff --git a/openpype/vendor/python/python_2/attr/_make.py b/client/ayon_core/vendor/python/python_2/attr/_make.py similarity index 100% rename from openpype/vendor/python/python_2/attr/_make.py rename to client/ayon_core/vendor/python/python_2/attr/_make.py diff --git a/openpype/vendor/python/python_2/attr/_next_gen.py b/client/ayon_core/vendor/python/python_2/attr/_next_gen.py similarity index 100% rename from openpype/vendor/python/python_2/attr/_next_gen.py rename to client/ayon_core/vendor/python/python_2/attr/_next_gen.py diff --git a/openpype/vendor/python/python_2/attr/_version_info.py b/client/ayon_core/vendor/python/python_2/attr/_version_info.py similarity index 100% rename from openpype/vendor/python/python_2/attr/_version_info.py rename to client/ayon_core/vendor/python/python_2/attr/_version_info.py diff --git a/openpype/vendor/python/python_2/attr/_version_info.pyi b/client/ayon_core/vendor/python/python_2/attr/_version_info.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/_version_info.pyi rename to client/ayon_core/vendor/python/python_2/attr/_version_info.pyi diff --git a/openpype/vendor/python/python_2/attr/converters.py b/client/ayon_core/vendor/python/python_2/attr/converters.py similarity index 100% rename from openpype/vendor/python/python_2/attr/converters.py rename to client/ayon_core/vendor/python/python_2/attr/converters.py diff --git a/openpype/vendor/python/python_2/attr/converters.pyi b/client/ayon_core/vendor/python/python_2/attr/converters.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/converters.pyi rename to client/ayon_core/vendor/python/python_2/attr/converters.pyi diff --git a/openpype/vendor/python/python_2/attr/exceptions.py b/client/ayon_core/vendor/python/python_2/attr/exceptions.py similarity index 100% rename from openpype/vendor/python/python_2/attr/exceptions.py rename to client/ayon_core/vendor/python/python_2/attr/exceptions.py diff --git a/openpype/vendor/python/python_2/attr/exceptions.pyi b/client/ayon_core/vendor/python/python_2/attr/exceptions.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/exceptions.pyi rename to client/ayon_core/vendor/python/python_2/attr/exceptions.pyi diff --git a/openpype/vendor/python/python_2/attr/filters.py b/client/ayon_core/vendor/python/python_2/attr/filters.py similarity index 100% rename from openpype/vendor/python/python_2/attr/filters.py rename to client/ayon_core/vendor/python/python_2/attr/filters.py diff --git a/openpype/vendor/python/python_2/attr/filters.pyi b/client/ayon_core/vendor/python/python_2/attr/filters.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/filters.pyi rename to client/ayon_core/vendor/python/python_2/attr/filters.pyi diff --git a/openpype/vendor/python/python_2/attr/py.typed b/client/ayon_core/vendor/python/python_2/attr/py.typed similarity index 100% rename from openpype/vendor/python/python_2/attr/py.typed rename to 
client/ayon_core/vendor/python/python_2/attr/py.typed diff --git a/openpype/vendor/python/python_2/attr/setters.py b/client/ayon_core/vendor/python/python_2/attr/setters.py similarity index 100% rename from openpype/vendor/python/python_2/attr/setters.py rename to client/ayon_core/vendor/python/python_2/attr/setters.py diff --git a/openpype/vendor/python/python_2/attr/setters.pyi b/client/ayon_core/vendor/python/python_2/attr/setters.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/setters.pyi rename to client/ayon_core/vendor/python/python_2/attr/setters.pyi diff --git a/openpype/vendor/python/python_2/attr/validators.py b/client/ayon_core/vendor/python/python_2/attr/validators.py similarity index 100% rename from openpype/vendor/python/python_2/attr/validators.py rename to client/ayon_core/vendor/python/python_2/attr/validators.py diff --git a/openpype/vendor/python/python_2/attr/validators.pyi b/client/ayon_core/vendor/python/python_2/attr/validators.pyi similarity index 100% rename from openpype/vendor/python/python_2/attr/validators.pyi rename to client/ayon_core/vendor/python/python_2/attr/validators.pyi diff --git a/openpype/vendor/python/python_2/attrs/__init__.py b/client/ayon_core/vendor/python/python_2/attrs/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/attrs/__init__.py rename to client/ayon_core/vendor/python/python_2/attrs/__init__.py diff --git a/openpype/vendor/python/python_2/attrs/__init__.pyi b/client/ayon_core/vendor/python/python_2/attrs/__init__.pyi similarity index 100% rename from openpype/vendor/python/python_2/attrs/__init__.pyi rename to client/ayon_core/vendor/python/python_2/attrs/__init__.pyi diff --git a/openpype/vendor/python/python_2/attrs/converters.py b/client/ayon_core/vendor/python/python_2/attrs/converters.py similarity index 100% rename from openpype/vendor/python/python_2/attrs/converters.py rename to client/ayon_core/vendor/python/python_2/attrs/converters.py diff --git a/openpype/vendor/python/python_2/attrs/exceptions.py b/client/ayon_core/vendor/python/python_2/attrs/exceptions.py similarity index 100% rename from openpype/vendor/python/python_2/attrs/exceptions.py rename to client/ayon_core/vendor/python/python_2/attrs/exceptions.py diff --git a/openpype/vendor/python/python_2/attrs/filters.py b/client/ayon_core/vendor/python/python_2/attrs/filters.py similarity index 100% rename from openpype/vendor/python/python_2/attrs/filters.py rename to client/ayon_core/vendor/python/python_2/attrs/filters.py diff --git a/openpype/vendor/python/python_2/attrs/py.typed b/client/ayon_core/vendor/python/python_2/attrs/py.typed similarity index 100% rename from openpype/vendor/python/python_2/attrs/py.typed rename to client/ayon_core/vendor/python/python_2/attrs/py.typed diff --git a/openpype/vendor/python/python_2/attrs/setters.py b/client/ayon_core/vendor/python/python_2/attrs/setters.py similarity index 100% rename from openpype/vendor/python/python_2/attrs/setters.py rename to client/ayon_core/vendor/python/python_2/attrs/setters.py diff --git a/openpype/vendor/python/python_2/attrs/validators.py b/client/ayon_core/vendor/python/python_2/attrs/validators.py similarity index 100% rename from openpype/vendor/python/python_2/attrs/validators.py rename to client/ayon_core/vendor/python/python_2/attrs/validators.py diff --git a/openpype/vendor/python/python_2/backports/__init__.py b/client/ayon_core/vendor/python/python_2/backports/__init__.py similarity index 100% rename from 
openpype/vendor/python/python_2/backports/__init__.py rename to client/ayon_core/vendor/python/python_2/backports/__init__.py diff --git a/openpype/vendor/python/python_2/backports/configparser/__init__.py b/client/ayon_core/vendor/python/python_2/backports/configparser/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/backports/configparser/__init__.py rename to client/ayon_core/vendor/python/python_2/backports/configparser/__init__.py diff --git a/openpype/vendor/python/python_2/backports/configparser/helpers.py b/client/ayon_core/vendor/python/python_2/backports/configparser/helpers.py similarity index 100% rename from openpype/vendor/python/python_2/backports/configparser/helpers.py rename to client/ayon_core/vendor/python/python_2/backports/configparser/helpers.py diff --git a/openpype/vendor/python/python_2/backports/functools_lru_cache.py b/client/ayon_core/vendor/python/python_2/backports/functools_lru_cache.py similarity index 100% rename from openpype/vendor/python/python_2/backports/functools_lru_cache.py rename to client/ayon_core/vendor/python/python_2/backports/functools_lru_cache.py diff --git a/openpype/vendor/python/python_2/builtins/__init__.py b/client/ayon_core/vendor/python/python_2/builtins/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/builtins/__init__.py rename to client/ayon_core/vendor/python/python_2/builtins/__init__.py diff --git a/openpype/vendor/python/python_2/certifi/__init__.py b/client/ayon_core/vendor/python/python_2/certifi/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/certifi/__init__.py rename to client/ayon_core/vendor/python/python_2/certifi/__init__.py diff --git a/openpype/vendor/python/python_2/certifi/__main__.py b/client/ayon_core/vendor/python/python_2/certifi/__main__.py similarity index 100% rename from openpype/vendor/python/python_2/certifi/__main__.py rename to client/ayon_core/vendor/python/python_2/certifi/__main__.py diff --git a/openpype/vendor/python/python_2/certifi/cacert.pem b/client/ayon_core/vendor/python/python_2/certifi/cacert.pem similarity index 100% rename from openpype/vendor/python/python_2/certifi/cacert.pem rename to client/ayon_core/vendor/python/python_2/certifi/cacert.pem diff --git a/openpype/vendor/python/python_2/certifi/core.py b/client/ayon_core/vendor/python/python_2/certifi/core.py similarity index 100% rename from openpype/vendor/python/python_2/certifi/core.py rename to client/ayon_core/vendor/python/python_2/certifi/core.py diff --git a/openpype/vendor/python/python_2/chardet/__init__.py b/client/ayon_core/vendor/python/python_2/chardet/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/__init__.py rename to client/ayon_core/vendor/python/python_2/chardet/__init__.py diff --git a/openpype/vendor/python/python_2/chardet/big5freq.py b/client/ayon_core/vendor/python/python_2/chardet/big5freq.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/big5freq.py rename to client/ayon_core/vendor/python/python_2/chardet/big5freq.py diff --git a/openpype/vendor/python/python_2/chardet/big5prober.py b/client/ayon_core/vendor/python/python_2/chardet/big5prober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/big5prober.py rename to client/ayon_core/vendor/python/python_2/chardet/big5prober.py diff --git a/openpype/vendor/python/python_2/chardet/chardistribution.py b/client/ayon_core/vendor/python/python_2/chardet/chardistribution.py similarity 
index 100% rename from openpype/vendor/python/python_2/chardet/chardistribution.py rename to client/ayon_core/vendor/python/python_2/chardet/chardistribution.py diff --git a/openpype/vendor/python/python_2/chardet/charsetgroupprober.py b/client/ayon_core/vendor/python/python_2/chardet/charsetgroupprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/charsetgroupprober.py rename to client/ayon_core/vendor/python/python_2/chardet/charsetgroupprober.py diff --git a/openpype/vendor/python/python_2/chardet/charsetprober.py b/client/ayon_core/vendor/python/python_2/chardet/charsetprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/charsetprober.py rename to client/ayon_core/vendor/python/python_2/chardet/charsetprober.py diff --git a/openpype/vendor/python/python_2/chardet/cli/__init__.py b/client/ayon_core/vendor/python/python_2/chardet/cli/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/cli/__init__.py rename to client/ayon_core/vendor/python/python_2/chardet/cli/__init__.py diff --git a/openpype/vendor/python/python_2/chardet/cli/chardetect.py b/client/ayon_core/vendor/python/python_2/chardet/cli/chardetect.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/cli/chardetect.py rename to client/ayon_core/vendor/python/python_2/chardet/cli/chardetect.py diff --git a/openpype/vendor/python/python_2/chardet/codingstatemachine.py b/client/ayon_core/vendor/python/python_2/chardet/codingstatemachine.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/codingstatemachine.py rename to client/ayon_core/vendor/python/python_2/chardet/codingstatemachine.py diff --git a/openpype/vendor/python/python_2/chardet/compat.py b/client/ayon_core/vendor/python/python_2/chardet/compat.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/compat.py rename to client/ayon_core/vendor/python/python_2/chardet/compat.py diff --git a/openpype/vendor/python/python_2/chardet/cp949prober.py b/client/ayon_core/vendor/python/python_2/chardet/cp949prober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/cp949prober.py rename to client/ayon_core/vendor/python/python_2/chardet/cp949prober.py diff --git a/openpype/vendor/python/python_2/chardet/enums.py b/client/ayon_core/vendor/python/python_2/chardet/enums.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/enums.py rename to client/ayon_core/vendor/python/python_2/chardet/enums.py diff --git a/openpype/vendor/python/python_2/chardet/escprober.py b/client/ayon_core/vendor/python/python_2/chardet/escprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/escprober.py rename to client/ayon_core/vendor/python/python_2/chardet/escprober.py diff --git a/openpype/vendor/python/python_2/chardet/escsm.py b/client/ayon_core/vendor/python/python_2/chardet/escsm.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/escsm.py rename to client/ayon_core/vendor/python/python_2/chardet/escsm.py diff --git a/openpype/vendor/python/python_2/chardet/eucjpprober.py b/client/ayon_core/vendor/python/python_2/chardet/eucjpprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/eucjpprober.py rename to client/ayon_core/vendor/python/python_2/chardet/eucjpprober.py diff --git a/openpype/vendor/python/python_2/chardet/euckrfreq.py b/client/ayon_core/vendor/python/python_2/chardet/euckrfreq.py similarity 
index 100% rename from openpype/vendor/python/python_2/chardet/euckrfreq.py rename to client/ayon_core/vendor/python/python_2/chardet/euckrfreq.py diff --git a/openpype/vendor/python/python_2/chardet/euckrprober.py b/client/ayon_core/vendor/python/python_2/chardet/euckrprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/euckrprober.py rename to client/ayon_core/vendor/python/python_2/chardet/euckrprober.py diff --git a/openpype/vendor/python/python_2/chardet/euctwfreq.py b/client/ayon_core/vendor/python/python_2/chardet/euctwfreq.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/euctwfreq.py rename to client/ayon_core/vendor/python/python_2/chardet/euctwfreq.py diff --git a/openpype/vendor/python/python_2/chardet/euctwprober.py b/client/ayon_core/vendor/python/python_2/chardet/euctwprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/euctwprober.py rename to client/ayon_core/vendor/python/python_2/chardet/euctwprober.py diff --git a/openpype/vendor/python/python_2/chardet/gb2312freq.py b/client/ayon_core/vendor/python/python_2/chardet/gb2312freq.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/gb2312freq.py rename to client/ayon_core/vendor/python/python_2/chardet/gb2312freq.py diff --git a/openpype/vendor/python/python_2/chardet/gb2312prober.py b/client/ayon_core/vendor/python/python_2/chardet/gb2312prober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/gb2312prober.py rename to client/ayon_core/vendor/python/python_2/chardet/gb2312prober.py diff --git a/openpype/vendor/python/python_2/chardet/hebrewprober.py b/client/ayon_core/vendor/python/python_2/chardet/hebrewprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/hebrewprober.py rename to client/ayon_core/vendor/python/python_2/chardet/hebrewprober.py diff --git a/openpype/vendor/python/python_2/chardet/jisfreq.py b/client/ayon_core/vendor/python/python_2/chardet/jisfreq.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/jisfreq.py rename to client/ayon_core/vendor/python/python_2/chardet/jisfreq.py diff --git a/openpype/vendor/python/python_2/chardet/jpcntx.py b/client/ayon_core/vendor/python/python_2/chardet/jpcntx.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/jpcntx.py rename to client/ayon_core/vendor/python/python_2/chardet/jpcntx.py diff --git a/openpype/vendor/python/python_2/chardet/langbulgarianmodel.py b/client/ayon_core/vendor/python/python_2/chardet/langbulgarianmodel.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/langbulgarianmodel.py rename to client/ayon_core/vendor/python/python_2/chardet/langbulgarianmodel.py diff --git a/openpype/vendor/python/python_2/chardet/langgreekmodel.py b/client/ayon_core/vendor/python/python_2/chardet/langgreekmodel.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/langgreekmodel.py rename to client/ayon_core/vendor/python/python_2/chardet/langgreekmodel.py diff --git a/openpype/vendor/python/python_2/chardet/langhebrewmodel.py b/client/ayon_core/vendor/python/python_2/chardet/langhebrewmodel.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/langhebrewmodel.py rename to client/ayon_core/vendor/python/python_2/chardet/langhebrewmodel.py diff --git a/openpype/vendor/python/python_2/chardet/langhungarianmodel.py b/client/ayon_core/vendor/python/python_2/chardet/langhungarianmodel.py 
similarity index 100% rename from openpype/vendor/python/python_2/chardet/langhungarianmodel.py rename to client/ayon_core/vendor/python/python_2/chardet/langhungarianmodel.py diff --git a/openpype/vendor/python/python_2/chardet/langrussianmodel.py b/client/ayon_core/vendor/python/python_2/chardet/langrussianmodel.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/langrussianmodel.py rename to client/ayon_core/vendor/python/python_2/chardet/langrussianmodel.py diff --git a/openpype/vendor/python/python_2/chardet/langthaimodel.py b/client/ayon_core/vendor/python/python_2/chardet/langthaimodel.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/langthaimodel.py rename to client/ayon_core/vendor/python/python_2/chardet/langthaimodel.py diff --git a/openpype/vendor/python/python_2/chardet/langturkishmodel.py b/client/ayon_core/vendor/python/python_2/chardet/langturkishmodel.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/langturkishmodel.py rename to client/ayon_core/vendor/python/python_2/chardet/langturkishmodel.py diff --git a/openpype/vendor/python/python_2/chardet/latin1prober.py b/client/ayon_core/vendor/python/python_2/chardet/latin1prober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/latin1prober.py rename to client/ayon_core/vendor/python/python_2/chardet/latin1prober.py diff --git a/openpype/vendor/python/python_2/chardet/mbcharsetprober.py b/client/ayon_core/vendor/python/python_2/chardet/mbcharsetprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/mbcharsetprober.py rename to client/ayon_core/vendor/python/python_2/chardet/mbcharsetprober.py diff --git a/openpype/vendor/python/python_2/chardet/mbcsgroupprober.py b/client/ayon_core/vendor/python/python_2/chardet/mbcsgroupprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/mbcsgroupprober.py rename to client/ayon_core/vendor/python/python_2/chardet/mbcsgroupprober.py diff --git a/openpype/vendor/python/python_2/chardet/mbcssm.py b/client/ayon_core/vendor/python/python_2/chardet/mbcssm.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/mbcssm.py rename to client/ayon_core/vendor/python/python_2/chardet/mbcssm.py diff --git a/openpype/tools/ayon_workfiles/__init__.py b/client/ayon_core/vendor/python/python_2/chardet/metadata/__init__.py similarity index 100% rename from openpype/tools/ayon_workfiles/__init__.py rename to client/ayon_core/vendor/python/python_2/chardet/metadata/__init__.py diff --git a/openpype/vendor/python/python_2/chardet/metadata/languages.py b/client/ayon_core/vendor/python/python_2/chardet/metadata/languages.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/metadata/languages.py rename to client/ayon_core/vendor/python/python_2/chardet/metadata/languages.py diff --git a/openpype/vendor/python/python_2/chardet/sbcharsetprober.py b/client/ayon_core/vendor/python/python_2/chardet/sbcharsetprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/sbcharsetprober.py rename to client/ayon_core/vendor/python/python_2/chardet/sbcharsetprober.py diff --git a/openpype/vendor/python/python_2/chardet/sbcsgroupprober.py b/client/ayon_core/vendor/python/python_2/chardet/sbcsgroupprober.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/sbcsgroupprober.py rename to client/ayon_core/vendor/python/python_2/chardet/sbcsgroupprober.py diff --git 
diff --git a/openpype/vendor/python/python_2/chardet/sjisprober.py b/client/ayon_core/vendor/python/python_2/chardet/sjisprober.py
similarity index 100%
rename from openpype/vendor/python/python_2/chardet/sjisprober.py
rename to client/ayon_core/vendor/python/python_2/chardet/sjisprober.py
diff --git a/openpype/vendor/python/python_2/chardet/universaldetector.py b/client/ayon_core/vendor/python/python_2/chardet/universaldetector.py
similarity index 100%
rename from openpype/vendor/python/python_2/chardet/universaldetector.py
rename to client/ayon_core/vendor/python/python_2/chardet/universaldetector.py
diff --git a/openpype/vendor/python/python_2/chardet/utf8prober.py b/client/ayon_core/vendor/python/python_2/chardet/utf8prober.py
similarity index 100%
rename from openpype/vendor/python/python_2/chardet/utf8prober.py
rename to client/ayon_core/vendor/python/python_2/chardet/utf8prober.py
diff --git a/openpype/vendor/python/python_2/chardet/version.py b/client/ayon_core/vendor/python/python_2/chardet/version.py
similarity index 100%
rename from openpype/vendor/python/python_2/chardet/version.py
rename to client/ayon_core/vendor/python/python_2/chardet/version.py
diff --git a/openpype/vendor/python/python_2/charset_normalizer.py b/client/ayon_core/vendor/python/python_2/charset_normalizer.py
similarity index 100%
rename from openpype/vendor/python/python_2/charset_normalizer.py
rename to client/ayon_core/vendor/python/python_2/charset_normalizer.py
diff --git a/openpype/vendor/python/python_2/click/__init__.py b/client/ayon_core/vendor/python/python_2/click/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/__init__.py
rename to client/ayon_core/vendor/python/python_2/click/__init__.py
diff --git a/openpype/vendor/python/python_2/click/_bashcomplete.py b/client/ayon_core/vendor/python/python_2/click/_bashcomplete.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/_bashcomplete.py
rename to client/ayon_core/vendor/python/python_2/click/_bashcomplete.py
diff --git a/openpype/vendor/python/python_2/click/_compat.py b/client/ayon_core/vendor/python/python_2/click/_compat.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/_compat.py
rename to client/ayon_core/vendor/python/python_2/click/_compat.py
diff --git a/openpype/vendor/python/python_2/click/_termui_impl.py b/client/ayon_core/vendor/python/python_2/click/_termui_impl.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/_termui_impl.py
rename to client/ayon_core/vendor/python/python_2/click/_termui_impl.py
diff --git a/openpype/vendor/python/python_2/click/_textwrap.py b/client/ayon_core/vendor/python/python_2/click/_textwrap.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/_textwrap.py
rename to client/ayon_core/vendor/python/python_2/click/_textwrap.py
diff --git a/openpype/vendor/python/python_2/click/_unicodefun.py b/client/ayon_core/vendor/python/python_2/click/_unicodefun.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/_unicodefun.py
rename to client/ayon_core/vendor/python/python_2/click/_unicodefun.py
diff --git a/openpype/vendor/python/python_2/click/_winconsole.py b/client/ayon_core/vendor/python/python_2/click/_winconsole.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/_winconsole.py
rename to client/ayon_core/vendor/python/python_2/click/_winconsole.py
diff --git a/openpype/vendor/python/python_2/click/core.py b/client/ayon_core/vendor/python/python_2/click/core.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/core.py
rename to client/ayon_core/vendor/python/python_2/click/core.py
diff --git a/openpype/vendor/python/python_2/click/decorators.py b/client/ayon_core/vendor/python/python_2/click/decorators.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/decorators.py
rename to client/ayon_core/vendor/python/python_2/click/decorators.py
diff --git a/openpype/vendor/python/python_2/click/exceptions.py b/client/ayon_core/vendor/python/python_2/click/exceptions.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/exceptions.py
rename to client/ayon_core/vendor/python/python_2/click/exceptions.py
diff --git a/openpype/vendor/python/python_2/click/formatting.py b/client/ayon_core/vendor/python/python_2/click/formatting.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/formatting.py
rename to client/ayon_core/vendor/python/python_2/click/formatting.py
diff --git a/openpype/vendor/python/python_2/click/globals.py b/client/ayon_core/vendor/python/python_2/click/globals.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/globals.py
rename to client/ayon_core/vendor/python/python_2/click/globals.py
diff --git a/openpype/vendor/python/python_2/click/parser.py b/client/ayon_core/vendor/python/python_2/click/parser.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/parser.py
rename to client/ayon_core/vendor/python/python_2/click/parser.py
diff --git a/openpype/vendor/python/python_2/click/termui.py b/client/ayon_core/vendor/python/python_2/click/termui.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/termui.py
rename to client/ayon_core/vendor/python/python_2/click/termui.py
diff --git a/openpype/vendor/python/python_2/click/testing.py b/client/ayon_core/vendor/python/python_2/click/testing.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/testing.py
rename to client/ayon_core/vendor/python/python_2/click/testing.py
diff --git a/openpype/vendor/python/python_2/click/types.py b/client/ayon_core/vendor/python/python_2/click/types.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/types.py
rename to client/ayon_core/vendor/python/python_2/click/types.py
diff --git a/openpype/vendor/python/python_2/click/utils.py b/client/ayon_core/vendor/python/python_2/click/utils.py
similarity index 100%
rename from openpype/vendor/python/python_2/click/utils.py
rename to client/ayon_core/vendor/python/python_2/click/utils.py
diff --git a/openpype/vendor/python/python_2/dns/__init__.py b/client/ayon_core/vendor/python/python_2/dns/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/__init__.py
rename to client/ayon_core/vendor/python/python_2/dns/__init__.py
diff --git a/openpype/vendor/python/python_2/dns/_compat.py b/client/ayon_core/vendor/python/python_2/dns/_compat.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/_compat.py
rename to client/ayon_core/vendor/python/python_2/dns/_compat.py
diff --git a/openpype/vendor/python/python_2/dns/dnssec.py b/client/ayon_core/vendor/python/python_2/dns/dnssec.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/dnssec.py
rename to client/ayon_core/vendor/python/python_2/dns/dnssec.py
diff --git a/openpype/vendor/python/python_2/dns/e164.py b/client/ayon_core/vendor/python/python_2/dns/e164.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/e164.py
rename to client/ayon_core/vendor/python/python_2/dns/e164.py
diff --git a/openpype/vendor/python/python_2/dns/edns.py b/client/ayon_core/vendor/python/python_2/dns/edns.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/edns.py
rename to client/ayon_core/vendor/python/python_2/dns/edns.py
diff --git a/openpype/vendor/python/python_2/dns/entropy.py b/client/ayon_core/vendor/python/python_2/dns/entropy.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/entropy.py
rename to client/ayon_core/vendor/python/python_2/dns/entropy.py
diff --git a/openpype/vendor/python/python_2/dns/exception.py b/client/ayon_core/vendor/python/python_2/dns/exception.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/exception.py
rename to client/ayon_core/vendor/python/python_2/dns/exception.py
diff --git a/openpype/vendor/python/python_2/dns/flags.py b/client/ayon_core/vendor/python/python_2/dns/flags.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/flags.py
rename to client/ayon_core/vendor/python/python_2/dns/flags.py
diff --git a/openpype/vendor/python/python_2/dns/grange.py b/client/ayon_core/vendor/python/python_2/dns/grange.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/grange.py
rename to client/ayon_core/vendor/python/python_2/dns/grange.py
diff --git a/openpype/vendor/python/python_2/dns/hash.py b/client/ayon_core/vendor/python/python_2/dns/hash.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/hash.py
rename to client/ayon_core/vendor/python/python_2/dns/hash.py
diff --git a/openpype/vendor/python/python_2/dns/inet.py b/client/ayon_core/vendor/python/python_2/dns/inet.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/inet.py
rename to client/ayon_core/vendor/python/python_2/dns/inet.py
diff --git a/openpype/vendor/python/python_2/dns/ipv4.py b/client/ayon_core/vendor/python/python_2/dns/ipv4.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/ipv4.py
rename to client/ayon_core/vendor/python/python_2/dns/ipv4.py
diff --git a/openpype/vendor/python/python_2/dns/ipv6.py b/client/ayon_core/vendor/python/python_2/dns/ipv6.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/ipv6.py
rename to client/ayon_core/vendor/python/python_2/dns/ipv6.py
diff --git a/openpype/vendor/python/python_2/dns/message.py b/client/ayon_core/vendor/python/python_2/dns/message.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/message.py
rename to client/ayon_core/vendor/python/python_2/dns/message.py
diff --git a/openpype/vendor/python/python_2/dns/name.py b/client/ayon_core/vendor/python/python_2/dns/name.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/name.py
rename to client/ayon_core/vendor/python/python_2/dns/name.py
diff --git a/openpype/vendor/python/python_2/dns/namedict.py b/client/ayon_core/vendor/python/python_2/dns/namedict.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/namedict.py
rename to client/ayon_core/vendor/python/python_2/dns/namedict.py
diff --git a/openpype/vendor/python/python_2/dns/node.py b/client/ayon_core/vendor/python/python_2/dns/node.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/node.py
rename to client/ayon_core/vendor/python/python_2/dns/node.py
diff --git a/openpype/vendor/python/python_2/dns/opcode.py b/client/ayon_core/vendor/python/python_2/dns/opcode.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/opcode.py
rename to client/ayon_core/vendor/python/python_2/dns/opcode.py
diff --git a/openpype/vendor/python/python_2/dns/py.typed b/client/ayon_core/vendor/python/python_2/dns/py.typed
similarity index 100%
rename from openpype/vendor/python/python_2/dns/py.typed
rename to client/ayon_core/vendor/python/python_2/dns/py.typed
diff --git a/openpype/vendor/python/python_2/dns/query.py b/client/ayon_core/vendor/python/python_2/dns/query.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/query.py
rename to client/ayon_core/vendor/python/python_2/dns/query.py
diff --git a/openpype/vendor/python/python_2/dns/rcode.py b/client/ayon_core/vendor/python/python_2/dns/rcode.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rcode.py
rename to client/ayon_core/vendor/python/python_2/dns/rcode.py
diff --git a/openpype/vendor/python/python_2/dns/rdata.py b/client/ayon_core/vendor/python/python_2/dns/rdata.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdata.py
rename to client/ayon_core/vendor/python/python_2/dns/rdata.py
diff --git a/openpype/vendor/python/python_2/dns/rdataclass.py b/client/ayon_core/vendor/python/python_2/dns/rdataclass.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdataclass.py
rename to client/ayon_core/vendor/python/python_2/dns/rdataclass.py
diff --git a/openpype/vendor/python/python_2/dns/rdataset.py b/client/ayon_core/vendor/python/python_2/dns/rdataset.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdataset.py
rename to client/ayon_core/vendor/python/python_2/dns/rdataset.py
diff --git a/openpype/vendor/python/python_2/dns/rdatatype.py b/client/ayon_core/vendor/python/python_2/dns/rdatatype.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdatatype.py
rename to client/ayon_core/vendor/python/python_2/dns/rdatatype.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/AFSDB.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/AFSDB.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/AFSDB.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/AFSDB.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/AVC.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/AVC.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/AVC.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/AVC.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CAA.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CAA.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/CAA.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CAA.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDNSKEY.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CDNSKEY.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/CDNSKEY.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CDNSKEY.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDS.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CDS.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/CDS.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CDS.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CERT.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CERT.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/CERT.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CERT.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CNAME.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CNAME.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/CNAME.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CNAME.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CSYNC.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CSYNC.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/CSYNC.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/CSYNC.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DLV.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DLV.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/DLV.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DLV.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNAME.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DNAME.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/DNAME.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DNAME.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNSKEY.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DNSKEY.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/DNSKEY.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DNSKEY.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DS.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DS.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/DS.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/DS.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI48.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/EUI48.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI48.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/EUI48.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI64.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/EUI64.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI64.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/EUI64.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/GPOS.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/GPOS.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/GPOS.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/GPOS.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/HINFO.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/HINFO.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/HINFO.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/HINFO.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/HIP.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/HIP.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/HIP.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/HIP.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/ISDN.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/ISDN.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/ISDN.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/ISDN.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/LOC.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/LOC.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/LOC.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/LOC.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/MX.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/MX.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/MX.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/MX.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NS.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NS.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/NS.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NS.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NSEC.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NSEC.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NSEC3.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NSEC3.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3PARAM.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NSEC3PARAM.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3PARAM.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/NSEC3PARAM.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/OPENPGPKEY.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/OPENPGPKEY.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/OPENPGPKEY.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/OPENPGPKEY.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/PTR.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/PTR.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/PTR.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/PTR.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/RP.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/RP.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/RP.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/RP.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/RRSIG.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/RRSIG.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/RRSIG.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/RRSIG.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/RT.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/RT.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/RT.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/RT.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/SOA.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/SOA.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/SOA.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/SOA.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/SPF.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/SPF.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/SPF.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/SPF.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/SSHFP.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/SSHFP.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/SSHFP.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/SSHFP.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/TLSA.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/TLSA.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/TLSA.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/TLSA.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/TXT.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/TXT.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/TXT.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/TXT.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/URI.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/URI.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/URI.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/URI.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/X25.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/X25.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/X25.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/X25.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/__init__.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/ANY/__init__.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/ANY/__init__.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/CH/A.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/CH/A.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/CH/A.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/CH/A.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/CH/__init__.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/CH/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/CH/__init__.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/CH/__init__.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/A.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/A.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/A.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/A.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/AAAA.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/AAAA.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/AAAA.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/AAAA.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/APL.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/APL.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/APL.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/APL.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/DHCID.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/DHCID.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/DHCID.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/DHCID.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/IPSECKEY.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/IPSECKEY.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/IPSECKEY.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/IPSECKEY.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/KX.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/KX.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/KX.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/KX.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/NAPTR.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/NAPTR.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/NAPTR.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/NAPTR.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/NSAP.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/NSAP.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP_PTR.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/NSAP_PTR.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP_PTR.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/NSAP_PTR.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/PX.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/PX.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/PX.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/PX.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/SRV.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/SRV.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/SRV.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/SRV.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/WKS.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/WKS.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/WKS.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/WKS.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/__init__.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/IN/__init__.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/IN/__init__.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/__init__.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/__init__.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/__init__.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/dnskeybase.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/dnskeybase.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/dnskeybase.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/dnskeybase.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/dsbase.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/dsbase.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/dsbase.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/dsbase.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/euibase.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/euibase.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/euibase.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/euibase.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/mxbase.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/mxbase.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/mxbase.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/mxbase.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/nsbase.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/nsbase.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/nsbase.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/nsbase.py
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/txtbase.py b/client/ayon_core/vendor/python/python_2/dns/rdtypes/txtbase.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rdtypes/txtbase.py
rename to client/ayon_core/vendor/python/python_2/dns/rdtypes/txtbase.py
diff --git a/openpype/vendor/python/python_2/dns/renderer.py b/client/ayon_core/vendor/python/python_2/dns/renderer.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/renderer.py
rename to client/ayon_core/vendor/python/python_2/dns/renderer.py
diff --git a/openpype/vendor/python/python_2/dns/resolver.py b/client/ayon_core/vendor/python/python_2/dns/resolver.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/resolver.py
rename to client/ayon_core/vendor/python/python_2/dns/resolver.py
diff --git a/openpype/vendor/python/python_2/dns/reversename.py b/client/ayon_core/vendor/python/python_2/dns/reversename.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/reversename.py
rename to client/ayon_core/vendor/python/python_2/dns/reversename.py
diff --git a/openpype/vendor/python/python_2/dns/rrset.py b/client/ayon_core/vendor/python/python_2/dns/rrset.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/rrset.py
rename to client/ayon_core/vendor/python/python_2/dns/rrset.py
diff --git a/openpype/vendor/python/python_2/dns/set.py b/client/ayon_core/vendor/python/python_2/dns/set.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/set.py
rename to client/ayon_core/vendor/python/python_2/dns/set.py
diff --git a/openpype/vendor/python/python_2/dns/tokenizer.py b/client/ayon_core/vendor/python/python_2/dns/tokenizer.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/tokenizer.py
rename to client/ayon_core/vendor/python/python_2/dns/tokenizer.py
diff --git a/openpype/vendor/python/python_2/dns/tsig.py b/client/ayon_core/vendor/python/python_2/dns/tsig.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/tsig.py
rename to client/ayon_core/vendor/python/python_2/dns/tsig.py
diff --git a/openpype/vendor/python/python_2/dns/tsigkeyring.py b/client/ayon_core/vendor/python/python_2/dns/tsigkeyring.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/tsigkeyring.py
rename to client/ayon_core/vendor/python/python_2/dns/tsigkeyring.py
diff --git a/openpype/vendor/python/python_2/dns/ttl.py b/client/ayon_core/vendor/python/python_2/dns/ttl.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/ttl.py
rename to client/ayon_core/vendor/python/python_2/dns/ttl.py
diff --git a/openpype/vendor/python/python_2/dns/update.py b/client/ayon_core/vendor/python/python_2/dns/update.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/update.py
rename to client/ayon_core/vendor/python/python_2/dns/update.py
diff --git a/openpype/vendor/python/python_2/dns/version.py b/client/ayon_core/vendor/python/python_2/dns/version.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/version.py
rename to client/ayon_core/vendor/python/python_2/dns/version.py
diff --git a/openpype/vendor/python/python_2/dns/wiredata.py b/client/ayon_core/vendor/python/python_2/dns/wiredata.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/wiredata.py
rename to client/ayon_core/vendor/python/python_2/dns/wiredata.py
diff --git a/openpype/vendor/python/python_2/dns/zone.py b/client/ayon_core/vendor/python/python_2/dns/zone.py
similarity index 100%
rename from openpype/vendor/python/python_2/dns/zone.py
rename to client/ayon_core/vendor/python/python_2/dns/zone.py
diff --git a/openpype/vendor/python/python_2/engineio/__init__.py b/client/ayon_core/vendor/python/python_2/engineio/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/__init__.py
rename to client/ayon_core/vendor/python/python_2/engineio/__init__.py
diff --git a/openpype/vendor/python/python_2/engineio/async_aiohttp.py b/client/ayon_core/vendor/python/python_2/engineio/async_aiohttp.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_aiohttp.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_aiohttp.py
diff --git a/openpype/vendor/python/python_2/engineio/async_asgi.py b/client/ayon_core/vendor/python/python_2/engineio/async_asgi.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_asgi.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_asgi.py
diff --git a/openpype/tools/publisher/__init__.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/__init__.py
similarity index 100%
rename from openpype/tools/publisher/__init__.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/__init__.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/aiohttp.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/aiohttp.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/aiohttp.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/aiohttp.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/asgi.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/asgi.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/asgi.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/asgi.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/eventlet.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/eventlet.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/eventlet.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/eventlet.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/gevent.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/gevent.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/gevent.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/gevent.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/gevent_uwsgi.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/gevent_uwsgi.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/gevent_uwsgi.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/gevent_uwsgi.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/sanic.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/sanic.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/sanic.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/sanic.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/threading.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/threading.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/threading.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/threading.py
diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/tornado.py b/client/ayon_core/vendor/python/python_2/engineio/async_drivers/tornado.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_drivers/tornado.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_drivers/tornado.py
diff --git a/openpype/vendor/python/python_2/engineio/async_eventlet.py b/client/ayon_core/vendor/python/python_2/engineio/async_eventlet.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_eventlet.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_eventlet.py
diff --git a/openpype/vendor/python/python_2/engineio/async_gevent.py b/client/ayon_core/vendor/python/python_2/engineio/async_gevent.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_gevent.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_gevent.py
diff --git a/openpype/vendor/python/python_2/engineio/async_gevent_uwsgi.py b/client/ayon_core/vendor/python/python_2/engineio/async_gevent_uwsgi.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_gevent_uwsgi.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_gevent_uwsgi.py
diff --git a/openpype/vendor/python/python_2/engineio/async_sanic.py b/client/ayon_core/vendor/python/python_2/engineio/async_sanic.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_sanic.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_sanic.py
diff --git a/openpype/vendor/python/python_2/engineio/async_threading.py b/client/ayon_core/vendor/python/python_2/engineio/async_threading.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_threading.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_threading.py
diff --git a/openpype/vendor/python/python_2/engineio/async_tornado.py b/client/ayon_core/vendor/python/python_2/engineio/async_tornado.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/async_tornado.py
rename to client/ayon_core/vendor/python/python_2/engineio/async_tornado.py
diff --git a/openpype/vendor/python/python_2/engineio/asyncio_client.py b/client/ayon_core/vendor/python/python_2/engineio/asyncio_client.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/asyncio_client.py
rename to client/ayon_core/vendor/python/python_2/engineio/asyncio_client.py
diff --git a/openpype/vendor/python/python_2/engineio/asyncio_server.py b/client/ayon_core/vendor/python/python_2/engineio/asyncio_server.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/asyncio_server.py
rename to client/ayon_core/vendor/python/python_2/engineio/asyncio_server.py
diff --git a/openpype/vendor/python/python_2/engineio/asyncio_socket.py b/client/ayon_core/vendor/python/python_2/engineio/asyncio_socket.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/asyncio_socket.py
rename to client/ayon_core/vendor/python/python_2/engineio/asyncio_socket.py
diff --git a/openpype/vendor/python/python_2/engineio/client.py b/client/ayon_core/vendor/python/python_2/engineio/client.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/client.py
rename to client/ayon_core/vendor/python/python_2/engineio/client.py
diff --git a/openpype/vendor/python/python_2/engineio/exceptions.py b/client/ayon_core/vendor/python/python_2/engineio/exceptions.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/exceptions.py
rename to client/ayon_core/vendor/python/python_2/engineio/exceptions.py
diff --git a/openpype/vendor/python/python_2/engineio/middleware.py b/client/ayon_core/vendor/python/python_2/engineio/middleware.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/middleware.py
rename to client/ayon_core/vendor/python/python_2/engineio/middleware.py
diff --git a/openpype/vendor/python/python_2/engineio/packet.py b/client/ayon_core/vendor/python/python_2/engineio/packet.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/packet.py
rename to client/ayon_core/vendor/python/python_2/engineio/packet.py
diff --git a/openpype/vendor/python/python_2/engineio/payload.py b/client/ayon_core/vendor/python/python_2/engineio/payload.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/payload.py
rename to client/ayon_core/vendor/python/python_2/engineio/payload.py
diff --git a/openpype/vendor/python/python_2/engineio/server.py b/client/ayon_core/vendor/python/python_2/engineio/server.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/server.py
rename to client/ayon_core/vendor/python/python_2/engineio/server.py
diff --git a/openpype/vendor/python/python_2/engineio/socket.py b/client/ayon_core/vendor/python/python_2/engineio/socket.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/socket.py
rename to client/ayon_core/vendor/python/python_2/engineio/socket.py
diff --git a/openpype/vendor/python/python_2/engineio/static_files.py b/client/ayon_core/vendor/python/python_2/engineio/static_files.py
similarity index 100%
rename from openpype/vendor/python/python_2/engineio/static_files.py
rename to client/ayon_core/vendor/python/python_2/engineio/static_files.py
diff --git a/openpype/vendor/python/python_2/functools32/__init__.py b/client/ayon_core/vendor/python/python_2/functools32/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/functools32/__init__.py
rename to client/ayon_core/vendor/python/python_2/functools32/__init__.py
diff --git a/openpype/vendor/python/python_2/functools32/_dummy_thread32.py b/client/ayon_core/vendor/python/python_2/functools32/_dummy_thread32.py
similarity index 100%
rename from openpype/vendor/python/python_2/functools32/_dummy_thread32.py
rename to client/ayon_core/vendor/python/python_2/functools32/_dummy_thread32.py
diff --git a/openpype/vendor/python/python_2/functools32/functools32.py b/client/ayon_core/vendor/python/python_2/functools32/functools32.py
similarity index 100%
rename from openpype/vendor/python/python_2/functools32/functools32.py
rename to client/ayon_core/vendor/python/python_2/functools32/functools32.py
diff --git a/openpype/vendor/python/python_2/functools32/reprlib32.py b/client/ayon_core/vendor/python/python_2/functools32/reprlib32.py
similarity index 100%
rename from openpype/vendor/python/python_2/functools32/reprlib32.py
rename to client/ayon_core/vendor/python/python_2/functools32/reprlib32.py
diff --git a/openpype/vendor/python/python_2/idna/__init__.py b/client/ayon_core/vendor/python/python_2/idna/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/__init__.py
rename to client/ayon_core/vendor/python/python_2/idna/__init__.py
diff --git a/openpype/vendor/python/python_2/idna/codec.py b/client/ayon_core/vendor/python/python_2/idna/codec.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/codec.py
rename to client/ayon_core/vendor/python/python_2/idna/codec.py
diff --git a/openpype/vendor/python/python_2/idna/compat.py b/client/ayon_core/vendor/python/python_2/idna/compat.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/compat.py
rename to client/ayon_core/vendor/python/python_2/idna/compat.py
diff --git a/openpype/vendor/python/python_2/idna/core.py b/client/ayon_core/vendor/python/python_2/idna/core.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/core.py
rename to client/ayon_core/vendor/python/python_2/idna/core.py
diff --git a/openpype/vendor/python/python_2/idna/idnadata.py b/client/ayon_core/vendor/python/python_2/idna/idnadata.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/idnadata.py
rename to client/ayon_core/vendor/python/python_2/idna/idnadata.py
diff --git a/openpype/vendor/python/python_2/idna/intranges.py b/client/ayon_core/vendor/python/python_2/idna/intranges.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/intranges.py
rename to client/ayon_core/vendor/python/python_2/idna/intranges.py
diff --git a/openpype/vendor/python/python_2/idna/package_data.py b/client/ayon_core/vendor/python/python_2/idna/package_data.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/package_data.py
rename to client/ayon_core/vendor/python/python_2/idna/package_data.py
diff --git a/openpype/vendor/python/python_2/idna/uts46data.py b/client/ayon_core/vendor/python/python_2/idna/uts46data.py
similarity index 100%
rename from openpype/vendor/python/python_2/idna/uts46data.py
rename to client/ayon_core/vendor/python/python_2/idna/uts46data.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/adapters/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/adapters/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/adapters/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/adapters/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/adapters/adapter.py b/client/ayon_core/vendor/python/python_2/opentimelineio/adapters/adapter.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/adapters/adapter.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/adapters/adapter.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json b/client/ayon_core/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json
rename to client/ayon_core/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json
diff --git a/openpype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py b/client/ayon_core/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py b/client/ayon_core/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/adapters/otio_json.py b/client/ayon_core/vendor/python/python_2/opentimelineio/adapters/otio_json.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/adapters/otio_json.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/adapters/otio_json.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/algorithms/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/algorithms/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/algorithms/filter.py b/client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/filter.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/algorithms/filter.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/filter.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py b/client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py b/client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py b/client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/track_algo.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/algorithms/track_algo.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/console/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/console/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/console/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/console/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py b/client/ayon_core/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/console/console_utils.py b/client/ayon_core/vendor/python/python_2/opentimelineio/console/console_utils.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/console/console_utils.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/console/console_utils.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/console/otiocat.py b/client/ayon_core/vendor/python/python_2/opentimelineio/console/otiocat.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/console/otiocat.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/console/otiocat.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/console/otioconvert.py b/client/ayon_core/vendor/python/python_2/opentimelineio/console/otioconvert.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/console/otioconvert.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/console/otioconvert.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/console/otiostat.py b/client/ayon_core/vendor/python/python_2/opentimelineio/console/otiostat.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/console/otiostat.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/console/otiostat.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/composable.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/composable.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/composable.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/composable.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/composition.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/composition.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/composition.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/composition.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/item.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/item.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/item.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/item.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/json_serializer.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/json_serializer.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/json_serializer.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/json_serializer.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/media_reference.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/media_reference.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/media_reference.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/media_reference.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/serializable_object.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/serializable_object.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/serializable_object.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/serializable_object.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/type_registry.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/type_registry.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/type_registry.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/type_registry.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/core/unknown_schema.py b/client/ayon_core/vendor/python/python_2/opentimelineio/core/unknown_schema.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/core/unknown_schema.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/core/unknown_schema.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/exceptions.py b/client/ayon_core/vendor/python/python_2/opentimelineio/exceptions.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/exceptions.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/exceptions.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/hooks.py b/client/ayon_core/vendor/python/python_2/opentimelineio/hooks.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/hooks.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/hooks.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/media_linker.py b/client/ayon_core/vendor/python/python_2/opentimelineio/media_linker.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/media_linker.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/media_linker.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/opentime.py b/client/ayon_core/vendor/python/python_2/opentimelineio/opentime.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/opentime.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/opentime.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/plugins/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/plugins/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/plugins/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/plugins/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/plugins/manifest.py b/client/ayon_core/vendor/python/python_2/opentimelineio/plugins/manifest.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/plugins/manifest.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/plugins/manifest.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py b/client/ayon_core/vendor/python/python_2/opentimelineio/plugins/python_plugin.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/plugins/python_plugin.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/clip.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/clip.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/clip.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/clip.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/effect.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/effect.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/effect.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/effect.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/external_reference.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/external_reference.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/external_reference.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/external_reference.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/gap.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/gap.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/gap.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/gap.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/generator_reference.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/generator_reference.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/generator_reference.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/generator_reference.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/marker.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/marker.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/marker.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/marker.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/missing_reference.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/missing_reference.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/missing_reference.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/missing_reference.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/schemadef.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/schemadef.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/schemadef.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/schemadef.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/serializable_collection.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/serializable_collection.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/stack.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/stack.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/stack.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/stack.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/timeline.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/timeline.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/timeline.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/timeline.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/track.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/track.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/track.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/track.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schema/transition.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schema/transition.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schema/transition.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schema/transition.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/schemadef/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio/schemadef/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/schemadef/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/schemadef/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio/test_utils.py b/client/ayon_core/vendor/python/python_2/opentimelineio/test_utils.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio/test_utils.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio/test_utils.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/__init__.py
diff --git a/openpype/tools/push_to_project/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py
similarity index 100%
rename from openpype/tools/push_to_project/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py
diff --git a/openpype/tools/pyblish_pype/vendor/__init__.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py
similarity index 100%
rename from openpype/tools/pyblish_pype/vendor/__init__.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py
diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py b/client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py
similarity index 100%
rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py
rename to client/ayon_core/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/__init__.py b/client/ayon_core/vendor/python/python_2/pkg_resources/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/__init__.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/__init__.py
diff --git a/openpype/tools/stdout_broker/__init__.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/__init__.py
similarity index 100%
rename from openpype/tools/stdout_broker/__init__.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/__init__.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/appdirs.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/appdirs.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/appdirs.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/appdirs.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/__about__.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/__about__.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/__about__.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/__about__.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/__init__.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/__init__.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/__init__.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/_compat.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/_compat.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/_compat.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/_compat.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/_structures.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/_structures.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/_structures.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/_structures.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/markers.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/markers.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/markers.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/markers.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/requirements.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/requirements.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/requirements.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/requirements.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/specifiers.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/specifiers.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/specifiers.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/specifiers.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/utils.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/utils.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/utils.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/utils.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/version.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/version.py
similarity index 100%
rename from openpype/vendor/python/python_2/pkg_resources/_vendor/packaging/version.py
rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/packaging/version.py
diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/pyparsing.py
b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/pyparsing.py similarity index 100% rename from openpype/vendor/python/python_2/pkg_resources/_vendor/pyparsing.py rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/pyparsing.py diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/six.py b/client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/six.py similarity index 100% rename from openpype/vendor/python/python_2/pkg_resources/_vendor/six.py rename to client/ayon_core/vendor/python/python_2/pkg_resources/_vendor/six.py diff --git a/openpype/vendor/python/python_2/pkg_resources/extern/__init__.py b/client/ayon_core/vendor/python/python_2/pkg_resources/extern/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/pkg_resources/extern/__init__.py rename to client/ayon_core/vendor/python/python_2/pkg_resources/extern/__init__.py diff --git a/openpype/vendor/python/python_2/pkg_resources/py2_warn.py b/client/ayon_core/vendor/python/python_2/pkg_resources/py2_warn.py similarity index 100% rename from openpype/vendor/python/python_2/pkg_resources/py2_warn.py rename to client/ayon_core/vendor/python/python_2/pkg_resources/py2_warn.py diff --git a/openpype/vendor/python/python_2/pkg_resources/py31compat.py b/client/ayon_core/vendor/python/python_2/pkg_resources/py31compat.py similarity index 100% rename from openpype/vendor/python/python_2/pkg_resources/py31compat.py rename to client/ayon_core/vendor/python/python_2/pkg_resources/py31compat.py diff --git a/openpype/vendor/python/python_2/qtpy/Qt3DAnimation.py b/client/ayon_core/vendor/python/python_2/qtpy/Qt3DAnimation.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/Qt3DAnimation.py rename to client/ayon_core/vendor/python/python_2/qtpy/Qt3DAnimation.py diff --git a/openpype/vendor/python/python_2/qtpy/Qt3DCore.py b/client/ayon_core/vendor/python/python_2/qtpy/Qt3DCore.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/Qt3DCore.py rename to client/ayon_core/vendor/python/python_2/qtpy/Qt3DCore.py diff --git a/openpype/vendor/python/python_2/qtpy/Qt3DExtras.py b/client/ayon_core/vendor/python/python_2/qtpy/Qt3DExtras.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/Qt3DExtras.py rename to client/ayon_core/vendor/python/python_2/qtpy/Qt3DExtras.py diff --git a/openpype/vendor/python/python_2/qtpy/Qt3DInput.py b/client/ayon_core/vendor/python/python_2/qtpy/Qt3DInput.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/Qt3DInput.py rename to client/ayon_core/vendor/python/python_2/qtpy/Qt3DInput.py diff --git a/openpype/vendor/python/python_2/qtpy/Qt3DLogic.py b/client/ayon_core/vendor/python/python_2/qtpy/Qt3DLogic.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/Qt3DLogic.py rename to client/ayon_core/vendor/python/python_2/qtpy/Qt3DLogic.py diff --git a/openpype/vendor/python/python_2/qtpy/Qt3DRender.py b/client/ayon_core/vendor/python/python_2/qtpy/Qt3DRender.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/Qt3DRender.py rename to client/ayon_core/vendor/python/python_2/qtpy/Qt3DRender.py diff --git a/openpype/vendor/python/python_2/qtpy/QtCharts.py b/client/ayon_core/vendor/python/python_2/qtpy/QtCharts.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtCharts.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtCharts.py diff --git a/openpype/vendor/python/python_2/qtpy/QtCore.py 
b/client/ayon_core/vendor/python/python_2/qtpy/QtCore.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtCore.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtCore.py diff --git a/openpype/vendor/python/python_2/qtpy/QtDataVisualization.py b/client/ayon_core/vendor/python/python_2/qtpy/QtDataVisualization.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtDataVisualization.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtDataVisualization.py diff --git a/openpype/vendor/python/python_2/qtpy/QtDesigner.py b/client/ayon_core/vendor/python/python_2/qtpy/QtDesigner.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtDesigner.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtDesigner.py diff --git a/openpype/vendor/python/python_2/qtpy/QtGui.py b/client/ayon_core/vendor/python/python_2/qtpy/QtGui.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtGui.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtGui.py diff --git a/openpype/vendor/python/python_2/qtpy/QtHelp.py b/client/ayon_core/vendor/python/python_2/qtpy/QtHelp.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtHelp.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtHelp.py diff --git a/openpype/vendor/python/python_2/qtpy/QtLocation.py b/client/ayon_core/vendor/python/python_2/qtpy/QtLocation.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtLocation.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtLocation.py diff --git a/openpype/vendor/python/python_2/qtpy/QtMultimedia.py b/client/ayon_core/vendor/python/python_2/qtpy/QtMultimedia.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtMultimedia.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtMultimedia.py diff --git a/openpype/vendor/python/python_2/qtpy/QtMultimediaWidgets.py b/client/ayon_core/vendor/python/python_2/qtpy/QtMultimediaWidgets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtMultimediaWidgets.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtMultimediaWidgets.py diff --git a/openpype/vendor/python/python_2/qtpy/QtNetwork.py b/client/ayon_core/vendor/python/python_2/qtpy/QtNetwork.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtNetwork.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtNetwork.py diff --git a/openpype/vendor/python/python_2/qtpy/QtOpenGL.py b/client/ayon_core/vendor/python/python_2/qtpy/QtOpenGL.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtOpenGL.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtOpenGL.py diff --git a/openpype/vendor/python/python_2/qtpy/QtPositioning.py b/client/ayon_core/vendor/python/python_2/qtpy/QtPositioning.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtPositioning.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtPositioning.py diff --git a/openpype/vendor/python/python_2/qtpy/QtPrintSupport.py b/client/ayon_core/vendor/python/python_2/qtpy/QtPrintSupport.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtPrintSupport.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtPrintSupport.py diff --git a/openpype/vendor/python/python_2/qtpy/QtQml.py b/client/ayon_core/vendor/python/python_2/qtpy/QtQml.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtQml.py rename to 
client/ayon_core/vendor/python/python_2/qtpy/QtQml.py diff --git a/openpype/vendor/python/python_2/qtpy/QtQuick.py b/client/ayon_core/vendor/python/python_2/qtpy/QtQuick.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtQuick.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtQuick.py diff --git a/openpype/vendor/python/python_2/qtpy/QtQuickWidgets.py b/client/ayon_core/vendor/python/python_2/qtpy/QtQuickWidgets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtQuickWidgets.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtQuickWidgets.py diff --git a/openpype/vendor/python/python_2/qtpy/QtSerialPort.py b/client/ayon_core/vendor/python/python_2/qtpy/QtSerialPort.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtSerialPort.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtSerialPort.py diff --git a/openpype/vendor/python/python_2/qtpy/QtSql.py b/client/ayon_core/vendor/python/python_2/qtpy/QtSql.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtSql.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtSql.py diff --git a/openpype/vendor/python/python_2/qtpy/QtSvg.py b/client/ayon_core/vendor/python/python_2/qtpy/QtSvg.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtSvg.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtSvg.py diff --git a/openpype/vendor/python/python_2/qtpy/QtTest.py b/client/ayon_core/vendor/python/python_2/qtpy/QtTest.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtTest.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtTest.py diff --git a/openpype/vendor/python/python_2/qtpy/QtWebChannel.py b/client/ayon_core/vendor/python/python_2/qtpy/QtWebChannel.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtWebChannel.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtWebChannel.py diff --git a/openpype/vendor/python/python_2/qtpy/QtWebEngineWidgets.py b/client/ayon_core/vendor/python/python_2/qtpy/QtWebEngineWidgets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtWebEngineWidgets.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtWebEngineWidgets.py diff --git a/openpype/vendor/python/python_2/qtpy/QtWebSockets.py b/client/ayon_core/vendor/python/python_2/qtpy/QtWebSockets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtWebSockets.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtWebSockets.py diff --git a/openpype/vendor/python/python_2/qtpy/QtWidgets.py b/client/ayon_core/vendor/python/python_2/qtpy/QtWidgets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtWidgets.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtWidgets.py diff --git a/openpype/vendor/python/python_2/qtpy/QtWinExtras.py b/client/ayon_core/vendor/python/python_2/qtpy/QtWinExtras.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtWinExtras.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtWinExtras.py diff --git a/openpype/vendor/python/python_2/qtpy/QtXmlPatterns.py b/client/ayon_core/vendor/python/python_2/qtpy/QtXmlPatterns.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/QtXmlPatterns.py rename to client/ayon_core/vendor/python/python_2/qtpy/QtXmlPatterns.py diff --git a/openpype/vendor/python/python_2/qtpy/__init__.py b/client/ayon_core/vendor/python/python_2/qtpy/__init__.py similarity 
index 100% rename from openpype/vendor/python/python_2/qtpy/__init__.py rename to client/ayon_core/vendor/python/python_2/qtpy/__init__.py diff --git a/openpype/vendor/__init__.py b/client/ayon_core/vendor/python/python_2/qtpy/_patch/__init__.py similarity index 100% rename from openpype/vendor/__init__.py rename to client/ayon_core/vendor/python/python_2/qtpy/_patch/__init__.py diff --git a/openpype/vendor/python/python_2/qtpy/_patch/qcombobox.py b/client/ayon_core/vendor/python/python_2/qtpy/_patch/qcombobox.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/_patch/qcombobox.py rename to client/ayon_core/vendor/python/python_2/qtpy/_patch/qcombobox.py diff --git a/openpype/vendor/python/python_2/qtpy/_patch/qheaderview.py b/client/ayon_core/vendor/python/python_2/qtpy/_patch/qheaderview.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/_patch/qheaderview.py rename to client/ayon_core/vendor/python/python_2/qtpy/_patch/qheaderview.py diff --git a/openpype/vendor/python/python_2/qtpy/_version.py b/client/ayon_core/vendor/python/python_2/qtpy/_version.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/_version.py rename to client/ayon_core/vendor/python/python_2/qtpy/_version.py diff --git a/openpype/vendor/python/python_2/qtpy/compat.py b/client/ayon_core/vendor/python/python_2/qtpy/compat.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/compat.py rename to client/ayon_core/vendor/python/python_2/qtpy/compat.py diff --git a/openpype/vendor/python/python_2/qtpy/py3compat.py b/client/ayon_core/vendor/python/python_2/qtpy/py3compat.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/py3compat.py rename to client/ayon_core/vendor/python/python_2/qtpy/py3compat.py diff --git a/openpype/vendor/python/python_2/chardet/metadata/__init__.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/chardet/metadata/__init__.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/__init__.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/conftest.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/conftest.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/conftest.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/conftest.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/runtests.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/runtests.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/runtests.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/runtests.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_macos_checks.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_macos_checks.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_macos_checks.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_macos_checks.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_main.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_main.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_main.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_main.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_patch_qcombobox.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_patch_qcombobox.py similarity index 100% rename from 
openpype/vendor/python/python_2/qtpy/tests/test_patch_qcombobox.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_patch_qcombobox.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_patch_qheaderview.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_patch_qheaderview.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_patch_qheaderview.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_patch_qheaderview.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qdesktopservice_split.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qdesktopservice_split.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qdesktopservice_split.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qdesktopservice_split.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qt3danimation.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3danimation.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qt3danimation.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3danimation.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qt3dcore.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dcore.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qt3dcore.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dcore.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qt3dextras.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dextras.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qt3dextras.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dextras.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qt3dinput.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dinput.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qt3dinput.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dinput.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qt3dlogic.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dlogic.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qt3dlogic.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3dlogic.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qt3drender.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3drender.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qt3drender.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qt3drender.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtcharts.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtcharts.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtcharts.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtcharts.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtcore.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtcore.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtcore.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtcore.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtdatavisualization.py 
b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtdatavisualization.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtdatavisualization.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtdatavisualization.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtdesigner.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtdesigner.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtdesigner.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtdesigner.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qthelp.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qthelp.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qthelp.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qthelp.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtlocation.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtlocation.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtlocation.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtlocation.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtmultimedia.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtmultimedia.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtmultimedia.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtmultimedia.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtmultimediawidgets.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtmultimediawidgets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtmultimediawidgets.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtmultimediawidgets.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtnetwork.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtnetwork.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtnetwork.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtnetwork.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtpositioning.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtpositioning.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtpositioning.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtpositioning.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtprintsupport.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtprintsupport.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtprintsupport.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtprintsupport.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtqml.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtqml.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtqml.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtqml.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtquick.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtquick.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtquick.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtquick.py diff --git 
a/openpype/vendor/python/python_2/qtpy/tests/test_qtquickwidgets.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtquickwidgets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtquickwidgets.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtquickwidgets.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtserialport.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtserialport.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtserialport.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtserialport.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtsql.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtsql.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtsql.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtsql.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtsvg.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtsvg.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtsvg.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtsvg.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qttest.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qttest.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qttest.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qttest.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtwebchannel.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwebchannel.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtwebchannel.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwebchannel.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtwebenginewidgets.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwebenginewidgets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtwebenginewidgets.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwebenginewidgets.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtwebsockets.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwebsockets.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtwebsockets.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwebsockets.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtwinextras.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwinextras.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtwinextras.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtwinextras.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_qtxmlpatterns.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtxmlpatterns.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_qtxmlpatterns.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_qtxmlpatterns.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/test_uic.py b/client/ayon_core/vendor/python/python_2/qtpy/tests/test_uic.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/test_uic.py rename to client/ayon_core/vendor/python/python_2/qtpy/tests/test_uic.py diff 
--git a/openpype/vendor/python/python_2/qtpy/uic.py b/client/ayon_core/vendor/python/python_2/qtpy/uic.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/uic.py rename to client/ayon_core/vendor/python/python_2/qtpy/uic.py diff --git a/openpype/vendor/python/python_2/requests/__init__.py b/client/ayon_core/vendor/python/python_2/requests/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/requests/__init__.py rename to client/ayon_core/vendor/python/python_2/requests/__init__.py diff --git a/openpype/vendor/python/python_2/requests/__version__.py b/client/ayon_core/vendor/python/python_2/requests/__version__.py similarity index 100% rename from openpype/vendor/python/python_2/requests/__version__.py rename to client/ayon_core/vendor/python/python_2/requests/__version__.py diff --git a/openpype/vendor/python/python_2/requests/_internal_utils.py b/client/ayon_core/vendor/python/python_2/requests/_internal_utils.py similarity index 100% rename from openpype/vendor/python/python_2/requests/_internal_utils.py rename to client/ayon_core/vendor/python/python_2/requests/_internal_utils.py diff --git a/openpype/vendor/python/python_2/requests/adapters.py b/client/ayon_core/vendor/python/python_2/requests/adapters.py similarity index 100% rename from openpype/vendor/python/python_2/requests/adapters.py rename to client/ayon_core/vendor/python/python_2/requests/adapters.py diff --git a/openpype/vendor/python/python_2/requests/api.py b/client/ayon_core/vendor/python/python_2/requests/api.py similarity index 100% rename from openpype/vendor/python/python_2/requests/api.py rename to client/ayon_core/vendor/python/python_2/requests/api.py diff --git a/openpype/vendor/python/python_2/requests/auth.py b/client/ayon_core/vendor/python/python_2/requests/auth.py similarity index 100% rename from openpype/vendor/python/python_2/requests/auth.py rename to client/ayon_core/vendor/python/python_2/requests/auth.py diff --git a/openpype/vendor/python/python_2/requests/certs.py b/client/ayon_core/vendor/python/python_2/requests/certs.py similarity index 100% rename from openpype/vendor/python/python_2/requests/certs.py rename to client/ayon_core/vendor/python/python_2/requests/certs.py diff --git a/openpype/vendor/python/python_2/requests/compat.py b/client/ayon_core/vendor/python/python_2/requests/compat.py similarity index 100% rename from openpype/vendor/python/python_2/requests/compat.py rename to client/ayon_core/vendor/python/python_2/requests/compat.py diff --git a/openpype/vendor/python/python_2/requests/cookies.py b/client/ayon_core/vendor/python/python_2/requests/cookies.py similarity index 100% rename from openpype/vendor/python/python_2/requests/cookies.py rename to client/ayon_core/vendor/python/python_2/requests/cookies.py diff --git a/openpype/vendor/python/python_2/requests/exceptions.py b/client/ayon_core/vendor/python/python_2/requests/exceptions.py similarity index 100% rename from openpype/vendor/python/python_2/requests/exceptions.py rename to client/ayon_core/vendor/python/python_2/requests/exceptions.py diff --git a/openpype/vendor/python/python_2/requests/help.py b/client/ayon_core/vendor/python/python_2/requests/help.py similarity index 100% rename from openpype/vendor/python/python_2/requests/help.py rename to client/ayon_core/vendor/python/python_2/requests/help.py diff --git a/openpype/vendor/python/python_2/requests/hooks.py b/client/ayon_core/vendor/python/python_2/requests/hooks.py similarity index 100% rename from 
openpype/vendor/python/python_2/requests/hooks.py rename to client/ayon_core/vendor/python/python_2/requests/hooks.py diff --git a/openpype/vendor/python/python_2/requests/models.py b/client/ayon_core/vendor/python/python_2/requests/models.py similarity index 100% rename from openpype/vendor/python/python_2/requests/models.py rename to client/ayon_core/vendor/python/python_2/requests/models.py diff --git a/openpype/vendor/python/python_2/requests/packages.py b/client/ayon_core/vendor/python/python_2/requests/packages.py similarity index 100% rename from openpype/vendor/python/python_2/requests/packages.py rename to client/ayon_core/vendor/python/python_2/requests/packages.py diff --git a/openpype/vendor/python/python_2/requests/sessions.py b/client/ayon_core/vendor/python/python_2/requests/sessions.py similarity index 100% rename from openpype/vendor/python/python_2/requests/sessions.py rename to client/ayon_core/vendor/python/python_2/requests/sessions.py diff --git a/openpype/vendor/python/python_2/requests/status_codes.py b/client/ayon_core/vendor/python/python_2/requests/status_codes.py similarity index 100% rename from openpype/vendor/python/python_2/requests/status_codes.py rename to client/ayon_core/vendor/python/python_2/requests/status_codes.py diff --git a/openpype/vendor/python/python_2/requests/structures.py b/client/ayon_core/vendor/python/python_2/requests/structures.py similarity index 100% rename from openpype/vendor/python/python_2/requests/structures.py rename to client/ayon_core/vendor/python/python_2/requests/structures.py diff --git a/openpype/vendor/python/python_2/requests/utils.py b/client/ayon_core/vendor/python/python_2/requests/utils.py similarity index 100% rename from openpype/vendor/python/python_2/requests/utils.py rename to client/ayon_core/vendor/python/python_2/requests/utils.py diff --git a/openpype/vendor/python/python_2/secrets/LICENSE b/client/ayon_core/vendor/python/python_2/secrets/LICENSE similarity index 100% rename from openpype/vendor/python/python_2/secrets/LICENSE rename to client/ayon_core/vendor/python/python_2/secrets/LICENSE diff --git a/openpype/vendor/python/python_2/secrets/__init__.py b/client/ayon_core/vendor/python/python_2/secrets/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/secrets/__init__.py rename to client/ayon_core/vendor/python/python_2/secrets/__init__.py diff --git a/openpype/vendor/python/python_2/secrets/secrets.py b/client/ayon_core/vendor/python/python_2/secrets/secrets.py similarity index 100% rename from openpype/vendor/python/python_2/secrets/secrets.py rename to client/ayon_core/vendor/python/python_2/secrets/secrets.py diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/INSTALLER b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/INSTALLER similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/INSTALLER rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/INSTALLER diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/LICENSE b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/LICENSE similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/LICENSE rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/LICENSE diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/METADATA b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/METADATA similarity index 
100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/METADATA rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/METADATA diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/RECORD b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/RECORD similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/RECORD rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/RECORD diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/REQUESTED b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/REQUESTED similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/REQUESTED rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/REQUESTED diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/WHEEL b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/WHEEL similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/WHEEL rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/WHEEL diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/dependency_links.txt b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/dependency_links.txt similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/dependency_links.txt rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/dependency_links.txt diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/entry_points.txt b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/entry_points.txt similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/entry_points.txt rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/entry_points.txt diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/top_level.txt b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/top_level.txt similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/top_level.txt rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/top_level.txt diff --git a/openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/zip-safe b/client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/zip-safe similarity index 100% rename from openpype/vendor/python/python_2/setuptools-45.0.0.dist-info/zip-safe rename to client/ayon_core/vendor/python/python_2/setuptools-45.0.0.dist-info/zip-safe diff --git a/openpype/vendor/python/python_2/setuptools/__init__.py b/client/ayon_core/vendor/python/python_2/setuptools/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/__init__.py rename to client/ayon_core/vendor/python/python_2/setuptools/__init__.py diff --git a/openpype/vendor/python/python_2/setuptools/_deprecation_warning.py b/client/ayon_core/vendor/python/python_2/setuptools/_deprecation_warning.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_deprecation_warning.py rename to client/ayon_core/vendor/python/python_2/setuptools/_deprecation_warning.py diff --git a/openpype/vendor/python/python_2/setuptools/_imp.py b/client/ayon_core/vendor/python/python_2/setuptools/_imp.py similarity index 100% rename from 
openpype/vendor/python/python_2/setuptools/_imp.py rename to client/ayon_core/vendor/python/python_2/setuptools/_imp.py diff --git a/openpype/vendor/python/python_2/engineio/async_drivers/__init__.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/engineio/async_drivers/__init__.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/__init__.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/ordered_set.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/ordered_set.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/ordered_set.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/ordered_set.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/__about__.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/__about__.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/__about__.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/__about__.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/__init__.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/__init__.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/__init__.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/_compat.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/_compat.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/_compat.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/_compat.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/_structures.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/_structures.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/_structures.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/_structures.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/markers.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/markers.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/markers.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/markers.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/requirements.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/requirements.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/requirements.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/requirements.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/specifiers.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/specifiers.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/specifiers.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/specifiers.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/tags.py 
b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/tags.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/tags.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/tags.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/utils.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/utils.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/utils.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/utils.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/packaging/version.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/version.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/packaging/version.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/packaging/version.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/pyparsing.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/pyparsing.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/pyparsing.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/pyparsing.py diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/six.py b/client/ayon_core/vendor/python/python_2/setuptools/_vendor/six.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/_vendor/six.py rename to client/ayon_core/vendor/python/python_2/setuptools/_vendor/six.py diff --git a/openpype/vendor/python/python_2/setuptools/archive_util.py b/client/ayon_core/vendor/python/python_2/setuptools/archive_util.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/archive_util.py rename to client/ayon_core/vendor/python/python_2/setuptools/archive_util.py diff --git a/openpype/vendor/python/python_2/setuptools/build_meta.py b/client/ayon_core/vendor/python/python_2/setuptools/build_meta.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/build_meta.py rename to client/ayon_core/vendor/python/python_2/setuptools/build_meta.py diff --git a/openpype/vendor/python/python_2/setuptools/cli-32.exe b/client/ayon_core/vendor/python/python_2/setuptools/cli-32.exe similarity index 100% rename from openpype/vendor/python/python_2/setuptools/cli-32.exe rename to client/ayon_core/vendor/python/python_2/setuptools/cli-32.exe diff --git a/openpype/vendor/python/python_2/setuptools/cli-64.exe b/client/ayon_core/vendor/python/python_2/setuptools/cli-64.exe similarity index 100% rename from openpype/vendor/python/python_2/setuptools/cli-64.exe rename to client/ayon_core/vendor/python/python_2/setuptools/cli-64.exe diff --git a/openpype/vendor/python/python_2/setuptools/cli.exe b/client/ayon_core/vendor/python/python_2/setuptools/cli.exe similarity index 100% rename from openpype/vendor/python/python_2/setuptools/cli.exe rename to client/ayon_core/vendor/python/python_2/setuptools/cli.exe diff --git a/openpype/vendor/python/python_2/setuptools/command/__init__.py b/client/ayon_core/vendor/python/python_2/setuptools/command/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/__init__.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/__init__.py diff --git a/openpype/vendor/python/python_2/setuptools/command/alias.py 
b/client/ayon_core/vendor/python/python_2/setuptools/command/alias.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/alias.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/alias.py diff --git a/openpype/vendor/python/python_2/setuptools/command/bdist_egg.py b/client/ayon_core/vendor/python/python_2/setuptools/command/bdist_egg.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/bdist_egg.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/bdist_egg.py diff --git a/openpype/vendor/python/python_2/setuptools/command/bdist_rpm.py b/client/ayon_core/vendor/python/python_2/setuptools/command/bdist_rpm.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/bdist_rpm.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/bdist_rpm.py diff --git a/openpype/vendor/python/python_2/setuptools/command/bdist_wininst.py b/client/ayon_core/vendor/python/python_2/setuptools/command/bdist_wininst.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/bdist_wininst.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/bdist_wininst.py diff --git a/openpype/vendor/python/python_2/setuptools/command/build_clib.py b/client/ayon_core/vendor/python/python_2/setuptools/command/build_clib.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/build_clib.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/build_clib.py diff --git a/openpype/vendor/python/python_2/setuptools/command/build_ext.py b/client/ayon_core/vendor/python/python_2/setuptools/command/build_ext.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/build_ext.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/build_ext.py diff --git a/openpype/vendor/python/python_2/setuptools/command/build_py.py b/client/ayon_core/vendor/python/python_2/setuptools/command/build_py.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/build_py.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/build_py.py diff --git a/openpype/vendor/python/python_2/setuptools/command/develop.py b/client/ayon_core/vendor/python/python_2/setuptools/command/develop.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/develop.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/develop.py diff --git a/openpype/vendor/python/python_2/setuptools/command/dist_info.py b/client/ayon_core/vendor/python/python_2/setuptools/command/dist_info.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/dist_info.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/dist_info.py diff --git a/openpype/vendor/python/python_2/setuptools/command/easy_install.py b/client/ayon_core/vendor/python/python_2/setuptools/command/easy_install.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/easy_install.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/easy_install.py diff --git a/openpype/vendor/python/python_2/setuptools/command/egg_info.py b/client/ayon_core/vendor/python/python_2/setuptools/command/egg_info.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/egg_info.py rename to 
client/ayon_core/vendor/python/python_2/setuptools/command/egg_info.py diff --git a/openpype/vendor/python/python_2/setuptools/command/install.py b/client/ayon_core/vendor/python/python_2/setuptools/command/install.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/install.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/install.py diff --git a/openpype/vendor/python/python_2/setuptools/command/install_egg_info.py b/client/ayon_core/vendor/python/python_2/setuptools/command/install_egg_info.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/install_egg_info.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/install_egg_info.py diff --git a/openpype/vendor/python/python_2/setuptools/command/install_lib.py b/client/ayon_core/vendor/python/python_2/setuptools/command/install_lib.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/install_lib.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/install_lib.py diff --git a/openpype/vendor/python/python_2/setuptools/command/install_scripts.py b/client/ayon_core/vendor/python/python_2/setuptools/command/install_scripts.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/install_scripts.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/install_scripts.py diff --git a/openpype/vendor/python/python_2/setuptools/command/launcher manifest.xml b/client/ayon_core/vendor/python/python_2/setuptools/command/launcher manifest.xml similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/launcher manifest.xml rename to client/ayon_core/vendor/python/python_2/setuptools/command/launcher manifest.xml diff --git a/openpype/vendor/python/python_2/setuptools/command/py36compat.py b/client/ayon_core/vendor/python/python_2/setuptools/command/py36compat.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/py36compat.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/py36compat.py diff --git a/openpype/vendor/python/python_2/setuptools/command/register.py b/client/ayon_core/vendor/python/python_2/setuptools/command/register.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/register.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/register.py diff --git a/openpype/vendor/python/python_2/setuptools/command/rotate.py b/client/ayon_core/vendor/python/python_2/setuptools/command/rotate.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/rotate.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/rotate.py diff --git a/openpype/vendor/python/python_2/setuptools/command/saveopts.py b/client/ayon_core/vendor/python/python_2/setuptools/command/saveopts.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/saveopts.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/saveopts.py diff --git a/openpype/vendor/python/python_2/setuptools/command/sdist.py b/client/ayon_core/vendor/python/python_2/setuptools/command/sdist.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/sdist.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/sdist.py diff --git a/openpype/vendor/python/python_2/setuptools/command/setopt.py 
b/client/ayon_core/vendor/python/python_2/setuptools/command/setopt.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/setopt.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/setopt.py diff --git a/openpype/vendor/python/python_2/setuptools/command/test.py b/client/ayon_core/vendor/python/python_2/setuptools/command/test.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/test.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/test.py diff --git a/openpype/vendor/python/python_2/setuptools/command/upload.py b/client/ayon_core/vendor/python/python_2/setuptools/command/upload.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/upload.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/upload.py diff --git a/openpype/vendor/python/python_2/setuptools/command/upload_docs.py b/client/ayon_core/vendor/python/python_2/setuptools/command/upload_docs.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/command/upload_docs.py rename to client/ayon_core/vendor/python/python_2/setuptools/command/upload_docs.py diff --git a/openpype/vendor/python/python_2/setuptools/config.py b/client/ayon_core/vendor/python/python_2/setuptools/config.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/config.py rename to client/ayon_core/vendor/python/python_2/setuptools/config.py diff --git a/openpype/vendor/python/python_2/setuptools/dep_util.py b/client/ayon_core/vendor/python/python_2/setuptools/dep_util.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/dep_util.py rename to client/ayon_core/vendor/python/python_2/setuptools/dep_util.py diff --git a/openpype/vendor/python/python_2/setuptools/depends.py b/client/ayon_core/vendor/python/python_2/setuptools/depends.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/depends.py rename to client/ayon_core/vendor/python/python_2/setuptools/depends.py diff --git a/openpype/vendor/python/python_2/setuptools/dist.py b/client/ayon_core/vendor/python/python_2/setuptools/dist.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/dist.py rename to client/ayon_core/vendor/python/python_2/setuptools/dist.py diff --git a/openpype/vendor/python/python_2/setuptools/errors.py b/client/ayon_core/vendor/python/python_2/setuptools/errors.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/errors.py rename to client/ayon_core/vendor/python/python_2/setuptools/errors.py diff --git a/openpype/vendor/python/python_2/setuptools/extension.py b/client/ayon_core/vendor/python/python_2/setuptools/extension.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/extension.py rename to client/ayon_core/vendor/python/python_2/setuptools/extension.py diff --git a/openpype/vendor/python/python_2/setuptools/extern/__init__.py b/client/ayon_core/vendor/python/python_2/setuptools/extern/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/extern/__init__.py rename to client/ayon_core/vendor/python/python_2/setuptools/extern/__init__.py diff --git a/openpype/vendor/python/python_2/setuptools/glob.py b/client/ayon_core/vendor/python/python_2/setuptools/glob.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/glob.py rename to client/ayon_core/vendor/python/python_2/setuptools/glob.py 
diff --git a/openpype/vendor/python/python_2/setuptools/gui-32.exe b/client/ayon_core/vendor/python/python_2/setuptools/gui-32.exe similarity index 100% rename from openpype/vendor/python/python_2/setuptools/gui-32.exe rename to client/ayon_core/vendor/python/python_2/setuptools/gui-32.exe diff --git a/openpype/vendor/python/python_2/setuptools/gui-64.exe b/client/ayon_core/vendor/python/python_2/setuptools/gui-64.exe similarity index 100% rename from openpype/vendor/python/python_2/setuptools/gui-64.exe rename to client/ayon_core/vendor/python/python_2/setuptools/gui-64.exe diff --git a/openpype/vendor/python/python_2/setuptools/gui.exe b/client/ayon_core/vendor/python/python_2/setuptools/gui.exe similarity index 100% rename from openpype/vendor/python/python_2/setuptools/gui.exe rename to client/ayon_core/vendor/python/python_2/setuptools/gui.exe diff --git a/openpype/vendor/python/python_2/setuptools/installer.py b/client/ayon_core/vendor/python/python_2/setuptools/installer.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/installer.py rename to client/ayon_core/vendor/python/python_2/setuptools/installer.py diff --git a/openpype/vendor/python/python_2/setuptools/launch.py b/client/ayon_core/vendor/python/python_2/setuptools/launch.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/launch.py rename to client/ayon_core/vendor/python/python_2/setuptools/launch.py diff --git a/openpype/vendor/python/python_2/setuptools/lib2to3_ex.py b/client/ayon_core/vendor/python/python_2/setuptools/lib2to3_ex.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/lib2to3_ex.py rename to client/ayon_core/vendor/python/python_2/setuptools/lib2to3_ex.py diff --git a/openpype/vendor/python/python_2/setuptools/monkey.py b/client/ayon_core/vendor/python/python_2/setuptools/monkey.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/monkey.py rename to client/ayon_core/vendor/python/python_2/setuptools/monkey.py diff --git a/openpype/vendor/python/python_2/setuptools/msvc.py b/client/ayon_core/vendor/python/python_2/setuptools/msvc.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/msvc.py rename to client/ayon_core/vendor/python/python_2/setuptools/msvc.py diff --git a/openpype/vendor/python/python_2/setuptools/namespaces.py b/client/ayon_core/vendor/python/python_2/setuptools/namespaces.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/namespaces.py rename to client/ayon_core/vendor/python/python_2/setuptools/namespaces.py diff --git a/openpype/vendor/python/python_2/setuptools/package_index.py b/client/ayon_core/vendor/python/python_2/setuptools/package_index.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/package_index.py rename to client/ayon_core/vendor/python/python_2/setuptools/package_index.py diff --git a/openpype/vendor/python/python_2/setuptools/py27compat.py b/client/ayon_core/vendor/python/python_2/setuptools/py27compat.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/py27compat.py rename to client/ayon_core/vendor/python/python_2/setuptools/py27compat.py diff --git a/openpype/vendor/python/python_2/setuptools/py31compat.py b/client/ayon_core/vendor/python/python_2/setuptools/py31compat.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/py31compat.py rename to client/ayon_core/vendor/python/python_2/setuptools/py31compat.py diff --git 
a/openpype/vendor/python/python_2/setuptools/py33compat.py b/client/ayon_core/vendor/python/python_2/setuptools/py33compat.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/py33compat.py rename to client/ayon_core/vendor/python/python_2/setuptools/py33compat.py diff --git a/openpype/vendor/python/python_2/setuptools/py34compat.py b/client/ayon_core/vendor/python/python_2/setuptools/py34compat.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/py34compat.py rename to client/ayon_core/vendor/python/python_2/setuptools/py34compat.py diff --git a/openpype/vendor/python/python_2/setuptools/sandbox.py b/client/ayon_core/vendor/python/python_2/setuptools/sandbox.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/sandbox.py rename to client/ayon_core/vendor/python/python_2/setuptools/sandbox.py diff --git a/openpype/vendor/python/python_2/setuptools/script (dev).tmpl b/client/ayon_core/vendor/python/python_2/setuptools/script (dev).tmpl similarity index 100% rename from openpype/vendor/python/python_2/setuptools/script (dev).tmpl rename to client/ayon_core/vendor/python/python_2/setuptools/script (dev).tmpl diff --git a/openpype/vendor/python/python_2/setuptools/script.tmpl b/client/ayon_core/vendor/python/python_2/setuptools/script.tmpl similarity index 100% rename from openpype/vendor/python/python_2/setuptools/script.tmpl rename to client/ayon_core/vendor/python/python_2/setuptools/script.tmpl diff --git a/openpype/vendor/python/python_2/setuptools/site-patch.py b/client/ayon_core/vendor/python/python_2/setuptools/site-patch.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/site-patch.py rename to client/ayon_core/vendor/python/python_2/setuptools/site-patch.py diff --git a/openpype/vendor/python/python_2/setuptools/ssl_support.py b/client/ayon_core/vendor/python/python_2/setuptools/ssl_support.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/ssl_support.py rename to client/ayon_core/vendor/python/python_2/setuptools/ssl_support.py diff --git a/openpype/vendor/python/python_2/setuptools/unicode_utils.py b/client/ayon_core/vendor/python/python_2/setuptools/unicode_utils.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/unicode_utils.py rename to client/ayon_core/vendor/python/python_2/setuptools/unicode_utils.py diff --git a/openpype/vendor/python/python_2/setuptools/version.py b/client/ayon_core/vendor/python/python_2/setuptools/version.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/version.py rename to client/ayon_core/vendor/python/python_2/setuptools/version.py diff --git a/openpype/vendor/python/python_2/setuptools/wheel.py b/client/ayon_core/vendor/python/python_2/setuptools/wheel.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/wheel.py rename to client/ayon_core/vendor/python/python_2/setuptools/wheel.py diff --git a/openpype/vendor/python/python_2/setuptools/windows_support.py b/client/ayon_core/vendor/python/python_2/setuptools/windows_support.py similarity index 100% rename from openpype/vendor/python/python_2/setuptools/windows_support.py rename to client/ayon_core/vendor/python/python_2/setuptools/windows_support.py diff --git a/openpype/vendor/python/python_2/socketio/__init__.py b/client/ayon_core/vendor/python/python_2/socketio/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/__init__.py rename to 
client/ayon_core/vendor/python/python_2/socketio/__init__.py diff --git a/openpype/vendor/python/python_2/socketio/asgi.py b/client/ayon_core/vendor/python/python_2/socketio/asgi.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/asgi.py rename to client/ayon_core/vendor/python/python_2/socketio/asgi.py diff --git a/openpype/vendor/python/python_2/socketio/asyncio_client.py b/client/ayon_core/vendor/python/python_2/socketio/asyncio_client.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/asyncio_client.py rename to client/ayon_core/vendor/python/python_2/socketio/asyncio_client.py diff --git a/openpype/vendor/python/python_2/socketio/asyncio_manager.py b/client/ayon_core/vendor/python/python_2/socketio/asyncio_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/asyncio_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/asyncio_manager.py diff --git a/openpype/vendor/python/python_2/socketio/asyncio_namespace.py b/client/ayon_core/vendor/python/python_2/socketio/asyncio_namespace.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/asyncio_namespace.py rename to client/ayon_core/vendor/python/python_2/socketio/asyncio_namespace.py diff --git a/openpype/vendor/python/python_2/socketio/asyncio_pubsub_manager.py b/client/ayon_core/vendor/python/python_2/socketio/asyncio_pubsub_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/asyncio_pubsub_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/asyncio_pubsub_manager.py diff --git a/openpype/vendor/python/python_2/socketio/asyncio_redis_manager.py b/client/ayon_core/vendor/python/python_2/socketio/asyncio_redis_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/asyncio_redis_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/asyncio_redis_manager.py diff --git a/openpype/vendor/python/python_2/socketio/asyncio_server.py b/client/ayon_core/vendor/python/python_2/socketio/asyncio_server.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/asyncio_server.py rename to client/ayon_core/vendor/python/python_2/socketio/asyncio_server.py diff --git a/openpype/vendor/python/python_2/socketio/base_manager.py b/client/ayon_core/vendor/python/python_2/socketio/base_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/base_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/base_manager.py diff --git a/openpype/vendor/python/python_2/socketio/client.py b/client/ayon_core/vendor/python/python_2/socketio/client.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/client.py rename to client/ayon_core/vendor/python/python_2/socketio/client.py diff --git a/openpype/vendor/python/python_2/socketio/exceptions.py b/client/ayon_core/vendor/python/python_2/socketio/exceptions.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/exceptions.py rename to client/ayon_core/vendor/python/python_2/socketio/exceptions.py diff --git a/openpype/vendor/python/python_2/socketio/kombu_manager.py b/client/ayon_core/vendor/python/python_2/socketio/kombu_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/kombu_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/kombu_manager.py diff --git a/openpype/vendor/python/python_2/socketio/middleware.py 
b/client/ayon_core/vendor/python/python_2/socketio/middleware.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/middleware.py rename to client/ayon_core/vendor/python/python_2/socketio/middleware.py diff --git a/openpype/vendor/python/python_2/socketio/namespace.py b/client/ayon_core/vendor/python/python_2/socketio/namespace.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/namespace.py rename to client/ayon_core/vendor/python/python_2/socketio/namespace.py diff --git a/openpype/vendor/python/python_2/socketio/packet.py b/client/ayon_core/vendor/python/python_2/socketio/packet.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/packet.py rename to client/ayon_core/vendor/python/python_2/socketio/packet.py diff --git a/openpype/vendor/python/python_2/socketio/pubsub_manager.py b/client/ayon_core/vendor/python/python_2/socketio/pubsub_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/pubsub_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/pubsub_manager.py diff --git a/openpype/vendor/python/python_2/socketio/redis_manager.py b/client/ayon_core/vendor/python/python_2/socketio/redis_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/redis_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/redis_manager.py diff --git a/openpype/vendor/python/python_2/socketio/server.py b/client/ayon_core/vendor/python/python_2/socketio/server.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/server.py rename to client/ayon_core/vendor/python/python_2/socketio/server.py diff --git a/openpype/vendor/python/python_2/socketio/tornado.py b/client/ayon_core/vendor/python/python_2/socketio/tornado.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/tornado.py rename to client/ayon_core/vendor/python/python_2/socketio/tornado.py diff --git a/openpype/vendor/python/python_2/socketio/zmq_manager.py b/client/ayon_core/vendor/python/python_2/socketio/zmq_manager.py similarity index 100% rename from openpype/vendor/python/python_2/socketio/zmq_manager.py rename to client/ayon_core/vendor/python/python_2/socketio/zmq_manager.py diff --git a/openpype/vendor/python/python_2/urllib3/__init__.py b/client/ayon_core/vendor/python/python_2/urllib3/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/__init__.py rename to client/ayon_core/vendor/python/python_2/urllib3/__init__.py diff --git a/openpype/vendor/python/python_2/urllib3/_collections.py b/client/ayon_core/vendor/python/python_2/urllib3/_collections.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/_collections.py rename to client/ayon_core/vendor/python/python_2/urllib3/_collections.py diff --git a/openpype/vendor/python/python_2/urllib3/_version.py b/client/ayon_core/vendor/python/python_2/urllib3/_version.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/_version.py rename to client/ayon_core/vendor/python/python_2/urllib3/_version.py diff --git a/openpype/vendor/python/python_2/urllib3/connection.py b/client/ayon_core/vendor/python/python_2/urllib3/connection.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/connection.py rename to client/ayon_core/vendor/python/python_2/urllib3/connection.py diff --git a/openpype/vendor/python/python_2/urllib3/connectionpool.py 
b/client/ayon_core/vendor/python/python_2/urllib3/connectionpool.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/connectionpool.py rename to client/ayon_core/vendor/python/python_2/urllib3/connectionpool.py diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/__init__.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/_appengine_environ.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/_appengine_environ.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/_appengine_environ.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/_appengine_environ.py diff --git a/openpype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/_securetransport/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/_securetransport/__init__.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/_securetransport/bindings.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/_securetransport/bindings.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/_securetransport/bindings.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/_securetransport/bindings.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/_securetransport/low_level.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/_securetransport/low_level.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/_securetransport/low_level.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/_securetransport/low_level.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/appengine.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/appengine.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/appengine.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/appengine.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/ntlmpool.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/ntlmpool.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/ntlmpool.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/ntlmpool.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/pyopenssl.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/pyopenssl.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/pyopenssl.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/pyopenssl.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/securetransport.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/securetransport.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/securetransport.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/securetransport.py diff --git a/openpype/vendor/python/python_2/urllib3/contrib/socks.py b/client/ayon_core/vendor/python/python_2/urllib3/contrib/socks.py 
similarity index 100% rename from openpype/vendor/python/python_2/urllib3/contrib/socks.py rename to client/ayon_core/vendor/python/python_2/urllib3/contrib/socks.py diff --git a/openpype/vendor/python/python_2/urllib3/exceptions.py b/client/ayon_core/vendor/python/python_2/urllib3/exceptions.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/exceptions.py rename to client/ayon_core/vendor/python/python_2/urllib3/exceptions.py diff --git a/openpype/vendor/python/python_2/urllib3/fields.py b/client/ayon_core/vendor/python/python_2/urllib3/fields.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/fields.py rename to client/ayon_core/vendor/python/python_2/urllib3/fields.py diff --git a/openpype/vendor/python/python_2/urllib3/filepost.py b/client/ayon_core/vendor/python/python_2/urllib3/filepost.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/filepost.py rename to client/ayon_core/vendor/python/python_2/urllib3/filepost.py diff --git a/openpype/vendor/python/python_2/pkg_resources/_vendor/__init__.py b/client/ayon_core/vendor/python/python_2/urllib3/packages/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/pkg_resources/_vendor/__init__.py rename to client/ayon_core/vendor/python/python_2/urllib3/packages/__init__.py diff --git a/openpype/vendor/python/python_2/qtpy/_patch/__init__.py b/client/ayon_core/vendor/python/python_2/urllib3/packages/backports/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/_patch/__init__.py rename to client/ayon_core/vendor/python/python_2/urllib3/packages/backports/__init__.py diff --git a/openpype/vendor/python/python_2/urllib3/packages/backports/makefile.py b/client/ayon_core/vendor/python/python_2/urllib3/packages/backports/makefile.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/packages/backports/makefile.py rename to client/ayon_core/vendor/python/python_2/urllib3/packages/backports/makefile.py diff --git a/openpype/vendor/python/python_2/urllib3/packages/six.py b/client/ayon_core/vendor/python/python_2/urllib3/packages/six.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/packages/six.py rename to client/ayon_core/vendor/python/python_2/urllib3/packages/six.py diff --git a/openpype/vendor/python/python_2/urllib3/poolmanager.py b/client/ayon_core/vendor/python/python_2/urllib3/poolmanager.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/poolmanager.py rename to client/ayon_core/vendor/python/python_2/urllib3/poolmanager.py diff --git a/openpype/vendor/python/python_2/urllib3/request.py b/client/ayon_core/vendor/python/python_2/urllib3/request.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/request.py rename to client/ayon_core/vendor/python/python_2/urllib3/request.py diff --git a/openpype/vendor/python/python_2/urllib3/response.py b/client/ayon_core/vendor/python/python_2/urllib3/response.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/response.py rename to client/ayon_core/vendor/python/python_2/urllib3/response.py diff --git a/openpype/vendor/python/python_2/urllib3/util/__init__.py b/client/ayon_core/vendor/python/python_2/urllib3/util/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/__init__.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/__init__.py diff --git 
a/openpype/vendor/python/python_2/urllib3/util/connection.py b/client/ayon_core/vendor/python/python_2/urllib3/util/connection.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/connection.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/connection.py diff --git a/openpype/vendor/python/python_2/urllib3/util/proxy.py b/client/ayon_core/vendor/python/python_2/urllib3/util/proxy.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/proxy.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/proxy.py diff --git a/openpype/vendor/python/python_2/urllib3/util/queue.py b/client/ayon_core/vendor/python/python_2/urllib3/util/queue.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/queue.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/queue.py diff --git a/openpype/vendor/python/python_2/urllib3/util/request.py b/client/ayon_core/vendor/python/python_2/urllib3/util/request.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/request.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/request.py diff --git a/openpype/vendor/python/python_2/urllib3/util/response.py b/client/ayon_core/vendor/python/python_2/urllib3/util/response.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/response.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/response.py diff --git a/openpype/vendor/python/python_2/urllib3/util/retry.py b/client/ayon_core/vendor/python/python_2/urllib3/util/retry.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/retry.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/retry.py diff --git a/openpype/vendor/python/python_2/urllib3/util/ssl_.py b/client/ayon_core/vendor/python/python_2/urllib3/util/ssl_.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/ssl_.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/ssl_.py diff --git a/openpype/vendor/python/python_2/urllib3/util/ssl_match_hostname.py b/client/ayon_core/vendor/python/python_2/urllib3/util/ssl_match_hostname.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/ssl_match_hostname.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/ssl_match_hostname.py diff --git a/openpype/vendor/python/python_2/urllib3/util/ssltransport.py b/client/ayon_core/vendor/python/python_2/urllib3/util/ssltransport.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/ssltransport.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/ssltransport.py diff --git a/openpype/vendor/python/python_2/urllib3/util/timeout.py b/client/ayon_core/vendor/python/python_2/urllib3/util/timeout.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/timeout.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/timeout.py diff --git a/openpype/vendor/python/python_2/urllib3/util/url.py b/client/ayon_core/vendor/python/python_2/urllib3/util/url.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/url.py rename to client/ayon_core/vendor/python/python_2/urllib3/util/url.py diff --git a/openpype/vendor/python/python_2/urllib3/util/wait.py b/client/ayon_core/vendor/python/python_2/urllib3/util/wait.py similarity index 100% rename from openpype/vendor/python/python_2/urllib3/util/wait.py rename to 
client/ayon_core/vendor/python/python_2/urllib3/util/wait.py diff --git a/openpype/vendor/python/python_2/websocket/__init__.py b/client/ayon_core/vendor/python/python_2/websocket/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/__init__.py rename to client/ayon_core/vendor/python/python_2/websocket/__init__.py diff --git a/openpype/vendor/python/python_2/websocket/_abnf.py b/client/ayon_core/vendor/python/python_2/websocket/_abnf.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_abnf.py rename to client/ayon_core/vendor/python/python_2/websocket/_abnf.py diff --git a/openpype/vendor/python/python_2/websocket/_app.py b/client/ayon_core/vendor/python/python_2/websocket/_app.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_app.py rename to client/ayon_core/vendor/python/python_2/websocket/_app.py diff --git a/openpype/vendor/python/python_2/websocket/_cookiejar.py b/client/ayon_core/vendor/python/python_2/websocket/_cookiejar.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_cookiejar.py rename to client/ayon_core/vendor/python/python_2/websocket/_cookiejar.py diff --git a/openpype/vendor/python/python_2/websocket/_core.py b/client/ayon_core/vendor/python/python_2/websocket/_core.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_core.py rename to client/ayon_core/vendor/python/python_2/websocket/_core.py diff --git a/openpype/vendor/python/python_2/websocket/_exceptions.py b/client/ayon_core/vendor/python/python_2/websocket/_exceptions.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_exceptions.py rename to client/ayon_core/vendor/python/python_2/websocket/_exceptions.py diff --git a/openpype/vendor/python/python_2/websocket/_handshake.py b/client/ayon_core/vendor/python/python_2/websocket/_handshake.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_handshake.py rename to client/ayon_core/vendor/python/python_2/websocket/_handshake.py diff --git a/openpype/vendor/python/python_2/websocket/_http.py b/client/ayon_core/vendor/python/python_2/websocket/_http.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_http.py rename to client/ayon_core/vendor/python/python_2/websocket/_http.py diff --git a/openpype/vendor/python/python_2/websocket/_logging.py b/client/ayon_core/vendor/python/python_2/websocket/_logging.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_logging.py rename to client/ayon_core/vendor/python/python_2/websocket/_logging.py diff --git a/openpype/vendor/python/python_2/websocket/_socket.py b/client/ayon_core/vendor/python/python_2/websocket/_socket.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_socket.py rename to client/ayon_core/vendor/python/python_2/websocket/_socket.py diff --git a/openpype/vendor/python/python_2/websocket/_ssl_compat.py b/client/ayon_core/vendor/python/python_2/websocket/_ssl_compat.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_ssl_compat.py rename to client/ayon_core/vendor/python/python_2/websocket/_ssl_compat.py diff --git a/openpype/vendor/python/python_2/websocket/_url.py b/client/ayon_core/vendor/python/python_2/websocket/_url.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_url.py rename to client/ayon_core/vendor/python/python_2/websocket/_url.py diff --git 
a/openpype/vendor/python/python_2/websocket/_utils.py b/client/ayon_core/vendor/python/python_2/websocket/_utils.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/_utils.py rename to client/ayon_core/vendor/python/python_2/websocket/_utils.py diff --git a/openpype/vendor/python/python_2/qtpy/tests/__init__.py b/client/ayon_core/vendor/python/python_2/websocket/tests/__init__.py similarity index 100% rename from openpype/vendor/python/python_2/qtpy/tests/__init__.py rename to client/ayon_core/vendor/python/python_2/websocket/tests/__init__.py diff --git a/openpype/vendor/python/python_2/websocket/tests/data/header01.txt b/client/ayon_core/vendor/python/python_2/websocket/tests/data/header01.txt similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/data/header01.txt rename to client/ayon_core/vendor/python/python_2/websocket/tests/data/header01.txt diff --git a/openpype/vendor/python/python_2/websocket/tests/data/header02.txt b/client/ayon_core/vendor/python/python_2/websocket/tests/data/header02.txt similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/data/header02.txt rename to client/ayon_core/vendor/python/python_2/websocket/tests/data/header02.txt diff --git a/openpype/vendor/python/python_2/websocket/tests/data/header03.txt b/client/ayon_core/vendor/python/python_2/websocket/tests/data/header03.txt similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/data/header03.txt rename to client/ayon_core/vendor/python/python_2/websocket/tests/data/header03.txt diff --git a/openpype/vendor/python/python_2/websocket/tests/test_abnf.py b/client/ayon_core/vendor/python/python_2/websocket/tests/test_abnf.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/test_abnf.py rename to client/ayon_core/vendor/python/python_2/websocket/tests/test_abnf.py diff --git a/openpype/vendor/python/python_2/websocket/tests/test_app.py b/client/ayon_core/vendor/python/python_2/websocket/tests/test_app.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/test_app.py rename to client/ayon_core/vendor/python/python_2/websocket/tests/test_app.py diff --git a/openpype/vendor/python/python_2/websocket/tests/test_cookiejar.py b/client/ayon_core/vendor/python/python_2/websocket/tests/test_cookiejar.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/test_cookiejar.py rename to client/ayon_core/vendor/python/python_2/websocket/tests/test_cookiejar.py diff --git a/openpype/vendor/python/python_2/websocket/tests/test_http.py b/client/ayon_core/vendor/python/python_2/websocket/tests/test_http.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/test_http.py rename to client/ayon_core/vendor/python/python_2/websocket/tests/test_http.py diff --git a/openpype/vendor/python/python_2/websocket/tests/test_url.py b/client/ayon_core/vendor/python/python_2/websocket/tests/test_url.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/test_url.py rename to client/ayon_core/vendor/python/python_2/websocket/tests/test_url.py diff --git a/openpype/vendor/python/python_2/websocket/tests/test_websocket.py b/client/ayon_core/vendor/python/python_2/websocket/tests/test_websocket.py similarity index 100% rename from openpype/vendor/python/python_2/websocket/tests/test_websocket.py rename to client/ayon_core/vendor/python/python_2/websocket/tests/test_websocket.py diff --git 
a/openpype/vendor/python/python_3/README.md b/client/ayon_core/vendor/python/python_3/README.md
similarity index 100%
rename from openpype/vendor/python/python_3/README.md
rename to client/ayon_core/vendor/python/python_3/README.md
diff --git a/client/ayon_core/version.py b/client/ayon_core/version.py
new file mode 100644
index 0000000000..b4293649fe
--- /dev/null
+++ b/client/ayon_core/version.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+"""Package declaring AYON core addon version."""
+__version__ = "0.2.0"
diff --git a/openpype/vendor/python/python_2/setuptools/_vendor/__init__.py b/client/ayon_core/widgets/__init__.py
similarity index 100%
rename from openpype/vendor/python/python_2/setuptools/_vendor/__init__.py
rename to client/ayon_core/widgets/__init__.py
diff --git a/client/ayon_core/widgets/password_dialog.py b/client/ayon_core/widgets/password_dialog.py
new file mode 100644
index 0000000000..a4c50128ff
--- /dev/null
+++ b/client/ayon_core/widgets/password_dialog.py
@@ -0,0 +1,33 @@
+# TODO remove - kept for kitsu addon which imported it
+from qtpy import QtWidgets, QtCore, QtGui
+
+
+class PressHoverButton(QtWidgets.QPushButton):
+    """
+    Deprecated:
+        Use `openpype.tools.utils.PressHoverButton` instead.
+    """
+    _mouse_pressed = False
+    _mouse_hovered = False
+    change_state = QtCore.Signal(bool)
+
+    def mousePressEvent(self, event):
+        self._mouse_pressed = True
+        self._mouse_hovered = True
+        self.change_state.emit(self._mouse_hovered)
+        super(PressHoverButton, self).mousePressEvent(event)
+
+    def mouseReleaseEvent(self, event):
+        self._mouse_pressed = False
+        self._mouse_hovered = False
+        self.change_state.emit(self._mouse_hovered)
+        super(PressHoverButton, self).mouseReleaseEvent(event)
+
+    def mouseMoveEvent(self, event):
+        mouse_pos = self.mapFromGlobal(QtGui.QCursor.pos())
+        under_mouse = self.rect().contains(mouse_pos)
+        if under_mouse != self._mouse_hovered:
+            self._mouse_hovered = under_mouse
+            self.change_state.emit(self._mouse_hovered)
+
+        super(PressHoverButton, self).mouseMoveEvent(event)
diff --git a/client/pyproject.toml b/client/pyproject.toml
new file mode 100644
index 0000000000..c21ca305a7
--- /dev/null
+++ b/client/pyproject.toml
@@ -0,0 +1,24 @@
+[project]
+name="core"
+description="AYON core addon."
+
+[tool.poetry.dependencies]
+python = ">=3.9.1,<3.10"
+aiohttp_json_rpc = "*" # TVPaint server
+aiohttp-middlewares = "^2.0.0"
+wsrpc_aiohttp = "^3.1.1" # websocket server
+Click = "^8"
+clique = "1.6.*"
+jsonschema = "^2.6.0"
+pymongo = "^3.11.2"
+log4mongo = "^1.7"
+pyblish-base = "^1.8.11"
+pynput = "^1.7.2" # Timers manager - TODO remove
+speedcopy = "^2.1"
+six = "^1.15"
+qtawesome = "0.7.3"
+
+[ayon.runtimeDependencies]
+OpenTimelineIO = "0.14.1"
+opencolorio = "2.2.1"
+Pillow = "9.5.0"
diff --git a/conftest.py b/conftest.py
deleted file mode 100644
index 835d4e01db..0000000000
--- a/conftest.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Conftest."""
-...
diff --git a/create_package.py b/create_package.py
new file mode 100644
index 0000000000..94b31a03f2
--- /dev/null
+++ b/create_package.py
@@ -0,0 +1,353 @@
+"""Prepares a server package from an addon repo for upload to a server.
+
+Requires Python 3.9 (or at least 3.8).
+
+This script should be called from a cloned addon repo.
+
+It will produce a 'package' subdirectory which can be copied directly
+into the server addon directory (e.g. into `ayon-backend/addons`).
+
+Format of the package folder:
+ADDON_REPO/package/{addon name}/{addon version}
+
+You can pass `--output` (`-o`) in the arguments to change the output
+directory where the package will be created. An existing package
+directory for the same addon version is always purged if already
+present! This can be used to create the package directly in a server
+folder if available.
+
+The package contains the server side files directly; the client side
+code is zipped in the `private` subfolder.
+"""
+
+import os
+import sys
+import re
+import shutil
+import argparse
+import platform
+import logging
+import collections
+import zipfile
+import hashlib
+
+from typing import Optional
+
+CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
+PACKAGE_PATH = os.path.join(CURRENT_DIR, "package.py")
+package_content = {}
+with open(PACKAGE_PATH, "r") as stream:
+    exec(stream.read(), package_content)
+
+ADDON_VERSION = package_content["version"]
+ADDON_NAME = package_content["name"]
+ADDON_CLIENT_DIR = package_content["client_dir"]
+CLIENT_VERSION_CONTENT = '''# -*- coding: utf-8 -*-
+"""Package declaring AYON core addon version."""
+__version__ = "{}"
+'''
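As an aside, the `exec`-based loading of `package.py` shown above can also be expressed with the standard library's `runpy` module. A minimal sketch of the equivalent call, assuming a `package.py` with `name`, `version` and `client_dir` attributes next to the script (this is an illustration, not what `create_package.py` actually uses):

```python
# runpy.run_path() executes the file and returns its globals as a dict,
# which is exactly what the exec() pattern above builds by hand.
import runpy

package_content = runpy.run_path("package.py")
print(package_content["name"], package_content["version"])
```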
+
+# Patterns of directories to be skipped for server part of addon
+IGNORE_DIR_PATTERNS = [
+    re.compile(pattern)
+    for pattern in {
+        # Skip directories starting with '.'
+        r"^\.",
+        # Skip any pycache folders
+        "^__pycache__$"
+    }
+]
+
+# Patterns of files to be skipped for server part of addon
+IGNORE_FILE_PATTERNS = [
+    re.compile(pattern)
+    for pattern in {
+        # Skip files starting with '.'
+        # NOTE this could be an issue in some cases
+        r"^\.",
+        # Skip '.pyc' files
+        r"\.pyc$"
+    }
+]
+
+
+def calculate_file_checksum(filepath, hash_algorithm, chunk_size=10000):
+    func = getattr(hashlib, hash_algorithm)
+    hash_obj = func()
+    with open(filepath, "rb") as f:
+        for chunk in iter(lambda: f.read(chunk_size), b""):
+            hash_obj.update(chunk)
+    return hash_obj.hexdigest()
+
+
+class ZipFileLongPaths(zipfile.ZipFile):
+    """Allows longer paths in zip files.
+
+    Regular DOS paths are limited to MAX_PATH (260) characters, including
+    the string's terminating NUL character.
+    That limit can be exceeded by using an extended-length path that
+    starts with the '\\?\' prefix.
+    """
+    _is_windows = platform.system().lower() == "windows"
+
+    def _extract_member(self, member, tpath, pwd):
+        if self._is_windows:
+            tpath = os.path.abspath(tpath)
+            if tpath.startswith("\\\\"):
+                tpath = "\\\\?\\UNC\\" + tpath[2:]
+            else:
+                tpath = "\\\\?\\" + tpath
+
+        return super(ZipFileLongPaths, self)._extract_member(
+            member, tpath, pwd
+        )
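For clarity, the path transform that `ZipFileLongPaths` applies during extraction can be seen in isolation below. A minimal sketch; the helper name `to_extended_length_path` is ours, not part of the script:

```python
# Demonstrates the extended-length path prefix used by ZipFileLongPaths.
# On Windows, paths prefixed with \\?\ (or \\?\UNC\ for network shares)
# may exceed the 260-character MAX_PATH limit.
def to_extended_length_path(abs_path: str) -> str:
    if abs_path.startswith("\\\\"):
        # UNC path: \\server\share\... -> \\?\UNC\server\share\...
        return "\\\\?\\UNC\\" + abs_path[2:]
    # Drive path: C:\... -> \\?\C:\...
    return "\\\\?\\" + abs_path


print(to_extended_length_path("C:\\projects\\deeply\\nested\\file.py"))
# \\?\C:\projects\deeply\nested\file.py
print(to_extended_length_path("\\\\server\\share\\file.py"))
# \\?\UNC\server\share\file.py
```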
+ """ + + if src_path == dst_path: + return + + dst_dir = os.path.dirname(dst_path) + try: + os.makedirs(dst_dir) + except Exception: + pass + + shutil.copy2(src_path, dst_path) + + +def _value_match_regexes(value, regexes): + for regex in regexes: + if regex.search(value): + return True + return False + + +def find_files_in_subdir( + src_path, + ignore_file_patterns=None, + ignore_dir_patterns=None +): + if ignore_file_patterns is None: + ignore_file_patterns = IGNORE_FILE_PATTERNS + + if ignore_dir_patterns is None: + ignore_dir_patterns = IGNORE_DIR_PATTERNS + output = [] + + hierarchy_queue = collections.deque() + hierarchy_queue.append((src_path, [])) + while hierarchy_queue: + item = hierarchy_queue.popleft() + dirpath, parents = item + for name in os.listdir(dirpath): + path = os.path.join(dirpath, name) + if os.path.isfile(path): + if not _value_match_regexes(name, ignore_file_patterns): + items = list(parents) + items.append(name) + output.append((path, os.path.sep.join(items))) + continue + + if not _value_match_regexes(name, ignore_dir_patterns): + items = list(parents) + items.append(name) + hierarchy_queue.append((path, items)) + + return output + + +def copy_server_content(addon_output_dir, current_dir, log): + """Copies server side folders to 'addon_package_dir' + + Args: + addon_output_dir (str): package dir in addon repo dir + current_dir (str): addon repo dir + log (logging.Logger) + """ + + log.info("Copying server content") + + filepaths_to_copy = [] + server_dirpath = os.path.join(current_dir, "server") + + for item in find_files_in_subdir(server_dirpath): + src_path, dst_subpath = item + dst_path = os.path.join(addon_output_dir, "server", dst_subpath) + filepaths_to_copy.append((src_path, dst_path)) + + # Copy files + for src_path, dst_path in filepaths_to_copy: + safe_copy_file(src_path, dst_path) + + +def _update_client_version(client_addon_dir): + """Write version.py file to 'client' directory. + + Make sure the version in client dir is the same as in package.py. + + Args: + client_addon_dir (str): Directory path of client addon. + """ + + dst_version_path = os.path.join(client_addon_dir, "version.py") + with open(dst_version_path, "w") as stream: + stream.write(CLIENT_VERSION_CONTENT.format(ADDON_VERSION)) + + +def zip_client_side(addon_package_dir, current_dir, log): + """Copy and zip `client` content into 'addon_package_dir'. + + Args: + addon_package_dir (str): Output package directory path. + current_dir (str): Directory path of addon source. + log (logging.Logger): Logger object. 
+ """ + + client_dir = os.path.join(current_dir, "client") + client_addon_dir = os.path.join(client_dir, ADDON_CLIENT_DIR) + if not os.path.isdir(client_addon_dir): + raise ValueError( + f"Failed to find client directory '{client_addon_dir}'" + ) + + log.info("Preparing client code zip") + private_dir = os.path.join(addon_package_dir, "private") + + if not os.path.exists(private_dir): + os.makedirs(private_dir) + + _update_client_version(client_addon_dir) + + zip_filepath = os.path.join(os.path.join(private_dir, "client.zip")) + with ZipFileLongPaths(zip_filepath, "w", zipfile.ZIP_DEFLATED) as zipf: + # Add client code content to zip + for path, sub_path in find_files_in_subdir(client_addon_dir): + sub_path = os.path.join(ADDON_CLIENT_DIR, sub_path) + zipf.write(path, sub_path) + + shutil.copy(os.path.join(client_dir, "pyproject.toml"), private_dir) + + +def create_server_package( + output_dir: str, + addon_output_dir: str, + log: logging.Logger +): + """Create server package zip file. + + The zip file can be installed to a server using UI or rest api endpoints. + + Args: + output_dir (str): Directory path to output zip file. + addon_output_dir (str): Directory path to addon output directory. + log (logging.Logger): Logger object. + """ + + log.info("Creating server package") + output_path = os.path.join( + output_dir, f"{ADDON_NAME}-{ADDON_VERSION}.zip" + ) + with ZipFileLongPaths(output_path, "w", zipfile.ZIP_DEFLATED) as zipf: + # Move addon content to zip into 'addon' directory + addon_output_dir_offset = len(addon_output_dir) + 1 + for root, _, filenames in os.walk(addon_output_dir): + if not filenames: + continue + + dst_root = None + if root != addon_output_dir: + dst_root = root[addon_output_dir_offset:] + for filename in filenames: + src_path = os.path.join(root, filename) + dst_path = filename + if dst_root: + dst_path = os.path.join(dst_root, dst_path) + zipf.write(src_path, dst_path) + + log.info(f"Output package can be found: {output_path}") + + +def main( + output_dir: Optional[str]=None, + skip_zip: bool=False, + keep_sources: bool=False +): + log = logging.getLogger("create_package") + log.info("Start creating package") + + current_dir = os.path.dirname(os.path.abspath(__file__)) + if not output_dir: + output_dir = os.path.join(current_dir, "package") + + + new_created_version_dir = os.path.join( + output_dir, ADDON_NAME, ADDON_VERSION + ) + if os.path.isdir(new_created_version_dir): + log.info(f"Purging {new_created_version_dir}") + shutil.rmtree(output_dir) + + log.info(f"Preparing package for {ADDON_NAME}-{ADDON_VERSION}") + + addon_output_root = os.path.join(output_dir, ADDON_NAME) + addon_output_dir = os.path.join(addon_output_root, ADDON_VERSION) + if not os.path.exists(addon_output_dir): + os.makedirs(addon_output_dir) + + copy_server_content(addon_output_dir, current_dir, log) + safe_copy_file( + PACKAGE_PATH, + os.path.join(addon_output_dir, os.path.basename(PACKAGE_PATH)) + ) + zip_client_side(addon_output_dir, current_dir, log) + + # Skip server zipping + if not skip_zip: + create_server_package(output_dir, addon_output_dir, log) + # Remove sources only if zip file is created + if not keep_sources: + log.info("Removing source files for server package") + shutil.rmtree(addon_output_root) + log.info("Package creation finished") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--skip-zip", + dest="skip_zip", + action="store_true", + help=( + "Skip zipping server package and create only" + " server folder structure." 
diff --git a/docs/Makefile b/docs/Makefile
deleted file mode 100644
index 69fe55ecfa..0000000000
--- a/docs/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS  =
-SPHINXBUILD = sphinx-build
-SOURCEDIR   = source
-BUILDDIR    = build
-
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
deleted file mode 100644
index 102da990aa..0000000000
--- a/docs/README.md
+++ /dev/null
@@ -1,74 +0,0 @@
-API Documentation
-=================
-
-This describes how to build and modify the API documentation using Sphinx and AutoAPI. The ground truth for
-the documentation should live directly in the sources - in docstrings and markdown files. Sphinx and AutoAPI will
-crawl over them and generate RST files that are in turn used to generate the HTML documentation. For docstrings we
-prefer the "Napoleon" or "Google" style, but RST is also acceptable, mainly in cases where you need to use Sphinx
-directives.
-
-Using only docstrings is not really viable, as some documentation has to be written at a higher level - like an
-overview of some modules/functionality and so on. This should be done directly in RST files and committed to the
-repository.
-
-Configuration
--------------
-Configuration is done in `/docs/source/conf.py`. The most important settings are:
-
-- `autodoc_mock_imports`: add modules that can't actually be imported by Sphinx in the running environment, like `nuke`, `maya`, etc.
-- `autoapi_ignore`: add directories that shouldn't be processed by **AutoAPI**, like vendor dirs, etc.
-- `html_theme_options`: you can use these options to influence how the HTML theme of the generated files will look.
-- `myst_gfm_only`: a MyST parser option setting which flavour of Markdown should be used.
-
-How to build it
----------------
-
-You can run:
-
-```sh
-cd .\docs
-make.bat html
-```
-
-On Linux/macOS:
-
-```sh
-cd ./docs
-make html
-```
-
-This will go over our code and generate **.rst** files in `/docs/source/autoapi` and from those it will generate the
-full HTML documentation in `/docs/build/html`.
-
-During the build you may see tons of red errors that point to issues on our side:
-
-1) **Wrong imports**
-
-Invalid imports are usually wrong relative imports (too deep) or circular imports.
-2) **Invalid docstrings**
-
-Docstrings to be processed into documentation need to follow some syntax - this can be checked by running
-`pydocstyle`, which is already included with OpenPype.
-3) **Invalid markdown/rst files**
-
-Markdown/RST files can be included inside RST files using the `.. include::` directive. But they have to be properly
-formatted.
-
-Editing RST templates
----------------------
-Everything starts with `/docs/source/index.rst` - this file should be properly edited. Right now it just
-includes `readme.rst`, which in turn includes and parses the main `README.md`. This is the entry point to the API
-documentation. All templates generated by AutoAPI are in `/docs/source/autoapi`. They should eventually be committed
-to the repository and edited too.
-
-Steps for enhancing API documentation
--------------------------------------
-
-1) Run `/docs/make.bat html`
-2) Read the red errors/warnings - fix them in the code
-3) Run `/docs/make.bat html` - again, until there are no red lines
-4) Edit RST files and add some meaningful content there
-
-Resources
-=========
-
-- [ReStructuredText on Wikipedia](https://en.wikipedia.org/wiki/ReStructuredText)
-- [RST Quick Reference](https://docutils.sourceforge.io/docs/user/rst/quickref.html)
-- [Sphinx AutoAPI Documentation](https://sphinx-autoapi.readthedocs.io/en/latest/)
-- [Example of Google Style Python Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
-- [Sphinx Directives](https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html)
diff --git a/docs/make.bat b/docs/make.bat
deleted file mode 100644
index 1d261df277..0000000000
--- a/docs/make.bat
+++ /dev/null
@@ -1,35 +0,0 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=..\.poetry\bin\poetry run sphinx-build
-)
-set SOURCEDIR=source
-set BUILDDIR=build
-
-if "%1" == "" goto help
-
-%SPHINXBUILD% >NUL 2>NUL
-if errorlevel 9009 (
-	echo.
-	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-	echo.installed, then set the SPHINXBUILD environment variable to point
-	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-	echo.may add the Sphinx directory to PATH.
-	echo.
-	echo.If you don't have Sphinx installed, grab it from
-	echo.http://sphinx-doc.org/
-	exit /b 1
-)
-
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-goto end
-
-:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-
-:end
-popd
diff --git a/docs/source/_static/AYON_tight_G.svg b/docs/source/_static/AYON_tight_G.svg
deleted file mode 100644
index 2c5b73deea..0000000000
--- a/docs/source/_static/AYON_tight_G.svg
+++ /dev/null
@@ -1,38 +0,0 @@
[38 deleted lines of SVG vector markup, stripped from this extract]
diff --git a/docs/source/_static/README.md b/docs/source/_static/README.md
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/docs/source/_templates/autoapi/index.rst b/docs/source/_templates/autoapi/index.rst
deleted file mode 100644
index 95d0ad8911..0000000000
--- a/docs/source/_templates/autoapi/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-API Reference
-=============
-
-This page contains auto-generated API reference documentation [#f1]_.
-
-.. toctree::
-   :titlesonly:
-
-   {% for page in pages %}
-   {% if page.top_level_object and page.display %}
-   {{ page.include_path }}
-   {% endif %}
-   {% endfor %}
-
-..
[#f1] Created with `sphinx-autoapi `_ diff --git a/docs/source/_templates/autoapi/python/attribute.rst b/docs/source/_templates/autoapi/python/attribute.rst deleted file mode 100644 index ebaba555ad..0000000000 --- a/docs/source/_templates/autoapi/python/attribute.rst +++ /dev/null @@ -1 +0,0 @@ -{% extends "python/data.rst" %} diff --git a/docs/source/_templates/autoapi/python/class.rst b/docs/source/_templates/autoapi/python/class.rst deleted file mode 100644 index df5edffb62..0000000000 --- a/docs/source/_templates/autoapi/python/class.rst +++ /dev/null @@ -1,58 +0,0 @@ -{% if obj.display %} -.. py:{{ obj.type }}:: {{ obj.short_name }}{% if obj.args %}({{ obj.args }}){% endif %} -{% for (args, return_annotation) in obj.overloads %} - {{ " " * (obj.type | length) }} {{ obj.short_name }}{% if args %}({{ args }}){% endif %} -{% endfor %} - - - {% if obj.bases %} - {% if "show-inheritance" in autoapi_options %} - Bases: {% for base in obj.bases %}{{ base|link_objs }}{% if not loop.last %}, {% endif %}{% endfor %} - {% endif %} - - - {% if "show-inheritance-diagram" in autoapi_options and obj.bases != ["object"] %} - .. autoapi-inheritance-diagram:: {{ obj.obj["full_name"] }} - :parts: 1 - {% if "private-members" in autoapi_options %} - :private-bases: - {% endif %} - - {% endif %} - {% endif %} - {% if obj.docstring %} - {{ obj.docstring|indent(3) }} - {% endif %} - {% if "inherited-members" in autoapi_options %} - {% set visible_classes = obj.classes|selectattr("display")|list %} - {% else %} - {% set visible_classes = obj.classes|rejectattr("inherited")|selectattr("display")|list %} - {% endif %} - {% for klass in visible_classes %} - {{ klass.render()|indent(3) }} - {% endfor %} - {% if "inherited-members" in autoapi_options %} - {% set visible_properties = obj.properties|selectattr("display")|list %} - {% else %} - {% set visible_properties = obj.properties|rejectattr("inherited")|selectattr("display")|list %} - {% endif %} - {% for property in visible_properties %} - {{ property.render()|indent(3) }} - {% endfor %} - {% if "inherited-members" in autoapi_options %} - {% set visible_attributes = obj.attributes|selectattr("display")|list %} - {% else %} - {% set visible_attributes = obj.attributes|rejectattr("inherited")|selectattr("display")|list %} - {% endif %} - {% for attribute in visible_attributes %} - {{ attribute.render()|indent(3) }} - {% endfor %} - {% if "inherited-members" in autoapi_options %} - {% set visible_methods = obj.methods|selectattr("display")|list %} - {% else %} - {% set visible_methods = obj.methods|rejectattr("inherited")|selectattr("display")|list %} - {% endif %} - {% for method in visible_methods %} - {{ method.render()|indent(3) }} - {% endfor %} -{% endif %} diff --git a/docs/source/_templates/autoapi/python/data.rst b/docs/source/_templates/autoapi/python/data.rst deleted file mode 100644 index 3d12b2d0c7..0000000000 --- a/docs/source/_templates/autoapi/python/data.rst +++ /dev/null @@ -1,37 +0,0 @@ -{% if obj.display %} -.. py:{{ obj.type }}:: {{ obj.name }} - {%- if obj.annotation is not none %} - - :type: {%- if obj.annotation %} {{ obj.annotation }}{%- endif %} - - {%- endif %} - - {%- if obj.value is not none %} - - :value: {% if obj.value is string and obj.value.splitlines()|count > 1 -%} - Multiline-String - - .. raw:: html - -
<details><summary>Show Value</summary>
-
-    .. code-block:: python
-
-        """{{ obj.value|indent(width=8,blank=true) }}"""
-
-    .. raw:: html
-
-        </details>
- - {%- else -%} - {%- if obj.value is string -%} - {{ "%r" % obj.value|string|truncate(100) }} - {%- else -%} - {{ obj.value|string|truncate(100) }} - {%- endif -%} - {%- endif %} - {%- endif %} - - - {{ obj.docstring|indent(3) }} -{% endif %} diff --git a/docs/source/_templates/autoapi/python/exception.rst b/docs/source/_templates/autoapi/python/exception.rst deleted file mode 100644 index 92f3d38fd5..0000000000 --- a/docs/source/_templates/autoapi/python/exception.rst +++ /dev/null @@ -1 +0,0 @@ -{% extends "python/class.rst" %} diff --git a/docs/source/_templates/autoapi/python/function.rst b/docs/source/_templates/autoapi/python/function.rst deleted file mode 100644 index b00d5c2445..0000000000 --- a/docs/source/_templates/autoapi/python/function.rst +++ /dev/null @@ -1,15 +0,0 @@ -{% if obj.display %} -.. py:function:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} - -{% for (args, return_annotation) in obj.overloads %} - {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %} - -{% endfor %} - {% for property in obj.properties %} - :{{ property }}: - {% endfor %} - - {% if obj.docstring %} - {{ obj.docstring|indent(3) }} - {% endif %} -{% endif %} diff --git a/docs/source/_templates/autoapi/python/method.rst b/docs/source/_templates/autoapi/python/method.rst deleted file mode 100644 index 723cb7bbe5..0000000000 --- a/docs/source/_templates/autoapi/python/method.rst +++ /dev/null @@ -1,19 +0,0 @@ -{%- if obj.display %} -.. py:method:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} - -{% for (args, return_annotation) in obj.overloads %} - {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %} - -{% endfor %} - {% if obj.properties %} - {% for property in obj.properties %} - :{{ property }}: - {% endfor %} - - {% else %} - - {% endif %} - {% if obj.docstring %} - {{ obj.docstring|indent(3) }} - {% endif %} -{% endif %} diff --git a/docs/source/_templates/autoapi/python/module.rst b/docs/source/_templates/autoapi/python/module.rst deleted file mode 100644 index d2714f6c9d..0000000000 --- a/docs/source/_templates/autoapi/python/module.rst +++ /dev/null @@ -1,114 +0,0 @@ -{% if not obj.display %} -:orphan: - -{% endif %} -:py:mod:`{{ obj.name }}` -=========={{ "=" * obj.name|length }} - -.. py:module:: {{ obj.name }} - -{% if obj.docstring %} -.. autoapi-nested-parse:: - - {{ obj.docstring|indent(3) }} - -{% endif %} - -{% block subpackages %} -{% set visible_subpackages = obj.subpackages|selectattr("display")|list %} -{% if visible_subpackages %} -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - -{% for subpackage in visible_subpackages %} - {{ subpackage.short_name }}/index.rst -{% endfor %} - - -{% endif %} -{% endblock %} -{% block submodules %} -{% set visible_submodules = obj.submodules|selectattr("display")|list %} -{% if visible_submodules %} -Submodules ----------- -.. 
toctree:: - :titlesonly: - :maxdepth: 1 - -{% for submodule in visible_submodules %} - {{ submodule.short_name }}/index.rst -{% endfor %} - - -{% endif %} -{% endblock %} -{% block content %} -{% if obj.all is not none %} -{% set visible_children = obj.children|selectattr("short_name", "in", obj.all)|list %} -{% elif obj.type is equalto("package") %} -{% set visible_children = obj.children|selectattr("display")|list %} -{% else %} -{% set visible_children = obj.children|selectattr("display")|rejectattr("imported")|list %} -{% endif %} -{% if visible_children %} -{{ obj.type|title }} Contents -{{ "-" * obj.type|length }}--------- - -{% set visible_classes = visible_children|selectattr("type", "equalto", "class")|list %} -{% set visible_functions = visible_children|selectattr("type", "equalto", "function")|list %} -{% set visible_attributes = visible_children|selectattr("type", "equalto", "data")|list %} -{% if "show-module-summary" in autoapi_options and (visible_classes or visible_functions) %} -{% block classes scoped %} -{% if visible_classes %} -Classes -~~~~~~~ - -.. autoapisummary:: - -{% for klass in visible_classes %} - {{ klass.id }} -{% endfor %} - - -{% endif %} -{% endblock %} - -{% block functions scoped %} -{% if visible_functions %} -Functions -~~~~~~~~~ - -.. autoapisummary:: - -{% for function in visible_functions %} - {{ function.id }} -{% endfor %} - - -{% endif %} -{% endblock %} - -{% block attributes scoped %} -{% if visible_attributes %} -Attributes -~~~~~~~~~~ - -.. autoapisummary:: - -{% for attribute in visible_attributes %} - {{ attribute.id }} -{% endfor %} - - -{% endif %} -{% endblock %} -{% endif %} -{% for obj_item in visible_children %} -{{ obj_item.render()|indent(0) }} -{% endfor %} -{% endif %} -{% endblock %} diff --git a/docs/source/_templates/autoapi/python/package.rst b/docs/source/_templates/autoapi/python/package.rst deleted file mode 100644 index fb9a64965e..0000000000 --- a/docs/source/_templates/autoapi/python/package.rst +++ /dev/null @@ -1 +0,0 @@ -{% extends "python/module.rst" %} diff --git a/docs/source/_templates/autoapi/python/property.rst b/docs/source/_templates/autoapi/python/property.rst deleted file mode 100644 index 70af24236f..0000000000 --- a/docs/source/_templates/autoapi/python/property.rst +++ /dev/null @@ -1,15 +0,0 @@ -{%- if obj.display %} -.. py:property:: {{ obj.short_name }} - {% if obj.annotation %} - :type: {{ obj.annotation }} - {% endif %} - {% if obj.properties %} - {% for property in obj.properties %} - :{{ property }}: - {% endfor %} - {% endif %} - - {% if obj.docstring %} - {{ obj.docstring|indent(3) }} - {% endif %} -{% endif %} diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 916a397e8e..0000000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Configuration file for the Sphinx documentation builder. -# -# This file does only contain a selection of the most common options. For a -# full list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-# -# import os -# import sys - -import os -import sys -import revitron_sphinx_theme - -openpype_root = os.path.abspath('../..') -sys.path.insert(0, openpype_root) -# app = QApplication([]) - -""" -repos = os.listdir(os.path.abspath("../../repos")) -repos = [os.path.join(openpype_root, "repos", repo) for repo in repos] -for repo in repos: - sys.path.append(repo) -""" - -todo_include_todos = True -autodoc_mock_imports = ["maya", "pymel", "nuke", "nukestudio", "nukescripts", - "hiero", "bpy", "fusion", "houdini", "hou", "unreal", - "__builtin__", "resolve", "pysync", "DaVinciResolveScript"] - -# -- Project information ----------------------------------------------------- - -project = 'OpenPype' -copyright = '2023 Ynput' -author = 'Ynput' - -# The short X.Y version -version = '' -# The full version, including alpha/beta/rc tags -release = '' - - -# -- General configuration --------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', - 'sphinx.ext.autosummary', - 'revitron_sphinx_theme', - 'autoapi.extension', - 'myst_parser' -] - -############################## -# Autoapi settings -############################## - -autoapi_dirs = ['../../openpype', '../../igniter'] - -# bypass modules with a lot of python2 content for now -autoapi_ignore = [ - "*vendor*", - "*schemas*", - "*startup/*", - "*/website*", - "*openpype/hooks*", - "*openpype/style*", - "openpype/tests*", - # to many levels of relative import: - "*/modules/sync_server/*" -] -autoapi_keep_files = True -autoapi_options = [ - 'members', - 'undoc-members', - 'show-inheritance', - 'show-module-summary' -] -autoapi_add_toctree_entry = True -autoapi_template_dir = '_templates/autoapi' - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = ['.rst', '.md'] - -# The master toctree document. -master_doc = 'index' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = "English" - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [ - "openpype.hosts.resolve.*", - "openpype.tools.*" - ] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'friendly' - -# -- Options for autodoc ----------------------------------------------------- -autodoc_default_flags = ['members'] -autosummary_generate = True - - -# -- Options for HTML output ------------------------------------------------- - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. 
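A note on the `autodoc_mock_imports` list above: host modules such as `maya`, `hou` or `nuke` only exist inside their DCC applications, so importing the documented code on a plain build machine would fail. Sphinx substitutes mock objects for every name in that list. A rough, self-contained sketch of the idea (not Sphinx's actual implementation; the module names in the loop are just illustrative):

    import sys
    from unittest import mock

    # Register stand-ins before anything executes "import maya" and friends,
    # so module-level imports in the documented code resolve to harmless mocks.
    for host_module in ("maya", "hou", "nuke"):
        sys.modules.setdefault(host_module, mock.MagicMock())

    import maya     # resolves to the MagicMock registered above
    maya.cmds.ls()  # arbitrary attribute access and calls succeed on a mock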
-# -html_theme = 'revitron_sphinx_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -html_theme_options = { - 'collapse_navigation': True, - 'sticky_navigation': True, - 'navigation_depth': 4, - 'includehidden': True, - 'titles_only': False, - 'github_url': '', -} -html_logo = '_static/AYON_tight_G.svg' - - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# The default sidebars (for documents that don't match any pattern) are -# defined by theme itself. Builtin themes are using these templates by -# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', -# 'searchbox.html']``. -# -# html_sidebars = {} - - -# -- Options for HTMLHelp output --------------------------------------------- - -# Output file base name for HTML help builder. -htmlhelp_basename = 'pypedoc' - - -# -- Options for LaTeX output ------------------------------------------------ - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'openpype.tex', 'OpenPype Documentation', - 'Ynput', 'manual'), -] - - -# -- Options for manual page output ------------------------------------------ - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'openpype', 'OpenPype Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ---------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'OpenPype', 'OpenPype Documentation', - author, 'OpenPype', 'Pipeline for studios', - 'Miscellaneous'), -] - - -# -- Options for Epub output ------------------------------------------------- - -# Bibliographic Dublin Core info. -epub_title = project - -# The unique identifier of the text. This can be a ISBN number -# or the project homepage. -# -# epub_identifier = '' - -# A unique identification for the text. -# -# epub_uid = '' - -# A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] - - -# -- Extension configuration ------------------------------------------------- - -# -- Options for intersphinx extension --------------------------------------- - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - 'https://docs.python.org/3/': None -} - -myst_gfm_only = True diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index f703468fca..0000000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. 
openpype documentation master file, created by - sphinx-quickstart on Mon May 13 17:18:23 2019. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to OpenPype's API documentation! -======================================== - -.. toctree:: - - Readme - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/source/readme.rst b/docs/source/readme.rst deleted file mode 100644 index 138b88bba8..0000000000 --- a/docs/source/readme.rst +++ /dev/null @@ -1,6 +0,0 @@ -=============== -OpenPype Readme -=============== - -.. include:: ../../README.md - :parser: myst_parser.sphinx_ diff --git a/igniter/Poppins/OFL.txt b/igniter/Poppins/OFL.txt deleted file mode 100644 index 76df3b5656..0000000000 --- a/igniter/Poppins/OFL.txt +++ /dev/null @@ -1,93 +0,0 @@ -Copyright 2020 The Poppins Project Authors (https://github.com/itfoundry/Poppins) - -This Font Software is licensed under the SIL Open Font License, Version 1.1. -This license is copied below, and is also available with a FAQ at: -http://scripts.sil.org/OFL - - ------------------------------------------------------------ -SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ------------------------------------------------------------ - -PREAMBLE -The goals of the Open Font License (OFL) are to stimulate worldwide -development of collaborative font projects, to support the font creation -efforts of academic and linguistic communities, and to provide a free and -open framework in which fonts may be shared and improved in partnership -with others. - -The OFL allows the licensed fonts to be used, studied, modified and -redistributed freely as long as they are not sold by themselves. The -fonts, including any derivative works, can be bundled, embedded, -redistributed and/or sold with any software provided that any reserved -names are not used by derivative works. The fonts and derivatives, -however, cannot be released under any other type of license. The -requirement for fonts to remain under this license does not apply -to any document created using the fonts or their derivatives. - -DEFINITIONS -"Font Software" refers to the set of files released by the Copyright -Holder(s) under this license and clearly marked as such. This may -include source files, build scripts and documentation. - -"Reserved Font Name" refers to any names specified as such after the -copyright statement(s). - -"Original Version" refers to the collection of Font Software components as -distributed by the Copyright Holder(s). - -"Modified Version" refers to any derivative made by adding to, deleting, -or substituting -- in part or in whole -- any of the components of the -Original Version, by changing formats or by porting the Font Software to a -new environment. - -"Author" refers to any designer, engineer, programmer, technical -writer or other person who contributed to the Font Software. - -PERMISSION & CONDITIONS -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Font Software, to use, study, copy, merge, embed, modify, -redistribute, and sell modified and unmodified copies of the Font -Software, subject to the following conditions: - -1) Neither the Font Software nor any of its individual components, -in Original or Modified Versions, may be sold by itself. 
- -2) Original or Modified Versions of the Font Software may be bundled, -redistributed and/or sold with any software, provided that each copy -contains the above copyright notice and this license. These can be -included either as stand-alone text files, human-readable headers or -in the appropriate machine-readable metadata fields within text or -binary files as long as those fields can be easily viewed by the user. - -3) No Modified Version of the Font Software may use the Reserved Font -Name(s) unless explicit written permission is granted by the corresponding -Copyright Holder. This restriction only applies to the primary font name as -presented to the users. - -4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font -Software shall not be used to promote, endorse or advertise any -Modified Version, except to acknowledge the contribution(s) of the -Copyright Holder(s) and the Author(s) or with their explicit written -permission. - -5) The Font Software, modified or unmodified, in part or in whole, -must be distributed entirely under this license, and must not be -distributed under any other license. The requirement for fonts to -remain under this license does not apply to any document created -using the Font Software. - -TERMINATION -This license becomes null and void if any of the above conditions are -not met. - -DISCLAIMER -THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT -OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM -OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/igniter/Poppins/Poppins-Black.ttf b/igniter/Poppins/Poppins-Black.ttf deleted file mode 100644 index a9520b78ac..0000000000 Binary files a/igniter/Poppins/Poppins-Black.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-BlackItalic.ttf b/igniter/Poppins/Poppins-BlackItalic.ttf deleted file mode 100644 index ebfdd707e5..0000000000 Binary files a/igniter/Poppins/Poppins-BlackItalic.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-Bold.ttf b/igniter/Poppins/Poppins-Bold.ttf deleted file mode 100644 index b94d47f3af..0000000000 Binary files a/igniter/Poppins/Poppins-Bold.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-BoldItalic.ttf b/igniter/Poppins/Poppins-BoldItalic.ttf deleted file mode 100644 index e2e64456c7..0000000000 Binary files a/igniter/Poppins/Poppins-BoldItalic.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-ExtraBold.ttf b/igniter/Poppins/Poppins-ExtraBold.ttf deleted file mode 100644 index 8f008c3684..0000000000 Binary files a/igniter/Poppins/Poppins-ExtraBold.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-ExtraBoldItalic.ttf b/igniter/Poppins/Poppins-ExtraBoldItalic.ttf deleted file mode 100644 index b2a9bf557a..0000000000 Binary files a/igniter/Poppins/Poppins-ExtraBoldItalic.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-ExtraLight.ttf b/igniter/Poppins/Poppins-ExtraLight.ttf deleted file mode 100644 index ee6238251f..0000000000 Binary files a/igniter/Poppins/Poppins-ExtraLight.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-ExtraLightItalic.ttf b/igniter/Poppins/Poppins-ExtraLightItalic.ttf deleted file mode 100644 index e392492abd..0000000000 Binary files a/igniter/Poppins/Poppins-ExtraLightItalic.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-Italic.ttf b/igniter/Poppins/Poppins-Italic.ttf deleted file mode 100644 index 46203996d3..0000000000 Binary files a/igniter/Poppins/Poppins-Italic.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-Light.ttf b/igniter/Poppins/Poppins-Light.ttf deleted file mode 100644 index 2ab022196b..0000000000 Binary files a/igniter/Poppins/Poppins-Light.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-LightItalic.ttf b/igniter/Poppins/Poppins-LightItalic.ttf deleted file mode 100644 index 6f9279daef..0000000000 Binary files a/igniter/Poppins/Poppins-LightItalic.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-Medium.ttf b/igniter/Poppins/Poppins-Medium.ttf deleted file mode 100644 index e90e87ed69..0000000000 Binary files a/igniter/Poppins/Poppins-Medium.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-MediumItalic.ttf b/igniter/Poppins/Poppins-MediumItalic.ttf deleted file mode 100644 index d8a251c7c4..0000000000 Binary files a/igniter/Poppins/Poppins-MediumItalic.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-Regular.ttf b/igniter/Poppins/Poppins-Regular.ttf deleted file mode 100644 index be06e7fdca..0000000000 Binary files a/igniter/Poppins/Poppins-Regular.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-SemiBold.ttf b/igniter/Poppins/Poppins-SemiBold.ttf deleted file mode 100644 index dabf7c242e..0000000000 Binary files a/igniter/Poppins/Poppins-SemiBold.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-SemiBoldItalic.ttf b/igniter/Poppins/Poppins-SemiBoldItalic.ttf deleted file mode 100644 index 29d5f7419b..0000000000 Binary files a/igniter/Poppins/Poppins-SemiBoldItalic.ttf and /dev/null differ diff --git 
a/igniter/Poppins/Poppins-Thin.ttf b/igniter/Poppins/Poppins-Thin.ttf deleted file mode 100644 index f5c0fdd531..0000000000 Binary files a/igniter/Poppins/Poppins-Thin.ttf and /dev/null differ diff --git a/igniter/Poppins/Poppins-ThinItalic.ttf b/igniter/Poppins/Poppins-ThinItalic.ttf deleted file mode 100644 index b910089316..0000000000 Binary files a/igniter/Poppins/Poppins-ThinItalic.ttf and /dev/null differ diff --git a/igniter/RobotoMono-Regular.ttf b/igniter/RobotoMono-Regular.ttf deleted file mode 100644 index 7c4ce36a44..0000000000 Binary files a/igniter/RobotoMono-Regular.ttf and /dev/null differ diff --git a/igniter/__init__.py b/igniter/__init__.py deleted file mode 100644 index 085a825860..0000000000 --- a/igniter/__init__.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- -"""Open install dialog.""" - -import os -import sys - -os.chdir(os.path.dirname(__file__)) # for override sys.path in Deadline - -from .bootstrap_repos import ( - BootstrapRepos, - OpenPypeVersion -) -from .version import __version__ as version - -# Store OpenPypeVersion to 'sys.modules' -# - this makes it available in OpenPype processes without modifying -# 'sys.path' or 'PYTHONPATH' -if "OpenPypeVersion" not in sys.modules: - sys.modules["OpenPypeVersion"] = OpenPypeVersion - - -def _get_qt_app(): - from qtpy import QtWidgets, QtCore - - app = QtWidgets.QApplication.instance() - if app is not None: - return app - - for attr_name in ( - "AA_EnableHighDpiScaling", - "AA_UseHighDpiPixmaps", - ): - attr = getattr(QtCore.Qt, attr_name, None) - if attr is not None: - QtWidgets.QApplication.setAttribute(attr) - - policy = os.getenv("QT_SCALE_FACTOR_ROUNDING_POLICY") - if ( - hasattr(QtWidgets.QApplication, "setHighDpiScaleFactorRoundingPolicy") - and not policy - ): - QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy( - QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough - ) - - return QtWidgets.QApplication(sys.argv) - - -def open_dialog(): - """Show Igniter dialog.""" - if os.getenv("OPENPYPE_HEADLESS_MODE"): - print("!!! Can't open dialog in headless mode. Exiting.") - sys.exit(1) - from .install_dialog import InstallDialog - - app = _get_qt_app() - - d = InstallDialog() - d.open() - - app.exec_() - return d.result() - - -def open_update_window(openpype_version): - """Open update window.""" - if os.getenv("OPENPYPE_HEADLESS_MODE"): - print("!!! Can't open dialog in headless mode. Exiting.") - sys.exit(1) - - from .update_window import UpdateWindow - - app = _get_qt_app() - - d = UpdateWindow(version=openpype_version) - d.open() - - app.exec_() - version_path = d.get_version_path() - return version_path - - -def show_message_dialog(title, message): - """Show dialog with a message and title to user.""" - if os.getenv("OPENPYPE_HEADLESS_MODE"): - print("!!! Can't open dialog in headless mode. 
Exiting.") - sys.exit(1) - - from .message_dialog import MessageDialog - - app = _get_qt_app() - - dialog = MessageDialog(title, message) - dialog.open() - - app.exec_() - - -__all__ = [ - "BootstrapRepos", - "open_dialog", - "open_update_window", - "show_message_dialog", - "version" -] diff --git a/igniter/__main__.py b/igniter/__main__.py deleted file mode 100644 index 9783b20f49..0000000000 --- a/igniter/__main__.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -"""Open install dialog.""" - -import sys -from qtpy import QtWidgets - -from .install_dialog import InstallDialog - - -RESULT = 0 - - -def get_result(res: int): - """Sets result returned from dialog.""" - global RESULT - RESULT = res - - -app = QtWidgets.QApplication(sys.argv) - -d = InstallDialog() -d.finished.connect(get_result) -d.open() -app.exec() -sys.exit(RESULT) diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py deleted file mode 100644 index e7b440f812..0000000000 --- a/igniter/bootstrap_repos.py +++ /dev/null @@ -1,1602 +0,0 @@ -# -*- coding: utf-8 -*- -"""Bootstrap OpenPype repositories.""" -from __future__ import annotations -import logging as log -import os -import re -import shutil -import sys -import tempfile -from pathlib import Path -from typing import Union, Callable, List, Tuple -import hashlib -import platform - -from zipfile import ZipFile, BadZipFile - -from appdirs import user_data_dir -from speedcopy import copyfile -import semver - -from .user_settings import ( - OpenPypeSecureRegistry, - OpenPypeSettingsRegistry -) -from .tools import ( - get_openpype_global_settings, - get_openpype_path_from_settings, - get_expected_studio_version_str, - get_local_openpype_path_from_settings -) - - -LOG_INFO = 0 -LOG_WARNING = 1 -LOG_ERROR = 3 - - -def sanitize_long_path(path): - """Sanitize long paths (260 characters) when on Windows. - - Long paths are not capatible with ZipFile or reading a file, so we can - shorten the path to use. - - Args: - path (str): path to either directory or file. - - Returns: - str: sanitized path - """ - if platform.system().lower() != "windows": - return path - path = os.path.abspath(path) - - if path.startswith("\\\\"): - path = "\\\\?\\UNC\\" + path[2:] - else: - path = "\\\\?\\" + path - return path - - -def sha256sum(filename): - """Calculate sha256 for content of the file. - - Args: - filename (str): Path to file. - - Returns: - str: hex encoded sha256 - - """ - h = hashlib.sha256() - b = bytearray(128 * 1024) - mv = memoryview(b) - with open(filename, 'rb', buffering=0) as f: - for n in iter(lambda: f.readinto(mv), 0): - h.update(mv[:n]) - return h.hexdigest() - - -class ZipFileLongPaths(ZipFile): - def _extract_member(self, member, targetpath, pwd): - return ZipFile._extract_member( - self, member, sanitize_long_path(targetpath), pwd - ) - - -class OpenPypeVersion(semver.VersionInfo): - """Class for storing information about OpenPype version. - - Attributes: - path (str): path to OpenPype - - """ - path = None - - _local_openpype_path = None - # this should match any string complying with https://semver.org/ - _VERSION_REGEX = re.compile(r"(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P[a-zA-Z\d\-.]*))?(?:\+(?P[a-zA-Z\d\-.]*))?") # noqa: E501 - _installed_version = None - - def __init__(self, *args, **kwargs): - """Create OpenPype version. - - .. deprecated:: 3.0.0-rc.2 - `client` and `variant` are removed. - - - Args: - major (int): version when you make incompatible API changes. 
- minor (int): version when you add functionality in a - backwards-compatible manner. - patch (int): version when you make backwards-compatible bug fixes. - prerelease (str): an optional prerelease string - build (str): an optional build string - version (str): if set, it will be parsed and will override - parameters like `major`, `minor` and so on. - path (Path): path to version location. - - """ - self.path = None - - if "version" in kwargs.keys(): - if not kwargs.get("version"): - raise ValueError("Invalid version specified") - v = OpenPypeVersion.parse(kwargs.get("version")) - kwargs["major"] = v.major - kwargs["minor"] = v.minor - kwargs["patch"] = v.patch - kwargs["prerelease"] = v.prerelease - kwargs["build"] = v.build - kwargs.pop("version") - - if kwargs.get("path"): - if isinstance(kwargs.get("path"), str): - self.path = Path(kwargs.get("path")) - elif isinstance(kwargs.get("path"), Path): - self.path = kwargs.get("path") - else: - raise TypeError("Path must be str or Path") - kwargs.pop("path") - - if "path" in kwargs.keys(): - kwargs.pop("path") - - super().__init__(*args, **kwargs) - - def __repr__(self): - return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>" - - def __lt__(self, other: OpenPypeVersion): - result = super().__lt__(other) - # prefer path over no path - if self == other and not self.path and other.path: - return True - - if self == other and self.path and other.path and \ - other.path.is_dir() and self.path.is_file(): - return True - - if self.finalize_version() == other.finalize_version() and \ - self.prerelease == other.prerelease: - return True - - return result - - def get_main_version(self) -> str: - """Return main version component. - - This returns x.x.x part of version from possibly more complex one - like x.x.x-foo-bar. - - .. deprecated:: 3.0.0-rc.2 - use `finalize_version()` instead. - Returns: - str: main version component - - """ - return str(self.finalize_version()) - - @staticmethod - def version_in_str(string: str) -> Union[None, OpenPypeVersion]: - """Find OpenPype version in given string. - - Args: - string (str): string to search. - - Returns: - OpenPypeVersion: of detected or None. - - """ - # strip .zip ext if present - string = re.sub(r"\.zip$", "", string, flags=re.IGNORECASE) - m = re.search(OpenPypeVersion._VERSION_REGEX, string) - if not m: - return None - version = OpenPypeVersion.parse(string[m.start():m.end()]) - return version - - def __hash__(self): - return hash(self.path) if self.path else hash(str(self)) - - @staticmethod - def is_version_in_dir( - dir_item: Path, version: OpenPypeVersion) -> Tuple[bool, str]: - """Test if path item is OpenPype version matching detected version. - - If item is directory that might (based on it's name) - contain OpenPype version, check if it really does contain - OpenPype and that their versions matches. - - Args: - dir_item (Path): Directory to test. - version (OpenPypeVersion): OpenPype version detected - from name. - - Returns: - Tuple: State and reason, True if it is valid OpenPype version, - False otherwise. - - """ - try: - # add one 'openpype' level as inside dir there should - # be many other repositories. 
- version_str = OpenPypeVersion.get_version_string_from_directory( - dir_item) # noqa: E501 - version_check = OpenPypeVersion(version=version_str) - except ValueError: - return False, f"cannot determine version from {dir_item}" - - version_main = version_check.get_main_version() - detected_main = version.get_main_version() - if version_main != detected_main: - return False, (f"dir version ({version}) and " - f"its content version ({version_check}) " - "doesn't match. Skipping.") - return True, "Versions match" - - @staticmethod - def is_version_in_zip( - zip_item: Path, version: OpenPypeVersion) -> Tuple[bool, str]: - """Test if zip path is OpenPype version matching detected version. - - Open zip file, look inside and parse version from OpenPype - inside it. If there is none, or it is different from - version specified in file name, skip it. - - Args: - zip_item (Path): Zip file to test. - version (OpenPypeVersion): Pype version detected - from name. - - Returns: - Tuple: State and reason, True if it is valid OpenPype version, - False otherwise. - - """ - # skip non-zip files - if zip_item.suffix.lower() != ".zip": - return False, "Not a zip" - - try: - with ZipFile(zip_item, "r") as zip_file: - with zip_file.open( - "openpype/version.py") as version_file: - zip_version = {} - exec(version_file.read(), zip_version) - try: - version_check = OpenPypeVersion( - version=zip_version["__version__"]) - except ValueError as e: - return False, str(e) - - version_main = version_check.get_main_version() # - # noqa: E501 - detected_main = version.get_main_version() - # noqa: E501 - - if version_main != detected_main: - return False, (f"zip version ({version}) " - f"and its content version " - f"({version_check}) " - "doesn't match. Skipping.") - except BadZipFile: - return False, f"{zip_item} is not a zip file" - except KeyError: - return False, "Zip does not contain OpenPype" - return True, "Versions match" - - @staticmethod - def get_version_string_from_directory(repo_dir: Path) -> Union[str, None]: - """Get version of OpenPype in given directory. - - Note: in frozen OpenPype installed in user data dir, this must point - one level deeper as it is: - `openpype-version-v3.0.0/openpype/version.py` - - Args: - repo_dir (Path): Path to OpenPype repo. - - Returns: - str: version string. - None: if OpenPype is not found. - - """ - # try to find version - version_file = Path(repo_dir) / "openpype" / "version.py" - if not version_file.exists(): - return None - - version = {} - with version_file.open("r") as fp: - exec(fp.read(), version) - - return version['__version__'] - - @classmethod - def get_openpype_path(cls): - """Path to openpype zip directory. - - Path can be set through environment variable 'OPENPYPE_PATH' which - is set during start of OpenPype if is not available. - """ - return os.getenv("OPENPYPE_PATH") - - @classmethod - def get_local_openpype_path(cls): - """Path to unzipped versions. - - By default it should be user appdata, but could be overridden by - settings. 
- """ - if cls._local_openpype_path: - return cls._local_openpype_path - - settings = get_openpype_global_settings(os.environ["OPENPYPE_MONGO"]) - data_dir = get_local_openpype_path_from_settings(settings) - if not data_dir: - data_dir = Path(user_data_dir("openpype", "pypeclub")) - cls._local_openpype_path = data_dir - return data_dir - - @classmethod - def openpype_path_is_set(cls): - """Path to OpenPype zip directory is set.""" - if cls.get_openpype_path(): - return True - return False - - @classmethod - def openpype_path_is_accessible(cls): - """Path to OpenPype zip directory is accessible. - - Exists for this machine. - """ - # First check if is set - if not cls.openpype_path_is_set(): - return False - - # Validate existence - if Path(cls.get_openpype_path()).exists(): - return True - return False - - @classmethod - def get_local_versions(cls) -> List: - """Get all versions available on this machine. - - Returns: - list: of compatible versions available on the machine. - - """ - dir_to_search = cls.get_local_openpype_path() - versions = cls.get_versions_from_directory(dir_to_search) - - return list(sorted(set(versions))) - - @classmethod - def get_remote_versions(cls) -> List: - """Get all versions available in OpenPype Path. - - Returns: - list of OpenPypeVersions: Versions found in OpenPype path. - - """ - # Return all local versions if arguments are set to None - - dir_to_search = None - if cls.openpype_path_is_accessible(): - dir_to_search = Path(cls.get_openpype_path()) - else: - registry = OpenPypeSettingsRegistry() - try: - registry_dir = Path(str(registry.get_item("openPypePath"))) - if registry_dir.exists(): - dir_to_search = registry_dir - - except ValueError: - # nothing found in registry, we'll use data dir - pass - - if not dir_to_search: - return [] - - versions = cls.get_versions_from_directory(dir_to_search) - - return list(sorted(set(versions))) - - @staticmethod - def get_versions_from_directory( - openpype_dir: Path) -> List: - """Get all detected OpenPype versions in directory. - - Args: - openpype_dir (Path): Directory to scan. - - Returns: - list of OpenPypeVersion - - Throws: - ValueError: if invalid path is specified. - - """ - openpype_versions = [] - if not openpype_dir.exists() and not openpype_dir.is_dir(): - return openpype_versions - - # iterate over directory in first level and find all that might - # contain OpenPype. - for item in openpype_dir.iterdir(): - # if the item is directory with major.minor version, dive deeper - - if item.is_dir() and re.match(r"^\d+\.\d+$", item.name): - _versions = OpenPypeVersion.get_versions_from_directory( - item) - if _versions: - openpype_versions += _versions - - # if file exists, strip extension, in case of dir don't. 
- name = item.name if item.is_dir() else item.stem - result = OpenPypeVersion.version_in_str(name) - - if result: - detected_version: OpenPypeVersion - detected_version = result - - if item.is_dir() and not OpenPypeVersion.is_version_in_dir( - item, detected_version - )[0]: - continue - - if item.is_file() and not OpenPypeVersion.is_version_in_zip( - item, detected_version - )[0]: - continue - - detected_version.path = item - openpype_versions.append(detected_version) - - return sorted(openpype_versions) - - @staticmethod - def get_installed_version_str() -> str: - """Get version of local OpenPype.""" - - version = {} - path = Path(os.environ["OPENPYPE_ROOT"]) / "openpype" / "version.py" - with open(path, "r") as fp: - exec(fp.read(), version) - return version["__version__"] - - @classmethod - def get_installed_version(cls): - """Get version of OpenPype inside build.""" - if cls._installed_version is None: - installed_version_str = cls.get_installed_version_str() - if installed_version_str: - cls._installed_version = OpenPypeVersion( - version=installed_version_str, - path=Path(os.environ["OPENPYPE_ROOT"]) - ) - return cls._installed_version - - @staticmethod - def get_latest_version( - local: bool = None, - remote: bool = None - ) -> Union[OpenPypeVersion, None]: - """Get the latest available version. - - The version does not contain information about path and source. - - This is utility version to get the latest version from all found. - - Arguments 'local' and 'remote' define if local and remote repository - versions are used. All versions are used if both are not set (or set - to 'None'). If only one of them is set to 'True' the other is disabled. - It is possible to set both to 'True' (same as both set to None) and to - 'False' in that case only build version can be used. - - Args: - local (bool, optional): List local versions if True. - remote (bool, optional): List remote versions if True. - - Returns: - Latest OpenPypeVersion or None - - """ - if local is None and remote is None: - local = True - remote = True - - elif local is None and not remote: - local = True - - elif remote is None and not local: - remote = True - - installed_version = OpenPypeVersion.get_installed_version() - local_versions = OpenPypeVersion.get_local_versions() if local else [] - remote_versions = OpenPypeVersion.get_remote_versions() if remote else [] # noqa: E501 - all_versions = local_versions + remote_versions + [installed_version] - - all_versions.sort() - return all_versions[-1] - - @classmethod - def get_expected_studio_version(cls, staging=False, global_settings=None): - """Expected OpenPype version that should be used at the moment. - - If version is not defined in settings the latest found version is - used. - - Using precached global settings is needed for usage inside OpenPype. - - Args: - staging (bool): Staging version or production version. - global_settings (dict): Optional precached global settings. - - Returns: - OpenPypeVersion: Version that should be used. - """ - result = get_expected_studio_version_str(staging, global_settings) - if not result: - return None - return OpenPypeVersion(version=result) - - def is_compatible(self, version: OpenPypeVersion): - """Test build compatibility. - - This will simply compare major and minor versions (ignoring patch - and the rest). - - Args: - version (OpenPypeVersion): Version to check compatibility with. 
- - Returns: - bool: if the version is compatible - - """ - return self.major == version.major and self.minor == version.minor - - -class BootstrapRepos: - """Class for bootstrapping local OpenPype installation. - - Attributes: - data_dir (Path): local OpenPype installation directory. - registry (OpenPypeSettingsRegistry): OpenPype registry object. - zip_filter (list): List of files to exclude from zip - openpype_filter (list): list of top level directories to - include in zip in OpenPype repository. - - """ - - def __init__(self, progress_callback: Callable = None, message=None): - """Constructor. - - Args: - progress_callback (callable): Optional callback method to report - progress. - message (QtCore.Signal, optional): Signal to report messages back. - - """ - # vendor and app used to construct user data dir - self._message = message - self._log = log.getLogger(str(__class__)) - self.set_data_dir(None) - self.secure_registry = OpenPypeSecureRegistry("mongodb") - self.registry = OpenPypeSettingsRegistry() - self.zip_filter = [".pyc", "__pycache__"] - self.openpype_filter = [ - "openpype", "LICENSE" - ] - - # dummy progress reporter - def empty_progress(x: int): - """Progress callback dummy.""" - return x - - if not progress_callback: - progress_callback = empty_progress - self._progress_callback = progress_callback - - def set_data_dir(self, data_dir): - if not data_dir: - self.data_dir = Path(user_data_dir("openpype", "pypeclub")) - else: - self._print(f"overriding local folder: {data_dir}") - self.data_dir = data_dir - - @staticmethod - def get_version_path_from_list( - version: str, version_list: list) -> Union[Path, None]: - """Get path for specific version in list of OpenPype versions. - - Args: - version (str): Version string to look for (1.2.4-nightly.1+test) - version_list (list of OpenPypeVersion): list of version to search. - - Returns: - Path: Path to given version. - - """ - for v in version_list: - if str(v) == version: - return v.path - return None - - @staticmethod - def get_version(repo_dir: Path) -> Union[str, None]: - """Get version of OpenPype in given directory. - - Note: in frozen OpenPype installed in user data dir, this must point - one level deeper as it is: - `openpype-version-v3.0.0/openpype/version.py` - - Args: - repo_dir (Path): Path to OpenPype repo. - - Returns: - str: version string. - None: if OpenPype is not found. - - """ - # try to find version - version_file = Path(repo_dir) / "openpype" / "version.py" - if not version_file.exists(): - return None - - version = {} - with version_file.open("r") as fp: - exec(fp.read(), version) - - return version['__version__'] - - def create_version_from_live_code( - self, repo_dir: Path = None) -> Union[OpenPypeVersion, None]: - """Copy zip created from OpenPype repositories to user data dir. - - This detects OpenPype version either in local "live" OpenPype - repository or in user provided path. Then it will zip it in temporary - directory, and finally it will move it to destination which is user - data directory. Existing files will be replaced. - - Args: - repo_dir (Path, optional): Path to OpenPype repository. - - Returns: - Path: path of installed repository file. - - """ - # if repo dir is not set, we detect local "live" OpenPype repository - # version and use it as a source. Otherwise, repo_dir is user - # entered location. 
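For context on the `is_compatible` check documented just above: compatibility is decided purely on the major.minor pair, with patch, prerelease and build metadata ignored. A tiny sketch of that rule as a free function (this standalone `is_compatible` is a stand-in for the method, not the method itself):

    import semver

    def is_compatible(a, b):
        # Builds are compatible when their major.minor pairs match.
        va = semver.VersionInfo.parse(a)
        vb = semver.VersionInfo.parse(b)
        return (va.major, va.minor) == (vb.major, vb.minor)

    assert is_compatible("3.15.2", "3.15.11-nightly.1")
    assert not is_compatible("3.15.2", "3.16.0")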
- if repo_dir: - version = self.get_version(repo_dir) - else: - installed_version = OpenPypeVersion.get_installed_version() - version = str(installed_version) - repo_dir = installed_version.path - - if not version: - self._print("OpenPype not found.", LOG_ERROR) - return - - # create destination directory - destination = self.data_dir / f"{installed_version.major}.{installed_version.minor}" # noqa - if not destination.exists(): - destination.mkdir(parents=True) - - # create zip inside temporary directory. - with tempfile.TemporaryDirectory() as temp_dir: - temp_zip = \ - Path(temp_dir) / f"openpype-v{version}.zip" - self._print(f"creating zip: {temp_zip}") - - self._create_openpype_zip(temp_zip, repo_dir) - if not os.path.exists(temp_zip): - self._print("make archive failed.", LOG_ERROR) - return None - - destination = self._move_zip_to_data_dir(temp_zip) - - return OpenPypeVersion(version=version, path=Path(destination)) - - def _move_zip_to_data_dir(self, zip_file) -> Union[None, Path]: - """Move zip with OpenPype version to user data directory. - - Args: - zip_file (Path): Path to zip file. - - Returns: - None if move fails. - Path to moved zip on success. - - """ - version = OpenPypeVersion.version_in_str(zip_file.name) - destination_dir = self.data_dir / f"{version.major}.{version.minor}" - if not destination_dir.exists(): - destination_dir.mkdir(parents=True) - destination = destination_dir / zip_file.name - - if destination.exists(): - self._print( - f"Destination file {destination} exists, removing.", - LOG_WARNING) - try: - destination.unlink() - except Exception as e: - self._print(str(e), LOG_ERROR, exc_info=True) - return None - if not destination_dir.exists(): - destination_dir.mkdir(parents=True) - elif not destination_dir.is_dir(): - self._print( - "Destination exists but is not directory.", LOG_ERROR) - return None - - try: - shutil.move(zip_file.as_posix(), destination_dir.as_posix()) - except shutil.Error as e: - self._print(str(e), LOG_ERROR, exc_info=True) - return None - - return destination - - def _filter_dir(self, path: Path, path_filter: List) -> List[Path]: - """Recursively crawl over path and filter.""" - result = [] - for item in path.iterdir(): - if item.name in path_filter: - continue - if item.name.startswith('.'): - continue - if item.is_dir(): - result.extend(self._filter_dir(item, path_filter)) - else: - result.append(item) - return result - - def create_version_from_frozen_code(self) -> Union[None, OpenPypeVersion]: - """Create OpenPype version from *frozen* code distributed by installer. - - This should be real edge case for those wanting to try out OpenPype - without setting up whole infrastructure but is strongly discouraged - in studio setup as this use local version independent of others - that can be out of date. - - Returns: - :class:`OpenPypeVersion` zip file to be installed. - - """ - frozen_root = Path(sys.executable).parent - - openpype_list = [] - for f in self.openpype_filter: - if (frozen_root / f).is_dir(): - openpype_list += self._filter_dir( - frozen_root / f, self.zip_filter) - else: - openpype_list.append(frozen_root / f) - - version = self.get_version(frozen_root) - - # create zip inside temporary directory. 
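Both zipping paths above end up in `_move_zip_to_data_dir`, which files each archive under a major.minor subfolder of the user data directory. A short sketch of that on-disk layout as read from the code (the example version and resulting path are illustrative):

    from pathlib import Path
    from appdirs import user_data_dir

    version = "3.15.2"
    major, minor = version.split(".")[:2]

    # Versions are grouped per major.minor under the user data dir,
    # e.g. .../openpype/3.15/openpype-v3.15.2.zip
    data_dir = Path(user_data_dir("openpype", "pypeclub"))
    zip_path = data_dir / f"{major}.{minor}" / f"openpype-v{version}.zip"
    print(zip_path)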
- with tempfile.TemporaryDirectory() as temp_dir: - temp_zip = \ - Path(temp_dir) / f"openpype-v{version}.zip" - self._print(f"creating zip: {temp_zip}") - - with ZipFile(temp_zip, "w") as zip_file: - progress = 0 - openpype_inc = 98.0 / float(len(openpype_list)) - file: Path - for file in openpype_list: - progress += openpype_inc - self._progress_callback(int(progress)) - - arc_name = file.relative_to(frozen_root.parent) - # we need to replace first part of path which starts with - # something like `exe.win/linux....` with `openpype` as - # this is expected by OpenPype in zip archive. - arc_name = Path().joinpath(*arc_name.parts[1:]) - zip_file.write(file, arc_name) - - destination = self._move_zip_to_data_dir(temp_zip) - - return OpenPypeVersion(version=version, path=destination) - - def _create_openpype_zip(self, zip_path: Path, openpype_path: Path) -> None: - """Pack repositories and OpenPype into zip. - - We are using :mod:`ZipFile` instead :meth:`shutil.make_archive` - because we need to decide what file and directories to include in zip - and what not. They are determined by :attr:`zip_filter` on file level - and :attr:`openpype_filter` on top level directory in OpenPype - repository. - - Args: - zip_path (Path): Path to zip file. - openpype_path (Path): Path to OpenPype sources. - - """ - # get filtered list of file in Pype repository - # openpype_list = self._filter_dir(openpype_path, self.zip_filter) - openpype_list = [] - for f in self.openpype_filter: - if (openpype_path / f).is_dir(): - openpype_list += self._filter_dir( - openpype_path / f, self.zip_filter) - else: - openpype_list.append(openpype_path / f) - - openpype_files = len(openpype_list) - - openpype_inc = 98.0 / float(openpype_files) - - with ZipFile(zip_path, "w") as zip_file: - progress = 0 - openpype_root = openpype_path.resolve() - # generate list of filtered paths - dir_filter = [openpype_root / f for f in self.openpype_filter] - checksums = [] - - file: Path - for file in openpype_list: - progress += openpype_inc - self._progress_callback(int(progress)) - - # if file resides in filtered path, skip it - is_inside = None - df: Path - for df in dir_filter: - try: - is_inside = file.resolve().relative_to(df) - except ValueError: - pass - - if not is_inside: - continue - - processed_path = file - self._print(f"- processing {processed_path}") - - checksums.append( - ( - sha256sum(sanitize_long_path(file.as_posix())), - file.resolve().relative_to(openpype_root) - ) - ) - zip_file.write( - file, file.resolve().relative_to(openpype_root)) - - checksums_str = "" - for c in checksums: - file_str = c[1] - if platform.system().lower() == "windows": - file_str = c[1].as_posix().replace("\\", "/") - checksums_str += "{}:{}\n".format(c[0], file_str) - zip_file.writestr("checksums", checksums_str) - # test if zip is ok - zip_file.testzip() - self._progress_callback(100) - - def validate_openpype_version(self, path: Path) -> tuple: - """Validate version directory or zip file. - - This will load `checksums` file if present, calculate checksums - of existing files in given path and compare. It will also compare - lists of files together for missing files. - - Args: - path (Path): Path to OpenPype version to validate. - - Returns: - tuple(bool, str): with version validity as first item - and string with reason as second. 
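The validation described above compares file lists and hashes against the `checksums` manifest that `_create_openpype_zip` writes into each archive: one `sha256hex:relative/posix/path` entry per line. A minimal sketch of producing such a manifest for a directory (the `write_checksums` helper is hypothetical):

    import hashlib
    from pathlib import Path

    def sha256sum(path):
        # Stream the file in chunks so large files never load fully into memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(128 * 1024), b""):
                h.update(chunk)
        return h.hexdigest()

    def write_checksums(root, manifest):
        root = Path(root)
        lines = []
        for file in sorted(p for p in root.rglob("*") if p.is_file()):
            rel = file.relative_to(root).as_posix()
            lines.append(f"{sha256sum(file)}:{rel}")
        Path(manifest).write_text("\n".join(lines) + "\n")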
- - """ - if os.getenv("OPENPYPE_DONT_VALIDATE_VERSION"): - return True, "Disabled validation" - if not path.exists(): - return False, "Path doesn't exist" - - if path.is_file(): - return self._validate_zip(path) - return self._validate_dir(path) - - @staticmethod - def _validate_zip(path: Path) -> tuple: - """Validate content of zip file.""" - with ZipFile(path, "r") as zip_file: - # read checksums - try: - checksums_data = str(zip_file.read("checksums")) - except IOError: - # FIXME: This should be set to False sometimes in the future - return True, "Cannot read checksums for archive." - - # split it to the list of tuples - checksums = [ - tuple(line.split(":")) - for line in checksums_data.split("\n") if line - ] - - # get list of files in zip minus `checksums` file itself - # and turn in to set to compare against list of files - # from checksum file. If difference exists, something is - # wrong - files_in_zip = set(zip_file.namelist()) - files_in_zip.remove("checksums") - files_in_checksum = {file[1] for file in checksums} - diff = files_in_zip.difference(files_in_checksum) - if diff: - return False, f"Missing files {diff}" - - # calculate and compare checksums in the zip file - for file_checksum, file_name in checksums: - if platform.system().lower() == "windows": - file_name = file_name.replace("/", "\\") - h = hashlib.sha256() - try: - h.update(zip_file.read(file_name)) - except FileNotFoundError: - return False, f"Missing file [ {file_name} ]" - if h.hexdigest() != file_checksum: - return False, f"Invalid checksum on {file_name}" - - return True, "All ok" - - @staticmethod - def _validate_dir(path: Path) -> tuple: - """Validate checksums in a given path. - - Args: - path (Path): path to folder to validate. - - Returns: - tuple(bool, str): returns status and reason as a bool - and str in a tuple. - - """ - checksums_file = Path(path / "checksums") - if not checksums_file.exists(): - # FIXME: This should be set to False sometimes in the future - return True, "Cannot read checksums for archive." - checksums_data = checksums_file.read_text() - checksums = [ - tuple(line.split(":")) - for line in checksums_data.split("\n") if line - ] - - # compare file list against list of files from checksum file. - # If difference exists, something is wrong and we invalidate directly - files_in_dir = set( - file.relative_to(path).as_posix() - for file in path.iterdir() if file.is_file() - ) - files_in_dir.remove("checksums") - files_in_checksum = {file[1] for file in checksums} - - diff = files_in_dir.difference(files_in_checksum) - if diff: - return False, f"Missing files {diff}" - - # calculate and compare checksums - for file_checksum, file_name in checksums: - if platform.system().lower() == "windows": - file_name = file_name.replace("/", "\\") - try: - current = sha256sum( - sanitize_long_path((path / file_name).as_posix()) - ) - except FileNotFoundError: - return False, f"Missing file [ {file_name} ]" - - if file_checksum != current: - return False, f"Invalid checksum on {file_name}" - - return True, "All ok" - - @staticmethod - def add_paths_from_archive(archive: Path) -> None: - """Add first-level directory and 'repos' as paths to :mod:`sys.path`. - - This will enable Python to import OpenPype and modules in `repos` - submodule directory in zip file. - - Adding to both `sys.path` and `PYTHONPATH`, skipping duplicates. - - Args: - archive (Path): path to archive. - - .. 
deprecated:: 3.0 - we don't use zip archives directly - - """ - if not archive.is_file() and not archive.exists(): - raise ValueError("Archive is not file.") - - archive_path = str(archive) - sys.path.insert(0, archive_path) - pythonpath = os.getenv("PYTHONPATH", "") - python_paths = pythonpath.split(os.pathsep) - python_paths.insert(0, archive_path) - - os.environ["PYTHONPATH"] = os.pathsep.join(python_paths) - - @staticmethod - def add_paths_from_directory(directory: Path) -> None: - """Add repos first level directories as paths to :mod:`sys.path`. - - This works the same as :meth:`add_paths_from_archive` but in - specified directory. - - Adding to both `sys.path` and `PYTHONPATH`, skipping duplicates. - - Args: - directory (Path): path to directory. - - """ - - sys.path.insert(0, directory.as_posix()) - - @staticmethod - def find_openpype_version( - version: Union[str, OpenPypeVersion] - ) -> Union[OpenPypeVersion, None]: - """Find location of specified OpenPype version. - - Args: - version (Union[str, OpenPypeVersion): Version to find. - - Returns: - requested OpenPypeVersion. - - """ - installed_version = OpenPypeVersion.get_installed_version() - if isinstance(version, str): - version = OpenPypeVersion(version=version) - - if installed_version == version: - return installed_version - - local_versions = OpenPypeVersion.get_local_versions() - zip_version = None - for local_version in local_versions: - if local_version == version: - if local_version.path.suffix.lower() == ".zip": - zip_version = local_version - else: - return local_version - - if zip_version is not None: - return zip_version - - remote_versions = OpenPypeVersion.get_remote_versions() - return next( - ( - remote_version for remote_version in remote_versions - if remote_version == version - ), None) - - @staticmethod - def find_latest_openpype_version() -> Union[OpenPypeVersion, None]: - """Find the latest available OpenPype version in all location. - - Returns: - Latest OpenPype version on None if nothing was found. - - """ - installed_version = OpenPypeVersion.get_installed_version() - local_versions = OpenPypeVersion.get_local_versions() - remote_versions = OpenPypeVersion.get_remote_versions() - all_versions = local_versions + remote_versions + [installed_version] - - if not all_versions: - return None - - all_versions.sort() - latest_version = all_versions[-1] - if latest_version == installed_version: - return latest_version - - if not latest_version.path.is_dir(): - for version in local_versions: - if version == latest_version and version.path.is_dir(): - latest_version = version - break - return latest_version - - def find_openpype( - self, - openpype_path: Union[Path, str] = None, - include_zips: bool = False - ) -> Union[List[OpenPypeVersion], None]: - """Get ordered dict of detected OpenPype version. - - Resolution order for OpenPype is following: - - 1) First we test for ``OPENPYPE_PATH`` environment variable - 2) We try to find ``openPypePath`` in registry setting - 3) We use user data directory - - Args: - openpype_path (Path or str, optional): Try to find OpenPype on - the given path or url. - include_zips (bool, optional): If set True it will try to find - OpenPype in zip files in given directory. - - Returns: - dict of Path: Dictionary of detected OpenPype version. - Key is version, value is path to zip file. - - None: if OpenPype is not found. 
- - Todo: - implement git/url support as OpenPype location, so it would be - possible to enter git url, OpenPype would check it out and if it is - ok install it as normal version. - - """ - if openpype_path and not isinstance(openpype_path, Path): - raise NotImplementedError( - ("Finding OpenPype in non-filesystem locations is" - " not implemented yet.")) - - # if checks bellow for OPENPYPE_PATH and registry fails, use data_dir - # DEPRECATED: lookup in root of this folder is deprecated in favour - # of major.minor sub-folders. - dirs_to_search = [self.data_dir] - - if openpype_path: - dirs_to_search = [openpype_path] - elif os.getenv("OPENPYPE_PATH") \ - and Path(os.getenv("OPENPYPE_PATH")).exists(): - # first try OPENPYPE_PATH and if that is not available, - # try registry. - dirs_to_search = [Path(os.getenv("OPENPYPE_PATH"))] - else: - try: - registry_dir = Path( - str(self.registry.get_item("openPypePath"))) - if registry_dir.exists(): - dirs_to_search = [registry_dir] - - except ValueError: - # nothing found in registry, we'll use data dir - pass - - openpype_versions = [] - for dir_to_search in dirs_to_search: - try: - openpype_versions += self.get_openpype_versions( - dir_to_search) - except ValueError: - # location is invalid, skip it - pass - - if not include_zips: - openpype_versions = [ - v for v in openpype_versions if v.path.suffix != ".zip" - ] - - # remove duplicates - openpype_versions = sorted(list(set(openpype_versions))) - - return openpype_versions - - def process_entered_location(self, location: str) -> Union[Path, None]: - """Process user entered location string. - - It decides if location string is mongodb url or path. - If it is mongodb url, it will connect and load ``OPENPYPE_PATH`` from - there and use it as path to OpenPype. In it is _not_ mongodb url, it - is assumed we have a path, this is tested and zip file is - produced and installed using :meth:`create_version_from_live_code`. - - Args: - location (str): User entered location. - - Returns: - Path: to OpenPype zip produced from this location. - None: Zipping failed. - - """ - openpype_path = None - # try to get OpenPype path from mongo. - if location.startswith("mongodb"): - global_settings = get_openpype_global_settings(location) - openpype_path = get_openpype_path_from_settings(global_settings) - if not openpype_path: - self._print("cannot find OPENPYPE_PATH in settings.") - return None - - # if not successful, consider location to be fs path. - if not openpype_path: - openpype_path = Path(location) - - # test if this path does exist. - if not openpype_path.exists(): - self._print(f"{openpype_path} doesn't exists.") - return None - - # test if entered path isn't user data dir - if self.data_dir == openpype_path: - self._print("cannot point to user data dir", LOG_ERROR) - return None - - # find openpype zip files in location. There can be - # either "live" OpenPype repository, or multiple zip files or even - # multiple OpenPype version directories. This process looks into zip - # files and directories and tries to parse `version.py` file. - versions = self.find_openpype(openpype_path, include_zips=True) - if versions: - self._print(f"found OpenPype in [ {openpype_path} ]") - self._print(f"latest version found is [ {versions[-1]} ]") - - return self.install_version(versions[-1]) - - # if we got here, it means that location is "live" - # OpenPype repository. We'll create zip from it and move it to user - # data dir. 
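Restating the resolution order from `find_openpype` above as a compact sketch: the `OPENPYPE_PATH` environment variable wins, then the `openPypePath` registry item, and the user data directory is the fallback (the `dirs_to_search` helper and the `registry_get` callable are illustrative stand-ins for the registry object used in the class):

    import os
    from pathlib import Path

    def dirs_to_search(registry_get, data_dir):
        # 1) explicit environment variable takes priority
        env = os.getenv("OPENPYPE_PATH")
        if env and Path(env).exists():
            return [Path(env)]
        # 2) registry setting, if present and pointing to an existing dir
        try:
            registry_dir = Path(str(registry_get("openPypePath")))
            if registry_dir.exists():
                return [registry_dir]
        except ValueError:
            pass
        # 3) fall back to the user data directory
        return [data_dir]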
- live_openpype = self.create_version_from_live_code(openpype_path) - if not live_openpype.path.exists(): - self._print(f"installing zip {live_openpype} failed.", LOG_ERROR) - return None - # install it - return self.install_version(live_openpype) - - def _print(self, - message: str, - level: int = LOG_INFO, - exc_info: bool = False): - """Helper function passing logs to UI and to logger. - - Supporting 3 levels of logs defined with `LOG_INFO`, `LOG_WARNING` and - `LOG_ERROR` constants. - - Args: - message (str): Message to log. - level (int, optional): Log level to use. - exc_info (bool, optional): Exception info object to pass to logger. - - """ - if self._message: - self._message.emit(message, level == LOG_ERROR) - - if level == LOG_WARNING: - self._log.warning(message, exc_info=exc_info) - return - if level == LOG_ERROR: - self._log.error(message, exc_info=exc_info) - return - self._log.info(message, exc_info=exc_info) - - def extract_openpype(self, version: OpenPypeVersion) -> Union[Path, None]: - """Extract zipped OpenPype version to user data directory. - - Args: - version (OpenPypeVersion): Version of OpenPype. - - Returns: - Path: path to extracted version. - None: if something failed. - - """ - if not version.path: - raise ValueError( - f"version {version} is not associated with any file") - - destination = self.data_dir / f"{version.major}.{version.minor}" / version.path.stem # noqa - if destination.exists() and destination.is_dir(): - try: - shutil.rmtree(destination) - except OSError as e: - msg = f"!!! Cannot remove already existing {destination}" - self._print(msg, LOG_ERROR, exc_info=True) - raise e - - destination.mkdir(parents=True) - - # extract zip there - self._print("Extracting zip to destination ...") - with ZipFileLongPaths(version.path, "r") as zip_ref: - zip_ref.extractall(destination) - - self._print(f"Installed as {version.path.stem}") - - return destination - - def is_inside_user_data(self, path: Path) -> bool: - """Test if version is located in user data dir. - - Args: - path (Path) Path to test. - - Returns: - True if path is inside user data dir. - - """ - is_inside = False - try: - is_inside = path.resolve().relative_to( - self.data_dir) - except ValueError: - # if relative path cannot be calculated, OpenPype version is not - # inside user data dir - pass - return is_inside - - def install_version(self, - openpype_version: OpenPypeVersion, - force: bool = False) -> Path: - """Install OpenPype version to user data directory. - - Args: - openpype_version (OpenPypeVersion): OpenPype version to install. - force (bool, optional): Force overwrite existing version. - - Returns: - Path: Path to installed OpenPype. - - Raises: - OpenPypeVersionExists: If not forced and this version already exist - in user data directory. - OpenPypeVersionInvalid: If version to install is invalid. - OpenPypeVersionIOError: If copying or zipping fail. - - """ - if self.is_inside_user_data(openpype_version.path) and not openpype_version.path.is_file(): # noqa - raise OpenPypeVersionExists( - "OpenPype already inside user data dir") - - # determine destination directory name - # for zip file strip suffix, in case of dir use whole dir name - if openpype_version.path.is_dir(): - dir_name = openpype_version.path.name - else: - dir_name = openpype_version.path.stem - - destination = self.data_dir / f"{openpype_version.major}.{openpype_version.minor}" / dir_name # noqa - - # test if destination directory already exist, if so lets delete it. 
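# --- Editorial aside (not part of the original diff) -----------------------
# The containment test in `is_inside_user_data` above relies on
# Path.relative_to() raising ValueError when one path is not located under
# the other. The same idiom in isolation, as a minimal sketch:
from pathlib import Path


def is_inside(path: Path, root: Path) -> bool:
    try:
        path.resolve().relative_to(root)
        return True
    except ValueError:
        # No relative path can be computed, so `path` is not under `root`.
        return False
# ---------------------------------------------------------------------------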
- if destination.exists() and force: - self._print("removing existing directory") - try: - shutil.rmtree(destination) - except OSError as e: - self._print( - f"cannot remove already existing {destination}", - LOG_ERROR, exc_info=True) - raise OpenPypeVersionIOError( - f"cannot remove existing {destination}") from e - elif destination.exists() and not force: - self._print("destination directory already exists") - raise OpenPypeVersionExists(f"{destination} already exist.") - else: - # create destination parent directories even if they don't exist. - destination.mkdir(parents=True) - - remove_source_file = False - # version is directory - if openpype_version.path.is_dir(): - # create zip inside temporary directory. - self._print("Creating zip from directory ...") - self._progress_callback(0) - with tempfile.TemporaryDirectory() as temp_dir: - temp_zip = \ - Path(temp_dir) / f"openpype-v{openpype_version}.zip" - self._print(f"creating zip: {temp_zip}") - - self._create_openpype_zip(temp_zip, openpype_version.path) - if not os.path.exists(temp_zip): - self._print("make archive failed.", LOG_ERROR) - raise OpenPypeVersionIOError("Zip creation failed.") - - # set zip as version source - openpype_version.path = temp_zip - - if self.is_inside_user_data(openpype_version.path): - raise OpenPypeVersionInvalid( - "Version is in user data dir.") - openpype_version.path = self._copy_zip( - openpype_version.path, destination) - - elif openpype_version.path.is_file(): - # check if file is zip (by extension) - if openpype_version.path.suffix.lower() != ".zip": - raise OpenPypeVersionInvalid("Invalid file format") - - if not self.is_inside_user_data(openpype_version.path): - self._progress_callback(35) - openpype_version.path = self._copy_zip( - openpype_version.path, destination) - # Mark zip to be deleted when done - remove_source_file = True - - # extract zip there - self._print("extracting zip to destination ...") - with ZipFileLongPaths(openpype_version.path, "r") as zip_ref: - self._progress_callback(75) - zip_ref.extractall(destination) - self._progress_callback(100) - - # Remove zip file copied to local app data - if remove_source_file: - os.remove(openpype_version.path) - - return destination - - def _copy_zip(self, source: Path, destination: Path) -> Path: - try: - # copy file to destination - self._print("Copying zip to destination ...") - _destination_zip = destination.parent / source.name # noqa: E501 - copyfile( - source.as_posix(), - _destination_zip.as_posix()) - except OSError as e: - self._print( - "cannot copy version to user data directory", LOG_ERROR, - exc_info=True) - raise OpenPypeVersionIOError(( - f"can't copy version {source.as_posix()} " - f"to destination {destination.parent.as_posix()}")) from e - return _destination_zip - - def _is_openpype_in_dir(self, - dir_item: Path, - detected_version: OpenPypeVersion) -> bool: - """Test if path item is OpenPype version matching detected version. - - If item is directory that might (based on it's name) - contain OpenPype version, check if it really does contain - OpenPype and that their versions matches. - - Args: - dir_item (Path): Directory to test. - detected_version (OpenPypeVersion): OpenPype version detected - from name. - - Returns: - True if it is valid OpenPype version, False otherwise. - - """ - try: - # add one 'openpype' level as inside dir there should - # be many other repositories. 
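# --- Editorial aside (not part of the original diff) -----------------------
# `install_version` above always lands a version as an extracted folder
# under "<data_dir>/<major>.<minor>/<name>". For directory sources it zips
# first, then extracts. A simplified sketch of that round trip using
# stdlib helpers (the igniter uses its own `_create_openpype_zip` rather
# than `shutil.make_archive`; paths here are hypothetical):
import shutil
import tempfile
from pathlib import Path
from zipfile import ZipFile


def zip_and_extract(source_dir: Path, destination: Path) -> None:
    destination.mkdir(parents=True, exist_ok=True)
    with tempfile.TemporaryDirectory() as temp_dir:
        # make_archive appends ".zip" to the base name it is given.
        zip_base = Path(temp_dir) / source_dir.name
        zip_path = shutil.make_archive(str(zip_base), "zip", str(source_dir))
        with ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(destination)
# ---------------------------------------------------------------------------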
- version_str = BootstrapRepos.get_version(dir_item) - version_check = OpenPypeVersion(version=version_str) - except ValueError: - self._print( - f"cannot determine version from {dir_item}", True) - return False - - version_main = version_check.get_main_version() - detected_main = detected_version.get_main_version() - if version_main != detected_main: - self._print( - (f"dir version ({detected_version}) and " - f"its content version ({version_check}) " - "doesn't match. Skipping.")) - return False - return True - - def _is_openpype_in_zip(self, - zip_item: Path, - detected_version: OpenPypeVersion) -> bool: - """Test if zip path is OpenPype version matching detected version. - - Open zip file, look inside and parse version from OpenPype - inside it. If there is none, or it is different from - version specified in file name, skip it. - - Args: - zip_item (Path): Zip file to test. - detected_version (OpenPypeVersion): Pype version detected from - name. - - Returns: - True if it is valid OpenPype version, False otherwise. - - """ - # skip non-zip files - if zip_item.suffix.lower() != ".zip": - return False - - try: - with ZipFile(zip_item, "r") as zip_file: - with zip_file.open( - "openpype/version.py") as version_file: - zip_version = {} - exec(version_file.read(), zip_version) - try: - version_check = OpenPypeVersion( - version=zip_version["__version__"]) - except ValueError as e: - self._print(str(e), True) - return False - - version_main = version_check.get_main_version() # noqa: E501 - detected_main = detected_version.get_main_version() # noqa: E501 - - if version_main != detected_main: - self._print( - (f"zip version ({detected_version}) " - f"and its content version " - f"({version_check}) " - "doesn't match. Skipping."), True) - return False - except BadZipFile: - self._print(f"{zip_item} is not a zip file", True) - return False - except KeyError: - self._print("Zip does not contain OpenPype", True) - return False - return True - - def get_openpype_versions(self, openpype_dir: Path) -> list: - """Get all detected OpenPype versions in directory. - - Args: - openpype_dir (Path): Directory to scan. - - Returns: - list of OpenPypeVersion - - Throws: - ValueError: if invalid path is specified. - - """ - if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError(f"specified directory {openpype_dir} is invalid") - - openpype_versions = [] - # iterate over directory in first level and find all that might - # contain OpenPype. - for item in openpype_dir.iterdir(): - # if the item is directory with major.minor version, dive deeper - if item.is_dir() and re.match(r"^\d+\.\d+$", item.name): - _versions = self.get_openpype_versions(item) - if _versions: - openpype_versions += _versions - - # if it is file, strip extension, in case of dir don't. 
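# --- Editorial aside (not part of the original diff) -----------------------
# `_is_openpype_in_zip` above peeks into an archive and executes the
# bundled "openpype/version.py" to learn the real version. The same trick
# in isolation, as a minimal sketch:
from typing import Optional
from zipfile import BadZipFile, ZipFile


def read_zip_version(zip_path: str) -> Optional[str]:
    try:
        with ZipFile(zip_path, "r") as zip_file:
            with zip_file.open("openpype/version.py") as version_file:
                namespace = {}
                # The version module defines a plain `__version__` string.
                exec(version_file.read(), namespace)
                return namespace.get("__version__")
    except (BadZipFile, KeyError, OSError):
        # Not a zip file, or no version module inside it.
        return None
# ---------------------------------------------------------------------------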
- name = item.name if item.is_dir() else item.stem - result = OpenPypeVersion.version_in_str(name) - - if result: - detected_version: OpenPypeVersion - detected_version = result - - if item.is_dir() and not self._is_openpype_in_dir( - item, detected_version - ): - continue - - if item.is_file() and not self._is_openpype_in_zip( - item, detected_version - ): - continue - - detected_version.path = item - openpype_versions.append(detected_version) - - return sorted(openpype_versions) - - -class OpenPypeVersionExists(Exception): - """Exception for handling existing OpenPype version.""" - pass - - -class OpenPypeVersionInvalid(Exception): - """Exception for handling invalid OpenPype version.""" - pass - - -class OpenPypeVersionIOError(Exception): - """Exception for handling IO errors in OpenPype version.""" - pass diff --git a/igniter/install_dialog.py b/igniter/install_dialog.py deleted file mode 100644 index 551e2da918..0000000000 --- a/igniter/install_dialog.py +++ /dev/null @@ -1,509 +0,0 @@ -# -*- coding: utf-8 -*- -"""Show dialog for choosing central pype repository.""" -import os -import sys -import re -import collections - -from qtpy import QtCore, QtGui, QtWidgets - -from .install_thread import InstallThread -from .tools import ( - validate_mongo_connection, - get_openpype_icon_path -) - -from .nice_progress_bar import NiceProgressBar -from .user_settings import OpenPypeSecureRegistry -from .tools import load_stylesheet -from .version import __version__ - - -class ButtonWithOptions(QtWidgets.QFrame): - option_clicked = QtCore.Signal(str) - - def __init__(self, commands, parent=None): - super(ButtonWithOptions, self).__init__(parent) - - self.setObjectName("ButtonWithOptions") - - options_btn = QtWidgets.QToolButton(self) - options_btn.setArrowType(QtCore.Qt.DownArrow) - options_btn.setIconSize(QtCore.QSize(12, 12)) - - default = None - default_label = None - options_menu = QtWidgets.QMenu(self) - for option, option_label in commands.items(): - if default is None: - default = option - default_label = option_label - continue - action = QtWidgets.QAction(option_label, options_menu) - action.setData(option) - options_menu.addAction(action) - - main_btn = QtWidgets.QPushButton(default_label, self) - main_btn.setFlat(True) - - main_layout = QtWidgets.QHBoxLayout(self) - main_layout.setContentsMargins(0, 0, 0, 0) - main_layout.setSpacing(1) - - main_layout.addWidget(main_btn, 1, QtCore.Qt.AlignVCenter) - main_layout.addWidget(options_btn, 0, QtCore.Qt.AlignVCenter) - - main_btn.clicked.connect(self._on_main_button) - options_btn.clicked.connect(self._on_options_click) - options_menu.triggered.connect(self._on_trigger) - - self.main_btn = main_btn - self.options_btn = options_btn - self.options_menu = options_menu - - options_btn.setEnabled(not options_menu.isEmpty()) - - self._default_value = default - - def resizeEvent(self, event): - super(ButtonWithOptions, self).resizeEvent(event) - self.options_btn.setFixedHeight(self.main_btn.height()) - - def _on_options_click(self): - pos = self.main_btn.rect().bottomLeft() - point = self.main_btn.mapToGlobal(pos) - self.options_menu.popup(point) - - def _on_trigger(self, action): - self.option_clicked.emit(action.data()) - - def _on_main_button(self): - self.option_clicked.emit(self._default_value) - - -class ConsoleWidget(QtWidgets.QWidget): - def __init__(self, parent=None): - super(ConsoleWidget, self).__init__(parent) - - # style for normal and error console text - default_console_style = QtGui.QTextCharFormat() - error_console_style = 
QtGui.QTextCharFormat() - default_console_style.setForeground( - QtGui.QColor.fromRgb(72, 200, 150) - ) - error_console_style.setForeground( - QtGui.QColor.fromRgb(184, 54, 19) - ) - - label = QtWidgets.QLabel("Console:", self) - - console_output = QtWidgets.QPlainTextEdit(self) - console_output.setMinimumSize(QtCore.QSize(300, 200)) - console_output.setReadOnly(True) - console_output.setCurrentCharFormat(default_console_style) - console_output.setObjectName("Console") - - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(0, 0, 0, 0) - main_layout.addWidget(label, 0) - main_layout.addWidget(console_output, 1) - - self.default_console_style = default_console_style - self.error_console_style = error_console_style - - self.label = label - self.console_output = console_output - - self.hide_console() - - def hide_console(self): - self.label.setVisible(False) - self.console_output.setVisible(False) - - self.updateGeometry() - - def show_console(self): - self.label.setVisible(True) - self.console_output.setVisible(True) - - self.updateGeometry() - - def update_console(self, msg: str, error: bool = False) -> None: - if not error: - self.console_output.setCurrentCharFormat( - self.default_console_style - ) - else: - self.console_output.setCurrentCharFormat( - self.error_console_style - ) - self.console_output.appendPlainText(msg) - - -class MongoUrlInput(QtWidgets.QLineEdit): - """Widget to input mongodb URL.""" - - def set_valid(self): - """Set valid state on mongo url input.""" - self.setProperty("state", "valid") - self.style().polish(self) - - def remove_state(self): - """Set invalid state on mongo url input.""" - self.setProperty("state", "") - self.style().polish(self) - - def set_invalid(self): - """Set invalid state on mongo url input.""" - self.setProperty("state", "invalid") - self.style().polish(self) - - -class InstallDialog(QtWidgets.QDialog): - """Main Igniter dialog window.""" - - mongo_url_regex = re.compile(r"^(mongodb|mongodb\+srv)://.*?") - - _width = 500 - _height = 200 - commands = collections.OrderedDict([ - ("run", "Start"), - ("run_from_code", "Run from code") - ]) - - def __init__(self, parent=None): - super(InstallDialog, self).__init__(parent) - - self.setWindowTitle( - f"OpenPype Igniter {__version__}" - ) - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint - | QtCore.Qt.WindowMinimizeButtonHint - ) - - current_dir = os.path.dirname(os.path.abspath(__file__)) - roboto_font_path = os.path.join(current_dir, "RobotoMono-Regular.ttf") - poppins_font_path = os.path.join(current_dir, "Poppins") - - # Install roboto font - QtGui.QFontDatabase.addApplicationFont(roboto_font_path) - for filename in os.listdir(poppins_font_path): - if os.path.splitext(filename)[1] == ".ttf": - QtGui.QFontDatabase.addApplicationFont(filename) - - # Load logo - icon_path = get_openpype_icon_path() - pixmap_openpype_logo = QtGui.QPixmap(icon_path) - # Set logo as icon of window - self.setWindowIcon(QtGui.QIcon(pixmap_openpype_logo)) - - secure_registry = OpenPypeSecureRegistry("mongodb") - mongo_url = "" - try: - mongo_url = ( - os.getenv("OPENPYPE_MONGO", "") - or secure_registry.get_item("openPypeMongo") - ) - except ValueError: - pass - - self.mongo_url = mongo_url - self._pixmap_openpype_logo = pixmap_openpype_logo - - self._secure_registry = secure_registry - self._controls_disabled = False - self._install_thread = None - - self.resize(QtCore.QSize(self._width, self._height)) - self._init_ui() - - # Set stylesheet - self.setStyleSheet(load_stylesheet()) - - # 
Trigger Mongo URL validation - self._mongo_input.setText(self.mongo_url) - - def _init_ui(self): - # basic visual style - dark background, light text - - # Main info - # -------------------------------------------------------------------- - main_label = QtWidgets.QLabel("Welcome to OpenPype", self) - main_label.setWordWrap(True) - main_label.setObjectName("MainLabel") - - # Mongo box | OK button - # -------------------------------------------------------------------- - mongo_input = MongoUrlInput(self) - mongo_input.setPlaceholderText( - "Enter your database Address. Example: mongodb://192.168.1.10:2707" - ) - - mongo_messages_widget = QtWidgets.QWidget(self) - - mongo_connection_msg = QtWidgets.QLabel(mongo_messages_widget) - mongo_connection_msg.setVisible(True) - mongo_connection_msg.setTextInteractionFlags( - QtCore.Qt.TextSelectableByMouse - ) - - mongo_messages_layout = QtWidgets.QVBoxLayout(mongo_messages_widget) - mongo_messages_layout.setContentsMargins(0, 0, 0, 0) - mongo_messages_layout.addWidget(mongo_connection_msg) - - # Progress bar - # -------------------------------------------------------------------- - progress_bar = NiceProgressBar(self) - progress_bar.setAlignment(QtCore.Qt.AlignCenter) - progress_bar.setTextVisible(False) - - # Console - # -------------------------------------------------------------------- - console_widget = ConsoleWidget(self) - - # Bottom button bar - # -------------------------------------------------------------------- - bottom_widget = QtWidgets.QWidget(self) - - btns_widget = QtWidgets.QWidget(bottom_widget) - - openpype_logo_label = QtWidgets.QLabel("openpype logo", bottom_widget) - openpype_logo_label.setPixmap(self._pixmap_openpype_logo) - - run_button = ButtonWithOptions( - self.commands, - btns_widget - ) - run_button.setMinimumSize(64, 24) - run_button.setToolTip("Run OpenPype") - - # install button - - - - - - - - - - - - - - - - - - - - - - - - - - - - exit_button = QtWidgets.QPushButton("Exit", btns_widget) - exit_button.setObjectName("ExitBtn") - exit_button.setFlat(True) - exit_button.setMinimumSize(64, 24) - exit_button.setToolTip("Exit") - - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - btns_layout.setContentsMargins(0, 0, 0, 0) - btns_layout.addWidget(run_button, 0) - btns_layout.addWidget(exit_button, 0) - - bottom_layout = QtWidgets.QHBoxLayout(bottom_widget) - bottom_layout.setContentsMargins(0, 0, 0, 0) - bottom_layout.setAlignment(QtCore.Qt.AlignHCenter) - bottom_layout.addWidget(openpype_logo_label, 0) - bottom_layout.addStretch(1) - bottom_layout.addWidget(btns_widget, 0) - - # add all to main - main = QtWidgets.QVBoxLayout(self) - main.addSpacing(15) - main.addWidget(main_label, 0) - main.addSpacing(15) - main.addWidget(mongo_input, 0) - main.addWidget(mongo_messages_widget, 0) - - main.addWidget(progress_bar, 0) - main.addSpacing(15) - - main.addWidget(console_widget, 1) - - main.addWidget(bottom_widget, 0) - - run_button.option_clicked.connect(self._on_run_btn_click) - exit_button.clicked.connect(self._on_exit_clicked) - mongo_input.textChanged.connect(self._on_mongo_url_change) - - self._console_widget = console_widget - - self.main_label = main_label - - self._mongo_input = mongo_input - - self._mongo_connection_msg = mongo_connection_msg - - self._run_button = run_button - self._exit_button = exit_button - self._progress_bar = progress_bar - - def _on_run_btn_click(self, option): - # Disable buttons - self._disable_buttons() - # Set progress to any value - self._update_progress(1) - self._progress_bar.repaint() - 
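# --- Editorial aside (not part of the original diff) -----------------------
# Before any network round trip, the dialog above cheaply rejects values
# that cannot be MongoDB connection strings by matching only the scheme.
# The same pre-check in isolation:
import re

MONGO_URL_RE = re.compile(r"^(mongodb|mongodb\+srv)://.*?")


def looks_like_mongo_url(value: str) -> bool:
    return bool(MONGO_URL_RE.match(value.strip()))


# looks_like_mongo_url("mongodb://192.168.1.10:27017")  -> True
# looks_like_mongo_url("http://example.com")            -> False
# ---------------------------------------------------------------------------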
# Add label to show that is connecting to mongo - self.set_invalid_mongo_connection(self.mongo_url, True) - - # Process events to repaint changes - QtWidgets.QApplication.processEvents() - - if not self.validate_url(): - self._enable_buttons() - self._update_progress(0) - # Update any messages - self._mongo_input.setText(self.mongo_url) - return - - if option == "run": - self._run_openpype() - elif option == "run_from_code": - self._run_openpype_from_code() - else: - raise AssertionError("BUG: Unknown variant \"{}\"".format(option)) - - def _run_openpype_from_code(self): - os.environ["OPENPYPE_MONGO"] = self.mongo_url - try: - self._secure_registry.set_item("openPypeMongo", self.mongo_url) - except ValueError: - print("Couldn't save Mongo URL to keyring") - - self.done(2) - - def _run_openpype(self): - """Start install process. - - This will once again validate entered path and mongo if ok, start - working thread that will do actual job. - """ - # Check if install thread is not already running - if self._install_thread and self._install_thread.isRunning(): - return - - self._mongo_input.set_valid() - - install_thread = InstallThread(self) - install_thread.message.connect(self.update_console) - install_thread.progress.connect(self._update_progress) - install_thread.finished.connect(self._installation_finished) - install_thread.set_mongo(self.mongo_url) - - self._install_thread = install_thread - - install_thread.start() - - def _installation_finished(self): - # TODO we should find out why status can be set to 'None'? - # - 'InstallThread.run' should handle all cases so not sure where - # that come from - status = self._install_thread.result() - if status is not None and status >= 0: - self._update_progress(100) - QtWidgets.QApplication.processEvents() - self.done(3) - else: - self._enable_buttons() - self._show_console() - - def _update_progress(self, progress: int): - self._progress_bar.setValue(progress) - text_visible = self._progress_bar.isTextVisible() - if progress == 0: - if text_visible: - self._progress_bar.setTextVisible(False) - elif not text_visible: - self._progress_bar.setTextVisible(True) - - def _on_exit_clicked(self): - self.reject() - - def _on_mongo_url_change(self, new_value): - # Strip the value - new_value = new_value.strip() - # Store new mongo url to variable - self.mongo_url = new_value - - msg = None - # Change style of input - if not new_value: - self._mongo_input.remove_state() - elif not self.mongo_url_regex.match(new_value): - self._mongo_input.set_invalid() - msg = ( - "Mongo URL should start with" - " \"mongodb://\" or \"mongodb+srv://\"" - ) - else: - self._mongo_input.set_valid() - - self.set_invalid_mongo_url(msg) - - def validate_url(self): - """Validate if entered url is ok. - - Returns: - True if url is valid monogo string. - - """ - if self.mongo_url == "": - return False - - is_valid, reason_str = validate_mongo_connection(self.mongo_url) - if not is_valid: - self.set_invalid_mongo_connection(self.mongo_url) - self._mongo_input.set_invalid() - self.update_console(f"!!! 
{reason_str}", True) - return False - - self.set_invalid_mongo_connection(None) - self._mongo_input.set_valid() - return True - - def set_invalid_mongo_url(self, reason): - if reason is None: - self._mongo_connection_msg.setText("") - else: - self._mongo_connection_msg.setText("- {}".format(reason)) - - def set_invalid_mongo_connection(self, mongo_url, connecting=False): - if mongo_url is None: - self.set_invalid_mongo_url(mongo_url) - return - - if connecting: - msg = "Connecting to: {}".format(mongo_url) - else: - msg = "Can't connect to: {}".format(mongo_url) - - self.set_invalid_mongo_url(msg) - - def update_console(self, msg: str, error: bool = False) -> None: - """Display message in console. - - Args: - msg (str): message. - error (bool): if True, print it red. - """ - self._console_widget.update_console(msg, error) - - def _show_console(self): - self._console_widget.show_console() - self.updateGeometry() - - def _disable_buttons(self): - """Disable buttons so user interaction doesn't interfere.""" - self._exit_button.setEnabled(False) - self._run_button.setEnabled(False) - self._controls_disabled = True - - def _enable_buttons(self): - """Enable buttons after operation is complete.""" - self._exit_button.setEnabled(True) - self._run_button.setEnabled(True) - self._controls_disabled = False - - def closeEvent(self, event): # noqa - """Prevent closing if window when controls are disabled.""" - if self._controls_disabled: - return event.ignore() - return super(InstallDialog, self).closeEvent(event) - - -if __name__ == "__main__": - app = QtWidgets.QApplication(sys.argv) - d = InstallDialog() - d.show() - sys.exit(app.exec_()) diff --git a/igniter/install_thread.py b/igniter/install_thread.py deleted file mode 100644 index 1d55213de7..0000000000 --- a/igniter/install_thread.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- coding: utf-8 -*- -"""Working thread for installer.""" -import os -import sys -from pathlib import Path - -from qtpy import QtCore - -from .bootstrap_repos import ( - BootstrapRepos, - OpenPypeVersionInvalid, - OpenPypeVersionIOError, - OpenPypeVersionExists, - OpenPypeVersion -) - -from .tools import ( - get_openpype_global_settings, - get_local_openpype_path_from_settings, - validate_mongo_connection -) - - -class InstallThread(QtCore.QThread): - """Install Worker thread. - - This class takes care of finding OpenPype version on user entered path - (or loading this path from database). If nothing is entered by user, - OpenPype will create its zip files from repositories that comes with it. - - If path contains plain repositories, they are zipped and installed to - user data dir. - - """ - progress = QtCore.Signal(int) - message = QtCore.Signal((str, bool)) - - def __init__(self, parent=None,): - self._mongo = None - self._result = None - - super().__init__(parent) - - def result(self): - """Result of finished installation.""" - return self._result - - def _set_result(self, value): - if self._result is not None: - raise AssertionError("BUG: Result was set more than once!") - self._result = value - - def run(self): - """Thread entry point. - - Using :class:`BootstrapRepos` to either install OpenPype as zip files - or copy them from location specified by user or retrieved from - database. 
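# --- Editorial aside (not part of the original diff) -----------------------
# InstallThread follows a common Qt worker pattern: report through signals,
# store a single result that may only be set once. A minimal, hypothetical
# sketch of that pattern (names are invented for illustration):
from qtpy import QtCore


class WorkerThread(QtCore.QThread):
    progress = QtCore.Signal(int)
    message = QtCore.Signal(str, bool)

    def __init__(self, parent=None):
        self._result = None
        super().__init__(parent)

    def result(self):
        return self._result

    def _set_result(self, value):
        # Guard against accidental double assignment, as the igniter does.
        if self._result is not None:
            raise AssertionError("BUG: Result was set more than once!")
        self._result = value

    def run(self):
        self.message.emit("working ...", False)
        self.progress.emit(100)
        self._set_result(1)
# ---------------------------------------------------------------------------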
- - """ - self.message.emit("Installing OpenPype ...", False) - - # find local version of OpenPype - bs = BootstrapRepos( - progress_callback=self.set_progress, message=self.message) - local_version = OpenPypeVersion.get_installed_version_str() - - # user did not entered url - if self._mongo: - self.message.emit("Saving mongo connection string ...", False) - bs.secure_registry.set_item("openPypeMongo", self._mongo) - - elif os.getenv("OPENPYPE_MONGO"): - self._mongo = os.getenv("OPENPYPE_MONGO") - else: - # try to get it from settings registry - try: - self._mongo = bs.secure_registry.get_item( - "openPypeMongo") - except ValueError: - self.message.emit( - "!!! We need MongoDB URL to proceed.", True) - self._set_result(-1) - return - os.environ["OPENPYPE_MONGO"] = self._mongo - - if not validate_mongo_connection(self._mongo): - self.message.emit(f"Cannot connect to {self._mongo}", True) - self._set_result(-1) - return - - global_settings = get_openpype_global_settings(self._mongo) - data_dir = get_local_openpype_path_from_settings(global_settings) - bs.set_data_dir(data_dir) - - self.message.emit( - f"Detecting installed OpenPype versions in {bs.data_dir}", - False) - detected = bs.find_openpype(include_zips=True) - if not detected and getattr(sys, 'frozen', False): - self.message.emit("None detected.", True) - self.message.emit(("We will use OpenPype coming with " - "installer."), False) - openpype_version = bs.create_version_from_frozen_code() - if not openpype_version: - self.message.emit( - f"!!! Install failed - {openpype_version}", True) - self._set_result(-1) - return - self.message.emit(f"Using: {openpype_version}", False) - bs.install_version(openpype_version) - self.message.emit(f"Installed as {openpype_version}", False) - self.progress.emit(100) - self._set_result(1) - return - - if detected and not OpenPypeVersion.get_installed_version().is_compatible(detected[-1]): # noqa: E501 - self.message.emit(( - f"Latest detected version {detected[-1]} " - "is not compatible with the currently running " - f"{local_version}" - ), True) - self.message.emit(( - "Filtering detected versions to compatible ones..." - ), False) - - # filter results to get only compatible versions - detected = [ - version for version in detected - if version.is_compatible( - OpenPypeVersion.get_installed_version()) - ] - - if detected: - if OpenPypeVersion( - version=local_version, path=Path()) < detected[-1]: - self.message.emit(( - f"Latest installed version {detected[-1]} is newer " - f"then currently running {local_version}" - ), False) - self.message.emit("Skipping OpenPype install ...", False) - if detected[-1].path.suffix.lower() == ".zip": - bs.extract_openpype(detected[-1]) - self._set_result(0) - return - - if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version(): # noqa: E501 - self.message.emit(( - f"Latest installed version is the same as " - f"currently running {local_version}" - ), False) - self.message.emit("Skipping OpenPype install ...", False) - self._set_result(0) - return - - self.message.emit(( - "All installed versions are older then " - f"currently running one {local_version}" - ), False) - - self.message.emit("None detected.", False) - - self.message.emit( - f"We will use local OpenPype version {local_version}", False) - - local_openpype = bs.create_version_from_live_code() - if not local_openpype: - self.message.emit( - f"!!! 
Install failed - {local_openpype}", True) - self._set_result(-1) - return - - try: - bs.install_version(local_openpype) - except (OpenPypeVersionExists, - OpenPypeVersionInvalid, - OpenPypeVersionIOError) as e: - self.message.emit(f"Installed failed: ", True) - self.message.emit(str(e), True) - self._set_result(-1) - return - - self.message.emit(f"Installed as {local_openpype}", False) - self.progress.emit(100) - self._set_result(1) - return - - self.progress.emit(100) - self._set_result(1) - return - - def set_path(self, path: str) -> None: - """Helper to set path. - - Args: - path (str): Path to set. - - """ - self._path = path - - def set_mongo(self, mongo: str) -> None: - """Helper to set mongo url. - - Args: - mongo (str): Mongodb url. - - """ - self._mongo = mongo - - def set_progress(self, progress: int) -> None: - """Helper to set progress bar. - - Args: - progress (int): Progress in percents. - - """ - self.progress.emit(progress) diff --git a/igniter/message_dialog.py b/igniter/message_dialog.py deleted file mode 100644 index a2a8bce3a2..0000000000 --- a/igniter/message_dialog.py +++ /dev/null @@ -1,44 +0,0 @@ -from qtpy import QtWidgets, QtGui - -from .tools import ( - load_stylesheet, - get_openpype_icon_path -) - - -class MessageDialog(QtWidgets.QDialog): - """Simple message dialog with title, message and OK button.""" - def __init__(self, title, message): - super(MessageDialog, self).__init__() - - # Set logo as icon of window - icon_path = get_openpype_icon_path() - pixmap_openpype_logo = QtGui.QPixmap(icon_path) - self.setWindowIcon(QtGui.QIcon(pixmap_openpype_logo)) - - # Set title - self.setWindowTitle(title) - - # Set message - label_widget = QtWidgets.QLabel(message, self) - - ok_btn = QtWidgets.QPushButton("OK", self) - btns_layout = QtWidgets.QHBoxLayout() - btns_layout.addStretch(1) - btns_layout.addWidget(ok_btn, 0) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(label_widget, 1) - layout.addLayout(btns_layout, 0) - - ok_btn.clicked.connect(self._on_ok_clicked) - - self._label_widget = label_widget - self._ok_btn = ok_btn - - def _on_ok_clicked(self): - self.close() - - def showEvent(self, event): - super(MessageDialog, self).showEvent(event) - self.setStyleSheet(load_stylesheet()) diff --git a/igniter/nice_progress_bar.py b/igniter/nice_progress_bar.py deleted file mode 100644 index ee16d108d4..0000000000 --- a/igniter/nice_progress_bar.py +++ /dev/null @@ -1,20 +0,0 @@ -from qtpy import QtWidgets - - -class NiceProgressBar(QtWidgets.QProgressBar): - def __init__(self, parent=None): - super(NiceProgressBar, self).__init__(parent) - self._real_value = 0 - - def setValue(self, value): - self._real_value = value - if value != 0 and value < 11: - value = 11 - - super(NiceProgressBar, self).setValue(value) - - def value(self): - return self._real_value - - def text(self): - return "{} %".format(self._real_value) diff --git a/igniter/openpype.icns b/igniter/openpype.icns deleted file mode 100644 index 792f819ad9..0000000000 Binary files a/igniter/openpype.icns and /dev/null differ diff --git a/igniter/openpype.ico b/igniter/openpype.ico deleted file mode 100644 index f0c15accc4..0000000000 Binary files a/igniter/openpype.ico and /dev/null differ diff --git a/igniter/openpype_icon.png b/igniter/openpype_icon.png deleted file mode 100644 index 6eae8abca3..0000000000 Binary files a/igniter/openpype_icon.png and /dev/null differ diff --git a/igniter/splash.txt b/igniter/splash.txt deleted file mode 100644 index 833bcd4b9c..0000000000 --- a/igniter/splash.txt +++ 
/dev/null @@ -1,413 +0,0 @@ - - - - * - - - - - - - .* - - - - - - * - .* - * - - - - . - * - .* - * - . - - . - * - .* - .* - .* - * - . - . - * - .* - .* - .* - * - . - _. - /** - \ * - \* - * - * - . - __. - ---* - \ \* - \ * - \* - * - . - \___. - /* * - \ \ * - \ \* - \ * - \* - . - |____. - /* * - \|\ * - \ \ * - \ \ * - \ \* - \/. - _/_____. - /* * - / \ * - \ \ * - \ \ * - \ \__* - \/__. - __________. - --*-- ___* - \ \ \/_* - \ \ __* - \ \ \_* - \ \____\* - \/____/. - \____________ . - /* ___ \* - \ \ \/_\ * - \ \ _____* - \ \ \___/* - \ \____\ * - \/____/ . - |___________ . - /* ___ \ * - \|\ \/_\ \ * - \ \ _____/ * - \ \ \___/ * - \ \____\ / * - \/____/ \. - _/__________ . - /* ___ \ * - / \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---* - \ \____\ / \__* - \/____/ \/__. - ____________ . - --*-- ___ \ * - \ \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\* - \/____/ \/____/. - ____________ - /\ ___ \ . - \ \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ . - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ . - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ - \ \ \___/ ---- * - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ - \ \ \___/ ---- . - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ _ - \ \ \___/ ---- - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ \ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ __\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___. - \ \ \___/ ---- \ \\ - \ \____\ / \____\ \__\, - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ . - \ \ \___/ ---- \ \\ - \ \____\ / \____\ \__\\, - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ _. - \ \ \___/ ---- \ \\\ - \ \____\ / \____\ \__\\\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ __. - \ \ \___/ ---- \ \\ \ - \ \____\ / \____\ \__\\_/. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___. - \ \ \___/ ---- \ \\ \\ - \ \____\ / \____\ \__\\__\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ . - \ \ \___/ ---- \ \\ \\ - \ \____\ / \____\ \__\\__\\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ _. - \ \ \___/ ---- \ \\ \\\ - \ \____\ / \____\ \__\\__\\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ __. - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\_. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ __. 
- \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ * - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ O* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ ..oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . p.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . Py.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYp.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPe.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE c.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE C1.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE ClU.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE CluB.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . .. - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . .. - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . . - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . 
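# --- Editorial aside (not part of the original diff) -----------------------
# splash.txt (deleted above) stores the animation as a flat sequence of
# seven-line frames; terminal_splash.py (deleted below) replays them in
# place. Slicing such a file into frames is plain fixed-size chunking, as
# this minimal sketch shows:
from pathlib import Path
from typing import List

FRAME_SIZE = 7


def load_frames(splash_file: Path) -> List[str]:
    lines = splash_file.read_text().splitlines(keepends=True)
    full = len(lines) - len(lines) % FRAME_SIZE
    return [
        "".join(lines[i:i + FRAME_SIZE])
        for i in range(0, full, FRAME_SIZE)
    ]
# ---------------------------------------------------------------------------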
diff --git a/igniter/stylesheet.css b/igniter/stylesheet.css deleted file mode 100644 index 8df2621d83..0000000000 --- a/igniter/stylesheet.css +++ /dev/null @@ -1,280 +0,0 @@ -*{ - font-size: 10pt; - font-family: "Poppins"; -} - -QWidget { - color: #bfccd6; - background-color: #282C34; - border-radius: 0px; -} - -QMenu { - border: 1px solid #555555; - background-color: #21252B; -} - -QMenu::item { - padding: 5px 10px 5px 10px; - border-left: 5px solid #313741;; -} - -QMenu::item:selected { - border-left-color: rgb(84, 209, 178); - background-color: #222d37; -} - -QLineEdit, QPlainTextEdit { - border: 1px solid #464b54; - border-radius: 3px; - background-color: #21252B; - padding: 0.5em; -} - -QLineEdit[state="valid"] { - background-color: rgb(19, 19, 19); - color: rgb(64, 230, 132); - border-color: rgb(32, 64, 32); -} - -QLineEdit[state="invalid"] { - background-color: rgb(32, 19, 19); - color: rgb(255, 69, 0); - border-color: rgb(64, 32, 32); -} - -QLabel { - background: transparent; - color: #969b9e; -} - -QLabel:hover {color: #b8c1c5;} - -QPushButton { - border: 1px solid #aaaaaa; - border-radius: 3px; - padding: 5px; -} - -QPushButton:hover { - background-color: #333840; - border: 1px solid #fff; - color: #fff; -} - -QTableView { - border: 1px solid #444; - gridline-color: #6c6c6c; - background-color: #201F1F; - alternate-background-color:#21252B; -} - -QTableView::item:pressed, QListView::item:pressed, QTreeView::item:pressed { - background: #78879b; - color: #FFFFFF; -} - -QTableView::item:selected:active, QTreeView::item:selected:active, QListView::item:selected:active { - background: #3d8ec9; -} - -QProgressBar { - border: 1px solid grey; - border-radius: 10px; - color: #222222; - font-weight: bold; -} -QProgressBar:horizontal { - height: 20px; -} - -QProgressBar::chunk { - border-radius: 10px; - background-color: qlineargradient( - x1: 0, - y1: 0.5, - x2: 1, - y2: 0.5, - stop: 0 rgb(72, 200, 150), - stop: 1 rgb(82, 172, 215) - ); -} - - -QScrollBar:horizontal { - height: 15px; - margin: 3px 15px 3px 15px; - border: 1px transparent #21252B; - border-radius: 4px; - background-color: #21252B; -} - -QScrollBar::handle:horizontal { - background-color: #4B5362; - min-width: 5px; - border-radius: 4px; -} - -QScrollBar::add-line:horizontal { - margin: 0px 3px 0px 3px; - border-image: url(:/qss_icons/rc/right_arrow_disabled.png); - width: 10px; - height: 10px; - subcontrol-position: right; - subcontrol-origin: margin; -} - -QScrollBar::sub-line:horizontal { - margin: 0px 3px 0px 3px; - border-image: url(:/qss_icons/rc/left_arrow_disabled.png); - height: 10px; - width: 10px; - subcontrol-position: left; - subcontrol-origin: margin; -} - -QScrollBar::add-line:horizontal:hover,QScrollBar::add-line:horizontal:on { - border-image: url(:/qss_icons/rc/right_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: right; - subcontrol-origin: margin; -} - -QScrollBar::sub-line:horizontal:hover, QScrollBar::sub-line:horizontal:on { - border-image: url(:/qss_icons/rc/left_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: left; - subcontrol-origin: margin; -} - -QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal { - background: none; -} - -QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal { - background: none; -} - -QScrollBar:vertical { - background-color: #21252B; - width: 15px; - margin: 15px 3px 15px 3px; - border: 1px transparent #21252B; - border-radius: 4px; -} - -QScrollBar::handle:vertical { - background-color: #4B5362; - min-height: 
5px; - border-radius: 4px; -} - -QScrollBar::sub-line:vertical { - margin: 3px 0px 3px 0px; - border-image: url(:/qss_icons/rc/up_arrow_disabled.png); - height: 10px; - width: 10px; - subcontrol-position: top; - subcontrol-origin: margin; -} - -QScrollBar::add-line:vertical { - margin: 3px 0px 3px 0px; - border-image: url(:/qss_icons/rc/down_arrow_disabled.png); - height: 10px; - width: 10px; - subcontrol-position: bottom; - subcontrol-origin: margin; -} - -QScrollBar::sub-line:vertical:hover,QScrollBar::sub-line:vertical:on { - - border-image: url(:/qss_icons/rc/up_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: top; - subcontrol-origin: margin; -} - - -QScrollBar::add-line:vertical:hover, QScrollBar::add-line:vertical:on { - border-image: url(:/qss_icons/rc/down_arrow.png); - height: 10px; - width: 10px; - subcontrol-position: bottom; - subcontrol-origin: margin; -} - -QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical { - background: none; -} - - -QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { - background: none; -} - -#MainLabel { - color: rgb(200, 200, 200); - font-size: 12pt; -} - -#Console { - background-color: #21252B; - color: rgb(72, 200, 150); - font-family: "Roboto Mono"; - font-size: 8pt; -} - -#ExitBtn { - /* `border` must be set to background of flat button is painted .*/ - border: none; - color: rgb(39, 39, 39); - background-color: #828a97; - padding: 0.5em; - font-weight: 400; -} - -#ExitBtn:hover{ - background-color: #b2bece -} -#ExitBtn:disabled { - background-color: rgba(185, 185, 185, 31); - color: rgba(64, 64, 64, 63); -} - -#ButtonWithOptions QPushButton{ - border-top-right-radius: 0px; - border-bottom-right-radius: 0px; - border: none; - background-color: rgb(84, 209, 178); - color: rgb(39, 39, 39); - font-weight: 400; - padding: 0.5em; -} -#ButtonWithOptions QPushButton:hover{ - background-color: rgb(85, 224, 189) -} -#ButtonWithOptions QPushButton:disabled { - background-color: rgba(72, 200, 150, 31); - color: rgba(64, 64, 64, 63); -} - -#ButtonWithOptions QToolButton{ - border: none; - border-top-left-radius: 0px; - border-bottom-left-radius: 0px; - border-top-right-radius: 3px; - border-bottom-right-radius: 3px; - background-color: rgb(84, 209, 178); - color: rgb(39, 39, 39); -} -#ButtonWithOptions QToolButton:hover{ - background-color: rgb(85, 224, 189) -} -#ButtonWithOptions QToolButton:disabled { - background-color: rgba(72, 200, 150, 31); - color: rgba(64, 64, 64, 63); -} diff --git a/igniter/terminal_splash.py b/igniter/terminal_splash.py deleted file mode 100644 index 1d85fd3927..0000000000 --- a/igniter/terminal_splash.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype terminal animation.""" -import blessed -from pathlib import Path -from time import sleep - -NO_TERMINAL = False - -try: - term = blessed.Terminal() -except AttributeError: - # this happens when blessed cannot find proper terminal. - # If so, skip printing ascii art animation. 
-NO_TERMINAL = True
-
-
-def play_animation():
-    """Play the ASCII art OpenPype animation."""
-    if NO_TERMINAL:
-        return
-    print(term.home + term.clear)
-    frame_size = 7
-    splash_file = Path(__file__).parent / "splash.txt"
-    with splash_file.open("r") as sf:
-        animation = sf.readlines()
-
-    animation_length = int(len(animation) / frame_size)
-    current_frame = 0
-    for _ in range(animation_length):
-        frame = "".join(
-            scanline
-            for y, scanline in enumerate(
-                animation[current_frame : current_frame + frame_size]
-            )
-        )
-
-        with term.location(0, 0):
-            # term.aquamarine3_bold(frame)
-            print(f"{term.bold}{term.aquamarine3}{frame}{term.normal}")
-
-        sleep(0.02)
-        current_frame += frame_size
-    print(term.move_y(7))
diff --git a/igniter/tools.py b/igniter/tools.py
deleted file mode 100644
index 9dea203f0c..0000000000
--- a/igniter/tools.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Tools used in the **Igniter** GUI."""
-import os
-from typing import Union
-from urllib.parse import urlparse, parse_qs
-from pathlib import Path
-import platform
-
-import certifi
-from pymongo import MongoClient
-from pymongo.errors import (
-    ServerSelectionTimeoutError,
-    InvalidURI,
-    ConfigurationError,
-    OperationFailure
-)
-
-
-class OpenPypeVersionNotFound(Exception):
-    """OpenPype version was not found in the remote or local repository."""
-    pass
-
-
-class OpenPypeVersionIncompatible(Exception):
-    """OpenPype version is not compatible with the installed one (build)."""
-    pass
-
-
-def should_add_certificate_path_to_mongo_url(mongo_url):
-    """Check if a CA certificate path should be added to the mongo url.
-
-    Since 30.9.2021 cloud mongo requires newer certificates that are not
-    available on most workstations. This adds the path to the certifi
-    certificate, which is valid for it. The path is added only when the url
-    has the scheme 'mongodb+srv', or 'ssl=true'/'tls=true' in its query.
-    """
-    parsed = urlparse(mongo_url)
-    query = parse_qs(parsed.query)
-    lowered_query_keys = set(key.lower() for key in query.keys())
-    add_certificate = False
-    # Check if url 'ssl' or 'tls' query options are set to 'true'
-    for key in ("ssl", "tls"):
-        if key in query and "true" in query[key]:
-            add_certificate = True
-            break
-
-    # Check if url scheme is 'mongodb+srv'
-    if not add_certificate and parsed.scheme == "mongodb+srv":
-        add_certificate = True
-
-    # Check if url already contains a certificate path
-    if add_certificate and "tlscafile" in lowered_query_keys:
-        add_certificate = False
-    return add_certificate
-
-
-def validate_mongo_connection(cnx: str) -> (bool, str):
-    """Check if the provided mongodb URL is valid.
-
-    Args:
-        cnx (str): URL to validate.
-
-    Returns:
-        (bool, str): True if ok, False if not, with the reason in the str.
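# --- Editorial aside (not part of the original diff) -----------------------
# How the helper above is meant to be consumed: callers assemble the
# MongoClient keyword arguments and inject certifi's CA bundle only when
# the URL requires it. A condensed sketch that reuses
# `should_add_certificate_path_to_mongo_url` from above (the timeout
# default mirrors the 2000 ms used by the validation code below):
import certifi
from pymongo import MongoClient


def make_client(mongo_url: str, timeout_ms: int = 2000) -> MongoClient:
    kwargs = {"serverSelectionTimeoutMS": timeout_ms}
    if should_add_certificate_path_to_mongo_url(mongo_url):
        kwargs["tlsCAFile"] = certifi.where()
    return MongoClient(mongo_url, **kwargs)
# ---------------------------------------------------------------------------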
- - """ - parsed = urlparse(cnx) - if parsed.scheme not in ["mongodb", "mongodb+srv"]: - return False, "Not mongodb schema" - - kwargs = { - "serverSelectionTimeoutMS": os.environ.get("AVALON_TIMEOUT", 2000) - } - # Add certificate path if should be required - if should_add_certificate_path_to_mongo_url(cnx): - kwargs["tlsCAFile"] = certifi.where() - - try: - client = MongoClient(cnx, **kwargs) - client.server_info() - with client.start_session(): - pass - client.close() - except ServerSelectionTimeoutError as e: - return False, f"Cannot connect to server {cnx} - {e}" - except ValueError: - return False, f"Invalid port specified {parsed.port}" - except (ConfigurationError, OperationFailure, InvalidURI) as exc: - return False, str(exc) - else: - return True, "Connection is successful" - - -def validate_mongo_string(mongo: str) -> (bool, str): - """Validate string if it is mongo url acceptable by **Igniter**.. - - Args: - mongo (str): String to validate. - - Returns: - (bool, str): - True if valid, False if not and in second part of tuple - the reason why it failed. - - """ - if not mongo: - return True, "empty string" - return validate_mongo_connection(mongo) - - -def validate_path_string(path: str) -> (bool, str): - """Validate string if it is path to OpenPype repository. - - Args: - path (str): Path to validate. - - - Returns: - (bool, str): - True if valid, False if not and in second part of tuple - the reason why it failed. - - """ - if not path: - return False, "empty string" - - if not Path(path).exists(): - return False, "path doesn't exists" - - if not Path(path).is_dir(): - return False, "path is not directory" - - return True, "valid path" - - -def get_openpype_global_settings(url: str) -> dict: - """Load global settings from Mongo database. - - We are loading data from database `openpype` and collection `settings`. - There we expect document type `global_settings`. - - Args: - url (str): MongoDB url. - - Returns: - dict: With settings data. Empty dictionary is returned if not found. - """ - kwargs = {} - if should_add_certificate_path_to_mongo_url(url): - kwargs["tlsCAFile"] = certifi.where() - - try: - # Create mongo connection - client = MongoClient(url, **kwargs) - # Access settings collection - openpype_db = os.environ.get("OPENPYPE_DATABASE_NAME") or "openpype" - col = client[openpype_db]["settings"] - # Query global settings - global_settings = col.find_one({"type": "global_settings"}) or {} - # Close Mongo connection - client.close() - - except Exception: - # TODO log traceback or message - return {} - - return global_settings.get("data") or {} - - -def get_openpype_path_from_settings(settings: dict) -> Union[str, None]: - """Get OpenPype path from global settings. - - Args: - settings (dict): mongodb url. - - Returns: - path to OpenPype or None if not found - """ - paths = ( - settings - .get("openpype_path", {}) - .get(platform.system().lower()) - ) or [] - # For cases when `openpype_path` is a single path - if paths and isinstance(paths, str): - paths = [paths] - - return next((path for path in paths if os.path.exists(path)), None) - - -def get_local_openpype_path_from_settings(settings: dict) -> Union[str, None]: - """Get OpenPype local path from global settings. - - Used to download and unzip OP versions. - Args: - settings (dict): settings from DB. 
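# --- Editorial aside (not part of the original diff) -----------------------
# `get_openpype_path_from_settings` above tolerates both a single string
# and a list of candidate paths in the settings, returning the first one
# that exists on this machine. The normalization pattern on its own:
import os
from typing import List, Optional, Union


def first_existing_path(paths: Union[str, List[str], None]) -> Optional[str]:
    if isinstance(paths, str):
        paths = [paths]
    return next((p for p in (paths or []) if os.path.exists(p)), None)
# ---------------------------------------------------------------------------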
- - Returns: - path to OpenPype or None if not found - """ - path = ( - settings - .get("local_openpype_path", {}) - .get(platform.system().lower()) - ) - if path: - return Path(path) - return None - - -def get_expected_studio_version_str( - staging=False, global_settings=None -) -> str: - """Version that should be currently used in studio. - - Args: - staging (bool): Get current version for staging. - global_settings (dict): Optional precached global settings. - - Returns: - str: OpenPype version which should be used. Empty string means latest. - """ - mongo_url = os.environ.get("OPENPYPE_MONGO") - if global_settings is None: - global_settings = get_openpype_global_settings(mongo_url) - key = "staging_version" if staging else "production_version" - return global_settings.get(key) or "" - - -def load_stylesheet() -> str: - """Load css style sheet. - - Returns: - str: content of the stylesheet - - """ - stylesheet_path = Path(__file__).parent.resolve() / "stylesheet.css" - - return stylesheet_path.read_text() - - -def get_openpype_icon_path() -> str: - """Path to OpenPype icon png file.""" - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "openpype_icon.png" - ) diff --git a/igniter/update_thread.py b/igniter/update_thread.py deleted file mode 100644 index 0223477d0a..0000000000 --- a/igniter/update_thread.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -"""Working thread for update.""" -from qtpy import QtCore - -from .bootstrap_repos import ( - BootstrapRepos, - OpenPypeVersion -) - - -class UpdateThread(QtCore.QThread): - """Install Worker thread. - - This class takes care of finding OpenPype version on user entered path - (or loading this path from database). If nothing is entered by user, - OpenPype will create its zip files from repositories that comes with it. - - If path contains plain repositories, they are zipped and installed to - user data dir. - - """ - progress = QtCore.Signal(int) - message = QtCore.Signal((str, bool)) - - def __init__(self, parent=None): - self._result = None - self._openpype_version = None - super().__init__(parent) - - def set_version(self, openpype_version: OpenPypeVersion): - self._openpype_version = openpype_version - - def result(self): - """Result of finished installation.""" - return self._result - - def _set_result(self, value): - if self._result is not None: - raise AssertionError("BUG: Result was set more than once!") - self._result = value - - def run(self): - """Thread entry point. - - Using :class:`BootstrapRepos` to either install OpenPype as zip files - or copy them from location specified by user or retrieved from - database. - """ - bs = BootstrapRepos( - progress_callback=self.set_progress, message=self.message) - - bs.set_data_dir(OpenPypeVersion.get_local_openpype_path()) - version_path = bs.install_version(self._openpype_version) - self._set_result(version_path) - - def set_progress(self, progress: int) -> None: - """Helper to set progress bar. - - Args: - progress (int): Progress in percents. 
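# --- Editorial aside (not part of the original diff) -----------------------
# `get_expected_studio_version_str` above boils down to choosing between
# two settings keys, where an empty string means "use the latest". In
# isolation:
def expected_version(global_settings: dict, staging: bool = False) -> str:
    key = "staging_version" if staging else "production_version"
    return global_settings.get(key) or ""


# expected_version({"production_version": "3.15.11"}) -> "3.15.11"
# expected_version({})                                -> "" (use latest)
# ---------------------------------------------------------------------------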
- - """ - self.progress.emit(progress) diff --git a/igniter/update_window.py b/igniter/update_window.py deleted file mode 100644 index d51ae18cd0..0000000000 --- a/igniter/update_window.py +++ /dev/null @@ -1,147 +0,0 @@ -# -*- coding: utf-8 -*- -"""Progress window to show when OpenPype is updating/installing locally.""" -import os - -from qtpy import QtCore, QtGui, QtWidgets - -from .update_thread import UpdateThread -from .bootstrap_repos import OpenPypeVersion -from .nice_progress_bar import NiceProgressBar -from .tools import load_stylesheet - - -class UpdateWindow(QtWidgets.QDialog): - """OpenPype update window.""" - - _width = 500 - _height = 100 - - def __init__(self, version: OpenPypeVersion, parent=None): - super(UpdateWindow, self).__init__(parent) - self._openpype_version = version - self._result_version_path = None - - self.setWindowTitle( - f"OpenPype is updating ..." - ) - self.setModal(True) - self.setWindowFlags( - QtCore.Qt.WindowMinimizeButtonHint - ) - - current_dir = os.path.dirname(os.path.abspath(__file__)) - roboto_font_path = os.path.join(current_dir, "RobotoMono-Regular.ttf") - poppins_font_path = os.path.join(current_dir, "Poppins") - icon_path = os.path.join(current_dir, "openpype_icon.png") - - # Install roboto font - QtGui.QFontDatabase.addApplicationFont(roboto_font_path) - for filename in os.listdir(poppins_font_path): - if os.path.splitext(filename)[1] == ".ttf": - QtGui.QFontDatabase.addApplicationFont(filename) - - # Load logo - pixmap_openpype_logo = QtGui.QPixmap(icon_path) - # Set logo as icon of window - self.setWindowIcon(QtGui.QIcon(pixmap_openpype_logo)) - - self._pixmap_openpype_logo = pixmap_openpype_logo - - self._update_thread = None - - self._init_ui() - - # Set stylesheet - self.setStyleSheet(load_stylesheet()) - self._run_update() - - def _init_ui(self): - - # Main info - # -------------------------------------------------------------------- - main_label = QtWidgets.QLabel( - f"OpenPype is updating to {self._openpype_version}", self) - main_label.setWordWrap(True) - main_label.setObjectName("MainLabel") - - # Progress bar - # -------------------------------------------------------------------- - progress_bar = NiceProgressBar(self) - progress_bar.setAlignment(QtCore.Qt.AlignCenter) - progress_bar.setTextVisible(False) - - # add all to main - main = QtWidgets.QVBoxLayout(self) - main.addSpacing(15) - main.addWidget(main_label, 0) - main.addSpacing(15) - main.addWidget(progress_bar, 0) - main.addSpacing(15) - - self._progress_bar = progress_bar - - def showEvent(self, event): - super().showEvent(event) - current_size = self.size() - new_size = QtCore.QSize( - max(current_size.width(), self._width), - max(current_size.height(), self._height) - ) - if current_size != new_size: - self.resize(new_size) - - def _run_update(self): - """Start install process. - - This will once again validate entered path and mongo if ok, start - working thread that will do actual job. 
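# --- Editorial aside (not part of the original diff) -----------------------
# UpdateWindow switches its progress bar into Qt's "busy" (indeterminate)
# mode because extraction progress cannot be measured reliably. In Qt this
# is controlled purely through the range: an empty (0, 0) range animates
# the bar, and restoring a non-empty range returns it to normal display.
# A minimal sketch:
from qtpy import QtWidgets


def set_busy(bar: QtWidgets.QProgressBar, busy: bool) -> None:
    if busy:
        bar.setRange(0, 0)    # indeterminate "busy" animation
    else:
        bar.setRange(0, 100)  # back to a determinate percentage display
        bar.setValue(100)
# ---------------------------------------------------------------------------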
- """ - # Check if install thread is not already running - if self._update_thread and self._update_thread.isRunning(): - return - self._progress_bar.setRange(0, 0) - update_thread = UpdateThread(self) - update_thread.set_version(self._openpype_version) - update_thread.message.connect(self.update_console) - update_thread.progress.connect(self._update_progress) - update_thread.finished.connect(self._installation_finished) - - self._update_thread = update_thread - - update_thread.start() - - def get_version_path(self): - return self._result_version_path - - def _installation_finished(self): - status = self._update_thread.result() - self._result_version_path = status - self._progress_bar.setRange(0, 1) - self._update_progress(100) - QtWidgets.QApplication.processEvents() - self.done(0) - - def _update_progress(self, progress: int): - # not updating progress as we are not able to determine it - # correctly now. Progress bar is set to un-deterministic mode - # until we are able to get progress in better way. - """ - self._progress_bar.setRange(0, 0) - self._progress_bar.setValue(progress) - text_visible = self._progress_bar.isTextVisible() - if progress == 0: - if text_visible: - self._progress_bar.setTextVisible(False) - elif not text_visible: - self._progress_bar.setTextVisible(True) - """ - return - - def update_console(self, msg: str, error: bool = False) -> None: - """Display message in console. - - Args: - msg (str): message. - error (bool): if True, print it red. - """ - print(msg) diff --git a/igniter/user_settings.py b/igniter/user_settings.py deleted file mode 100644 index 2a406f83dd..0000000000 --- a/igniter/user_settings.py +++ /dev/null @@ -1,493 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package to deal with saving and retrieving user specific settings.""" -import os -from datetime import datetime -from abc import ABCMeta, abstractmethod -import json - -# disable lru cache in Python 2 -try: - from functools import lru_cache -except ImportError: - def lru_cache(maxsize): - def max_size(func): - def wrapper(*args, **kwargs): - value = func(*args, **kwargs) - return value - return wrapper - return max_size - -# ConfigParser was renamed in python3 to configparser -try: - import configparser -except ImportError: - import ConfigParser as configparser - -import platform - -import six -import appdirs - -_PLACEHOLDER = object() - - -class OpenPypeSecureRegistry: - """Store information using keyring. - - Registry should be used for private data that should be available only for - user. - - All passed registry names will have added prefix `OpenPype/` to easier - identify which data were created by OpenPype. - - Args: - name(str): Name of registry used as identifier for data. - """ - def __init__(self, name): - try: - import keyring - - except Exception: - raise NotImplementedError( - "Python module `keyring` is not available." - ) - - # hack for cx_freeze and Windows keyring backend - if platform.system().lower() == "windows": - from keyring.backends import Windows - - keyring.set_keyring(Windows.WinVaultKeyring()) - - # Force "OpenPype" prefix - self._name = "/".join(("OpenPype", name)) - - def set_item(self, name, value): - # type: (str, str) -> None - """Set sensitive item into system's keyring. - - This uses `Keyring module`_ to save sensitive stuff into system's - keyring. - - Args: - name (str): Name of the item. - value (str): Value of the item. - - .. 
_Keyring module: - https://github.com/jaraco/keyring - - """ - import keyring - - keyring.set_password(self._name, name, value) - - @lru_cache(maxsize=32) - def get_item(self, name, default=_PLACEHOLDER): - """Get value of sensitive item from system's keyring. - - See also `Keyring module`_ - - Args: - name (str): Name of the item. - default (Any): Default value if item is not available. - - Returns: - value (str): Value of the item. - - Raises: - ValueError: If item doesn't exist and default is not defined. - - .. _Keyring module: - https://github.com/jaraco/keyring - - """ - import keyring - - value = keyring.get_password(self._name, name) - if value: - return value - - if default is not _PLACEHOLDER: - return default - - # NOTE Should raise `KeyError` - raise ValueError( - "Item {}:{} does not exist in keyring.".format(self._name, name) - ) - - def delete_item(self, name): - # type: (str) -> None - """Delete value stored in system's keyring. - - See also `Keyring module`_ - - Args: - name (str): Name of the item to be deleted. - - .. _Keyring module: - https://github.com/jaraco/keyring - - """ - import keyring - - self.get_item.cache_clear() - keyring.delete_password(self._name, name) - - -@six.add_metaclass(ABCMeta) -class ASettingRegistry(): - """Abstract class defining structure of **SettingRegistry** class. - - It is implementing methods to store secure items into keyring, otherwise - mechanism for storing common items must be implemented in abstract - methods. - - Attributes: - _name (str): Registry names. - - """ - - def __init__(self, name): - # type: (str) -> ASettingRegistry - super(ASettingRegistry, self).__init__() - - self._name = name - self._items = {} - - def set_item(self, name, value): - # type: (str, str) -> None - """Set item to settings registry. - - Args: - name (str): Name of the item. - value (str): Value of the item. - - """ - self._set_item(name, value) - - @abstractmethod - def _set_item(self, name, value): - # type: (str, str) -> None - # Implement it - pass - - def __setitem__(self, name, value): - self._items[name] = value - self._set_item(name, value) - - def get_item(self, name): - # type: (str) -> str - """Get item from settings registry. - - Args: - name (str): Name of the item. - - Returns: - value (str): Value of the item. - - Raises: - ValueError: If item doesn't exist. - - """ - return self._get_item(name) - - @abstractmethod - def _get_item(self, name): - # type: (str) -> str - # Implement it - pass - - def __getitem__(self, name): - return self._get_item(name) - - def delete_item(self, name): - # type: (str) -> None - """Delete item from settings registry. - - Args: - name (str): Name of the item. - - """ - self._delete_item(name) - - @abstractmethod - def _delete_item(self, name): - # type: (str) -> None - """Delete item from settings. - - Note: - see :meth:`openpype.lib.user_settings.ARegistrySettings.delete_item` - - """ - pass - - def __delitem__(self, name): - del self._items[name] - self._delete_item(name) - - -class IniSettingRegistry(ASettingRegistry): - """Class using :mod:`configparser`. - - This class is using :mod:`configparser` (ini) files to store items. 
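# Editor's note: two small sketches for the classes above. First, typical
# use of OpenPypeSecureRegistry (the registry and item names are
# hypothetical); second, a minimal concrete subclass showing the
# ASettingRegistry contract -- neither is part of the original module.
registry = OpenPypeSecureRegistry("ftrack")
registry.set_item("api_key", "super-secret")       # stored via keyring
print(registry.get_item("api_key", default=None))  # -> "super-secret"
registry.delete_item("api_key")                    # removed from keyring


class MemorySettingRegistry(ASettingRegistry):
    """In-memory registry implementing the three abstract methods."""

    def _set_item(self, name, value):
        self._items[name] = value

    def _get_item(self, name):
        try:
            return self._items[name]
        except KeyError:
            raise ValueError("Item {} does not exist".format(name))

    def _delete_item(self, name):
        self._items.pop(name, None)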
- - """ - - def __init__(self, name, path): - # type: (str, str) -> IniSettingRegistry - super(IniSettingRegistry, self).__init__(name) - # get registry file - version = os.getenv("OPENPYPE_VERSION", "N/A") - self._registry_file = os.path.join(path, "{}.ini".format(name)) - if not os.path.exists(self._registry_file): - with open(self._registry_file, mode="w") as cfg: - print("# Settings registry", cfg) - print("# Generated by OpenPype {}".format(version), cfg) - now = datetime.now().strftime("%d/%m/%Y %H:%M:%S") - print("# {}".format(now), cfg) - - def set_item_section( - self, section, name, value): - # type: (str, str, str) -> None - """Set item to specific section of ini registry. - - If section doesn't exists, it is created. - - Args: - section (str): Name of section. - name (str): Name of the item. - value (str): Value of the item. - - """ - value = str(value) - config = configparser.ConfigParser() - - config.read(self._registry_file) - if not config.has_section(section): - config.add_section(section) - current = config[section] - current[name] = value - - with open(self._registry_file, mode="w") as cfg: - config.write(cfg) - - def _set_item(self, name, value): - # type: (str, str) -> None - self.set_item_section("MAIN", name, value) - - def set_item(self, name, value): - # type: (str, str) -> None - """Set item to settings ini file. - - This saves item to ``DEFAULT`` section of ini as each item there - must reside in some section. - - Args: - name (str): Name of the item. - value (str): Value of the item. - - """ - # this does the some, overridden just for different docstring. - # we cast value to str as ini options values must be strings. - super(IniSettingRegistry, self).set_item(name, str(value)) - - def get_item(self, name): - # type: (str) -> str - """Gets item from settings ini file. - - This gets settings from ``DEFAULT`` section of ini file as each item - there must reside in some section. - - Args: - name (str): Name of the item. - - Returns: - str: Value of item. - - Raises: - ValueError: If value doesn't exist. - - """ - return super(IniSettingRegistry, self).get_item(name) - - @lru_cache(maxsize=32) - def get_item_from_section(self, section, name): - # type: (str, str) -> str - """Get item from section of ini file. - - This will read ini file and try to get item value from specified - section. If that section or item doesn't exist, :exc:`ValueError` - is risen. - - Args: - section (str): Name of ini section. - name (str): Name of the item. - - Returns: - str: Item value. - - Raises: - ValueError: If value doesn't exist. - - """ - config = configparser.ConfigParser() - config.read(self._registry_file) - try: - value = config[section][name] - except KeyError: - raise ValueError( - "Registry doesn't contain value {}:{}".format(section, name)) - return value - - def _get_item(self, name): - # type: (str) -> str - return self.get_item_from_section("MAIN", name) - - def delete_item_from_section(self, section, name): - # type: (str, str) -> None - """Delete item from section in ini file. - - Args: - section (str): Section name. - name (str): Name of the item. - - Raises: - ValueError: If item doesn't exist. 
- - """ - self.get_item_from_section.cache_clear() - config = configparser.ConfigParser() - config.read(self._registry_file) - try: - _ = config[section][name] - except KeyError: - raise ValueError( - "Registry doesn't contain value {}:{}".format(section, name)) - config.remove_option(section, name) - - # if section is empty, delete it - if len(config[section].keys()) == 0: - config.remove_section(section) - - with open(self._registry_file, mode="w") as cfg: - config.write(cfg) - - def _delete_item(self, name): - """Delete item from default section. - - Note: - See :meth:`~openpype.lib.IniSettingsRegistry.delete_item_from_section` - - """ - self.delete_item_from_section("MAIN", name) - - -class JSONSettingRegistry(ASettingRegistry): - """Class using json file as storage.""" - - def __init__(self, name, path): - # type: (str, str) -> JSONSettingRegistry - super(JSONSettingRegistry, self).__init__(name) - #: str: name of registry file - self._registry_file = os.path.join(path, "{}.json".format(name)) - now = datetime.now().strftime("%d/%m/%Y %H:%M:%S") - header = { - "__metadata__": { - "openpype-version": os.getenv("OPENPYPE_VERSION", "N/A"), - "generated": now - }, - "registry": {} - } - - if not os.path.exists(os.path.dirname(self._registry_file)): - os.makedirs(os.path.dirname(self._registry_file), exist_ok=True) - if not os.path.exists(self._registry_file): - with open(self._registry_file, mode="w") as cfg: - json.dump(header, cfg, indent=4) - - @lru_cache(maxsize=32) - def _get_item(self, name): - # type: (str) -> object - """Get item value from registry json. - - Note: - See :meth:`openpype.lib.JSONSettingRegistry.get_item` - - """ - with open(self._registry_file, mode="r") as cfg: - data = json.load(cfg) - try: - value = data["registry"][name] - except KeyError: - raise ValueError( - "Registry doesn't contain value {}".format(name)) - return value - - def get_item(self, name): - # type: (str) -> object - """Get item value from registry json. - - Args: - name (str): Name of the item. - - Returns: - value of the item - - Raises: - ValueError: If item is not found in registry file. - - """ - return self._get_item(name) - - def _set_item(self, name, value): - # type: (str, object) -> None - """Set item value to registry json. - - Note: - See :meth:`openpype.lib.JSONSettingRegistry.set_item` - - """ - with open(self._registry_file, "r+") as cfg: - data = json.load(cfg) - data["registry"][name] = value - cfg.truncate(0) - cfg.seek(0) - json.dump(data, cfg, indent=4) - - def set_item(self, name, value): - # type: (str, object) -> None - """Set item and its value into json registry file. - - Args: - name (str): name of the item. - value (Any): value of the item. - - """ - self._set_item(name, value) - - def _delete_item(self, name): - # type: (str) -> None - self._get_item.cache_clear() - with open(self._registry_file, "r+") as cfg: - data = json.load(cfg) - del data["registry"][name] - cfg.truncate(0) - cfg.seek(0) - json.dump(data, cfg, indent=4) - - -class OpenPypeSettingsRegistry(JSONSettingRegistry): - """Class handling OpenPype general settings registry. - - Attributes: - vendor (str): Name used for path construction. - product (str): Additional name used for path construction. 
- - """ - - def __init__(self, name=None): - self.vendor = "pypeclub" - self.product = "openpype" - if not name: - name = "openpype_settings" - path = appdirs.user_data_dir(self.product, self.vendor) - super(OpenPypeSettingsRegistry, self).__init__(name, path) diff --git a/igniter/version.py b/igniter/version.py deleted file mode 100644 index 8e7731f6d6..0000000000 --- a/igniter/version.py +++ /dev/null @@ -1,4 +0,0 @@ -# -*- coding: utf-8 -*- -"""Definition of Igniter version.""" - -__version__ = "1.0.2" diff --git a/inno_setup.iss b/inno_setup.iss deleted file mode 100644 index d9a41d22ee..0000000000 --- a/inno_setup.iss +++ /dev/null @@ -1,55 +0,0 @@ -; Script generated by the Inno Setup Script Wizard. -; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! - - -#define MyAppName "OpenPype" -#define Build GetEnv("BUILD_DIR") -#define AppVer GetEnv("BUILD_VERSION") - - -[Setup] -; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications. -; (To generate a new GUID, click Tools | Generate GUID inside the IDE.) -AppId={{B9E9DF6A-5BDA-42DD-9F35-C09D564C4D93} -AppName={#MyAppName} -AppVersion={#AppVer} -AppVerName={#MyAppName} version {#AppVer} -AppPublisher=Ynput s.r.o -AppPublisherURL=https://ynput.io -AppSupportURL=https://ynput.io -AppUpdatesURL=https://ynput.io -DefaultDirName={autopf}\{#MyAppName}\{#AppVer} -UsePreviousAppDir=no -DisableProgramGroupPage=yes -OutputBaseFilename={#MyAppName}-{#AppVer}-install -AllowCancelDuringInstall=yes -; Uncomment the following line to run in non administrative install mode (install for current user only.) -;PrivilegesRequired=lowest -PrivilegesRequiredOverridesAllowed=dialog -SetupIconFile=igniter\openpype.ico -OutputDir=build\ -Compression=lzma2 -SolidCompression=yes -WizardStyle=modern - -[Languages] -Name: "english"; MessagesFile: "compiler:Default.isl" - -[Tasks] -Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}" - -[InstallDelete] -; clean everything in previous installation folder -Type: filesandordirs; Name: "{app}\*" - - -[Files] -Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs -; NOTE: Don't use "Flags: ignoreversion" on any shared system files - -[Icons] -Name: "{autoprograms}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe" -Name: "{autodesktop}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon - -[Run] -Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent diff --git a/openpype/__init__.py b/openpype/__init__.py deleted file mode 100644 index e6b77b1853..0000000000 --- a/openpype/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -import os - - -PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__)) -PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") - -AYON_SERVER_ENABLED = os.environ.get("USE_AYON_SERVER") == "1" diff --git a/openpype/__main__.py b/openpype/__main__.py deleted file mode 100644 index 399ca035ff..0000000000 --- a/openpype/__main__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -"""Main entry point for Pype command.""" -from . import cli -import sys -import traceback - -if __name__ == '__main__': - try: - cli.main(obj={}, prog_name="pype") - except Exception: - exc_info = sys.exc_info() - print("!!! 
Pype crashed:") - traceback.print_exception(*exc_info) - sys.exit(1) diff --git a/openpype/addons/README.md b/openpype/addons/README.md deleted file mode 100644 index 92b8b8c07c..0000000000 --- a/openpype/addons/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This directory is for storing external addons that needs to be included in the pipeline when distributed. - -The directory is ignored by Git, but included in the zip and installation files. diff --git a/openpype/cli.py b/openpype/cli.py deleted file mode 100644 index 8caa139765..0000000000 --- a/openpype/cli.py +++ /dev/null @@ -1,432 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package for handling pype command line arguments.""" -import os -import sys -import code -import click - -from openpype import AYON_SERVER_ENABLED -from .pype_commands import PypeCommands - - -class AliasedGroup(click.Group): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._aliases = {} - - def set_alias(self, src_name, dst_name): - self._aliases[dst_name] = src_name - - def get_command(self, ctx, cmd_name): - if cmd_name in self._aliases: - cmd_name = self._aliases[cmd_name] - return super().get_command(ctx, cmd_name) - - -@click.group(cls=AliasedGroup, invoke_without_command=True) -@click.pass_context -@click.option("--use-version", - expose_value=False, help="use specified version") -@click.option("--use-staging", is_flag=True, - expose_value=False, help="use staging variants") -@click.option("--list-versions", is_flag=True, expose_value=False, - help="list all detected versions.") -@click.option("--validate-version", expose_value=False, - help="validate given version integrity") -@click.option("--debug", is_flag=True, expose_value=False, - help="Enable debug") -@click.option("--verbose", expose_value=False, - help=("Change OpenPype log level (debug - critical or 0-50)")) -@click.option("--automatic-tests", is_flag=True, expose_value=False, - help=("Run in automatic tests mode")) -def main(ctx): - """Pype is main command serving as entry point to pipeline system. - - It wraps different commands together. - """ - - if ctx.invoked_subcommand is None: - # Print help if headless mode is used - if AYON_SERVER_ENABLED: - is_headless = os.getenv("AYON_HEADLESS_MODE") == "1" - else: - is_headless = os.getenv("OPENPYPE_HEADLESS_MODE") == "1" - if is_headless: - print(ctx.get_help()) - sys.exit(0) - else: - ctx.invoke(tray) - - -@main.command() -@click.option("-d", "--dev", is_flag=True, help="Settings in Dev mode") -def settings(dev): - """Show Pype Settings UI.""" - - if AYON_SERVER_ENABLED: - raise RuntimeError("AYON does not support 'settings' command.") - PypeCommands().launch_settings_gui(dev) - - -@main.command() -def tray(): - """Launch pype tray. - - Default action of pype command is to launch tray widget to control basic - aspects of pype. See documentation for more information. - """ - PypeCommands().launch_tray() - - -@PypeCommands.add_modules -@main.group(help="Run command line arguments of OpenPype addons") -@click.pass_context -def module(ctx): - """Addon specific commands created dynamically. - - These commands are generated dynamically by currently loaded addons. 
- """ - pass - - -# Add 'addon' as alias for module -main.set_alias("module", "addon") - - -@main.command() -@click.option("--ftrack-url", envvar="FTRACK_SERVER", - help="Ftrack server url") -@click.option("--ftrack-user", envvar="FTRACK_API_USER", - help="Ftrack api user") -@click.option("--ftrack-api-key", envvar="FTRACK_API_KEY", - help="Ftrack api key") -@click.option("--legacy", is_flag=True, - help="run event server without mongo storing") -@click.option("--clockify-api-key", envvar="CLOCKIFY_API_KEY", - help="Clockify API key.") -@click.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE", - help="Clockify workspace") -def eventserver(ftrack_url, - ftrack_user, - ftrack_api_key, - legacy, - clockify_api_key, - clockify_workspace): - """Launch ftrack event server. - - This should be ideally used by system service (such us systemd or upstart - on linux and window service). - """ - - if AYON_SERVER_ENABLED: - raise RuntimeError("AYON does not support 'eventserver' command.") - PypeCommands().launch_eventservercli( - ftrack_url, - ftrack_user, - ftrack_api_key, - legacy, - clockify_api_key, - clockify_workspace - ) - - -@main.command() -@click.option("-h", "--host", help="Host", default=None) -@click.option("-p", "--port", help="Port", default=None) -@click.option("-e", "--executable", help="Executable") -@click.option("-u", "--upload_dir", help="Upload dir") -def webpublisherwebserver(executable, upload_dir, host=None, port=None): - """Starts webserver for communication with Webpublish FR via command line - - OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND - FTRACK_BOT_API_KEY provided with api key from Ftrack. - - Expect "pype.club" user created on Ftrack. - """ - - if AYON_SERVER_ENABLED: - raise RuntimeError( - "AYON does not support 'webpublisherwebserver' command." - ) - PypeCommands().launch_webpublisher_webservercli( - upload_dir=upload_dir, - executable=executable, - host=host, - port=port - ) - - -@main.command() -@click.argument("output_json_path") -@click.option("--project", help="Project name", default=None) -@click.option("--asset", help="Asset name", default=None) -@click.option("--task", help="Task name", default=None) -@click.option("--app", help="Application name", default=None) -@click.option( - "--envgroup", help="Environment group (e.g. \"farm\")", default=None -) -def extractenvironments(output_json_path, project, asset, task, app, envgroup): - """Extract environment variables for entered context to a json file. - - Entered output filepath will be created if does not exists. - - All context options must be passed otherwise only pype's global - environments will be extracted. - - Context options are "project", "asset", "task", "app" - """ - PypeCommands.extractenvironments( - output_json_path, project, asset, task, app, envgroup - ) - - -@main.command() -@click.argument("paths", nargs=-1) -@click.option("-t", "--targets", help="Targets module", default=None, - multiple=True) -@click.option("-g", "--gui", is_flag=True, - help="Show Publish UI", default=False) -def publish(paths, targets, gui): - """Start CLI publishing. - - Publish collects json from paths provided as an argument. - More than one path is allowed. 
- """ - - PypeCommands.publish(list(paths), targets, gui) - - -@main.command(context_settings={"ignore_unknown_options": True}) -def projectmanager(): - if AYON_SERVER_ENABLED: - raise RuntimeError("AYON does not support 'projectmanager' command.") - PypeCommands().launch_project_manager() - - -@main.command(context_settings={"ignore_unknown_options": True}) -def publish_report_viewer(): - from openpype.tools.publisher.publish_report_viewer import main - - sys.exit(main()) - - -@main.command() -@click.argument("output_path") -@click.option("--project", help="Define project context") -@click.option("--asset", help="Define asset in project (project must be set)") -@click.option( - "--strict", - is_flag=True, - help="Full context must be set otherwise dialog can't be closed." -) -def contextselection( - output_path, - project, - asset, - strict -): - """Show Qt dialog to select context. - - Context is project name, asset name and task name. The result is stored - into json file which path is passed in first argument. - """ - PypeCommands.contextselection( - output_path, - project, - asset, - strict - ) - - -@main.command( - context_settings=dict( - ignore_unknown_options=True, - allow_extra_args=True)) -@click.argument("script", required=True, type=click.Path(exists=True)) -def run(script): - """Run python script in Pype context.""" - import runpy - - if not script: - print("Error: missing path to script file.") - else: - - args = sys.argv - args.remove("run") - args.remove(script) - sys.argv = args - args_string = " ".join(args[1:]) - print(f"... running: {script} {args_string}") - runpy.run_path(script, run_name="__main__", ) - - -@main.command() -@click.argument("folder", nargs=-1) -@click.option("-m", - "--mark", - help="Run tests marked by", - default=None) -@click.option("-p", - "--pyargs", - help="Run tests from package", - default=None) -@click.option("-t", - "--test_data_folder", - help="Unzipped directory path of test file", - default=None) -@click.option("-s", - "--persist", - help="Persist test DB and published files after test end", - default=None) -@click.option("-a", - "--app_variant", - help="Provide specific app variant for test, empty for latest", - default=None) -@click.option("--app_group", - help="Provide specific app group for test, empty for default", - default=None) -@click.option("-t", - "--timeout", - help="Provide specific timeout value for test case", - default=None) -@click.option("-so", - "--setup_only", - help="Only create dbs, do not run tests", - default=None) -@click.option("--mongo_url", - help="MongoDB for testing.", - default=None) -@click.option("--dump_databases", - help="Dump all databases to data folder.", - default=None) -def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant, - timeout, setup_only, mongo_url, app_group, dump_databases): - """Run all automatic tests after proper initialization via start.py""" - PypeCommands().run_tests(folder, mark, pyargs, test_data_folder, - persist, app_variant, timeout, setup_only, - mongo_url, app_group, dump_databases) - - -@main.command(help="DEPRECATED - run sync server") -@click.pass_context -@click.option("-a", "--active_site", required=True, - help="Name of active site") -def syncserver(ctx, active_site): - """Run sync site server in background. - - Deprecated: - This command is deprecated and will be removed in future versions. - Use '~/openpype_console module sync_server syncservice' instead. - - Details: - Some Site Sync use cases need to expose site to another one. 
- For example, if the majority of artists work in the studio, they are - not using Site Sync at all; but if you want to expose assets published - to the 'studio' site via SFTP for only a couple of artists, some - background process must mark published assets to live on multiple - sites (they might be physically in the same location - a mounted - shared disk). - - The process mimics OP Tray with a specific 'active_site' name; all - configuration for this "dummy" user comes from Settings or Local - Settings (configured by starting OP Tray with the env - var OPENPYPE_LOCAL_ID set to 'active_site'). - """ - - if AYON_SERVER_ENABLED: - raise RuntimeError("AYON does not support 'syncserver' command.") - - from openpype.modules.sync_server.sync_server_module import ( - syncservice) - ctx.invoke(syncservice, active_site=active_site) - - -@main.command() -@click.argument("directory") -def repack_version(directory): - """Repack OpenPype version from directory. - - This command will re-create the zip file from the specified directory, - recalculating file checksums. It will try to use the version detected - in the directory name. - """ - if AYON_SERVER_ENABLED: - raise RuntimeError("AYON does not support 'repack-version' command.") - PypeCommands().repack_version(directory) - - -@main.command() -@click.option("--project", help="Project name") -@click.option( - "--dirpath", help="Directory where package is stored", default=None) -@click.option( - "--dbonly", help="Store only Database data", default=False, is_flag=True) -def pack_project(project, dirpath, dbonly): - """Create a package of a project with all files and a database dump.""" - - if AYON_SERVER_ENABLED: - raise RuntimeError("AYON does not support 'pack-project' command.") - PypeCommands().pack_project(project, dirpath, dbonly) - - -@main.command() -@click.option("--zipfile", help="Path to zip file") -@click.option( - "--root", help="Replace root which was stored in project", default=None -) -@click.option( - "--dbonly", help="Restore only Database data", default=False, is_flag=True) -def unpack_project(zipfile, root, dbonly): - """Unpack a project package, restoring project files and database dump.""" - if AYON_SERVER_ENABLED: - raise RuntimeError("AYON does not support 'unpack-project' command.") - PypeCommands().unpack_project(zipfile, root, dbonly) - - -@main.command() -def interactive(): - """Interactive (Python like) console. - - Helpful command, not only for development, for working directly with - the python interpreter. - - Warning: - Executable 'openpype_gui' on Windows won't work.
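# Editor's note: a hedged sketch of the pack/unpack roundtrip above, driven
# through the CLI; the executable name and paths are illustrative only.
import subprocess

subprocess.run(["openpype_console", "pack-project",
                "--project", "demo", "--dirpath", "/tmp/packages"],
               check=True)
subprocess.run(["openpype_console", "unpack-project",
                "--zipfile", "/tmp/packages/demo.zip"],
               check=True)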
- """ - - if AYON_SERVER_ENABLED: - version = os.environ["AYON_VERSION"] - banner = ( - f"AYON launcher {version}\nPython {sys.version} on {sys.platform}" - ) - else: - from openpype.version import __version__ - - banner = ( - f"OpenPype {__version__}\nPython {sys.version} on {sys.platform}" - ) - code.interact(banner) - - -@main.command() -@click.option("--build", help="Print only build version", - is_flag=True, default=False) -def version(build): - """Print OpenPype version.""" - if AYON_SERVER_ENABLED: - print(os.environ["AYON_VERSION"]) - return - - from openpype.version import __version__ - from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion - from pathlib import Path - - if getattr(sys, 'frozen', False): - local_version = BootstrapRepos.get_version( - Path(os.getenv("OPENPYPE_ROOT"))) - else: - local_version = OpenPypeVersion.get_installed_version_str() - - if build: - print(local_version) - return - print(f"{__version__} (booted: {local_version})") diff --git a/openpype/client/__init__.py b/openpype/client/__init__.py deleted file mode 100644 index ba36d940e3..0000000000 --- a/openpype/client/__init__.py +++ /dev/null @@ -1,115 +0,0 @@ -from .mongo import ( - OpenPypeMongoConnection, -) -from .server.utils import get_ayon_server_api_connection - -from .entities import ( - get_projects, - get_project, - get_whole_project, - - get_asset_by_id, - get_asset_by_name, - get_assets, - get_archived_assets, - get_asset_ids_with_subsets, - - get_subset_by_id, - get_subset_by_name, - get_subsets, - get_subset_families, - - get_version_by_id, - get_version_by_name, - get_versions, - get_hero_version_by_id, - get_hero_version_by_subset_id, - get_hero_versions, - get_last_versions, - get_last_version_by_subset_id, - get_last_version_by_subset_name, - get_output_link_versions, - - version_is_latest, - - get_representation_by_id, - get_representation_by_name, - get_representations, - get_representation_parents, - get_representations_parents, - get_archived_representations, - - get_thumbnail, - get_thumbnails, - get_thumbnail_id_from_source, - - get_workfile_info, - - get_asset_name_identifier, -) - -from .entity_links import ( - get_linked_asset_ids, - get_linked_assets, - get_linked_representation_id, -) - -from .operations import ( - create_project, -) - - -__all__ = ( - "OpenPypeMongoConnection", - - "get_ayon_server_api_connection", - - "get_projects", - "get_project", - "get_whole_project", - - "get_asset_by_id", - "get_asset_by_name", - "get_assets", - "get_archived_assets", - "get_asset_ids_with_subsets", - - "get_subset_by_id", - "get_subset_by_name", - "get_subsets", - "get_subset_families", - - "get_version_by_id", - "get_version_by_name", - "get_versions", - "get_hero_version_by_id", - "get_hero_version_by_subset_id", - "get_hero_versions", - "get_last_versions", - "get_last_version_by_subset_id", - "get_last_version_by_subset_name", - "get_output_link_versions", - - "version_is_latest", - - "get_representation_by_id", - "get_representation_by_name", - "get_representations", - "get_representation_parents", - "get_representations_parents", - "get_archived_representations", - - "get_thumbnail", - "get_thumbnails", - "get_thumbnail_id_from_source", - - "get_workfile_info", - - "get_linked_asset_ids", - "get_linked_assets", - "get_linked_representation_id", - - "create_project", - - "get_asset_name_identifier", -) diff --git a/openpype/client/entities.py b/openpype/client/entities.py deleted file mode 100644 index cbaa943743..0000000000 --- a/openpype/client/entities.py +++ 
/dev/null @@ -1,25 +0,0 @@ -from openpype import AYON_SERVER_ENABLED - -if not AYON_SERVER_ENABLED: - from .mongo.entities import * -else: - from .server.entities import * - - -def get_asset_name_identifier(asset_doc): - """Get asset name identifier by asset document. - - This function is added because of AYON implementation where name - identifier is not just a name but full path. - - Asset document must have "name" key, and "data.parents" when in AYON mode. - - Args: - asset_doc (dict[str, Any]): Asset document. - """ - - if not AYON_SERVER_ENABLED: - return asset_doc["name"] - parents = list(asset_doc["data"]["parents"]) - parents.append(asset_doc["name"]) - return "/" + "/".join(parents) diff --git a/openpype/client/entity_links.py b/openpype/client/entity_links.py deleted file mode 100644 index e18970de90..0000000000 --- a/openpype/client/entity_links.py +++ /dev/null @@ -1,6 +0,0 @@ -from openpype import AYON_SERVER_ENABLED - -if not AYON_SERVER_ENABLED: - from .mongo.entity_links import * -else: - from .server.entity_links import * diff --git a/openpype/client/mongo/__init__.py b/openpype/client/mongo/__init__.py deleted file mode 100644 index 9f62d7a9cf..0000000000 --- a/openpype/client/mongo/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -from .mongo import ( - MongoEnvNotSet, - get_default_components, - should_add_certificate_path_to_mongo_url, - validate_mongo_connection, - OpenPypeMongoConnection, - get_project_database, - get_project_connection, - load_json_file, - replace_project_documents, - store_project_documents, -) - - -__all__ = ( - "MongoEnvNotSet", - "get_default_components", - "should_add_certificate_path_to_mongo_url", - "validate_mongo_connection", - "OpenPypeMongoConnection", - "get_project_database", - "get_project_connection", - "load_json_file", - "replace_project_documents", - "store_project_documents", -) diff --git a/openpype/client/mongo/entities.py b/openpype/client/mongo/entities.py deleted file mode 100644 index 260fde4594..0000000000 --- a/openpype/client/mongo/entities.py +++ /dev/null @@ -1,1555 +0,0 @@ -"""Unclear if these will have public functions like these. - -Goal is that most of functions here are called on (or with) an object -that has project name as a context (e.g. on 'ProjectEntity'?). - -+ We will need more specific functions doing very specific queries really fast. -""" - -import re -import collections - -import six -from bson.objectid import ObjectId - -from .mongo import get_project_database, get_project_connection - -PatternType = type(re.compile("")) - - -def _prepare_fields(fields, required_fields=None): - if not fields: - return None - - output = { - field: True - for field in fields - } - if "_id" not in output: - output["_id"] = True - - if required_fields: - for key in required_fields: - output[key] = True - return output - - -def convert_id(in_id): - """Helper function for conversion of id from string to ObjectId. - - Args: - in_id (Union[str, ObjectId, Any]): Entity id that should be converted - to right type for queries. - - Returns: - Union[ObjectId, Any]: Converted ids to ObjectId or in type. - """ - - if isinstance(in_id, six.string_types): - return ObjectId(in_id) - return in_id - - -def convert_ids(in_ids): - """Helper function for conversion of ids from string to ObjectId. - - Args: - in_ids (Iterable[Union[str, ObjectId, Any]]): List of entity ids that - should be converted to right type for queries. - - Returns: - List[ObjectId]: Converted ids to ObjectId. 
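# Editor's note: behavior sketch for convert_id above.
from bson.objectid import ObjectId

doc_id = "5f4e7a9b8c6d5e4f3a2b1c0d"            # hypothetical 24-char hex id
assert convert_id(None) is None                 # non-strings pass through
assert convert_id(doc_id) == ObjectId(doc_id)   # strings are converted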
- """ - - _output = set() - for in_id in in_ids: - if in_id is not None: - _output.add(convert_id(in_id)) - return list(_output) - - -def get_projects(active=True, inactive=False, fields=None): - """Yield all project entity documents. - - Args: - active (Optional[bool]): Include active projects. Defaults to True. - inactive (Optional[bool]): Include inactive projects. - Defaults to False. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Yields: - dict: Project entity data which can be reduced to specified 'fields'. - None is returned if project with specified filters was not found. - """ - mongodb = get_project_database() - for project_name in mongodb.collection_names(): - if project_name in ("system.indexes",): - continue - project_doc = get_project( - project_name, active=active, inactive=inactive, fields=fields - ) - if project_doc is not None: - yield project_doc - - -def get_project(project_name, active=True, inactive=True, fields=None): - """Return project entity document by project name. - - Args: - project_name (str): Name of project. - active (Optional[bool]): Allow active project. Defaults to True. - inactive (Optional[bool]): Allow inactive project. Defaults to True. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Project entity data which can be reduced to - specified 'fields'. None is returned if project with specified - filters was not found. - """ - # Skip if both are disabled - if not active and not inactive: - return None - - query_filter = {"type": "project"} - # Keep query untouched if both should be available - if active and inactive: - pass - - # Add filter to keep only active - elif active: - query_filter["$or"] = [ - {"data.active": {"$exists": False}}, - {"data.active": True}, - ] - - # Add filter to keep only inactive - elif inactive: - query_filter["$or"] = [ - {"data.active": {"$exists": False}}, - {"data.active": False}, - ] - - conn = get_project_connection(project_name) - return conn.find_one(query_filter, _prepare_fields(fields)) - - -def get_whole_project(project_name): - """Receive all documents from project. - - Helper that can be used to get all document from whole project. For example - for backups etc. - - Returns: - Cursor: Query cursor as iterable which returns all documents from - project collection. - """ - - conn = get_project_connection(project_name) - return conn.find({}) - - -def get_asset_by_id(project_name, asset_id, fields=None): - """Receive asset data by its id. - - Args: - project_name (str): Name of project where to look for queried entities. - asset_id (Union[str, ObjectId]): Asset's id. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Asset entity data which can be reduced to - specified 'fields'. None is returned if asset with specified - filters was not found. - """ - - asset_id = convert_id(asset_id) - if not asset_id: - return None - - query_filter = {"type": "asset", "_id": asset_id} - conn = get_project_connection(project_name) - return conn.find_one(query_filter, _prepare_fields(fields)) - - -def get_asset_by_name(project_name, asset_name, fields=None): - """Receive asset data by its name. - - Args: - project_name (str): Name of project where to look for queried entities. - asset_name (str): Asset's name. 
- fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Asset entity data which can be reduced to - specified 'fields'. None is returned if asset with specified - filters was not found. - """ - - if not asset_name: - return None - - query_filter = {"type": "asset", "name": asset_name} - conn = get_project_connection(project_name) - return conn.find_one(query_filter, _prepare_fields(fields)) - - -# NOTE this could be just public function? -# - any better variable name instead of 'standard'? -# - same approach can be used for rest of types -def _get_assets( - project_name, - asset_ids=None, - asset_names=None, - parent_ids=None, - standard=True, - archived=False, - fields=None -): - """Assets for specified project by passed filters. - - Passed filters (ids and names) are always combined so all conditions must - match. - - To receive all assets from project just keep filters empty. - - Args: - project_name (str): Name of project where to look for queried entities. - asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should - be found. - asset_names (Iterable[str]): Name assets that should be found. - parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. - standard (bool): Query standard assets (type 'asset'). - archived (bool): Query archived assets (type 'archived_asset'). - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Cursor: Query cursor as iterable which returns asset documents matching - passed filters. - """ - - asset_types = [] - if standard: - asset_types.append("asset") - if archived: - asset_types.append("archived_asset") - - if not asset_types: - return [] - - if len(asset_types) == 1: - query_filter = {"type": asset_types[0]} - else: - query_filter = {"type": {"$in": asset_types}} - - if asset_ids is not None: - asset_ids = convert_ids(asset_ids) - if not asset_ids: - return [] - query_filter["_id"] = {"$in": asset_ids} - - if asset_names is not None: - if not asset_names: - return [] - query_filter["name"] = {"$in": list(asset_names)} - - if parent_ids is not None: - parent_ids = convert_ids(parent_ids) - if not parent_ids: - return [] - query_filter["data.visualParent"] = {"$in": parent_ids} - - conn = get_project_connection(project_name) - - return conn.find(query_filter, _prepare_fields(fields)) - - -def get_assets( - project_name, - asset_ids=None, - asset_names=None, - parent_ids=None, - archived=False, - fields=None -): - """Assets for specified project by passed filters. - - Passed filters (ids and names) are always combined so all conditions must - match. - - To receive all assets from project just keep filters empty. - - Args: - project_name (str): Name of project where to look for queried entities. - asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should - be found. - asset_names (Iterable[str]): Name assets that should be found. - parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. - archived (bool): Add also archived assets. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Cursor: Query cursor as iterable which returns asset documents matching - passed filters. 
- """ - - return _get_assets( - project_name, - asset_ids, - asset_names, - parent_ids, - True, - archived, - fields - ) - - -def get_archived_assets( - project_name, - asset_ids=None, - asset_names=None, - parent_ids=None, - fields=None -): - """Archived assets for specified project by passed filters. - - Passed filters (ids and names) are always combined so all conditions must - match. - - To receive all archived assets from project just keep filters empty. - - Args: - project_name (str): Name of project where to look for queried entities. - asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should - be found. - asset_names (Iterable[str]): Name assets that should be found. - parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Cursor: Query cursor as iterable which returns asset documents matching - passed filters. - """ - - return _get_assets( - project_name, asset_ids, asset_names, parent_ids, False, True, fields - ) - - -def get_asset_ids_with_subsets(project_name, asset_ids=None): - """Find out which assets have existing subsets. - - Args: - project_name (str): Name of project where to look for queried entities. - asset_ids (Iterable[Union[str, ObjectId]]): Look only for entered - asset ids. - - Returns: - Iterable[ObjectId]: Asset ids that have existing subsets. - """ - - subset_query = { - "type": "subset" - } - if asset_ids is not None: - asset_ids = convert_ids(asset_ids) - if not asset_ids: - return [] - subset_query["parent"] = {"$in": asset_ids} - - conn = get_project_connection(project_name) - result = conn.aggregate([ - { - "$match": subset_query - }, - { - "$group": { - "_id": "$parent", - "count": {"$sum": 1} - } - } - ]) - asset_ids_with_subsets = [] - for item in result: - asset_id = item["_id"] - count = item["count"] - if count > 0: - asset_ids_with_subsets.append(asset_id) - return asset_ids_with_subsets - - -def get_subset_by_id(project_name, subset_id, fields=None): - """Single subset entity data by its id. - - Args: - project_name (str): Name of project where to look for queried entities. - subset_id (Union[str, ObjectId]): Id of subset which should be found. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Subset entity data which can be reduced to - specified 'fields'. None is returned if subset with specified - filters was not found. - """ - - subset_id = convert_id(subset_id) - if not subset_id: - return None - - query_filters = {"type": "subset", "_id": subset_id} - conn = get_project_connection(project_name) - return conn.find_one(query_filters, _prepare_fields(fields)) - - -def get_subset_by_name(project_name, subset_name, asset_id, fields=None): - """Single subset entity data by its name and its version id. - - Args: - project_name (str): Name of project where to look for queried entities. - subset_name (str): Name of subset. - asset_id (Union[str, ObjectId]): Id of parent asset. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Subset entity data which can be reduced to - specified 'fields'. None is returned if subset with specified - filters was not found. 
- """ - if not subset_name: - return None - - asset_id = convert_id(asset_id) - if not asset_id: - return None - - query_filters = { - "type": "subset", - "name": subset_name, - "parent": asset_id - } - conn = get_project_connection(project_name) - return conn.find_one(query_filters, _prepare_fields(fields)) - - -def get_subsets( - project_name, - subset_ids=None, - subset_names=None, - asset_ids=None, - names_by_asset_ids=None, - archived=False, - fields=None -): - """Subset entities data from one project filtered by entered filters. - - Filters are additive (all conditions must pass to return subset). - - Args: - project_name (str): Name of project where to look for queried entities. - subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should be - queried. Filter ignored if 'None' is passed. - subset_names (Iterable[str]): Subset names that should be queried. - Filter ignored if 'None' is passed. - asset_ids (Iterable[Union[str, ObjectId]]): Asset ids under which - should look for the subsets. Filter ignored if 'None' is passed. - names_by_asset_ids (dict[ObjectId, List[str]]): Complex filtering - using asset ids and list of subset names under the asset. - archived (bool): Look for archived subsets too. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Cursor: Iterable cursor yielding all matching subsets. - """ - - subset_types = ["subset"] - if archived: - subset_types.append("archived_subset") - - if len(subset_types) == 1: - query_filter = {"type": subset_types[0]} - else: - query_filter = {"type": {"$in": subset_types}} - - if asset_ids is not None: - asset_ids = convert_ids(asset_ids) - if not asset_ids: - return [] - query_filter["parent"] = {"$in": asset_ids} - - if subset_ids is not None: - subset_ids = convert_ids(subset_ids) - if not subset_ids: - return [] - query_filter["_id"] = {"$in": subset_ids} - - if subset_names is not None: - if not subset_names: - return [] - query_filter["name"] = {"$in": list(subset_names)} - - if names_by_asset_ids is not None: - or_query = [] - for asset_id, names in names_by_asset_ids.items(): - if asset_id and names: - or_query.append({ - "parent": convert_id(asset_id), - "name": {"$in": list(names)} - }) - if not or_query: - return [] - query_filter["$or"] = or_query - - conn = get_project_connection(project_name) - return conn.find(query_filter, _prepare_fields(fields)) - - -def get_subset_families(project_name, subset_ids=None): - """Set of main families of subsets. - - Args: - project_name (str): Name of project where to look for queried entities. - subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should - be queried. All subsets from project are used if 'None' is passed. - - Returns: - set[str]: Main families of matching subsets. - """ - - subset_filter = { - "type": "subset" - } - if subset_ids is not None: - if not subset_ids: - return set() - subset_filter["_id"] = {"$in": list(subset_ids)} - - conn = get_project_connection(project_name) - result = list(conn.aggregate([ - {"$match": subset_filter}, - {"$project": { - "family": {"$arrayElemAt": ["$data.families", 0]} - }}, - {"$group": { - "_id": "family_group", - "families": {"$addToSet": "$family"} - }} - ])) - if result: - return set(result[0]["families"]) - return set() - - -def get_version_by_id(project_name, version_id, fields=None): - """Single version entity data by its id. - - Args: - project_name (str): Name of project where to look for queried entities. 
- version_id (Union[str, ObjectId]): Id of version which should be found. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Version entity data which can be reduced to - specified 'fields'. None is returned if version with specified - filters was not found. - """ - - version_id = convert_id(version_id) - if not version_id: - return None - - query_filter = { - "type": {"$in": ["version", "hero_version"]}, - "_id": version_id - } - conn = get_project_connection(project_name) - return conn.find_one(query_filter, _prepare_fields(fields)) - - -def get_version_by_name(project_name, version, subset_id, fields=None): - """Single version entity data by its name and subset id. - - Args: - project_name (str): Name of project where to look for queried entities. - version (int): name of version entity (its version). - subset_id (Union[str, ObjectId]): Id of version which should be found. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Version entity data which can be reduced to - specified 'fields'. None is returned if version with specified - filters was not found. - """ - - subset_id = convert_id(subset_id) - if not subset_id: - return None - - conn = get_project_connection(project_name) - query_filter = { - "type": "version", - "parent": subset_id, - "name": version - } - return conn.find_one(query_filter, _prepare_fields(fields)) - - -def version_is_latest(project_name, version_id): - """Is version the latest from its subset. - - Note: - Hero versions are considered as latest. - - Todo: - Maybe raise exception when version was not found? - - Args: - project_name (str):Name of project where to look for queried entities. - version_id (Union[str, ObjectId]): Version id which is checked. - - Returns: - bool: True if is latest version from subset else False. - """ - - version_id = convert_id(version_id) - if not version_id: - return False - version_doc = get_version_by_id( - project_name, version_id, fields=["_id", "type", "parent"] - ) - # What to do when version is not found? 
- if not version_doc: - return False - - if version_doc["type"] == "hero_version": - return True - - last_version = get_last_version_by_subset_id( - project_name, version_doc["parent"], fields=["_id"] - ) - return last_version["_id"] == version_id - - -def _get_versions( - project_name, - subset_ids=None, - version_ids=None, - versions=None, - standard=True, - hero=False, - fields=None -): - version_types = [] - if standard: - version_types.append("version") - - if hero: - version_types.append("hero_version") - - if not version_types: - return [] - elif len(version_types) == 1: - query_filter = {"type": version_types[0]} - else: - query_filter = {"type": {"$in": version_types}} - - if subset_ids is not None: - subset_ids = convert_ids(subset_ids) - if not subset_ids: - return [] - query_filter["parent"] = {"$in": subset_ids} - - if version_ids is not None: - version_ids = convert_ids(version_ids) - if not version_ids: - return [] - query_filter["_id"] = {"$in": version_ids} - - if versions is not None: - versions = list(versions) - if not versions: - return [] - - if len(versions) == 1: - query_filter["name"] = versions[0] - else: - query_filter["name"] = {"$in": versions} - - conn = get_project_connection(project_name) - - return conn.find(query_filter, _prepare_fields(fields)) - - -def get_versions( - project_name, - version_ids=None, - subset_ids=None, - versions=None, - hero=False, - fields=None -): - """Version entities data from one project filtered by entered filters. - - Filters are additive (all conditions must pass to return subset). - - Args: - project_name (str): Name of project where to look for queried entities. - version_ids (Iterable[Union[str, ObjectId]]): Version ids that will - be queried. Filter ignored if 'None' is passed. - subset_ids (Iterable[str]): Subset ids that will be queried. - Filter ignored if 'None' is passed. - versions (Iterable[int]): Version names (as integers). - Filter ignored if 'None' is passed. - hero (bool): Look also for hero versions. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Cursor: Iterable cursor yielding all matching versions. - """ - - return _get_versions( - project_name, - subset_ids, - version_ids, - versions, - standard=True, - hero=hero, - fields=fields - ) - - -def get_hero_version_by_subset_id(project_name, subset_id, fields=None): - """Hero version by subset id. - - Args: - project_name (str): Name of project where to look for queried entities. - subset_id (Union[str, ObjectId]): Subset id under which - is hero version. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Hero version entity data which can be reduced to - specified 'fields'. None is returned if hero version with specified - filters was not found. - """ - - subset_id = convert_id(subset_id) - if not subset_id: - return None - - versions = list(_get_versions( - project_name, - subset_ids=[subset_id], - standard=False, - hero=True, - fields=fields - )) - if versions: - return versions[0] - return None - - -def get_hero_version_by_id(project_name, version_id, fields=None): - """Hero version by its id. - - Args: - project_name (str): Name of project where to look for queried entities. - version_id (Union[str, ObjectId]): Hero version id. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. 
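# Editor's note: a common lookup pattern built from helpers in this module
# (get_last_version_by_subset_id is defined further below); prefer the hero
# version and fall back to the last regular one. Names are hypothetical.
subset_id = "5f4e7a9b8c6d5e4f3a2b1c0d"
version_doc = get_hero_version_by_subset_id("demo_project", subset_id)
if version_doc is None:
    version_doc = get_last_version_by_subset_id("demo_project", subset_id)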
- - Returns: - Union[Dict, None]: Hero version entity data which can be reduced to - specified 'fields'. None is returned if hero version with specified - filters was not found. - """ - - version_id = convert_id(version_id) - if not version_id: - return None - - versions = list(_get_versions( - project_name, - version_ids=[version_id], - standard=False, - hero=True, - fields=fields - )) - if versions: - return versions[0] - return None - - -def get_hero_versions( - project_name, - subset_ids=None, - version_ids=None, - fields=None -): - """Hero version entities data from one project filtered by entered filters. - - Args: - project_name (str): Name of project where to look for queried entities. - subset_ids (Iterable[Union[str, ObjectId]]): Subset ids for which - should look for hero versions. Filter ignored if 'None' is passed. - version_ids (Iterable[Union[str, ObjectId]]): Hero version ids. Filter - ignored if 'None' is passed. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Cursor|list: Iterable yielding hero versions matching passed filters. - """ - - return _get_versions( - project_name, - subset_ids, - version_ids, - standard=False, - hero=True, - fields=fields - ) - - -def get_output_link_versions(project_name, version_id, fields=None): - """Versions where passed version was used as input. - - Question: - Not 100% sure about the usage of the function so the name and docstring - maybe does not match what it does? - - Args: - project_name (str): Name of project where to look for queried entities. - version_id (Union[str, ObjectId]): Version id which can be used - as input link for other versions. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Iterable: Iterable cursor yielding versions that are used as input - links for passed version. - """ - - version_id = convert_id(version_id) - if not version_id: - return [] - - conn = get_project_connection(project_name) - # Does make sense to look for hero versions? - query_filter = { - "type": "version", - "data.inputLinks.id": version_id - } - return conn.find(query_filter, _prepare_fields(fields)) - - -def get_last_versions(project_name, subset_ids, active=None, fields=None): - """Latest versions for entered subset_ids. - - Args: - project_name (str): Name of project where to look for queried entities. - subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids. - active (Optional[bool]): If True only active versions are returned. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - dict[ObjectId, int]: Key is subset id and value is last version name. 
-
-
-def get_last_versions(project_name, subset_ids, active=None, fields=None):
-    """Latest versions for entered subset_ids.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids.
-        active (Optional[bool]): If True only active versions are returned.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        dict[ObjectId, dict[str, Any]]: Key is subset id and value is the
-            last version document of that subset.
-    """
-
-    subset_ids = convert_ids(subset_ids)
-    if not subset_ids:
-        return {}
-
-    if fields is not None:
-        fields = list(fields)
-        if not fields:
-            return {}
-
-    # Avoid double query if only name and _id are requested
-    name_needed = False
-    limit_query = False
-    if fields:
-        fields_s = set(fields)
-        if "name" in fields_s:
-            name_needed = True
-            fields_s.remove("name")
-
-        for field in ("_id", "parent"):
-            if field in fields_s:
-                fields_s.remove(field)
-        limit_query = len(fields_s) == 0
-
-    group_item = {
-        "_id": "$parent",
-        "_version_id": {"$last": "$_id"}
-    }
-    # Add name if name is needed (only for limit query)
-    if name_needed:
-        group_item["name"] = {"$last": "$name"}
-
-    aggregate_filter = {
-        "type": "version",
-        "parent": {"$in": subset_ids}
-    }
-    if active is False:
-        aggregate_filter["data.active"] = active
-    elif active is True:
-        aggregate_filter["$or"] = [
-            {"data.active": {"$exists": 0}},
-            {"data.active": active},
-        ]
-
-    aggregation_pipeline = [
-        # Find all versions of those subsets
-        {"$match": aggregate_filter},
-        # Sort versions by name (version number)
-        {"$sort": {"name": 1}},
-        # Group them by "parent", but only take the last
-        {"$group": group_item}
-    ]
-
-    conn = get_project_connection(project_name)
-    aggregate_result = conn.aggregate(aggregation_pipeline)
-    if limit_query:
-        output = {}
-        for item in aggregate_result:
-            subset_id = item["_id"]
-            item_data = {"_id": item["_version_id"], "parent": subset_id}
-            if name_needed:
-                item_data["name"] = item["name"]
-            output[subset_id] = item_data
-        return output
-
-    version_ids = [
-        doc["_version_id"]
-        for doc in aggregate_result
-    ]
-
-    fields = _prepare_fields(fields, ["parent"])
-
-    version_docs = get_versions(
-        project_name, version_ids=version_ids, fields=fields
-    )
-
-    return {
-        version_doc["parent"]: version_doc
-        for version_doc in version_docs
-    }
-
-
-def get_last_version_by_subset_id(project_name, subset_id, fields=None):
-    """Last version for passed subset id.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        subset_id (Union[str, ObjectId]): Id of subset for which the last
-            version should be found.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        Union[Dict, None]: Version entity data which can be reduced to
-            specified 'fields'. None is returned if version with specified
-            filters was not found.
-    """
-
-    subset_id = convert_id(subset_id)
-    if not subset_id:
-        return None
-
-    last_versions = get_last_versions(
-        project_name, subset_ids=[subset_id], fields=fields
-    )
-    return last_versions.get(subset_id)
-
-
-def get_last_version_by_subset_name(
-    project_name, subset_name, asset_id=None, asset_name=None, fields=None
-):
-    """Last version for passed subset name under asset id/name.
-
-    It is required to pass 'asset_id' or 'asset_name'. Asset id is recommended
-    if it is available.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        subset_name (str): Name of subset.
-        asset_id (Union[str, ObjectId]): Asset id which is parent of passed
-            subset name.
-        asset_name (str): Asset name which is parent of passed subset name.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        Union[Dict, None]: Version entity data which can be reduced to
-            specified 'fields'. None is returned if version with specified
-            filters was not found.
- """ - - if not asset_id and not asset_name: - return None - - if not asset_id: - asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) - if not asset_doc: - return None - asset_id = asset_doc["_id"] - subset_doc = get_subset_by_name( - project_name, subset_name, asset_id, fields=["_id"] - ) - if not subset_doc: - return None - return get_last_version_by_subset_id( - project_name, subset_doc["_id"], fields=fields - ) - - -def get_representation_by_id(project_name, representation_id, fields=None): - """Representation entity data by its id. - - Args: - project_name (str): Name of project where to look for queried entities. - representation_id (Union[str, ObjectId]): Representation id. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[Dict, None]: Representation entity data which can be reduced to - specified 'fields'. None is returned if representation with - specified filters was not found. - """ - - if not representation_id: - return None - - repre_types = ["representation", "archived_representation"] - query_filter = { - "type": {"$in": repre_types} - } - if representation_id is not None: - query_filter["_id"] = convert_id(representation_id) - - conn = get_project_connection(project_name) - - return conn.find_one(query_filter, _prepare_fields(fields)) - - -def get_representation_by_name( - project_name, representation_name, version_id, fields=None -): - """Representation entity data by its name and its version id. - - Args: - project_name (str): Name of project where to look for queried entities. - representation_name (str): Representation name. - version_id (Union[str, ObjectId]): Id of parent version entity. - fields (Optional[Iterable[str]]): Fields that should be returned. All - fields are returned if 'None' is passed. - - Returns: - Union[dict[str, Any], None]: Representation entity data which can be - reduced to specified 'fields'. None is returned if representation - with specified filters was not found. 
-    """
-
-    version_id = convert_id(version_id)
-    if not version_id or not representation_name:
-        return None
-    repre_types = ["representation", "archived_representation"]
-    query_filter = {
-        "type": {"$in": repre_types},
-        "name": representation_name,
-        "parent": version_id
-    }
-
-    conn = get_project_connection(project_name)
-    return conn.find_one(query_filter, _prepare_fields(fields))
-
-
-def _flatten_dict(data):
-    flatten_queue = collections.deque()
-    flatten_queue.append(data)
-    output = {}
-    while flatten_queue:
-        item = flatten_queue.popleft()
-        for key, value in item.items():
-            if not isinstance(value, dict):
-                output[key] = value
-                continue
-
-            tmp = {}
-            for subkey, subvalue in value.items():
-                new_key = "{}.{}".format(key, subkey)
-                tmp[new_key] = subvalue
-            flatten_queue.append(tmp)
-    return output
-
-
-def _regex_filters(filters):
-    output = []
-    for key, value in filters.items():
-        regexes = []
-        a_values = []
-        if isinstance(value, PatternType):
-            regexes.append(value)
-        elif isinstance(value, (list, tuple, set)):
-            for item in value:
-                if isinstance(item, PatternType):
-                    regexes.append(item)
-                else:
-                    a_values.append(item)
-        else:
-            a_values.append(value)
-
-        key_filters = []
-        if len(a_values) == 1:
-            key_filters.append({key: a_values[0]})
-        elif a_values:
-            key_filters.append({key: {"$in": a_values}})
-
-        for regex in regexes:
-            key_filters.append({key: {"$regex": regex}})
-
-        if len(key_filters) == 1:
-            output.append(key_filters[0])
-        else:
-            output.append({"$or": key_filters})
-
-    return output
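The two helpers above cooperate to turn a nested 'context_filters' value into Mongo sub-queries: nested keys are flattened to dotted keys, then each value becomes an equality, '$in' or '$regex' condition. A hypothetical illustration, using the functions defined above with made-up filter values, might look like this:

```python
# Hypothetical illustration of _flatten_dict + _regex_filters; the filter
# values are made up. Note that _get_representations below additionally
# prefixes keys with "context." before using the result.
import re

context_filters = {
    "asset": "hero",                         # exact match
    "representation": ["exr", "mov"],        # any of the listed values
    "task": {"name": re.compile("^anim")},   # regex on a nested key
}

flattened = _flatten_dict(context_filters)
# -> {"asset": "hero", "representation": [...], "task.name": <pattern>}

for sub_query in _regex_filters(flattened):
    print(sub_query)
# {"asset": "hero"}
# {"representation": {"$in": ["exr", "mov"]}}
# {"task.name": {"$regex": re.compile("^anim")}}
```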
-
-
-def _get_representations(
-    project_name,
-    representation_ids,
-    representation_names,
-    version_ids,
-    context_filters,
-    names_by_version_ids,
-    standard,
-    archived,
-    fields
-):
-    default_output = []
-    repre_types = []
-    if standard:
-        repre_types.append("representation")
-    if archived:
-        repre_types.append("archived_representation")
-
-    if not repre_types:
-        return default_output
-
-    if len(repre_types) == 1:
-        query_filter = {"type": repre_types[0]}
-    else:
-        query_filter = {"type": {"$in": repre_types}}
-
-    if representation_ids is not None:
-        representation_ids = convert_ids(representation_ids)
-        if not representation_ids:
-            return default_output
-        query_filter["_id"] = {"$in": representation_ids}
-
-    if representation_names is not None:
-        if not representation_names:
-            return default_output
-        query_filter["name"] = {"$in": list(representation_names)}
-
-    if version_ids is not None:
-        version_ids = convert_ids(version_ids)
-        if not version_ids:
-            return default_output
-        query_filter["parent"] = {"$in": version_ids}
-
-    or_queries = []
-    if names_by_version_ids is not None:
-        or_query = []
-        for version_id, names in names_by_version_ids.items():
-            if version_id and names:
-                or_query.append({
-                    "parent": convert_id(version_id),
-                    "name": {"$in": list(names)}
-                })
-        if not or_query:
-            return default_output
-        or_queries.append(or_query)
-
-    if context_filters is not None:
-        if not context_filters:
-            return []
-        _flatten_filters = _flatten_dict(context_filters)
-        flatten_filters = {}
-        for key, value in _flatten_filters.items():
-            if not key.startswith("context"):
-                key = "context.{}".format(key)
-            flatten_filters[key] = value
-
-        for item in _regex_filters(flatten_filters):
-            for key, value in item.items():
-                if key != "$or":
-                    query_filter[key] = value
-                elif value:
-                    or_queries.append(value)
-
-    if len(or_queries) == 1:
-        query_filter["$or"] = or_queries[0]
-    elif or_queries:
-        and_query = []
-        for or_query in or_queries:
-            if isinstance(or_query, list):
-                or_query = {"$or": or_query}
-            and_query.append(or_query)
-        query_filter["$and"] = and_query
-
-    conn = get_project_connection(project_name)
-
-    return conn.find(query_filter, _prepare_fields(fields))
-
-
-def get_representations(
-    project_name,
-    representation_ids=None,
-    representation_names=None,
-    version_ids=None,
-    context_filters=None,
-    names_by_version_ids=None,
-    archived=False,
-    standard=True,
-    fields=None
-):
-    """Representation entities data from one project filtered by filters.
-
-    Filters are additive (all conditions must pass to return a representation).
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        representation_ids (Iterable[Union[str, ObjectId]]): Representation ids
-            used as filter. Filter ignored if 'None' is passed.
-        representation_names (Iterable[str]): Representations names used
-            as filter. Filter ignored if 'None' is passed.
-        version_ids (Iterable[str]): Version ids used as parent filter. Filter
-            ignored if 'None' is passed.
-        context_filters (Dict[str, List[str, PatternType]]): Filter by
-            representation context fields.
-        names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering
-            using version ids and list of names under the version.
-        archived (bool): Output will also contain archived representations.
-        standard (bool): Output will contain standard representations.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        Cursor: Iterable cursor yielding all matching representations.
-    """
-
-    return _get_representations(
-        project_name=project_name,
-        representation_ids=representation_ids,
-        representation_names=representation_names,
-        version_ids=version_ids,
-        context_filters=context_filters,
-        names_by_version_ids=names_by_version_ids,
-        standard=standard,
-        archived=archived,
-        fields=fields
-    )
-
-
-def get_archived_representations(
-    project_name,
-    representation_ids=None,
-    representation_names=None,
-    version_ids=None,
-    context_filters=None,
-    names_by_version_ids=None,
-    fields=None
-):
-    """Archived representation entities data from project with applied filters.
-
-    Filters are additive (all conditions must pass to return a representation).
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        representation_ids (Iterable[Union[str, ObjectId]]): Representation ids
-            used as filter. Filter ignored if 'None' is passed.
-        representation_names (Iterable[str]): Representations names used
-            as filter. Filter ignored if 'None' is passed.
-        version_ids (Iterable[str]): Version ids used as parent filter. Filter
-            ignored if 'None' is passed.
-        context_filters (Dict[str, List[str, PatternType]]): Filter by
-            representation context fields.
-        names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering
-            using version ids and list of names under the version.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        Cursor: Iterable cursor yielding all matching representations.
-    """
-
-    return _get_representations(
-        project_name=project_name,
-        representation_ids=representation_ids,
-        representation_names=representation_names,
-        version_ids=version_ids,
-        context_filters=context_filters,
-        names_by_version_ids=names_by_version_ids,
-        standard=False,
-        archived=True,
-        fields=fields
-    )
-
-
-def get_representations_parents(project_name, representations):
-    """Prepare parents of representation entities.
-
-    Each item of returned dictionary contains version, subset, asset
-    and project in that order.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        representations (List[dict]): Representation entities with at least
-            '_id' and 'parent' keys.
-
-    Returns:
-        dict[ObjectId, tuple]: Parents by representation id.
-    """
-
-    repre_docs_by_version_id = collections.defaultdict(list)
-    version_docs_by_version_id = {}
-    version_docs_by_subset_id = collections.defaultdict(list)
-    subset_docs_by_subset_id = {}
-    subset_docs_by_asset_id = collections.defaultdict(list)
-    output = {}
-    for repre_doc in representations:
-        repre_id = repre_doc["_id"]
-        version_id = repre_doc["parent"]
-        output[repre_id] = (None, None, None, None)
-        repre_docs_by_version_id[version_id].append(repre_doc)
-
-    version_docs = get_versions(
-        project_name,
-        version_ids=repre_docs_by_version_id.keys(),
-        hero=True
-    )
-    for version_doc in version_docs:
-        version_id = version_doc["_id"]
-        subset_id = version_doc["parent"]
-        version_docs_by_version_id[version_id] = version_doc
-        version_docs_by_subset_id[subset_id].append(version_doc)
-
-    subset_docs = get_subsets(
-        project_name, subset_ids=version_docs_by_subset_id.keys()
-    )
-    for subset_doc in subset_docs:
-        subset_id = subset_doc["_id"]
-        asset_id = subset_doc["parent"]
-        subset_docs_by_subset_id[subset_id] = subset_doc
-        subset_docs_by_asset_id[asset_id].append(subset_doc)
-
-    asset_docs = get_assets(
-        project_name, asset_ids=subset_docs_by_asset_id.keys()
-    )
-    asset_docs_by_id = {
-        asset_doc["_id"]: asset_doc
-        for asset_doc in asset_docs
-    }
-
-    project_doc = get_project(project_name)
-
-    for version_id, repre_docs in repre_docs_by_version_id.items():
-        asset_doc = None
-        subset_doc = None
-        version_doc = version_docs_by_version_id.get(version_id)
-        if version_doc:
-            subset_id = version_doc["parent"]
-            subset_doc = subset_docs_by_subset_id.get(subset_id)
-            if subset_doc:
-                asset_id = subset_doc["parent"]
-                asset_doc = asset_docs_by_id.get(asset_id)
-
-        for repre_doc in repre_docs:
-            repre_id = repre_doc["_id"]
-            output[repre_id] = (
-                version_doc, subset_doc, asset_doc, project_doc
-            )
-    return output
-
-
-def get_representation_parents(project_name, representation):
-    """Prepare parents of representation entity.
-
-    Returned tuple contains version, subset, asset and project documents
-    in that order.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        representation (dict): Representation entity with at least
-            '_id' and 'parent' keys.
-
-    Returns:
-        Union[tuple, None]: Tuple of version, subset, asset and project
-            documents. None is returned if no representation was passed.
-    """
-
-    if not representation:
-        return None
-
-    repre_id = representation["_id"]
-    parents_by_repre_id = get_representations_parents(
-        project_name, [representation]
-    )
-    return parents_by_repre_id[repre_id]
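A short, hedged usage sketch of the parent-resolution helpers above may help; the project name and representation filter are placeholders, and the snippet assumes the query helpers from earlier in this module:

```python
# Hedged usage sketch: resolve the full parent chain for a set of
# representations. "my_project" and the representation name are made up.
repre_docs = list(get_representations(
    "my_project", representation_names=["exr"], fields=["_id", "parent"]
))

parents_by_repre_id = get_representations_parents("my_project", repre_docs)
for repre_doc in repre_docs:
    version_doc, subset_doc, asset_doc, project_doc = (
        parents_by_repre_id[repre_doc["_id"]]
    )
    # Any of the four items may be None if the chain is broken
    if asset_doc and version_doc:
        print(asset_doc["name"], version_doc["name"])
```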
-
-
-def get_thumbnail_id_from_source(project_name, src_type, src_id):
-    """Receive thumbnail id from source entity.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        src_type (str): Type of source entity ('asset', 'version').
-        src_id (Union[str, ObjectId]): Id of source entity.
-
-    Returns:
-        Union[ObjectId, None]: Thumbnail id assigned to entity. None is
-            returned if the source entity does not have any thumbnail id
-            assigned.
-    """
-
-    if not src_type or not src_id:
-        return None
-
-    query_filter = {"_id": convert_id(src_id)}
-
-    conn = get_project_connection(project_name)
-    src_doc = conn.find_one(query_filter, {"data.thumbnail_id"})
-    if src_doc:
-        return src_doc.get("data", {}).get("thumbnail_id")
-    return None
-
-
-def get_thumbnails(project_name, thumbnail_ids, fields=None):
-    """Receive thumbnail entities data.
-
-    Thumbnail entity can be used to receive binary content of thumbnail based
-    on its content and ThumbnailResolvers.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        thumbnail_ids (Iterable[Union[str, ObjectId]]): Ids of thumbnail
-            entities.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        cursor: Cursor of queried documents.
-    """
-
-    if thumbnail_ids:
-        thumbnail_ids = convert_ids(thumbnail_ids)
-
-    if not thumbnail_ids:
-        return []
-    query_filter = {
-        "type": "thumbnail",
-        "_id": {"$in": thumbnail_ids}
-    }
-    conn = get_project_connection(project_name)
-    return conn.find(query_filter, _prepare_fields(fields))
-
-
-def get_thumbnail(
-    project_name, thumbnail_id, entity_type, entity_id, fields=None
-):
-    """Receive thumbnail entity data.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity.
-        entity_type (str): Type of source entity ('asset', 'version').
-        entity_id (Union[str, ObjectId]): Id of source entity.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        Union[Dict, None]: Thumbnail entity data which can be reduced to
-            specified 'fields'. None is returned if thumbnail with specified
-            filters was not found.
-    """
-
-    if not thumbnail_id:
-        return None
-    query_filter = {"type": "thumbnail", "_id": convert_id(thumbnail_id)}
-    conn = get_project_connection(project_name)
-    return conn.find_one(query_filter, _prepare_fields(fields))
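The typical lookup chain is: resolve the thumbnail id from a source entity, then fetch the thumbnail documents. A hedged sketch follows; the project and asset names are placeholders and `get_asset_by_name` is assumed to come from earlier in this module:

```python
# Hedged sketch of the thumbnail lookup chain using the helpers above.
asset_doc = get_asset_by_name("my_project", "my_asset", fields=["_id"])
if asset_doc:
    thumbnail_id = get_thumbnail_id_from_source(
        "my_project", "asset", asset_doc["_id"]
    )
    if thumbnail_id:
        for thumbnail_doc in get_thumbnails("my_project", [thumbnail_id]):
            # 'data' may carry resolver-specific information
            print(thumbnail_doc["_id"], thumbnail_doc.get("data"))
```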
-
-
-def get_workfile_info(
-    project_name, asset_id, task_name, filename, fields=None
-):
-    """Document with workfile information.
-
-    Warning:
-        Query is based on filename and context, which does not guarantee that
-        it will always find the right and expected result. The information
-        has limited usage and is not recommended as a source of truth about
-        a workfile.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        asset_id (Union[str, ObjectId]): Id of asset entity.
-        task_name (str): Task name on asset.
-        filename (str): Workfile filename.
-        fields (Optional[Iterable[str]]): Fields that should be returned. All
-            fields are returned if 'None' is passed.
-
-    Returns:
-        Union[Dict, None]: Workfile entity data which can be reduced to
-            specified 'fields'. None is returned if workfile with specified
-            filters was not found.
-    """
-
-    if not asset_id or not task_name or not filename:
-        return None
-
-    query_filter = {
-        "type": "workfile",
-        "parent": convert_id(asset_id),
-        "task_name": task_name,
-        "filename": filename
-    }
-    conn = get_project_connection(project_name)
-    return conn.find_one(query_filter, _prepare_fields(fields))
-
-
-"""
-## Custom data storage:
-- Settings - OP settings overrides and local settings
-- Logging - logs from Logger
-- Webpublisher - jobs
-- Ftrack - events
-- Maya - Shaders
-  - openpype/hosts/maya/api/shader_definition_editor.py
-  - openpype/hosts/maya/plugins/publish/validate_model_name.py
-
-## Global publish plugins
-- openpype/plugins/publish/extract_hierarchy_avalon.py
-    Create:
-    - asset
-    Update:
-    - asset
-
-## Lib
-- openpype/lib/avalon_context.py
-    Update:
-    - workfile data
-- openpype/lib/project_backpack.py
-    Update:
-    - project
-"""
diff --git a/openpype/client/mongo/entity_links.py b/openpype/client/mongo/entity_links.py
deleted file mode 100644
index fd13a2d83b..0000000000
--- a/openpype/client/mongo/entity_links.py
+++ /dev/null
@@ -1,240 +0,0 @@
-from .mongo import get_project_connection
-from .entities import (
-    get_assets,
-    get_asset_by_id,
-    get_version_by_id,
-    get_representation_by_id,
-    convert_id,
-)
-
-
-def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None):
-    """Extract linked asset ids from asset document.
-
-    One of asset document or asset id must be passed.
-
-    Note:
-        Asset links currently work only from assets to other assets.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        asset_doc (dict): Asset document from DB.
-        asset_id (Union[str, ObjectId]): Asset id. Can be used instead of
-            asset document.
-
-    Returns:
-        List[Union[ObjectId, str]]: Asset ids of input links.
-    """
-
-    output = []
-    if not asset_doc and not asset_id:
-        return output
-
-    if not asset_doc:
-        asset_doc = get_asset_by_id(
-            project_name, asset_id, fields=["data.inputLinks"]
-        )
-
-    input_links = asset_doc["data"].get("inputLinks")
-    if not input_links:
-        return output
-
-    for item in input_links:
-        # Backwards compatibility for "_id" key which was replaced with
-        #   "id"
-        if "_id" in item:
-            link_id = item["_id"]
-        else:
-            link_id = item["id"]
-        output.append(link_id)
-    return output
-
-
-def get_linked_assets(
-    project_name, asset_doc=None, asset_id=None, fields=None
-):
-    """Return linked assets based on passed asset document.
-
-    One of asset document or asset id must be passed.
-
-    Args:
-        project_name (str): Name of project where to look for queried entities.
-        asset_doc (Dict[str, Any]): Asset document from database.
-        asset_id (Union[ObjectId, str]): Asset id. Can be used instead of
-            asset document.
-        fields (Iterable[str]): Fields that should be returned. All fields are
-            returned if 'None' is passed.
-
-    Returns:
-        List[Dict[str, Any]]: Asset documents of input links for passed
-            asset doc.
-    """
-
-    if not asset_doc:
-        if not asset_id:
-            return []
-        asset_doc = get_asset_by_id(
-            project_name,
-            asset_id,
-            fields=["data.inputLinks"]
-        )
-        if not asset_doc:
-            return []
-
-    link_ids = get_linked_asset_ids(project_name, asset_doc=asset_doc)
-    if not link_ids:
-        return []
-
-    return list(get_assets(project_name, asset_ids=link_ids, fields=fields))
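A hedged usage sketch of the asset-link helpers above; the project name and asset id are made-up placeholders:

```python
# Hedged usage sketch for get_linked_asset_ids / get_linked_assets.
asset_doc = get_asset_by_id(
    "my_project",
    "633c4e7a9f0e2b1c6d8f4a21",  # hypothetical asset id
    fields=["data.inputLinks"]
)
if asset_doc:
    link_ids = get_linked_asset_ids("my_project", asset_doc=asset_doc)
    linked_assets = get_linked_assets(
        "my_project", asset_doc=asset_doc, fields=["name"]
    )
    print(link_ids, [doc["name"] for doc in linked_assets])
```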
-
-
-def get_linked_representation_id(
-    project_name, repre_doc=None, repre_id=None, link_type=None, max_depth=None
-):
-    """Returns list of linked ids of particular type (if provided).
-
-    One of representation document or representation id must be passed.
-
-    Note:
-        Representation links currently work only from a representation
-        through its version back to other representations.
-
-    Args:
-        project_name (str): Name of project where to look for links.
-        repre_doc (Dict[str, Any]): Representation document.
-        repre_id (Union[ObjectId, str]): Representation id.
-        link_type (str): Type of link (e.g. 'reference', ...).
-        max_depth (int): Limit recursion level. Default: 0
-
-    Returns:
-        List[ObjectId]: Linked representation ids.
-    """
-
-    if repre_doc:
-        repre_id = repre_doc["_id"]
-
-    if repre_id:
-        repre_id = convert_id(repre_id)
-
-    if not repre_id and not repre_doc:
-        return []
-
-    version_id = None
-    if repre_doc:
-        version_id = repre_doc.get("parent")
-
-    if not version_id:
-        repre_doc = get_representation_by_id(
-            project_name, repre_id, fields=["parent"]
-        )
-        version_id = repre_doc["parent"]
-
-    if not version_id:
-        return []
-
-    version_doc = get_version_by_id(
-        project_name, version_id, fields=["type", "version_id"]
-    )
-    if version_doc["type"] == "hero_version":
-        version_id = version_doc["version_id"]
-
-    if max_depth is None:
-        max_depth = 0
-
-    match = {
-        "_id": version_id,
-        # Links are not stored to hero versions at this moment so filter
-        #   is limited to just versions
-        "type": "version"
-    }
-
-    graph_lookup = {
-        "from": project_name,
-        "startWith": "$data.inputLinks.id",
-        "connectFromField": "data.inputLinks.id",
-        "connectToField": "_id",
-        "as": "outputs_recursive",
-        "depthField": "depth"
-    }
-    if max_depth != 0:
-        # We offset by -1 since 0 basically means no recursion,
-        #   but the recursion only happens after the initial lookup
-        #   for outputs.
-        graph_lookup["maxDepth"] = max_depth - 1
-
-    query_pipeline = [
-        # Match
-        {"$match": match},
-        # Recursive graph lookup for inputs
-        {"$graphLookup": graph_lookup}
-    ]
-
-    conn = get_project_connection(project_name)
-    result = conn.aggregate(query_pipeline)
-    referenced_version_ids = _process_referenced_pipeline_result(
-        result, link_type
-    )
-    if not referenced_version_ids:
-        return []
-
-    ref_ids = conn.distinct(
-        "_id",
-        filter={
-            "parent": {"$in": list(referenced_version_ids)},
-            "type": "representation"
-        }
-    )
-
-    return list(ref_ids)
-
-
-def _process_referenced_pipeline_result(result, link_type):
-    """Filters result from pipeline for particular link_type.
-
-    Pipeline cannot use link_type directly in a query.
-
-    Returns:
-        set: Referenced version ids.
-    """
-
-    referenced_version_ids = set()
-    correctly_linked_ids = set()
-    for item in result:
-        input_links = item.get("data", {}).get("inputLinks")
-        if not input_links:
-            continue
-
-        _filter_input_links(
-            input_links,
-            link_type,
-            correctly_linked_ids
-        )
-
-        # 'outputs_recursive' comes in random order, sort by depth
-        outputs_recursive = item.get("outputs_recursive")
-        if not outputs_recursive:
-            continue
-
-        for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
-            # Leaf
-            if output["_id"] not in correctly_linked_ids:
-                continue
-
-            _filter_input_links(
-                output.get("data", {}).get("inputLinks"),
-                link_type,
-                correctly_linked_ids
-            )
-
-            referenced_version_ids.add(output["_id"])
-
-    return referenced_version_ids
-
-
-def _filter_input_links(input_links, link_type, correctly_linked_ids):
-    if not input_links:  # to handle hero versions
-        return
-
-    for input_link in input_links:
-        if link_type and input_link["type"] != link_type:
-            continue
-
-        link_id = input_link.get("id") or input_link.get("_id")
-        if link_id is not None:
-            correctly_linked_ids.add(link_id)
diff --git a/openpype/client/mongo/mongo.py b/openpype/client/mongo/mongo.py
deleted file mode 100644
index 2be426efeb..0000000000
--- a/openpype/client/mongo/mongo.py
+++ /dev/null
@@ -1,447 +0,0 @@
-import os
-import sys
-import time
-import logging
-import pymongo
-import certifi
-
-from bson.json_util import (
-    loads,
-    dumps,
-    CANONICAL_JSON_OPTIONS
-)
-
-from openpype import AYON_SERVER_ENABLED
-if sys.version_info[0] == 2:
-    from urlparse import urlparse, parse_qs
-else:
-    from urllib.parse import urlparse, parse_qs
-
-
-class MongoEnvNotSet(Exception):
-    pass
-
-
-def documents_to_json(docs):
-    """Convert documents to json string.
-
-    Args:
-        docs (Union[list[dict[str, Any]], dict[str, Any]]): Document/s to
-            convert to json string.
-
-    Returns:
-        str: Json string with mongo documents.
-    """
-
-    return dumps(docs, json_options=CANONICAL_JSON_OPTIONS)
-
-
-def load_json_file(filepath):
-    """Load mongo documents from a json file.
-
-    Args:
-        filepath (str): Path to a json file.
-
-    Returns:
-        Union[dict[str, Any], list[dict[str, Any]]]: Loaded content from a
-            json file.
-    """
-
-    if not os.path.exists(filepath):
-        raise ValueError("Path {} was not found".format(filepath))
-
-    with open(filepath, "r") as stream:
-        content = stream.read()
-    return loads("".join(content))
-
-
-def get_project_database_name():
-    """Name of the database where projects are available.
-
-    Returns:
-        str: Name of the database where project collections are.
-    """
-
-    return os.environ.get("AVALON_DB") or "avalon"
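The URL decomposition performed by `_decompose_url` below is plain `urlparse` plus a query-string lookup. A fully standalone sketch, using a made-up mongo URL, shows the shape of the result:

```python
# Standalone sketch of the decomposition performed by '_decompose_url';
# the URL is made up for illustration.
from urllib.parse import urlparse, parse_qs

url = "mongodb://user:pass@localhost:27017/?authSource=admin"
result = urlparse(url)
components = {
    "scheme": result.scheme,      # 'mongodb'
    "host": result.hostname,      # 'localhost'
    "port": result.port,          # 27017
    "username": result.username,  # 'user'
    "password": result.password,  # 'pass'
    # Auth db falls back to the connected db when 'authSource' is missing
    "auth_db": parse_qs(result.query).get("authSource", [None])[0],
}
print(components)
```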
-
-
-def _decompose_url(url):
-    """Decompose mongo url to basic components.
-
-    Used for creation of MongoHandler which expects mongo url components as
-    separated kwargs. The components are ultimately not used, as the
-    connection is set up directly; they are just dummy components to pass
-    MongoHandler validation.
-    """
-
-    # Use first url from passed url
-    # - this is because it is possible to pass multiple urls for multiple
-    #   replica sets which would crash on urlparse otherwise
-    # - please don't use a comma in username or password
-    url = url.split(",")[0]
-    components = {
-        "scheme": None,
-        "host": None,
-        "port": None,
-        "username": None,
-        "password": None,
-        "auth_db": None
-    }
-
-    result = urlparse(url)
-    if result.scheme is None:
-        _url = "mongodb://{}".format(url)
-        result = urlparse(_url)
-
-    components["scheme"] = result.scheme
-    components["host"] = result.hostname
-    try:
-        components["port"] = result.port
-    except ValueError:
-        raise RuntimeError("invalid port specified")
-    components["username"] = result.username
-    components["password"] = result.password
-
-    try:
-        components["auth_db"] = parse_qs(result.query)['authSource'][0]
-    except KeyError:
-        # no auth db provided, mongo will use the one we are connecting to
-        pass
-
-    return components
-
-
-def get_default_components():
-    mongo_url = os.environ.get("OPENPYPE_MONGO")
-    if mongo_url is None:
-        raise MongoEnvNotSet(
-            "URL for Mongo logging connection is not set."
-        )
-    return _decompose_url(mongo_url)
-
-
-def should_add_certificate_path_to_mongo_url(mongo_url):
-    """Check if ca certificate should be added to mongo url.
-
-    Since 30.9.2021 cloud mongo requires newer certificates that are not
-    available on most workstations. This adds the path to a certifi
-    certificate which is valid for it. To add the certificate path, the url
-    must have the scheme 'mongodb+srv', or have 'ssl=true' or 'tls=true'
-    in its query string.
-    """
-
-    parsed = urlparse(mongo_url)
-    query = parse_qs(parsed.query)
-    lowered_query_keys = set(key.lower() for key in query.keys())
-    add_certificate = False
-    # Check if url 'ssl' or 'tls' are set to 'true'
-    for key in ("ssl", "tls"):
-        if key in query and "true" in query[key]:
-            add_certificate = True
-            break
-
-    # Check if url contains 'mongodb+srv'
-    if not add_certificate and parsed.scheme == "mongodb+srv":
-        add_certificate = True
-
-    # Check if url does already contain certificate path
-    if add_certificate and "tlscafile" in lowered_query_keys:
-        add_certificate = False
-
-    return add_certificate
-
-
-def validate_mongo_connection(mongo_uri):
-    """Check if provided mongodb URL is valid.
-
-    Args:
-        mongo_uri (str): URL to validate.
-
-    Raises:
-        ValueError: When port in mongo uri is not valid.
-        pymongo.errors.InvalidURI: If passed mongo URI is invalid.
-        pymongo.errors.ServerSelectionTimeoutError: If the connection timed
-            out, so the mongo server probably could not be reached.
-
-    """
-
-    client = OpenPypeMongoConnection.create_connection(
-        mongo_uri, retry_attempts=1
-    )
-    client.close()
-
-
-class OpenPypeMongoConnection:
-    """Singleton MongoDB connection.
-
-    Keeps MongoDB connections by url.
-    """
-
-    mongo_clients = {}
-    log = logging.getLogger("OpenPypeMongoConnection")
-
-    @staticmethod
-    def get_default_mongo_url():
-        return os.environ["OPENPYPE_MONGO"]
-
-    @classmethod
-    def get_mongo_client(cls, mongo_url=None):
-        if mongo_url is None:
-            mongo_url = cls.get_default_mongo_url()
-
-        connection = cls.mongo_clients.get(mongo_url)
-        if connection:
-            # Naive validation of existing connection
-            try:
-                connection.server_info()
-                with connection.start_session():
-                    pass
-            except Exception:
-                connection = None
-
-        if not connection:
-            cls.log.debug("Creating mongo connection to {}".format(mongo_url))
-            connection = cls.create_connection(mongo_url)
-            cls.mongo_clients[mongo_url] = connection
-
-        return connection
-
-    @classmethod
-    def create_connection(cls, mongo_url, timeout=None, retry_attempts=None):
-        if AYON_SERVER_ENABLED:
-            raise RuntimeError("Created mongo connection in AYON mode")
-        parsed = urlparse(mongo_url)
-        # Force validation of scheme
-        if parsed.scheme not in ["mongodb", "mongodb+srv"]:
-            raise pymongo.errors.InvalidURI((
-                "Invalid URI scheme:"
-                " URI must begin with 'mongodb://' or 'mongodb+srv://'"
-            ))
-
-        if timeout is None:
-            timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000)
-
-        kwargs = {
-            "serverSelectionTimeoutMS": timeout
-        }
-        if should_add_certificate_path_to_mongo_url(mongo_url):
-            kwargs["tlsCAFile"] = certifi.where()
-
-        mongo_client = pymongo.MongoClient(mongo_url, **kwargs)
-
-        if retry_attempts is None:
-            retry_attempts = 3
-
-        elif not retry_attempts:
-            retry_attempts = 1
-
-        last_exc = None
-        valid = False
-        t1 = time.time()
-        for attempt in range(1, retry_attempts + 1):
-            try:
-                mongo_client.server_info()
-                with mongo_client.start_session():
-                    pass
-                valid = True
-                break
-
-            except Exception as exc:
-                last_exc = exc
-                if attempt < retry_attempts:
-                    cls.log.warning(
-                        "Attempt {} failed. Retrying... ".format(attempt)
-                    )
-                    time.sleep(1)
-
-        if not valid:
-            raise last_exc
-
-        cls.log.info("Connected to {}, delay {:.3f}s".format(
-            mongo_url, time.time() - t1
-        ))
-        return mongo_client
-
-
-# ------ Helper Mongo functions ------
-# Functions can be helpful with custom tools to backup/restore mongo state.
-# Not meant as API functionality that should be used in production codebase!
-def get_collection_documents(database_name, collection_name, as_json=False):
-    """Query all documents from a collection.
-
-    Args:
-        database_name (str): Name of database where to look for collection.
-        collection_name (str): Name of collection to query.
-        as_json (Optional[bool]): Output should be a json string.
-            Default: 'False'
-
-    Returns:
-        Union[list[dict[str, Any]], str]: Queried documents.
-    """
-
-    client = OpenPypeMongoConnection.get_mongo_client()
-    output = list(client[database_name][collection_name].find({}))
-    if as_json:
-        output = documents_to_json(output)
-    return output
-
-
-def store_collection(filepath, database_name, collection_name):
-    """Store collection documents to a json file.
-
-    Args:
-        filepath (str): Path to a json file where documents will be stored.
-        database_name (str): Name of database where to look for collection.
-        collection_name (str): Name of collection to store.
-    """
-
-    # Make sure directory for output file exists
-    dirpath = os.path.dirname(filepath)
-    if not os.path.isdir(dirpath):
-        os.makedirs(dirpath)
-
-    content = get_collection_documents(database_name, collection_name, True)
-    with open(filepath, "w") as stream:
-        stream.write(content)
-
-
-def replace_collection_documents(docs, database_name, collection_name):
-    """Replace all documents in a collection with passed documents.
-
-    Warnings:
-        All existing documents in collection will be removed if there are any.
-
-    Args:
-        docs (list[dict[str, Any]]): New documents.
-        database_name (str): Name of database where to look for collection.
-        collection_name (str): Name of collection where new documents are
-            uploaded.
-    """
-
-    client = OpenPypeMongoConnection.get_mongo_client()
-    database = client[database_name]
-    if collection_name in database.list_collection_names():
-        database.drop_collection(collection_name)
-    col = database[collection_name]
-    col.insert_many(docs)
-
-
-def restore_collection(filepath, database_name, collection_name):
-    """Restore/replace collection from a json filepath.
-
-    Warnings:
-        All existing documents in collection will be removed if there are any.
-
-    Args:
-        filepath (str): Path to a json with documents.
-        database_name (str): Name of database where to look for collection.
-        collection_name (str): Name of collection where new documents are
-            uploaded.
-    """
-
-    docs = load_json_file(filepath)
-    replace_collection_documents(docs, database_name, collection_name)
-
-
-def get_project_database(database_name=None):
-    """Database object where project collections are.
-
-    Args:
-        database_name (Optional[str]): Custom name of database.
-
-    Returns:
-        pymongo.database.Database: Database where project collections are.
-    """
-
-    if not database_name:
-        database_name = get_project_database_name()
-    return OpenPypeMongoConnection.get_mongo_client()[database_name]
-
-
-def get_project_connection(project_name, database_name=None):
-    """Direct access to mongo collection.
-
-    We're trying to avoid using direct access to mongo. This should be used
-    only for Create, Update and Remove operations until api calls for that
-    are implemented.
-
-    Args:
-        project_name (str): Project name for which collection should be
-            returned.
-        database_name (Optional[str]): Custom name of database.
-
-    Returns:
-        pymongo.collection.Collection: Collection related to passed project.
-    """
-
-    if not project_name:
-        raise ValueError("Invalid project name {}".format(str(project_name)))
-    return get_project_database(database_name)[project_name]
-
-
-def get_project_documents(project_name, database_name=None):
-    """Query all documents from project collection.
-
-    Args:
-        project_name (str): Name of project.
-        database_name (Optional[str]): Name of mongo database where to look
-            for project.
-
-    Returns:
-        list[dict[str, Any]]: Documents in project collection.
-    """
-
-    if not database_name:
-        database_name = get_project_database_name()
-    return get_collection_documents(database_name, project_name)
-
-
-def store_project_documents(project_name, filepath, database_name=None):
-    """Store project documents to a file as json string.
-
-    Args:
-        project_name (str): Name of project to store.
-        filepath (str): Path to a json file where output will be stored.
-        database_name (Optional[str]): Name of mongo database where to look
-            for project.
-    """
-
-    if not database_name:
-        database_name = get_project_database_name()
-
-    store_collection(filepath, database_name, project_name)
-
-
-def replace_project_documents(project_name, docs, database_name=None):
-    """Replace documents in mongo with passed documents.
-
-    Warnings:
-        Existing project collection is removed if exists in mongo.
-
-    Args:
-        project_name (str): Name of project.
-        docs (list[dict[str, Any]]): Documents to restore.
-        database_name (Optional[str]): Name of mongo database where project
-            collection will be created.
-    """
-
-    if not database_name:
-        database_name = get_project_database_name()
-    replace_collection_documents(docs, database_name, project_name)
-
-
-def restore_project_documents(project_name, filepath, database_name=None):
-    """Replace documents in mongo with documents loaded from a json file.
-
-    Warnings:
-        Existing project collection is removed if exists in mongo.
-
-    Args:
-        project_name (str): Name of project.
-        filepath (str): Path to json file with project documents.
-        database_name (Optional[str]): Name of mongo database where project
-            collection will be created.
-    """
-
-    if not database_name:
-        database_name = get_project_database_name()
-    restore_collection(filepath, database_name, project_name)
diff --git a/openpype/client/mongo/operations.py b/openpype/client/mongo/operations.py
deleted file mode 100644
index 3537aa4a3d..0000000000
--- a/openpype/client/mongo/operations.py
+++ /dev/null
@@ -1,632 +0,0 @@
-import re
-import copy
-import collections
-
-from bson.objectid import ObjectId
-from pymongo import DeleteOne, InsertOne, UpdateOne
-
-from openpype.client.operations_base import (
-    REMOVED_VALUE,
-    CreateOperation,
-    UpdateOperation,
-    DeleteOperation,
-    BaseOperationsSession
-)
-from .mongo import get_project_connection
-from .entities import get_project
-
-
-PROJECT_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_"
-PROJECT_NAME_REGEX = re.compile(
-    "^[{}]+$".format(PROJECT_NAME_ALLOWED_SYMBOLS)
-)
-
-CURRENT_PROJECT_SCHEMA = "openpype:project-3.0"
-CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0"
-CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0"
-CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0"
-CURRENT_VERSION_SCHEMA = "openpype:version-3.0"
-CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0"
-CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0"
-CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0"
-CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0"
-
-
-def _create_or_convert_to_mongo_id(mongo_id):
-    if mongo_id is None:
-        return ObjectId()
-    return ObjectId(mongo_id)
-
-
-def new_project_document(
-    project_name, project_code, config, data=None, entity_id=None
-):
-    """Create skeleton data of project document.
-
-    Args:
-        project_name (str): Name of project. Used as identifier of a project.
-        project_code (str): Shorter version of project name without spaces
-            and special characters (in most of cases). Should be also
-            considered as unique name across projects.
-        config (Dict[str, Any]): Project config consisting of roots,
-            templates, applications and other project Anatomy related data.
-        data (Dict[str, Any]): Project data with information about its
-            attributes (e.g. 'fps' etc.) or integration specific keys.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of project document.
-    """
-
-    if data is None:
-        data = {}
-
-    data["code"] = project_code
-
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "name": project_name,
-        "type": CURRENT_PROJECT_SCHEMA,
-        "entity_data": data,
-        "config": config
-    }
-
-
-def new_asset_document(
-    name, project_id, parent_id, parents, data=None, entity_id=None
-):
-    """Create skeleton data of asset document.
-
-    Args:
-        name (str): Is considered as unique identifier of asset in project.
-        project_id (Union[str, ObjectId]): Id of project document.
-        parent_id (Union[str, ObjectId]): Id of parent asset.
-        parents (List[str]): List of parent assets names.
-        data (Dict[str, Any]): Asset document data. Empty dictionary is used
-            if not passed. Value of 'parent_id' is used to fill 'visualParent'.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of asset document.
-    """
-
-    if data is None:
-        data = {}
-    if parent_id is not None:
-        parent_id = ObjectId(parent_id)
-    data["visualParent"] = parent_id
-    data["parents"] = parents
-
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "type": "asset",
-        "name": name,
-        "parent": ObjectId(project_id),
-        "data": data,
-        "schema": CURRENT_ASSET_DOC_SCHEMA
-    }
-
-
-def new_subset_document(name, family, asset_id, data=None, entity_id=None):
-    """Create skeleton data of subset document.
-
-    Args:
-        name (str): Is considered as unique identifier of subset under asset.
-        family (str): Subset's family.
-        asset_id (Union[str, ObjectId]): Id of parent asset.
-        data (Dict[str, Any]): Subset document data. Empty dictionary is used
-            if not passed. Value of 'family' is used to fill the 'family' key
-            in data.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of subset document.
-    """
-
-    if data is None:
-        data = {}
-    data["family"] = family
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "schema": CURRENT_SUBSET_SCHEMA,
-        "type": "subset",
-        "name": name,
-        "data": data,
-        "parent": asset_id
-    }
-
-
-def new_version_doc(version, subset_id, data=None, entity_id=None):
-    """Create skeleton data of version document.
-
-    Args:
-        version (int): Is considered as unique identifier of version
-            under subset.
-        subset_id (Union[str, ObjectId]): Id of parent subset.
-        data (Dict[str, Any]): Version document data.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of version document.
-    """
-
-    if data is None:
-        data = {}
-
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "schema": CURRENT_VERSION_SCHEMA,
-        "type": "version",
-        "name": int(version),
-        "parent": subset_id,
-        "data": data
-    }
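The skeleton factories above chain naturally into an asset -> subset -> version hierarchy by passing each document's '_id' as the next parent. A hedged sketch with made-up ids and names:

```python
# Hedged sketch chaining the skeleton factories above; ids/names are
# made up for illustration only.
from bson.objectid import ObjectId

project_id = ObjectId()
asset_doc = new_asset_document(
    "my_asset", project_id, parent_id=None, parents=[]
)
subset_doc = new_subset_document(
    "modelMain", "model", asset_doc["_id"]
)
version_doc = new_version_doc(1, subset_doc["_id"])
print(version_doc["parent"] == subset_doc["_id"])  # True
```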
-
-
-def new_hero_version_doc(version_id, subset_id, data=None, entity_id=None):
-    """Create skeleton data of hero version document.
-
-    Args:
-        version_id (ObjectId): Id of the standard version which the hero
-            version represents.
-        subset_id (Union[str, ObjectId]): Id of parent subset.
-        data (Dict[str, Any]): Version document data.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of hero version document.
-    """
-
-    if data is None:
-        data = {}
-
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "schema": CURRENT_HERO_VERSION_SCHEMA,
-        "type": "hero_version",
-        "version_id": version_id,
-        "parent": subset_id,
-        "data": data
-    }
-
-
-def new_representation_doc(
-    name, version_id, context, data=None, entity_id=None
-):
-    """Create skeleton data of representation document.
-
-    Args:
-        name (str): Is considered as unique identifier of representation
-            under version.
-        version_id (Union[str, ObjectId]): Id of parent version.
-        context (Dict[str, Any]): Representation context used to fill
-            templates or to query the representation.
-        data (Dict[str, Any]): Representation document data.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of representation document.
-    """
-
-    if data is None:
-        data = {}
-
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "schema": CURRENT_REPRESENTATION_SCHEMA,
-        "type": "representation",
-        "parent": version_id,
-        "name": name,
-        "data": data,
-
-        # Imprint shortcut to context for performance reasons.
-        "context": context
-    }
-
-
-def new_thumbnail_doc(data=None, entity_id=None):
-    """Create skeleton data of thumbnail document.
-
-    Args:
-        data (Dict[str, Any]): Thumbnail document data.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of thumbnail document.
-    """
-
-    if data is None:
-        data = {}
-
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "type": "thumbnail",
-        "schema": CURRENT_THUMBNAIL_SCHEMA,
-        "data": data
-    }
-
-
-def new_workfile_info_doc(
-    filename, asset_id, task_name, files, data=None, entity_id=None
-):
-    """Create skeleton data of workfile info document.
-
-    Workfile document is at this moment used primarily for artist notes.
-
-    Args:
-        filename (str): Filename of workfile.
-        asset_id (Union[str, ObjectId]): Id of asset under which the workfile
-            lives.
-        task_name (str): Task under which the workfile was created.
-        files (List[str]): List of rootless filepaths related to workfile.
-        data (Dict[str, Any]): Additional metadata.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id is
-            created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of workfile info document.
-    """
-
-    if not data:
-        data = {}
-
-    return {
-        "_id": _create_or_convert_to_mongo_id(entity_id),
-        "type": "workfile",
-        "parent": ObjectId(asset_id),
-        "task_name": task_name,
-        "filename": filename,
-        "data": data,
-        "files": files
-    }
-
-
-def _prepare_update_data(old_doc, new_doc, replace):
-    changes = {}
-    for key, value in new_doc.items():
-        if key not in old_doc or value != old_doc[key]:
-            changes[key] = value
-
-    if replace:
-        for key in old_doc.keys():
-            if key not in new_doc:
-                changes[key] = REMOVED_VALUE
-    return changes
-
-
-def prepare_subset_update_data(old_doc, new_doc, replace=True):
-    """Compare two subset documents and prepare update data.
-
-    Based on compared values will create update data for
-    'MongoUpdateOperation'.
-
-    Empty output means that documents are identical.
-
-    Returns:
-        Dict[str, Any]: Changes between old and new document.
-    """
-
-    return _prepare_update_data(old_doc, new_doc, replace)
-
-
-def prepare_version_update_data(old_doc, new_doc, replace=True):
-    """Compare two version documents and prepare update data.
-
-    Based on compared values will create update data for
-    'MongoUpdateOperation'.
-
-    Empty output means that documents are identical.
-
-    Returns:
-        Dict[str, Any]: Changes between old and new document.
- """ - - return _prepare_update_data(old_doc, new_doc, replace) - - -def prepare_hero_version_update_data(old_doc, new_doc, replace=True): - """Compare two hero version documents and prepare update data. - - Based on compared values will create update data for 'UpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - return _prepare_update_data(old_doc, new_doc, replace) - - -def prepare_representation_update_data(old_doc, new_doc, replace=True): - """Compare two representation documents and prepare update data. - - Based on compared values will create update data for - 'MongoUpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - return _prepare_update_data(old_doc, new_doc, replace) - - -def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): - """Compare two workfile info documents and prepare update data. - - Based on compared values will create update data for - 'MongoUpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - return _prepare_update_data(old_doc, new_doc, replace) - - -class MongoCreateOperation(CreateOperation): - """Operation to create an entity. - - Args: - project_name (str): On which project operation will happen. - entity_type (str): Type of entity on which change happens. - e.g. 'asset', 'representation' etc. - data (Dict[str, Any]): Data of entity that will be created. - """ - - operation_name = "create" - - def __init__(self, project_name, entity_type, data): - super(MongoCreateOperation, self).__init__( - project_name, entity_type, data - ) - - if "_id" not in self._data: - self._data["_id"] = ObjectId() - else: - self._data["_id"] = ObjectId(self._data["_id"]) - - @property - def entity_id(self): - return self._data["_id"] - - def to_mongo_operation(self): - return InsertOne(copy.deepcopy(self._data)) - - -class MongoUpdateOperation(UpdateOperation): - """Operation to update an entity. - - Args: - project_name (str): On which project operation will happen. - entity_type (str): Type of entity on which change happens. - e.g. 'asset', 'representation' etc. - entity_id (Union[str, ObjectId]): Identifier of an entity. - update_data (Dict[str, Any]): Key -> value changes that will be set in - database. If value is set to 'REMOVED_VALUE' the key will be - removed. Only first level of dictionary is checked (on purpose). - """ - - operation_name = "update" - - def __init__(self, project_name, entity_type, entity_id, update_data): - super(MongoUpdateOperation, self).__init__( - project_name, entity_type, entity_id, update_data - ) - - self._entity_id = ObjectId(self._entity_id) - - def to_mongo_operation(self): - unset_data = {} - set_data = {} - for key, value in self._update_data.items(): - if value is REMOVED_VALUE: - unset_data[key] = None - else: - set_data[key] = value - - op_data = {} - if unset_data: - op_data["$unset"] = unset_data - if set_data: - op_data["$set"] = set_data - - if not op_data: - return None - - return UpdateOne( - {"_id": self.entity_id}, - op_data - ) - - -class MongoDeleteOperation(DeleteOperation): - """Operation to delete an entity. - - Args: - project_name (str): On which project operation will happen. - entity_type (str): Type of entity on which change happens. - e.g. 'asset', 'representation' etc. 
-        entity_id (Union[str, ObjectId]): Entity id that will be removed.
-    """
-
-    operation_name = "delete"
-
-    def __init__(self, project_name, entity_type, entity_id):
-        super(MongoDeleteOperation, self).__init__(
-            project_name, entity_type, entity_id
-        )
-
-        self._entity_id = ObjectId(self._entity_id)
-
-    def to_mongo_operation(self):
-        return DeleteOne({"_id": self.entity_id})
-
-
-class MongoOperationsSession(BaseOperationsSession):
-    """Session storing operations that should happen in an order.
-
-    At this moment the session does not handle anything special and can be
-    considered a plain list of operations that will happen one after another.
-    If creation of the same entity is added multiple times it is not handled
-    in any way, and document values are not validated.
-
-    Operations are grouped by their project on commit.
-    """
-
-    def commit(self):
-        """Commit session operations."""
-
-        operations, self._operations = self._operations, []
-        if not operations:
-            return
-
-        operations_by_project = collections.defaultdict(list)
-        for operation in operations:
-            operations_by_project[operation.project_name].append(operation)
-
-        for project_name, operations in operations_by_project.items():
-            bulk_writes = []
-            for operation in operations:
-                mongo_op = operation.to_mongo_operation()
-                if mongo_op is not None:
-                    bulk_writes.append(mongo_op)
-
-            if bulk_writes:
-                collection = get_project_connection(project_name)
-                collection.bulk_write(bulk_writes)
-
-    def create_entity(self, project_name, entity_type, data):
-        """Fast access to 'MongoCreateOperation'.
-
-        Returns:
-            MongoCreateOperation: Object of create operation.
-        """
-
-        operation = MongoCreateOperation(project_name, entity_type, data)
-        self.add(operation)
-        return operation
-
-    def update_entity(self, project_name, entity_type, entity_id, update_data):
-        """Fast access to 'MongoUpdateOperation'.
-
-        Returns:
-            MongoUpdateOperation: Object of update operation.
-        """
-
-        operation = MongoUpdateOperation(
-            project_name, entity_type, entity_id, update_data
-        )
-        self.add(operation)
-        return operation
-
-    def delete_entity(self, project_name, entity_type, entity_id):
-        """Fast access to 'MongoDeleteOperation'.
-
-        Returns:
-            MongoDeleteOperation: Object of delete operation.
-        """
-
-        operation = MongoDeleteOperation(project_name, entity_type, entity_id)
-        self.add(operation)
-        return operation
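A hedged usage sketch of the session above: operations are queued and only touch the database on commit. The project name and entity data are deliberately minimal placeholders, and a reachable Mongo connection is assumed:

```python
# Hedged usage sketch of 'MongoOperationsSession'; "my_project" and the
# entity data are made up for illustration.
session = MongoOperationsSession()

op = session.create_entity(
    "my_project", "asset", {"type": "asset", "name": "my_asset"}
)
session.update_entity(
    "my_project", "asset", op.entity_id, {"data.fps": 25}
)
# Nothing is written to the database until commit is called
session.commit()
```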
-
-
-def create_project(
-    project_name,
-    project_code,
-    library_project=False,
-):
-    """Create project using OpenPype settings.
-
-    This project creation function is not validating the project document on
-    creation. That is because the project document is created blindly with
-    only the minimum required information about the project, which is its
-    name, code, type and schema.
-
-    Entered project name must be unique and project must not exist yet.
-
-    Note:
-        This function is here to be OP v4 ready but in v3 it has more logic
-        to do. That's why inner imports are in the body.
-
-    Args:
-        project_name (str): New project name. Should be unique.
-        project_code (str): Project's code should be unique too.
-        library_project (bool): Project is library project.
-
-    Raises:
-        ValueError: When project name already exists in MongoDB.
-
-    Returns:
-        dict: Created project document.
-    """
-
-    from openpype.settings import ProjectSettings, SaveWarningExc
-    from openpype.pipeline.schema import validate
-
-    if get_project(project_name, fields=["name"]):
-        raise ValueError("Project with name \"{}\" already exists".format(
-            project_name
-        ))
-
-    if not PROJECT_NAME_REGEX.match(project_name):
-        raise ValueError((
-            "Project name \"{}\" contains invalid characters"
-        ).format(project_name))
-
-    project_doc = {
-        "type": "project",
-        "name": project_name,
-        "data": {
-            "code": project_code,
-            "library_project": library_project
-        },
-        "schema": CURRENT_PROJECT_SCHEMA
-    }
-
-    op_session = MongoOperationsSession()
-    # Insert document with basic data
-    create_op = op_session.create_entity(
-        project_name, project_doc["type"], project_doc
-    )
-    op_session.commit()
-
-    # Load ProjectSettings for the project and save it to store all attributes
-    #   and Anatomy
-    try:
-        project_settings_entity = ProjectSettings(project_name)
-        project_settings_entity.save()
-    except SaveWarningExc as exc:
-        print(str(exc))
-    except Exception:
-        op_session.delete_entity(
-            project_name, project_doc["type"], create_op.entity_id
-        )
-        op_session.commit()
-        raise
-
-    project_doc = get_project(project_name)
-
-    try:
-        # Validate created project document
-        validate(project_doc)
-    except Exception:
-        # Remove project if it is not valid
-        op_session.delete_entity(
-            project_name, project_doc["type"], create_op.entity_id
-        )
-        op_session.commit()
-        raise
-
-    return project_doc
diff --git a/openpype/client/operations.py b/openpype/client/operations.py
deleted file mode 100644
index 8bc09dffd3..0000000000
--- a/openpype/client/operations.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from openpype import AYON_SERVER_ENABLED
-
-from .operations_base import REMOVED_VALUE
-if not AYON_SERVER_ENABLED:
-    from .mongo.operations import *
-    OperationsSession = MongoOperationsSession
-
-else:
-    from ayon_api.server_api import (
-        PROJECT_NAME_ALLOWED_SYMBOLS,
-        PROJECT_NAME_REGEX,
-    )
-    from .server.operations import *
-    from .mongo.operations import (
-        CURRENT_PROJECT_SCHEMA,
-        CURRENT_PROJECT_CONFIG_SCHEMA,
-        CURRENT_ASSET_DOC_SCHEMA,
-        CURRENT_SUBSET_SCHEMA,
-        CURRENT_VERSION_SCHEMA,
-        CURRENT_HERO_VERSION_SCHEMA,
-        CURRENT_REPRESENTATION_SCHEMA,
-        CURRENT_WORKFILE_INFO_SCHEMA,
-        CURRENT_THUMBNAIL_SCHEMA
-    )
diff --git a/openpype/client/server/constants.py b/openpype/client/server/constants.py
deleted file mode 100644
index 1d3f94c702..0000000000
--- a/openpype/client/server/constants.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# --- Folders ---
-DEFAULT_FOLDER_FIELDS = {
-    "id",
-    "name",
-    "path",
-    "parentId",
-    "active",
-    "parents",
-    "thumbnailId"
-}
-
-REPRESENTATION_FILES_FIELDS = {
-    "files.name",
-    "files.hash",
-    "files.id",
-    "files.path",
-    "files.size",
-}
diff --git a/openpype/client/server/entities.py b/openpype/client/server/entities.py
deleted file mode 100644
index 75b5dc2cdd..0000000000
--- a/openpype/client/server/entities.py
+++ /dev/null
@@ -1,725 +0,0 @@
-import collections
-
-from openpype.client.mongo.operations import CURRENT_THUMBNAIL_SCHEMA
-
-from .utils import get_ayon_server_api_connection
-from .openpype_comp import get_folders_with_tasks
-from .conversion_utils import (
-    project_fields_v3_to_v4,
-    convert_v4_project_to_v3,
-
-    folder_fields_v3_to_v4,
-    convert_v4_folder_to_v3,
-
-    subset_fields_v3_to_v4,
-    convert_v4_subset_to_v3,
-
-    version_fields_v3_to_v4,
-    convert_v4_version_to_v3,
-
-    representation_fields_v3_to_v4,
-    convert_v4_representation_to_v3,
-
-    workfile_info_fields_v3_to_v4,
-
convert_v4_workfile_info_to_v3, -) - - -def get_projects(active=True, inactive=False, library=None, fields=None): - if not active and not inactive: - return - - if active and inactive: - active = None - elif active: - active = True - elif inactive: - active = False - - con = get_ayon_server_api_connection() - fields = project_fields_v3_to_v4(fields, con) - for project in con.get_projects(active, library, fields=fields): - yield convert_v4_project_to_v3(project) - - -def get_project(project_name, active=True, inactive=False, fields=None): - # Skip if both are disabled - con = get_ayon_server_api_connection() - fields = project_fields_v3_to_v4(fields, con) - return convert_v4_project_to_v3( - con.get_project(project_name, fields=fields) - ) - - -def get_whole_project(*args, **kwargs): - raise NotImplementedError("'get_whole_project' not implemented") - - -def _get_subsets( - project_name, - subset_ids=None, - subset_names=None, - folder_ids=None, - names_by_folder_ids=None, - archived=False, - fields=None -): - # Convert fields and add minimum required fields - con = get_ayon_server_api_connection() - fields = subset_fields_v3_to_v4(fields, con) - if fields is not None: - for key in ( - "id", - "active" - ): - fields.add(key) - - active = True - if archived: - active = None - - for subset in con.get_products( - project_name, - product_ids=subset_ids, - product_names=subset_names, - folder_ids=folder_ids, - names_by_folder_ids=names_by_folder_ids, - active=active, - fields=fields, - ): - yield convert_v4_subset_to_v3(subset) - - -def _get_versions( - project_name, - version_ids=None, - subset_ids=None, - versions=None, - hero=True, - standard=True, - latest=None, - active=None, - fields=None -): - con = get_ayon_server_api_connection() - - fields = version_fields_v3_to_v4(fields, con) - - # Make sure 'productId' and 'version' are available when hero versions - # are queried - if fields and hero: - fields = set(fields) - fields |= {"productId", "version"} - - queried_versions = con.get_versions( - project_name, - version_ids=version_ids, - product_ids=subset_ids, - versions=versions, - hero=hero, - standard=standard, - latest=latest, - active=active, - fields=fields - ) - - version_entities = [] - hero_versions = [] - for version in queried_versions: - if version["version"] < 0: - hero_versions.append(version) - else: - version_entities.append(convert_v4_version_to_v3(version)) - - if hero_versions: - subset_ids = set() - versions_nums = set() - for hero_version in hero_versions: - versions_nums.add(abs(hero_version["version"])) - subset_ids.add(hero_version["productId"]) - - hero_eq_versions = con.get_versions( - project_name, - product_ids=subset_ids, - versions=versions_nums, - hero=False, - fields=["id", "version", "productId"] - ) - hero_eq_by_subset_id = collections.defaultdict(list) - for version in hero_eq_versions: - hero_eq_by_subset_id[version["productId"]].append(version) - - for hero_version in hero_versions: - abs_version = abs(hero_version["version"]) - subset_id = hero_version["productId"] - version_id = None - for version in hero_eq_by_subset_id.get(subset_id, []): - if version["version"] == abs_version: - version_id = version["id"] - break - conv_hero = convert_v4_version_to_v3(hero_version) - conv_hero["version_id"] = version_id - version_entities.append(conv_hero) - - return version_entities - - -def get_asset_by_id(project_name, asset_id, fields=None): - assets = get_assets( - project_name, asset_ids=[asset_id], fields=fields - ) - for asset in assets: - return asset - 
return None - - -def get_asset_by_name(project_name, asset_name, fields=None): - assets = get_assets( - project_name, asset_names=[asset_name], fields=fields - ) - for asset in assets: - return asset - return None - - -def _folders_query(project_name, con, fields, **kwargs): - if fields is None or "tasks" in fields: - folders = get_folders_with_tasks( - con, project_name, fields=fields, **kwargs - ) - - else: - folders = con.get_folders(project_name, fields=fields, **kwargs) - - for folder in folders: - yield folder - - -def get_assets( - project_name, - asset_ids=None, - asset_names=None, - parent_ids=None, - archived=False, - fields=None -): - if not project_name: - return - - active = True - if archived: - active = None - - con = get_ayon_server_api_connection() - fields = folder_fields_v3_to_v4(fields, con) - kwargs = dict( - folder_ids=asset_ids, - parent_ids=parent_ids, - active=active, - ) - if not asset_names: - for folder in _folders_query(project_name, con, fields, **kwargs): - yield convert_v4_folder_to_v3(folder, project_name) - return - - new_asset_names = set() - folder_paths = set() - for name in asset_names: - if "/" in name: - folder_paths.add(name) - else: - new_asset_names.add(name) - - yielded_ids = set() - if folder_paths: - for folder in _folders_query( - project_name, con, fields, folder_paths=folder_paths, **kwargs - ): - yielded_ids.add(folder["id"]) - yield convert_v4_folder_to_v3(folder, project_name) - - if not new_asset_names: - return - - for folder in _folders_query( - project_name, con, fields, folder_names=new_asset_names, **kwargs - ): - if folder["id"] not in yielded_ids: - yielded_ids.add(folder["id"]) - yield convert_v4_folder_to_v3(folder, project_name) - - -def get_archived_assets( - project_name, - asset_ids=None, - asset_names=None, - parent_ids=None, - fields=None -): - return get_assets( - project_name, - asset_ids, - asset_names, - parent_ids, - True, - fields - ) - - -def get_asset_ids_with_subsets(project_name, asset_ids=None): - con = get_ayon_server_api_connection() - return con.get_folder_ids_with_products(project_name, asset_ids) - - -def get_subset_by_id(project_name, subset_id, fields=None): - subsets = get_subsets( - project_name, subset_ids=[subset_id], fields=fields - ) - for subset in subsets: - return subset - return None - - -def get_subset_by_name(project_name, subset_name, asset_id, fields=None): - subsets = get_subsets( - project_name, - subset_names=[subset_name], - asset_ids=[asset_id], - fields=fields - ) - for subset in subsets: - return subset - return None - - -def get_subsets( - project_name, - subset_ids=None, - subset_names=None, - asset_ids=None, - names_by_asset_ids=None, - archived=False, - fields=None -): - return _get_subsets( - project_name, - subset_ids, - subset_names, - asset_ids, - names_by_asset_ids, - archived, - fields=fields - ) - - -def get_subset_families(project_name, subset_ids=None): - con = get_ayon_server_api_connection() - return con.get_product_type_names(project_name, subset_ids) - - -def get_version_by_id(project_name, version_id, fields=None): - versions = get_versions( - project_name, - version_ids=[version_id], - fields=fields, - hero=True - ) - for version in versions: - return version - return None - - -def get_version_by_name(project_name, version, subset_id, fields=None): - versions = get_versions( - project_name, - subset_ids=[subset_id], - versions=[version], - fields=fields - ) - for version in versions: - return version - return None - - -def get_versions( - project_name, - 
version_ids=None, - subset_ids=None, - versions=None, - hero=False, - fields=None -): - return _get_versions( - project_name, - version_ids, - subset_ids, - versions, - hero=hero, - standard=True, - fields=fields - ) - - -def get_hero_version_by_id(project_name, version_id, fields=None): - versions = get_hero_versions( - project_name, - version_ids=[version_id], - fields=fields - ) - for version in versions: - return version - return None - - -def get_hero_version_by_subset_id( - project_name, subset_id, fields=None -): - versions = get_hero_versions( - project_name, - subset_ids=[subset_id], - fields=fields - ) - for version in versions: - return version - return None - - -def get_hero_versions( - project_name, subset_ids=None, version_ids=None, fields=None -): - return _get_versions( - project_name, - version_ids=version_ids, - subset_ids=subset_ids, - hero=True, - standard=False, - fields=fields - ) - - -def get_last_versions(project_name, subset_ids, active=None, fields=None): - if fields: - fields = set(fields) - fields.add("parent") - - versions = _get_versions( - project_name, - subset_ids=subset_ids, - latest=True, - hero=False, - active=active, - fields=fields - ) - return { - version["parent"]: version - for version in versions - } - - -def get_last_version_by_subset_id(project_name, subset_id, fields=None): - versions = _get_versions( - project_name, - subset_ids=[subset_id], - latest=True, - hero=False, - fields=fields - ) - if not versions: - return None - return versions[0] - - -def get_last_version_by_subset_name( - project_name, - subset_name, - asset_id=None, - asset_name=None, - fields=None -): - if not asset_id and not asset_name: - return None - - if not asset_id: - asset = get_asset_by_name( - project_name, asset_name, fields=["_id"] - ) - if not asset: - return None - asset_id = asset["_id"] - - subset = get_subset_by_name( - project_name, subset_name, asset_id, fields=["_id"] - ) - if not subset: - return None - return get_last_version_by_subset_id( - project_name, subset["_id"], fields=fields - ) - - -def get_output_link_versions(project_name, version_id, fields=None): - if not version_id: - return [] - - con = get_ayon_server_api_connection() - version_links = con.get_version_links( - project_name, version_id, link_direction="out") - - version_ids = { - link["entityId"] - for link in version_links - if link["entityType"] == "version" - } - if not version_ids: - return [] - - return get_versions(project_name, version_ids=version_ids, fields=fields) - - -def version_is_latest(project_name, version_id): - con = get_ayon_server_api_connection() - return con.version_is_latest(project_name, version_id) - - -def get_representation_by_id(project_name, representation_id, fields=None): - representations = get_representations( - project_name, - representation_ids=[representation_id], - fields=fields - ) - for representation in representations: - return representation - return None - - -def get_representation_by_name( - project_name, representation_name, version_id, fields=None -): - representations = get_representations( - project_name, - representation_names=[representation_name], - version_ids=[version_id], - fields=fields - ) - for representation in representations: - return representation - return None - - -def get_representations( - project_name, - representation_ids=None, - representation_names=None, - version_ids=None, - context_filters=None, - names_by_version_ids=None, - archived=False, - standard=True, - fields=None -): - if context_filters is not None: - # TODO 
should we add the support?
-        #  - there was ability to filter using regex
-        raise ValueError("OP v4 can't filter by representation context.")
-
-    if not archived and not standard:
-        return
-
-    if archived and not standard:
-        active = False
-    elif not archived and standard:
-        active = True
-    else:
-        active = None
-
-    con = get_ayon_server_api_connection()
-    fields = representation_fields_v3_to_v4(fields, con)
-    if fields and active is not None:
-        fields.add("active")
-
-    representations = con.get_representations(
-        project_name,
-        representation_ids=representation_ids,
-        representation_names=representation_names,
-        version_ids=version_ids,
-        names_by_version_ids=names_by_version_ids,
-        active=active,
-        fields=fields
-    )
-    for representation in representations:
-        yield convert_v4_representation_to_v3(representation)
-
-
-def get_representation_parents(project_name, representation):
-    if not representation:
-        return None
-
-    repre_id = representation["_id"]
-    parents_by_repre_id = get_representations_parents(
-        project_name, [representation]
-    )
-    return parents_by_repre_id[repre_id]
-
-
-def get_representations_parents(project_name, representations):
-    repre_ids = {
-        repre["_id"]
-        for repre in representations
-    }
-    con = get_ayon_server_api_connection()
-    parents_by_repre_id = con.get_representations_parents(project_name,
-                                                          repre_ids)
-    folder_ids = set()
-    for parents in parents_by_repre_id.values():
-        folder_ids.add(parents[2]["id"])
-
-    tasks_by_folder_id = {}
-
-    new_parents = {}
-    for repre_id, parents in parents_by_repre_id.items():
-        version, subset, folder, project = parents
-        folder_tasks = tasks_by_folder_id.get(folder["id"]) or {}
-        folder["tasks"] = folder_tasks
-        new_parents[repre_id] = (
-            convert_v4_version_to_v3(version),
-            convert_v4_subset_to_v3(subset),
-            convert_v4_folder_to_v3(folder, project_name),
-            project
-        )
-    return new_parents
-
-
-def get_archived_representations(
-    project_name,
-    representation_ids=None,
-    representation_names=None,
-    version_ids=None,
-    context_filters=None,
-    names_by_version_ids=None,
-    fields=None
-):
-    return get_representations(
-        project_name,
-        representation_ids=representation_ids,
-        representation_names=representation_names,
-        version_ids=version_ids,
-        context_filters=context_filters,
-        names_by_version_ids=names_by_version_ids,
-        archived=True,
-        standard=False,
-        fields=fields
-    )
-
-
-def get_thumbnail(
-    project_name, thumbnail_id, entity_type, entity_id, fields=None
-):
-    """Receive thumbnail entity data.
-
-    Args:
-        project_name (str): Name of project where to look for queried
-            entities.
-        thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity.
-        entity_type (str): Type of entity for which the thumbnail should be
-            received.
-        entity_id (str): Id of entity for which the thumbnail should be
-            received.
-        fields (Iterable[str]): Fields that should be returned. All fields
-            are returned if 'None' is passed.
-
-    Returns:
-        None: If thumbnail with specified id was not found.
-        Dict: Thumbnail entity data which can be reduced to specified
-            'fields'.
-    """
-
-    if not thumbnail_id or not entity_type or not entity_id:
-        return None
-
-    if entity_type == "asset":
-        entity_type = "folder"
-
-    elif entity_type == "hero_version":
-        entity_type = "version"
-
-    return {
-        "_id": thumbnail_id,
-        "type": "thumbnail",
-        "schema": CURRENT_THUMBNAIL_SCHEMA,
-        "data": {
-            "entity_type": entity_type,
-            "entity_id": entity_id
-        }
-    }
-
-
-def get_thumbnails(project_name, thumbnail_contexts, fields=None):
-    """Get thumbnail entities.
-
-    Warning:
-        This function is not OpenPype compatible. There is no usage of this
-        function in the codebase so there is nothing to convert. The previous
-        implementation cannot be AYON compatible without entity types.
-    """
-
-    # Use a list; thumbnail items are dicts and cannot be stored in a set.
-    thumbnail_items = []
-    for thumbnail_context in thumbnail_contexts:
-        thumbnail_id, entity_type, entity_id = thumbnail_context
-        thumbnail_item = get_thumbnail(
-            project_name, thumbnail_id, entity_type, entity_id
-        )
-        if thumbnail_item:
-            thumbnail_items.append(thumbnail_item)
-    return thumbnail_items
-
-
-def get_thumbnail_id_from_source(project_name, src_type, src_id):
-    """Receive thumbnail id from source entity.
-
-    Args:
-        project_name (str): Name of project where to look for queried
-            entities.
-        src_type (str): Type of source entity ('asset', 'version').
-        src_id (Union[str, ObjectId]): Id of source entity.
-
-    Returns:
-        ObjectId: Thumbnail id assigned to entity.
-        None: If source entity does not have any thumbnail id assigned.
-    """
-
-    if not src_type or not src_id:
-        return None
-
-    if src_type == "version":
-        version = get_version_by_id(
-            project_name, src_id, fields=["data.thumbnail_id"]
-        ) or {}
-        return version.get("data", {}).get("thumbnail_id")
-
-    if src_type == "asset":
-        asset = get_asset_by_id(
-            project_name, src_id, fields=["data.thumbnail_id"]
-        ) or {}
-        return asset.get("data", {}).get("thumbnail_id")
-
-    return None
-
-
-def get_workfile_info(
-    project_name, asset_id, task_name, filename, fields=None
-):
-    if not asset_id or not task_name or not filename:
-        return None
-
-    con = get_ayon_server_api_connection()
-    task = con.get_task_by_name(
-        project_name, asset_id, task_name, fields=["id", "name", "folderId"]
-    )
-    if not task:
-        return None
-
-    fields = workfile_info_fields_v3_to_v4(fields)
-
-    for workfile_info in con.get_workfiles_info(
-        project_name, task_ids=[task["id"]], fields=fields
-    ):
-        if workfile_info["name"] == filename:
-            return convert_v4_workfile_info_to_v3(workfile_info, task)
-    return None
diff --git a/openpype/client/server/operations.py b/openpype/client/server/operations.py
deleted file mode 100644
index eddc1eaf60..0000000000
--- a/openpype/client/server/operations.py
+++ /dev/null
@@ -1,880 +0,0 @@
-import copy
-import json
-import collections
-import uuid
-import datetime
-
-from bson.objectid import ObjectId
-
-from openpype.client.operations_base import (
-    REMOVED_VALUE,
-    CreateOperation,
-    UpdateOperation,
-    DeleteOperation,
-    BaseOperationsSession
-)
-
-from openpype.client.mongo.operations import (
-    CURRENT_THUMBNAIL_SCHEMA,
-    CURRENT_REPRESENTATION_SCHEMA,
-    CURRENT_HERO_VERSION_SCHEMA,
-    CURRENT_VERSION_SCHEMA,
-    CURRENT_SUBSET_SCHEMA,
-    CURRENT_ASSET_DOC_SCHEMA,
-    CURRENT_PROJECT_SCHEMA,
-)
-
-from .conversion_utils import (
-    convert_create_asset_to_v4,
-    convert_create_task_to_v4,
-    convert_create_subset_to_v4,
-    convert_create_version_to_v4,
-    convert_create_hero_version_to_v4,
-    convert_create_representation_to_v4,
-    convert_create_workfile_info_to_v4,
-
-    convert_update_folder_to_v4,
-    convert_update_subset_to_v4,
-    convert_update_version_to_v4,
-    convert_update_hero_version_to_v4,
-    convert_update_representation_to_v4,
-    convert_update_workfile_info_to_v4,
-)
-from .utils import create_entity_id, get_ayon_server_api_connection
-
-
-def _create_or_convert_to_id(entity_id=None):
-    if entity_id is None:
-        return create_entity_id()
-
-    if isinstance(entity_id, ObjectId):
-        raise TypeError("Type of 'ObjectId' is not supported anymore.")
-
-    # Validate if can be converted to uuid
-    uuid.UUID(entity_id)
-    return entity_id
-
-
-def new_project_document(
-    project_name, project_code, config, data=None, entity_id=None
-):
-    """Create skeleton data of project document.
-
-    Args:
-        project_name (str): Name of project. Used as identifier of a project.
-        project_code (str): Shorter version of project name without spaces
-            and special characters (in most cases). Should also be
-            considered a unique name across projects.
-        config (Dict[str, Any]): Project config consisting of roots,
-            templates, applications and other project Anatomy related data.
-        data (Dict[str, Any]): Project data with information about its
-            attributes (e.g. 'fps' etc.) or integration specific keys.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id
-            is created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of project document.
-    """
-
-    if data is None:
-        data = {}
-
-    data["code"] = project_code
-
-    return {
-        "_id": _create_or_convert_to_id(entity_id),
-        "name": project_name,
-        "type": CURRENT_PROJECT_SCHEMA,
-        "entity_data": data,
-        "config": config
-    }
-
-
-def new_asset_document(
-    name, project_id, parent_id, parents, data=None, entity_id=None
-):
-    """Create skeleton data of asset document.
-
-    Args:
-        name (str): Is considered as unique identifier of asset in project.
-        project_id (Union[str, ObjectId]): Id of project document.
-        parent_id (Union[str, ObjectId]): Id of parent asset.
-        parents (List[str]): List of parent assets names.
-        data (Dict[str, Any]): Asset document data. Empty dictionary is used
-            if not passed. Value of 'parent_id' is used to fill
-            'visualParent'.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id
-            is created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of asset document.
-    """
-
-    if data is None:
-        data = {}
-    if parent_id is not None:
-        parent_id = _create_or_convert_to_id(parent_id)
-    data["visualParent"] = parent_id
-    data["parents"] = parents
-
-    return {
-        "_id": _create_or_convert_to_id(entity_id),
-        "type": "asset",
-        "name": name,
-        # This will be ignored
-        "parent": project_id,
-        "data": data,
-        "schema": CURRENT_ASSET_DOC_SCHEMA
-    }
-
-
-def new_subset_document(name, family, asset_id, data=None, entity_id=None):
-    """Create skeleton data of subset document.
-
-    Args:
-        name (str): Is considered as unique identifier of subset under asset.
-        family (str): Subset's family.
-        asset_id (Union[str, ObjectId]): Id of parent asset.
-        data (Dict[str, Any]): Subset document data. Empty dictionary is used
-            if not passed. Value of 'family' is used to fill 'family'.
-        entity_id (Union[str, ObjectId]): Predefined id of document. New id
-            is created if not passed.
-
-    Returns:
-        Dict[str, Any]: Skeleton of subset document.
-    """
-
-    if data is None:
-        data = {}
-    data["family"] = family
-    return {
-        "_id": _create_or_convert_to_id(entity_id),
-        "schema": CURRENT_SUBSET_SCHEMA,
-        "type": "subset",
-        "name": name,
-        "data": data,
-        "parent": _create_or_convert_to_id(asset_id)
-    }
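Taken together, these factories build the v3 parent-child document chain. A small hypothetical sketch (the names and empty config are assumptions, not values from the codebase):

```python
# Hedged sketch: each '_id' is generated by '_create_or_convert_to_id'.
project_doc = new_project_document("demo_project", "demo", config={})
asset_doc = new_asset_document(
    "sh010", project_doc["_id"], parent_id=None, parents=[]
)
subset_doc = new_subset_document("renderMain", "render", asset_doc["_id"])
```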
- """ - - if data is None: - data = {} - - return { - "_id": _create_or_convert_to_id(entity_id), - "schema": CURRENT_VERSION_SCHEMA, - "type": "version", - "name": int(version), - "parent": _create_or_convert_to_id(subset_id), - "data": data - } - - -def new_hero_version_doc(subset_id, data, version=None, entity_id=None): - """Create skeleton data of hero version document. - - Args: - subset_id (Union[str, ObjectId]): Id of parent subset. - data (Dict[str, Any]): Version document data. - version (int): Version of source version. - entity_id (Union[str, ObjectId]): Predefined id of document. New id is - created if not passed. - - Returns: - Dict[str, Any]: Skeleton of version document. - """ - - if version is None: - version = -1 - elif version > 0: - version = -version - - return { - "_id": _create_or_convert_to_id(entity_id), - "schema": CURRENT_HERO_VERSION_SCHEMA, - "type": "hero_version", - "version": version, - "parent": _create_or_convert_to_id(subset_id), - "data": data - } - - -def new_representation_doc( - name, version_id, context, data=None, entity_id=None -): - """Create skeleton data of representation document. - - Args: - name (str): Representation name considered as unique identifier - of representation under version. - version_id (Union[str, ObjectId]): Id of parent version. - context (Dict[str, Any]): Representation context used for fill template - of to query. - data (Dict[str, Any]): Representation document data. - entity_id (Union[str, ObjectId]): Predefined id of document. New id is - created if not passed. - - Returns: - Dict[str, Any]: Skeleton of version document. - """ - - if data is None: - data = {} - - return { - "_id": _create_or_convert_to_id(entity_id), - "schema": CURRENT_REPRESENTATION_SCHEMA, - "type": "representation", - "parent": _create_or_convert_to_id(version_id), - "name": name, - "data": data, - - # Imprint shortcut to context for performance reasons. - "context": context - } - - -def new_thumbnail_doc(data=None, entity_id=None): - """Create skeleton data of thumbnail document. - - Args: - data (Dict[str, Any]): Thumbnail document data. - entity_id (Union[str, ObjectId]): Predefined id of document. New id is - created if not passed. - - Returns: - Dict[str, Any]: Skeleton of thumbnail document. - """ - - if data is None: - data = {} - - return { - "_id": _create_or_convert_to_id(entity_id), - "type": "thumbnail", - "schema": CURRENT_THUMBNAIL_SCHEMA, - "data": data - } - - -def new_workfile_info_doc( - filename, asset_id, task_name, files, data=None, entity_id=None -): - """Create skeleton data of workfile info document. - - Workfile document is at this moment used primarily for artist notes. - - Args: - filename (str): Filename of workfile. - asset_id (Union[str, ObjectId]): Id of asset under which workfile live. - task_name (str): Task under which was workfile created. - files (List[str]): List of rootless filepaths related to workfile. - data (Dict[str, Any]): Additional metadata. - - Returns: - Dict[str, Any]: Skeleton of workfile info document. 
- """ - - if not data: - data = {} - - return { - "_id": _create_or_convert_to_id(entity_id), - "type": "workfile", - "parent": _create_or_convert_to_id(asset_id), - "task_name": task_name, - "filename": filename, - "data": data, - "files": files - } - - -def _prepare_update_data(old_doc, new_doc, replace): - changes = {} - for key, value in new_doc.items(): - if key not in old_doc or value != old_doc[key]: - changes[key] = value - - if replace: - for key in old_doc.keys(): - if key not in new_doc: - changes[key] = REMOVED_VALUE - return changes - - -def prepare_subset_update_data(old_doc, new_doc, replace=True): - """Compare two subset documents and prepare update data. - - Based on compared values will create update data for - 'MongoUpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - return _prepare_update_data(old_doc, new_doc, replace) - - -def prepare_version_update_data(old_doc, new_doc, replace=True): - """Compare two version documents and prepare update data. - - Based on compared values will create update data for - 'MongoUpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - return _prepare_update_data(old_doc, new_doc, replace) - - -def prepare_hero_version_update_data(old_doc, new_doc, replace=True): - """Compare two hero version documents and prepare update data. - - Based on compared values will create update data for 'UpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - changes = _prepare_update_data(old_doc, new_doc, replace) - changes.pop("version_id", None) - return changes - - -def prepare_representation_update_data(old_doc, new_doc, replace=True): - """Compare two representation documents and prepare update data. - - Based on compared values will create update data for - 'MongoUpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - changes = _prepare_update_data(old_doc, new_doc, replace) - context = changes.get("data", {}).get("context") - # Make sure that both 'family' and 'subset' are in changes if - # one of them changed (they'll both become 'product'). - if ( - context - and ("family" in context or "subset" in context) - ): - context["family"] = new_doc["data"]["context"]["family"] - context["subset"] = new_doc["data"]["context"]["subset"] - - return changes - - -def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): - """Compare two workfile info documents and prepare update data. - - Based on compared values will create update data for - 'MongoUpdateOperation'. - - Empty output means that documents are identical. - - Returns: - Dict[str, Any]: Changes between old and new document. - """ - - return _prepare_update_data(old_doc, new_doc, replace) - - -class FailedOperations(Exception): - pass - - -def entity_data_json_default(value): - if isinstance(value, datetime.datetime): - return int(value.timestamp()) - - raise TypeError( - "Object of type {} is not JSON serializable".format(str(type(value))) - ) - - -def failed_json_default(value): - return "< Failed value {} > {}".format(type(value), str(value)) - - -class ServerCreateOperation(CreateOperation): - """Operation to create an entity. - - Args: - project_name (str): On which project operation will happen. 
-        entity_type (str): Type of entity on which change happens.
-            e.g. 'asset', 'representation' etc.
-        data (Dict[str, Any]): Data of entity that will be created.
-    """
-
-    def __init__(self, project_name, entity_type, data, session):
-        self._session = session
-
-        if not data:
-            data = {}
-        data = copy.deepcopy(data)
-        if entity_type == "project":
-            raise ValueError("Project cannot be created using operations")
-
-        tasks = None
-        if entity_type == "asset":
-            # TODO handle tasks
-            entity_type = "folder"
-            if "data" in data:
-                tasks = data["data"].get("tasks")
-
-            project = self._session.get_project(project_name)
-            new_data = convert_create_asset_to_v4(data, project, self.con)
-
-        elif entity_type == "task":
-            project = self._session.get_project(project_name)
-            new_data = convert_create_task_to_v4(data, project, self.con)
-
-        elif entity_type == "subset":
-            new_data = convert_create_subset_to_v4(data, self.con)
-            entity_type = "product"
-
-        elif entity_type == "version":
-            new_data = convert_create_version_to_v4(data, self.con)
-
-        elif entity_type == "hero_version":
-            new_data = convert_create_hero_version_to_v4(
-                data, project_name, self.con
-            )
-            entity_type = "version"
-
-        elif entity_type in ("representation", "archived_representation"):
-            new_data = convert_create_representation_to_v4(data, self.con)
-            entity_type = "representation"
-
-        elif entity_type == "workfile":
-            new_data = convert_create_workfile_info_to_v4(
-                data, project_name, self.con
-            )
-
-        else:
-            raise ValueError(
-                "Unhandled entity type \"{}\"".format(entity_type)
-            )
-
-        # Simple check if data can be dumped into json
-        # - should raise error on 'ObjectId' object
-        try:
-            new_data = json.loads(
-                json.dumps(new_data, default=entity_data_json_default)
-            )
-
-        except Exception:
-            raise ValueError("Couldn't json parse body: {}".format(
-                json.dumps(new_data, default=failed_json_default)
-            ))
-
-        super(ServerCreateOperation, self).__init__(
-            project_name, entity_type, new_data
-        )
-
-        if "id" not in self._data:
-            self._data["id"] = create_entity_id()
-
-        if tasks:
-            copied_tasks = copy.deepcopy(tasks)
-            for task_name, task in copied_tasks.items():
-                task["name"] = task_name
-                task["folderId"] = self._data["id"]
-                self.session.create_entity(
-                    project_name, "task", task, nested_id=self.id
-                )
-
-    @property
-    def con(self):
-        return self.session.con
-
-    @property
-    def session(self):
-        return self._session
-
-    @property
-    def entity_id(self):
-        return self._data["id"]
-
-    def to_server_operation(self):
-        return {
-            "id": self.id,
-            "type": "create",
-            "entityType": self.entity_type,
-            "entityId": self.entity_id,
-            "data": self._data
-        }
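For reference, a create operation serializes to a payload of roughly this shape; the concrete values below are made up for illustration:

```python
# Hypothetical result of ServerCreateOperation.to_server_operation().
payload = {
    "id": "9f6e2d4c0b8a4e52a7c1d3f5e6a7b8c9",  # operation id (uuid hex)
    "type": "create",
    "entityType": "folder",  # a v3 "asset" is created as a v4 "folder"
    "entityId": "0a1b2c3d4e5f46718293a4b5c6d7e8f9",  # entity id (uuid hex)
    "data": {"id": "0a1b2c3d4e5f46718293a4b5c6d7e8f9", "name": "sh010"},
}
```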
- """ - - def __init__( - self, project_name, entity_type, entity_id, update_data, session - ): - self._session = session - - update_data = copy.deepcopy(update_data) - if entity_type == "project": - raise ValueError("Project cannot be created using operations") - - if entity_type in ("asset", "archived_asset"): - new_update_data = convert_update_folder_to_v4( - project_name, entity_id, update_data, self.con - ) - entity_type = "folder" - - elif entity_type == "subset": - new_update_data = convert_update_subset_to_v4( - project_name, entity_id, update_data, self.con - ) - entity_type = "product" - - elif entity_type == "version": - new_update_data = convert_update_version_to_v4( - project_name, entity_id, update_data, self.con - ) - - elif entity_type == "hero_version": - new_update_data = convert_update_hero_version_to_v4( - project_name, entity_id, update_data, self.con - ) - entity_type = "version" - - elif entity_type in ("representation", "archived_representation"): - new_update_data = convert_update_representation_to_v4( - project_name, entity_id, update_data, self.con - ) - entity_type = "representation" - - elif entity_type == "workfile": - new_update_data = convert_update_workfile_info_to_v4( - project_name, entity_id, update_data, self.con - ) - - else: - raise ValueError( - "Unhandled entity type \"{}\"".format(entity_type) - ) - - try: - new_update_data = json.loads( - json.dumps(new_update_data, default=entity_data_json_default) - ) - - except: - raise ValueError("Couldn't json parse body: {}".format( - json.dumps(new_update_data, default=failed_json_default) - )) - - super(ServerUpdateOperation, self).__init__( - project_name, entity_type, entity_id, new_update_data - ) - - @property - def con(self): - return self.session.con - - @property - def session(self): - return self._session - - def to_server_operation(self): - if not self._update_data: - return None - - update_data = {} - for key, value in self._update_data.items(): - if value is REMOVED_VALUE: - value = None - update_data[key] = value - - return { - "id": self.id, - "type": "update", - "entityType": self.entity_type, - "entityId": self.entity_id, - "data": update_data - } - - -class ServerDeleteOperation(DeleteOperation): - """Operation to delete an entity. - - Args: - project_name (str): On which project operation will happen. - entity_type (str): Type of entity on which change happens. - e.g. 'asset', 'representation' etc. - entity_id (Union[str, ObjectId]): Entity id that will be removed. 
- """ - - def __init__(self, project_name, entity_type, entity_id, session): - self._session = session - - if entity_type == "asset": - entity_type = "folder" - - elif entity_type == "hero_version": - entity_type = "version" - - elif entity_type == "subset": - entity_type = "product" - - super(ServerDeleteOperation, self).__init__( - project_name, entity_type, entity_id - ) - - @property - def con(self): - return self.session.con - - @property - def session(self): - return self._session - - def to_server_operation(self): - return { - "id": self.id, - "type": self.operation_name, - "entityId": self.entity_id, - "entityType": self.entity_type, - } - - -class OperationsSession(BaseOperationsSession): - def __init__(self, con=None, *args, **kwargs): - super(OperationsSession, self).__init__(*args, **kwargs) - if con is None: - con = get_ayon_server_api_connection() - self._con = con - self._project_cache = {} - self._nested_operations = collections.defaultdict(list) - - @property - def con(self): - return self._con - - def get_project(self, project_name): - if project_name not in self._project_cache: - self._project_cache[project_name] = self.con.get_project( - project_name) - return copy.deepcopy(self._project_cache[project_name]) - - def commit(self): - """Commit session operations.""" - - operations, self._operations = self._operations, [] - if not operations: - return - - operations_by_project = collections.defaultdict(list) - for operation in operations: - operations_by_project[operation.project_name].append(operation) - - body_by_id = {} - results = [] - for project_name, operations in operations_by_project.items(): - operations_body = [] - for operation in operations: - body = operation.to_server_operation() - if body is not None: - try: - json.dumps(body) - except: - raise ValueError("Couldn't json parse body: {}".format( - json.dumps( - body, indent=4, default=failed_json_default - ) - )) - - body_by_id[operation.id] = body - operations_body.append(body) - - if operations_body: - result = self._con.post( - "projects/{}/operations".format(project_name), - operations=operations_body, - canFail=False - ) - results.append(result.data) - - for result in results: - if result.get("success"): - continue - - if "operations" not in result: - raise FailedOperations( - "Operation failed. Content: {}".format(str(result)) - ) - - for op_result in result["operations"]: - if not op_result["success"]: - operation_id = op_result["id"] - raise FailedOperations(( - "Operation \"{}\" failed with data:\n{}\nError: {}." - ).format( - operation_id, - json.dumps(body_by_id[operation_id], indent=4), - op_result.get("error", "unknown"), - )) - - def create_entity(self, project_name, entity_type, data, nested_id=None): - """Fast access to 'ServerCreateOperation'. - - Args: - project_name (str): On which project the creation happens. - entity_type (str): Which entity type will be created. - data (Dicst[str, Any]): Entity data. - nested_id (str): Id of other operation from which is triggered - operation -> Operations can trigger suboperations but they - must be added to operations list after it's parent is added. - - Returns: - ServerCreateOperation: Object of update operation. 
- """ - - operation = ServerCreateOperation( - project_name, entity_type, data, self - ) - - if nested_id: - self._nested_operations[nested_id].append(operation) - else: - self.add(operation) - if operation.id in self._nested_operations: - self.extend(self._nested_operations.pop(operation.id)) - - return operation - - def update_entity( - self, project_name, entity_type, entity_id, update_data, nested_id=None - ): - """Fast access to 'ServerUpdateOperation'. - - Returns: - ServerUpdateOperation: Object of update operation. - """ - - operation = ServerUpdateOperation( - project_name, entity_type, entity_id, update_data, self - ) - if nested_id: - self._nested_operations[nested_id].append(operation) - else: - self.add(operation) - if operation.id in self._nested_operations: - self.extend(self._nested_operations.pop(operation.id)) - return operation - - def delete_entity( - self, project_name, entity_type, entity_id, nested_id=None - ): - """Fast access to 'ServerDeleteOperation'. - - Returns: - ServerDeleteOperation: Object of delete operation. - """ - - operation = ServerDeleteOperation( - project_name, entity_type, entity_id, self - ) - if nested_id: - self._nested_operations[nested_id].append(operation) - else: - self.add(operation) - if operation.id in self._nested_operations: - self.extend(self._nested_operations.pop(operation.id)) - return operation - - -def create_project( - project_name, - project_code, - library_project=False, - preset_name=None, - con=None -): - """Create project using OpenPype settings. - - This project creation function is not validating project document on - creation. It is because project document is created blindly with only - minimum required information about project which is it's name, code, type - and schema. - - Entered project name must be unique and project must not exist yet. - - Note: - This function is here to be OP v4 ready but in v3 has more logic - to do. That's why inner imports are in the body. - - Args: - project_name (str): New project name. Should be unique. - project_code (str): Project's code should be unique too. - library_project (bool): Project is library project. - preset_name (str): Name of anatomy preset. Default is used if not - passed. - con (ServerAPI): Connection to server with logged user. - - Raises: - ValueError: When project name already exists in MongoDB. - - Returns: - dict: Created project document. 
- """ - - if con is None: - con = get_ayon_server_api_connection() - - return con.create_project( - project_name, - project_code, - library_project, - preset_name - ) - - -def delete_project(project_name, con=None): - if con is None: - con = get_ayon_server_api_connection() - - return con.delete_project(project_name) - - -def create_thumbnail(project_name, src_filepath, thumbnail_id=None, con=None): - if con is None: - con = get_ayon_server_api_connection() - return con.create_thumbnail(project_name, src_filepath, thumbnail_id) diff --git a/openpype/client/server/utils.py b/openpype/client/server/utils.py deleted file mode 100644 index a9dcf539bd..0000000000 --- a/openpype/client/server/utils.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -import uuid - -import ayon_api - -from openpype.client.operations_base import REMOVED_VALUE - - -class _GlobalCache: - initialized = False - - -def get_ayon_server_api_connection(): - if _GlobalCache.initialized: - con = ayon_api.get_server_api_connection() - else: - from openpype.lib.local_settings import get_local_site_id - - _GlobalCache.initialized = True - site_id = get_local_site_id() - version = os.getenv("AYON_VERSION") - if ayon_api.is_connection_created(): - con = ayon_api.get_server_api_connection() - con.set_site_id(site_id) - con.set_client_version(version) - else: - con = ayon_api.create_connection(site_id, version) - return con - - -def create_entity_id(): - return uuid.uuid1().hex - - -def prepare_attribute_changes(old_entity, new_entity, replace=False): - """Prepare changes of attributes on entities. - - Compare 'attrib' of old and new entity data to prepare only changed - values that should be sent to server for update. - - Example: - >>> # Limited entity data to 'attrib' - >>> old_entity = { - ... "attrib": {"attr_1": 1, "attr_2": "MyString", "attr_3": True} - ... } - >>> new_entity = { - ... "attrib": {"attr_1": 2, "attr_3": True, "attr_4": 3} - ... } - >>> # Changes if replacement should not happen - >>> expected_changes = { - ... "attr_1": 2, - ... "attr_4": 3 - ... } - >>> changes = prepare_attribute_changes(old_entity, new_entity) - >>> changes == expected_changes - True - - >>> # Changes if replacement should happen - >>> expected_changes_replace = { - ... "attr_1": 2, - ... "attr_2": REMOVED_VALUE, - ... "attr_4": 3 - ... } - >>> changes_replace = prepare_attribute_changes( - ... old_entity, new_entity, True) - >>> changes_replace == expected_changes_replace - True - - Args: - old_entity (dict[str, Any]): Data of entity queried from server. - new_entity (dict[str, Any]): Entity data with applied changes. - replace (bool): New entity should fully replace all old entity values. - - Returns: - Dict[str, Any]: Values from new entity only if value has changed. - """ - - attrib_changes = {} - new_attrib = new_entity.get("attrib") - old_attrib = old_entity.get("attrib") - if new_attrib is None: - if not replace: - return attrib_changes - new_attrib = {} - - if old_attrib is None: - return new_attrib - - for attr, new_attr_value in new_attrib.items(): - old_attr_value = old_attrib.get(attr) - if old_attr_value != new_attr_value: - attrib_changes[attr] = new_attr_value - - if replace: - for attr in old_attrib: - if attr not in new_attrib: - attrib_changes[attr] = REMOVED_VALUE - - return attrib_changes - - -def prepare_entity_changes(old_entity, new_entity, replace=False): - """Prepare changes of AYON entities. - - Compare old and new entity to filter values from new data that changed. 
-
-    Args:
-        old_entity (dict[str, Any]): Data of entity queried from server.
-        new_entity (dict[str, Any]): Entity data with applied changes.
-        replace (bool): All attributes should be replaced by new values. So
-            all attribute values that are not on new entity will be removed.
-
-    Returns:
-        Dict[str, Any]: Only values from new entity that changed.
-    """
-
-    changes = {}
-    for key, new_value in new_entity.items():
-        if key == "attrib":
-            continue
-
-        old_value = old_entity.get(key)
-        if old_value != new_value:
-            changes[key] = new_value
-
-    if replace:
-        for key in old_entity:
-            if key not in new_entity:
-                changes[key] = REMOVED_VALUE
-
-    attr_changes = prepare_attribute_changes(old_entity, new_entity, replace)
-    if attr_changes:
-        changes["attrib"] = attr_changes
-    return changes
diff --git a/openpype/hosts/aftereffects/addon.py b/openpype/hosts/aftereffects/addon.py
deleted file mode 100644
index 79df550312..0000000000
--- a/openpype/hosts/aftereffects/addon.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from openpype.modules import OpenPypeModule, IHostAddon
-
-
-class AfterEffectsAddon(OpenPypeModule, IHostAddon):
-    name = "aftereffects"
-    host_name = "aftereffects"
-
-    def initialize(self, module_settings):
-        self.enabled = True
-
-    def add_implementation_envs(self, env, _app):
-        """Modify environments to contain all required for implementation."""
-        defaults = {
-            "OPENPYPE_LOG_NO_COLORS": "True",
-            "WEBSOCKET_URL": "ws://localhost:8097/ws/"
-        }
-        for key, value in defaults.items():
-            if not env.get(key):
-                env[key] = value
-
-    def get_workfile_extensions(self):
-        return [".aep"]
diff --git a/openpype/hosts/aftereffects/api/README.md b/openpype/hosts/aftereffects/api/README.md
deleted file mode 100644
index 9c4bad3689..0000000000
--- a/openpype/hosts/aftereffects/api/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# AfterEffects Integration
-
-Requirements: This extension requires use of the JavaScript engine, which is
-available since CC 16.0.
-Please check your File>Project Settings>Expressions>Expressions Engine
-
-## Setup
-
-The After Effects integration requires two components to work; `extension` and `server`.
-
-### Extension
-
-To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
-
-```
-ExManCmd /install {path to addon}/api/extension.zxp
-```
-OR
-download [Anastasiy's Extension Manager](https://install.anastasiy.com/)
-
-`{path to addon}` will most likely be in your AppData on Windows, or in your user data folder on Linux and MacOS.
-
-### Server
-
-The easiest way to get the server running and After Effects launched is with:
-
-```
-python -c ^"import openpype.hosts.aftereffects;openpype.hosts.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
-```
-
-`openpype.hosts.aftereffects.launch` launches the application and the server, and also closes the server when After Effects exits.
-
-## Usage
-
-The After Effects extension can be found under `Window > Extensions > AYON`. Once launched you should be presented with a panel like this:
-
-![Ayon Panel](panel.png "Ayon Panel")
-
-
-## Developing
-
-### Extension
-When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).
- -When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide). - -``` -ZXPSignCmd -selfSignedCert NA NA Ayon Avalon-After-Effects Ayon extension.p12 -ZXPSignCmd -sign {path to addon}/api/extension {path to addon}/api/extension.zxp extension.p12 Ayon -``` - -### Plugin Examples - -These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py). - -Expected deployed extension location on default Windows: -`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\io.ynput.AE.panel` - -For easier debugging of Javascript: -https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1 -Add (optional) --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome -then localhost:8092 - -Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 -## Resources - - https://javascript-tools-guide.readthedocs.io/introduction/index.html - - https://github.com/Adobe-CEP/Getting-Started-guides - - https://github.com/Adobe-CEP/CEP-Resources diff --git a/openpype/hosts/aftereffects/api/launch_logic.py b/openpype/hosts/aftereffects/api/launch_logic.py deleted file mode 100644 index e90c3dc5b8..0000000000 --- a/openpype/hosts/aftereffects/api/launch_logic.py +++ /dev/null @@ -1,389 +0,0 @@ -import os -import sys -import subprocess -import collections -import logging -import asyncio -import functools -import traceback - - -from wsrpc_aiohttp import ( - WebSocketRoute, - WebSocketAsync -) - -from qtpy import QtCore - -from openpype.lib import Logger -from openpype.tests.lib import is_in_tests -from openpype.pipeline import install_host, legacy_io -from openpype.modules import ModulesManager -from openpype.tools.utils import host_tools, get_openpype_qt_app -from openpype.tools.adobe_webserver.app import WebServerTool - -from .ws_stub import get_stub -from .lib import set_settings - -log = logging.getLogger(__name__) -log.setLevel(logging.DEBUG) - - -def safe_excepthook(*args): - traceback.print_exception(*args) - - -def main(*subprocess_args): - """Main entrypoint to AE launching, called from pre hook.""" - sys.excepthook = safe_excepthook - - from openpype.hosts.aftereffects.api import AfterEffectsHost - - host = AfterEffectsHost() - install_host(host) - - os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" - app = get_openpype_qt_app() - app.setQuitOnLastWindowClosed(False) - - launcher = ProcessLauncher(subprocess_args) - launcher.start() - - if os.environ.get("HEADLESS_PUBLISH"): - manager = ModulesManager() - webpublisher_addon = manager["webpublisher"] - - launcher.execute_in_main_thread( - functools.partial( - webpublisher_addon.headless_publish, - log, - "CloseAE", - is_in_tests() - ) - ) - - elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True): - save = False - if os.getenv("WORKFILES_SAVE_AS"): - save = True - - launcher.execute_in_main_thread( - lambda: host_tools.show_tool_by_name("workfiles", save=save) - ) - - sys.exit(app.exec_()) - - -def show_tool_by_name(tool_name): - kwargs = {} - if tool_name == "loader": - kwargs["use_context"] = True - - host_tools.show_tool_by_name(tool_name, **kwargs) - - -class 
ProcessLauncher(QtCore.QObject):
-    """Launches webserver, connects to it, runs main thread."""
-    route_name = "AfterEffects"
-    _main_thread_callbacks = collections.deque()
-
-    def __init__(self, subprocess_args):
-        self._subprocess_args = subprocess_args
-        self._log = None
-
-        super(ProcessLauncher, self).__init__()
-
-        # Keep track if launcher was already started
-        self._started = False
-
-        self._process = None
-        self._websocket_server = None
-
-        start_process_timer = QtCore.QTimer()
-        start_process_timer.setInterval(100)
-
-        loop_timer = QtCore.QTimer()
-        loop_timer.setInterval(200)
-
-        start_process_timer.timeout.connect(self._on_start_process_timer)
-        loop_timer.timeout.connect(self._on_loop_timer)
-
-        self._start_process_timer = start_process_timer
-        self._loop_timer = loop_timer
-
-    @property
-    def log(self):
-        if self._log is None:
-            self._log = Logger.get_logger("{}-launcher".format(
-                self.route_name))
-        return self._log
-
-    @property
-    def websocket_server_is_running(self):
-        if self._websocket_server is not None:
-            return self._websocket_server.is_running
-        return False
-
-    @property
-    def is_process_running(self):
-        if self._process is not None:
-            return self._process.poll() is None
-        return False
-
-    @property
-    def is_host_connected(self):
-        """Returns True if connected, False if the app is not running at
-        all, None if the app runs but is not connected yet."""
-        if not self.is_process_running:
-            return False
-
-        try:
-            _stub = get_stub()
-            if _stub:
-                return True
-        except Exception:
-            pass
-
-        return None
-
-    @classmethod
-    def execute_in_main_thread(cls, callback):
-        cls._main_thread_callbacks.append(callback)
-
-    def start(self):
-        if self._started:
-            return
-        self.log.info("Started launch logic of AfterEffects")
-        self._started = True
-        self._start_process_timer.start()
-
-    def exit(self):
-        """Exit whole application."""
-        if self._start_process_timer.isActive():
-            self._start_process_timer.stop()
-        if self._loop_timer.isActive():
-            self._loop_timer.stop()
-
-        if self._websocket_server is not None:
-            self._websocket_server.stop()
-
-        if self._process:
-            self._process.kill()
-            self._process.wait()
-
-        QtCore.QCoreApplication.exit()
-
-    def _on_loop_timer(self):
-        # TODO find better way and catch errors
-        # Run only callbacks that are in queue at the moment
-        cls = self.__class__
-        for _ in range(len(cls._main_thread_callbacks)):
-            if cls._main_thread_callbacks:
-                callback = cls._main_thread_callbacks.popleft()
-                callback()
-
-        if not self.is_process_running:
-            self.log.info("Host process is not running. Closing")
-            self.exit()
-
-        elif not self.websocket_server_is_running:
-            self.log.info("Websocket server is not running. Closing")
-            self.exit()
Closing") - self.exit() - - def _on_start_process_timer(self): - # TODO add try except validations for each part in this method - # Start server as first thing - if self._websocket_server is None: - self._init_server() - return - - # TODO add waiting time - # Wait for webserver - if not self.websocket_server_is_running: - return - - # Start application process - if self._process is None: - self._start_process() - self.log.info("Waiting for host to connect") - return - - # TODO add waiting time - # Wait until host is connected - if self.is_host_connected: - self._start_process_timer.stop() - self._loop_timer.start() - elif ( - not self.is_process_running - or not self.websocket_server_is_running - ): - self.exit() - - def _init_server(self): - if self._websocket_server is not None: - return - - self.log.debug( - "Initialization of websocket server for host communication" - ) - - self._websocket_server = websocket_server = WebServerTool() - if websocket_server.port_occupied( - websocket_server.host_name, - websocket_server.port - ): - self.log.info( - "Server already running, sending actual context and exit." - ) - asyncio.run(websocket_server.send_context_change(self.route_name)) - self.exit() - return - - # Add Websocket route - websocket_server.add_route("*", "/ws/", WebSocketAsync) - # Add after effects route to websocket handler - - print("Adding {} route".format(self.route_name)) - WebSocketAsync.add_route( - self.route_name, AfterEffectsRoute - ) - self.log.info("Starting websocket server for host communication") - websocket_server.start_server() - - def _start_process(self): - if self._process is not None: - return - self.log.info("Starting host process") - try: - self._process = subprocess.Popen( - self._subprocess_args, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL - ) - except Exception: - self.log.info("exce", exc_info=True) - self.exit() - - -class AfterEffectsRoute(WebSocketRoute): - """ - One route, mimicking external application (like Harmony, etc). - All functions could be called from client. - 'do_notify' function calls function on the client - mimicking - notification after long running job on the server or similar - """ - instance = None - - def init(self, **kwargs): - # Python __init__ must be return "self". - # This method might return anything. - log.debug("someone called AfterEffects route") - self.instance = self - return kwargs - - # server functions - async def ping(self): - log.debug("someone called AfterEffects route ping") - - # This method calls function on the client side - # client functions - async def set_context(self, project, asset, task): - """ - Sets 'project' and 'asset' to envs, eg. 
setting context - - Args: - project (str) - asset (str) - """ - log.info("Setting context change") - log.info("project {} asset {} ".format(project, asset)) - if project: - legacy_io.Session["AVALON_PROJECT"] = project - os.environ["AVALON_PROJECT"] = project - if asset: - legacy_io.Session["AVALON_ASSET"] = asset - os.environ["AVALON_ASSET"] = asset - if task: - legacy_io.Session["AVALON_TASK"] = task - os.environ["AVALON_TASK"] = task - - async def read(self): - log.debug("aftereffects.read client calls server server calls " - "aftereffects client") - return await self.socket.call('aftereffects.read') - - # panel routes for tools - async def workfiles_route(self): - self._tool_route("workfiles") - - async def loader_route(self): - self._tool_route("loader") - - async def publish_route(self): - self._tool_route("publisher") - - async def sceneinventory_route(self): - self._tool_route("sceneinventory") - - async def setresolution_route(self): - self._settings_route(False, True) - - async def setframes_route(self): - self._settings_route(True, False) - - async def setall_route(self): - self._settings_route(True, True) - - async def experimental_tools_route(self): - self._tool_route("experimental_tools") - - def _tool_route(self, _tool_name): - """The address accessed when clicking on the buttons.""" - - partial_method = functools.partial(show_tool_by_name, - _tool_name) - - ProcessLauncher.execute_in_main_thread(partial_method) - - # Required return statement. - return "nothing" - - def _settings_route(self, frames, resolution): - partial_method = functools.partial(set_settings, - frames, - resolution) - - ProcessLauncher.execute_in_main_thread(partial_method) - - # Required return statement. - return "nothing" - - def create_placeholder_route(self): - from openpype.hosts.aftereffects.api.workfile_template_builder import \ - create_placeholder - partial_method = functools.partial(create_placeholder) - - ProcessLauncher.execute_in_main_thread(partial_method) - - # Required return statement. - return "nothing" - - def update_placeholder_route(self): - from openpype.hosts.aftereffects.api.workfile_template_builder import \ - update_placeholder - partial_method = functools.partial(update_placeholder) - - ProcessLauncher.execute_in_main_thread(partial_method) - - # Required return statement. - return "nothing" - - def build_workfile_template_route(self): - from openpype.hosts.aftereffects.api.workfile_template_builder import \ - build_workfile_template - partial_method = functools.partial(build_workfile_template) - - ProcessLauncher.execute_in_main_thread(partial_method) - - # Required return statement. 
- return "nothing" diff --git a/openpype/hosts/aftereffects/api/lib.py b/openpype/hosts/aftereffects/api/lib.py deleted file mode 100644 index e8352c382b..0000000000 --- a/openpype/hosts/aftereffects/api/lib.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import re -import json -import contextlib -import logging - -from openpype.pipeline.context_tools import get_current_context -from openpype.client import get_asset_by_name -from .ws_stub import get_stub - -log = logging.getLogger(__name__) -log.setLevel(logging.DEBUG) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context.""" - selection = get_stub().get_selected_items(True, False, False) - try: - yield selection - finally: - pass - - -def get_extension_manifest_path(): - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "extension", - "CSXS", - "manifest.xml" - ) - - -def get_unique_layer_name(layers, name): - """ - Gets all layer names and if 'name' is present in them, increases - suffix by 1 (eg. creates unique layer name - for Loader) - Args: - layers (list): of strings, names only - name (string): checked value - - Returns: - (string): name_00X (without version) - """ - names = {} - for layer in layers: - layer_name = re.sub(r'_\d{3}$', '', layer) - if layer_name in names.keys(): - names[layer_name] = names[layer_name] + 1 - else: - names[layer_name] = 1 - occurrences = names.get(name, 0) - - return "{}_{:0>3d}".format(name, occurrences + 1) - - -def get_background_layers(file_url): - """ - Pulls file name from background json file, enrich with folder url for - AE to be able import files. - - Order is important, follows order in json. - - Args: - file_url (str): abs url of background json - - Returns: - (list): of abs paths to images - """ - with open(file_url) as json_file: - data = json.load(json_file) - - layers = list() - bg_folder = os.path.dirname(file_url) - for child in data['children']: - if child.get("filename"): - layers.append(os.path.join(bg_folder, child.get("filename")). - replace("\\", "/")) - else: - for layer in child['children']: - if layer.get("filename"): - layers.append(os.path.join(bg_folder, - layer.get("filename")). - replace("\\", "/")) - return layers - - -def get_asset_settings(asset_doc): - """Get settings on current asset from database. - - Returns: - dict: Scene data. - - """ - asset_data = asset_doc["data"] - fps = asset_data.get("fps", 0) - frame_start = asset_data.get("frameStart", 0) - frame_end = asset_data.get("frameEnd", 0) - handle_start = asset_data.get("handleStart", 0) - handle_end = asset_data.get("handleEnd", 0) - resolution_width = asset_data.get("resolutionWidth", 0) - resolution_height = asset_data.get("resolutionHeight", 0) - duration = (frame_end - frame_start + 1) + handle_start + handle_end - - return { - "fps": fps, - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": handle_start, - "handleEnd": handle_end, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "duration": duration - } - - -def set_settings(frames, resolution, comp_ids=None, print_msg=True): - """Sets number of frames and resolution to selected comps. 
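-
-    Example (illustrative, the comp id is hypothetical):
-        set_settings(True, True, comp_ids=[12345], print_msg=False)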
- - Args: - frames (bool): True if set frame info - resolution (bool): True if set resolution - comp_ids (list): specific composition ids, if empty - it tries to look for currently selected - print_msg (bool): True throw JS alert with msg - """ - frame_start = frames_duration = fps = width = height = None - current_context = get_current_context() - - asset_doc = get_asset_by_name(current_context["project_name"], - current_context["asset_name"]) - settings = get_asset_settings(asset_doc) - - msg = '' - if frames: - frame_start = settings["frameStart"] - settings["handleStart"] - frames_duration = settings["duration"] - fps = settings["fps"] - msg += f"frame start:{frame_start}, duration:{frames_duration}, "\ - f"fps:{fps}" - if resolution: - width = settings["resolutionWidth"] - height = settings["resolutionHeight"] - msg += f"width:{width} and height:{height}" - - stub = get_stub() - if not comp_ids: - comps = stub.get_selected_items(True, False, False) - comp_ids = [comp.id for comp in comps] - if not comp_ids: - stub.print_msg("Select at least one composition to apply settings.") - return - - for comp_id in comp_ids: - msg = f"Setting for comp {comp_id} " + msg - log.debug(msg) - stub.set_comp_properties(comp_id, frame_start, frames_duration, - fps, width, height) - if print_msg: - stub.print_msg(msg) diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py deleted file mode 100644 index e059f7c272..0000000000 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ /dev/null @@ -1,293 +0,0 @@ -import os - -from qtpy import QtWidgets - -import pyblish.api - -from openpype.lib import Logger, register_event_callback -from openpype.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.hosts.aftereffects.api.workfile_template_builder import ( - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin -) -from openpype.pipeline.load import any_outdated_containers -import openpype.hosts.aftereffects - -from openpype.host import ( - HostBase, - IWorkfileHost, - ILoadHost, - IPublishHost -) -from openpype.tools.utils import get_openpype_qt_app - -from .launch_logic import get_stub -from .ws_stub import ConnectionNotEstablishedYet - -log = Logger.get_logger(__name__) - - -HOST_DIR = os.path.dirname( - os.path.abspath(openpype.hosts.aftereffects.__file__) -) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - - -class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "aftereffects" - - def __init__(self): - self._stub = None - super(AfterEffectsHost, self).__init__() - - @property - def stub(self): - """ - Handle pulling stub from PS to run operations on host - Returns: - (AEServerStub) or None - """ - if self._stub: - return self._stub - - try: - stub = get_stub() # only after Photoshop is up - except ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - self._stub = stub - return self._stub - - def install(self): - print("Installing Pype config...") - - pyblish.api.register_host("aftereffects") - pyblish.api.register_plugin_path(PUBLISH_PATH) - - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - register_event_callback("application.launched", application_launch) - - def get_workfile_extensions(self): - return [".aep"] - - def save_workfile(self, 
dst_path=None): - self.stub.saveAs(dst_path, True) - - def open_workfile(self, filepath): - self.stub.open(filepath) - - return True - - def get_current_workfile(self): - try: - full_name = get_stub().get_active_document_full_name() - if full_name and full_name != "null": - return os.path.normpath(full_name).replace("\\", "/") - except ValueError: - print("Nothing opened") - pass - - return None - - def get_containers(self): - return ls() - - def get_context_data(self): - meta = self.stub.get_metadata() - for item in meta: - if item.get("id") == "publish_context": - item.pop("id") - return item - - return {} - - def update_context_data(self, data, changes): - item = data - item["id"] = "publish_context" - self.stub.imprint(item["id"], item) - - def get_workfile_build_placeholder_plugins(self): - return [ - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin - ] - - # created instances section - def list_instances(self): - """List all created instances from current workfile which - will be published. - - Pulls from File > File Info - - For SubsetManager - - Returns: - (list) of dictionaries matching instances format - """ - stub = self.stub - if not stub: - return [] - - instances = [] - layers_meta = stub.get_metadata() - - for instance in layers_meta: - if instance.get("id") == "pyblish.avalon.instance": - instances.append(instance) - return instances - - def remove_instance(self, instance): - """Remove instance from current workfile metadata. - - Updates metadata of current file in File > File Info and removes - icon highlight on group layer. - - For SubsetManager - - Args: - instance (dict): instance representation from subsetmanager model - """ - stub = self.stub - - if not stub: - return - - inst_id = instance.get("instance_id") or instance.get("uuid") # legacy - if not inst_id: - log.warning("No instance identifier for {}".format(instance)) - return - - stub.remove_instance(inst_id) - - if instance.get("members"): - item = stub.get_item(instance["members"][0]) - if item: - stub.rename_item(item.id, - item.name.replace(stub.PUBLISH_ICON, '')) - - -def application_launch(): - """Triggered after start of app""" - check_inventory() - - -def ls(): - """Yields containers from active AfterEffects document. - - This is the host-equivalent of api.ls(), but instead of listing - assets on disk, it lists assets already loaded in AE; once loaded - they are called 'containers'. Used in Manage tool. - - Containers could be on multiple levels, single images/videos/was as a - FootageItem, or multiple items - backgrounds (folder with automatically - created composition and all imported layers). - - Yields: - dict: container - - """ - try: - stub = get_stub() # only after AfterEffects is up - except ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - layers_meta = stub.get_metadata() - for item in stub.get_items(comps=True, - folders=True, - footages=True): - data = stub.read(item, layers_meta) - # Skip non-tagged layers. - if not data: - continue - - # Filter to only containers. - if "container" not in data["id"]: - continue - - # Append transient data - data["objectName"] = item.name.replace(stub.LOADED_ICON, '') - data["layer"] = item - yield data - - -def check_inventory(): - """Checks loaded containers if they are of highest version""" - if not any_outdated_containers(): - return - - # Warn about outdated containers. 
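-    # The dialog below is intentionally blocking so the artist notices
-    # the warning before continuing to work with stale content.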
- _app = get_openpype_qt_app() - - message_box = QtWidgets.QMessageBox() - message_box.setIcon(QtWidgets.QMessageBox.Warning) - msg = "There are outdated containers in the scene." - message_box.setText(msg) - message_box.exec_() - - -def containerise(name, - namespace, - comp, - context, - loader=None, - suffix="_CON"): - """ - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Creates dictionary payloads that gets saved into file metadata. Each - container contains of who loaded (loader) and members (single or multiple - in case of background). - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - comp (AEItem): Composition to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. - - Returns: - container (str): Name of container assembly - """ - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - "members": comp.members or [comp.id] - } - - stub = get_stub() - stub.imprint(comp.id, data) - - return comp - - -def cache_and_get_instances(creator): - """Cache instances in shared data. - - Storing all instances as a list as legacy instances might be still present. - Args: - creator (Creator): Plugin which would like to get instances from host. - Returns: - List[]: list of all instances stored in metadata - """ - shared_key = "openpype.photoshop.instances" - if shared_key not in creator.collection_shared_data: - creator.collection_shared_data[shared_key] = \ - creator.host.list_instances() - return creator.collection_shared_data[shared_key] diff --git a/openpype/hosts/aftereffects/api/plugin.py b/openpype/hosts/aftereffects/api/plugin.py deleted file mode 100644 index aa940c0c98..0000000000 --- a/openpype/hosts/aftereffects/api/plugin.py +++ /dev/null @@ -1,12 +0,0 @@ -import six -from abc import ABCMeta - -from openpype.pipeline import LoaderPlugin -from .launch_logic import get_stub - - -@six.add_metaclass(ABCMeta) -class AfterEffectsLoader(LoaderPlugin): - @staticmethod - def get_stub(): - return get_stub() diff --git a/openpype/hosts/aftereffects/api/workfile_template_builder.py b/openpype/hosts/aftereffects/api/workfile_template_builder.py deleted file mode 100644 index 5a97afcef1..0000000000 --- a/openpype/hosts/aftereffects/api/workfile_template_builder.py +++ /dev/null @@ -1,271 +0,0 @@ -import os.path -import uuid -import shutil - -from openpype.pipeline import registered_host -from openpype.tools.workfile_template_build import ( - WorkfileBuildPlaceholderDialog, -) -from openpype.pipeline.workfile.workfile_template_builder import ( - AbstractTemplateBuilder, - PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin -) -from openpype.hosts.aftereffects.api import get_stub -from openpype.hosts.aftereffects.api.lib import set_settings - -PLACEHOLDER_SET = "PLACEHOLDERS_SET" -PLACEHOLDER_ID = "openpype.placeholder" - - -class AETemplateBuilder(AbstractTemplateBuilder): - """Concrete implementation of AbstractTemplateBuilder for AE""" - - def import_template(self, path): - """Import template into current scene. - Block if a template is already loaded. 
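-
-        Example (illustrative, the path is hypothetical):
-            builder.import_template("/templates/shot_template.aep")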
- - Args: - path (str): A path to current template (usually given by - get_template_preset implementation) - - Returns: - bool: Whether the template was successfully imported or not - """ - stub = get_stub() - if not os.path.exists(path): - stub.print_msg(f"Template file on {path} doesn't exist.") - return - - stub.save() - workfile_path = stub.get_active_document_full_name() - shutil.copy2(path, workfile_path) - stub.open(workfile_path) - - return True - - -class AEPlaceholderPlugin(PlaceholderPlugin): - """Contains generic methods for all PlaceholderPlugins.""" - - def collect_placeholders(self): - """Collect info from file metadata about created placeholders. - - Returns: - (list) (LoadPlaceholderItem) - """ - output = [] - scene_placeholders = self._collect_scene_placeholders() - for item in scene_placeholders: - if item.get("plugin_identifier") != self.identifier: - continue - - if isinstance(self, AEPlaceholderLoadPlugin): - item = LoadPlaceholderItem(item["uuid"], - item["data"], - self) - elif isinstance(self, AEPlaceholderCreatePlugin): - item = CreatePlaceholderItem(item["uuid"], - item["data"], - self) - else: - raise NotImplementedError(f"Not implemented for {type(self)}") - - output.append(item) - - return output - - def update_placeholder(self, placeholder_item, placeholder_data): - """Resave changed properties for placeholders""" - item_id, metadata_item = self._get_item(placeholder_item) - stub = get_stub() - if not item_id: - stub.print_msg("Cannot find item for " - f"{placeholder_item.scene_identifier}") - return - metadata_item["data"] = placeholder_data - stub.imprint(item_id, metadata_item) - - def _get_item(self, placeholder_item): - """Returns item id and item metadata for placeholder from file meta""" - stub = get_stub() - placeholder_uuid = placeholder_item.scene_identifier - for metadata_item in stub.get_metadata(): - if not metadata_item.get("is_placeholder"): - continue - if placeholder_uuid in metadata_item.get("uuid"): - return metadata_item["members"][0], metadata_item - return None, None - - def _collect_scene_placeholders(self): - """" Cache placeholder data to shared data. - Returns: - (list) of dicts - """ - placeholder_items = self.builder.get_shared_populate_data( - "placeholder_items" - ) - if not placeholder_items: - placeholder_items = [] - for item in get_stub().get_metadata(): - if not item.get("is_placeholder"): - continue - placeholder_items.append(item) - - self.builder.set_shared_populate_data( - "placeholder_items", placeholder_items - ) - return placeholder_items - - def _imprint_item(self, item_id, name, placeholder_data, stub): - if not item_id: - raise ValueError("Couldn't create a placeholder") - container_data = { - "id": "openpype.placeholder", - "name": name, - "is_placeholder": True, - "plugin_identifier": self.identifier, - "uuid": str(uuid.uuid4()), # scene_identifier - "data": placeholder_data, - "members": [item_id] - } - stub.imprint(item_id, container_data) - - -class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin): - """Adds Create placeholder. - - This adds composition and runs Create - """ - identifier = "aftereffects.create" - label = "AfterEffects create" - - def create_placeholder(self, placeholder_data): - stub = get_stub() - name = "CREATEPLACEHOLDER" - item_id = stub.add_item(name, "COMP") - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Replace 'placeholder' with publishable instance. 
- - Renames prepared composition name, creates publishable instance, sets - frame/duration settings according to DB. - """ - pre_create_data = {"use_selection": True} - item_id, item = self._get_item(placeholder) - get_stub().select_items([item_id]) - self.populate_create_placeholder(placeholder, pre_create_data) - - # apply settings for populated composition - item_id, metadata_item = self._get_item(placeholder) - set_settings(True, True, [item_id]) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - -class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin): - identifier = "aftereffects.load" - label = "AfterEffects load" - - def create_placeholder(self, placeholder_data): - """Creates AE's Placeholder item in Project items list. - - Sets dummy resolution/duration/fps settings, will be replaced when - populated. - """ - stub = get_stub() - name = "LOADERPLACEHOLDER" - item_id = stub.add_placeholder(name, 1920, 1060, 25, 10) - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Use Openpype Loader from `placeholder` to create new FootageItems - - New FootageItems are created, files are imported. - """ - self.populate_load_placeholder(placeholder) - errors = placeholder.get_errors() - stub = get_stub() - if errors: - stub.print_msg("\n".join(errors)) - else: - if not placeholder.data["keep_placeholder"]: - metadata = stub.get_metadata() - for item in metadata: - if not item.get("is_placeholder"): - continue - scene_identifier = item.get("uuid") - if (scene_identifier and - scene_identifier == placeholder.scene_identifier): - stub.delete_item(item["members"][0]) - stub.remove_instance(placeholder.scene_identifier, metadata) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def load_succeed(self, placeholder, container): - placeholder_item_id, _ = self._get_item(placeholder) - item_id = container.id - get_stub().add_item_instead_placeholder(placeholder_item_id, item_id) - - -def build_workfile_template(*args, **kwargs): - builder = AETemplateBuilder(registered_host()) - builder.build_template(*args, **kwargs) - - -def update_workfile_template(*args): - builder = AETemplateBuilder(registered_host()) - builder.rebuild_template() - - -def create_placeholder(*args): - """Called when new workile placeholder should be created.""" - host = registered_host() - builder = AETemplateBuilder(host) - window = WorkfileBuildPlaceholderDialog(host, builder) - window.exec_() - - -def update_placeholder(*args): - """Called after placeholder item is selected to modify it.""" - host = registered_host() - builder = AETemplateBuilder(host) - - stub = get_stub() - selected_items = stub.get_selected_items(True, True, True) - - if len(selected_items) != 1: - stub.print_msg("Please select just 1 placeholder") - return - - selected_id = selected_items[0].id - placeholder_item = None - - placeholder_items_by_id = { - placeholder_item.scene_identifier: placeholder_item - for placeholder_item in builder.get_placeholders() - } - for metadata_item in stub.get_metadata(): - if not metadata_item.get("is_placeholder"): - continue - if selected_id in metadata_item.get("members"): - placeholder_item = placeholder_items_by_id.get( - metadata_item["uuid"]) - break - - if not placeholder_item: - stub.print_msg("Didn't find placeholder metadata. 
" - "Remove and re-create placeholder.") - return - - window = WorkfileBuildPlaceholderDialog(host, builder) - window.set_update_mode(placeholder_item) - window.exec_() diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py deleted file mode 100644 index 18f530e272..0000000000 --- a/openpype/hosts/aftereffects/api/ws_stub.py +++ /dev/null @@ -1,731 +0,0 @@ -""" - Stub handling connection from server to client. - Used anywhere solution is calling client methods. -""" -import json -import logging - -import attr - -from wsrpc_aiohttp import WebSocketAsync -from openpype.tools.adobe_webserver.app import WebServerTool - - -class ConnectionNotEstablishedYet(Exception): - pass - - -@attr.s -class AEItem(object): - """ - Object denoting Item in AE. Each item is created in AE by any Loader, - but contains same fields, which are being used in later processing. - """ - # metadata - id = attr.ib() # id created by AE, could be used for querying - name = attr.ib() # name of item - item_type = attr.ib(default=None) # item type (footage, folder, comp) - # all imported elements, single for - # regular image, array for Backgrounds - members = attr.ib(factory=list) - frameStart = attr.ib(default=None) - framesDuration = attr.ib(default=None) - frameRate = attr.ib(default=None) - file_name = attr.ib(default=None) - instance_id = attr.ib(default=None) # New Publisher - width = attr.ib(default=None) - height = attr.ib(default=None) - is_placeholder = attr.ib(default=False) - uuid = attr.ib(default=False) - path = attr.ib(default=False) # path to FootageItem to validate - # list of composition Footage is in - containing_comps = attr.ib(factory=list) - - -class AfterEffectsServerStub(): - """ - Stub for calling function on client (Photoshop js) side. - Expects that client is already connected (started when avalon menu - is opened). - 'self.websocketserver.call' is used as async wrapper - """ - PUBLISH_ICON = '\u2117 ' - LOADED_ICON = '\u25bc' - - def __init__(self): - self.websocketserver = WebServerTool.get_instance() - self.client = self.get_client() - self.log = logging.getLogger(self.__class__.__name__) - - @staticmethod - def get_client(): - """ - Return first connected client to WebSocket - TODO implement selection by Route - :return: client - """ - clients = WebSocketAsync.get_clients() - client = None - if len(clients) > 0: - key = list(clients.keys())[0] - client = clients.get(key) - - return client - - def open(self, path): - """ - Open file located at 'path' (local). - Args: - path(string): file path locally - Returns: None - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.open', path=path)) - - return self._handle_return(res) - - def get_metadata(self): - """ - Get complete stored JSON with metadata from AE.Metadata.Label - field. - - It contains containers loaded by any Loader OR instances created - by Creator. - - Returns: - (list) - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.get_metadata')) - metadata = self._handle_return(res) - - return metadata or [] - - def read(self, item, layers_meta=None): - """ - Parses item metadata from Label field of active document. - Used as filter to pick metadata for specific 'item' only. 
- - Args: - item (AEItem): pulled info from AE - layers_meta (dict): full list from Headline - (load and inject for better performance in loops) - Returns: - (dict): - """ - if layers_meta is None: - layers_meta = self.get_metadata() - for item_meta in layers_meta: - if 'container' in item_meta.get('id') and \ - str(item.id) == str(item_meta.get('members')[0]): - return item_meta - - self.log.debug("Couldn't find layer metadata") - - def imprint(self, item_id, data, all_items=None, items_meta=None): - """ - Save item metadata to Label field of metadata of active document - Args: - item_id (int|str): id of FootageItem or instance_id for workfiles - data(string): json representation for single layer - all_items (list of item): for performance, could be - injected for usage in loop, if not, single call will be - triggered - items_meta(string): json representation from Headline - (for performance - provide only if imprint is in - loop - value should be same) - Returns: None - """ - if not items_meta: - items_meta = self.get_metadata() - - result_meta = [] - # fix existing - is_new = True - - for item_meta in items_meta: - if ((item_meta.get('members') and - str(item_id) == str(item_meta.get('members')[0])) or - item_meta.get("instance_id") == item_id): - is_new = False - if data: - item_meta.update(data) - result_meta.append(item_meta) - else: - result_meta.append(item_meta) - - if is_new: - result_meta.append(data) - - # Ensure only valid ids are stored. - if not all_items: - # loaders create FootageItem now - all_items = self.get_items(comps=True, - folders=True, - footages=True) - item_ids = [int(item.id) for item in all_items] - cleaned_data = [] - for meta in result_meta: - # do not added instance with nonexistend item id - if meta.get("members"): - if int(meta["members"][0]) not in item_ids: - continue - - cleaned_data.append(meta) - - payload = json.dumps(cleaned_data, indent=4) - - res = self.websocketserver.call(self.client.call - ('AfterEffects.imprint', - payload=payload)) - return self._handle_return(res) - - def get_active_document_full_name(self): - """ - Returns absolute path of active document via ws call - Returns(string): file name - """ - res = self.websocketserver.call(self.client.call( - 'AfterEffects.get_active_document_full_name')) - - return self._handle_return(res) - - def get_active_document_name(self): - """ - Returns just a name of active document via ws call - Returns(string): file name - """ - res = self.websocketserver.call(self.client.call( - 'AfterEffects.get_active_document_name')) - - return self._handle_return(res) - - def get_items(self, comps, folders=False, footages=False): - """ - Get all items from Project panel according to arguments. 
- There are multiple different types: - CompItem (could have multiple layers - source for Creator, - will be rendered) - FolderItem (collection type, currently used for Background - loading) - FootageItem (imported file - created by Loader) - Args: - comps (bool): return CompItems - folders (bool): return FolderItem - footages (bool: return FootageItem - - Returns: - (list) of namedtuples - """ - res = self.websocketserver.call( - self.client.call('AfterEffects.get_items', - comps=comps, - folders=folders, - footages=footages) - ) - return self._to_records(self._handle_return(res)) - - def select_items(self, items): - """ - Select items in Project list - Args: - items (list): of int item ids - """ - self.websocketserver.call( - self.client.call('AfterEffects.select_items', items=items)) - - - def get_selected_items(self, comps, folders=False, footages=False): - """ - Same as get_items but using selected items only - Args: - comps (bool): return CompItems - folders (bool): return FolderItem - footages (bool: return FootageItem - - Returns: - (list) of namedtuples - - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.get_selected_items', - comps=comps, - folders=folders, - footages=footages) - ) - return self._to_records(self._handle_return(res)) - - def add_item(self, name, item_type): - """ - Adds either composition or folder to project item list. - - Args: - name (str) - item_type (str): COMP|FOLDER - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.add_item', - name=name, - item_type=item_type)) - - return self._handle_return(res) - - def get_item(self, item_id): - """ - Returns metadata for particular 'item_id' or None - - Args: - item_id (int, or string) - """ - for item in self.get_items(True, True, True): - if str(item.id) == str(item_id): - return item - - return None - - def import_file(self, path, item_name, import_options=None): - """ - Imports file as a FootageItem. Used in Loader - Args: - path (string): absolute path for asset file - item_name (string): label for created FootageItem - import_options (dict): different files (img vs psd) need different - config - - """ - res = self.websocketserver.call( - self.client.call('AfterEffects.import_file', - path=path, - item_name=item_name, - import_options=import_options) - ) - records = self._to_records(self._handle_return(res)) - if records: - return records.pop() - - def replace_item(self, item_id, path, item_name): - """ Replace FootageItem with new file - - Args: - item_id (int): - path (string):absolute path - item_name (string): label on item in Project list - - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.replace_item', - item_id=item_id, - path=path, item_name=item_name)) - - return self._handle_return(res) - - def rename_item(self, item_id, item_name): - """ Replace item with item_name - - Args: - item_id (int): - item_name (string): label on item in Project list - - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.rename_item', - item_id=item_id, - item_name=item_name)) - - return self._handle_return(res) - - def delete_item(self, item_id): - """ Deletes *Item in a file - Args: - item_id (int): - - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.delete_item', - item_id=item_id)) - - return self._handle_return(res) - - def remove_instance(self, instance_id, metadata=None): - """ - Removes instance with 'instance_id' from file's metadata and - saves them. - - Keep matching item in file though. 
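-
-        Example (illustrative, the id is hypothetical):
-            stub.remove_instance("9f1a2b3c-...")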
- - Args: - instance_id(string): instance id - """ - cleaned_data = [] - - if metadata is None: - metadata = self.get_metadata() - - for instance in metadata: - inst_id = instance.get("instance_id") or instance.get("uuid") - if inst_id != instance_id: - cleaned_data.append(instance) - - payload = json.dumps(cleaned_data, indent=4) - res = self.websocketserver.call(self.client.call - ('AfterEffects.imprint', - payload=payload)) - - return self._handle_return(res) - - def is_saved(self): - # TODO - return True - - def set_label_color(self, item_id, color_idx): - """ - Used for highlight additional information in Project panel. - Green color is loaded asset, blue is created asset - Args: - item_id (int): - color_idx (int): 0-16 Label colors from AE Project view - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.set_label_color', - item_id=item_id, - color_idx=color_idx)) - - return self._handle_return(res) - - def get_comp_properties(self, comp_id): - """ Get composition information for render purposes - - Returns startFrame, frameDuration, fps, width, height. - - Args: - comp_id (int): - - Returns: - (AEItem) - - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.get_comp_properties', - item_id=comp_id - )) - - records = self._to_records(self._handle_return(res)) - if records: - return records.pop() - - def set_comp_properties(self, comp_id, start, duration, frame_rate, - width, height): - """ - Set work area to predefined values (from Ftrack). - Work area directs what gets rendered. - Beware of rounding, AE expects seconds, not frames directly. - - Args: - comp_id (int): - start (int): workAreaStart in frames - duration (int): in frames - frame_rate (float): frames in seconds - width (int): resolution width - height (int): resolution height - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.set_comp_properties', - item_id=comp_id, - start=start, - duration=duration, - frame_rate=frame_rate, - width=width, - height=height)) - return self._handle_return(res) - - def save(self): - """ - Saves active document - Returns: None - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.save')) - - return self._handle_return(res) - - def saveAs(self, project_path, as_copy): - """ - Saves active project to aep (copy) or png or jpg - Args: - project_path(string): full local path - as_copy: - Returns: None - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.saveAs', - image_path=project_path, - as_copy=as_copy)) - - return self._handle_return(res) - - def get_render_info(self, comp_id): - """ Get render queue info for render purposes - - Returns: - (list) of (AEItem): with 'file_name' field - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.get_render_info', - comp_id=comp_id)) - - records = self._to_records(self._handle_return(res)) - return records - - def get_audio_url(self, item_id): - """ Get audio layer absolute url for comp - - Args: - item_id (int): composition id - Returns: - (str): absolute path url - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.get_audio_url', - item_id=item_id)) - - return self._handle_return(res) - - def import_background(self, comp_id, comp_name, files): - """ - Imports backgrounds images to existing or new composition. - - If comp_id is not provided, new composition is created, basic - values (width, heights, frameRatio) takes from first imported - image. 
-
-        All images from background json are imported as a FootageItem and
-        a separate layer is created for each of them under the composition.
-
-        Order of imported 'files' is important.
-
-        Args:
-            comp_id (int): id of existing composition (null if new)
-            comp_name (str): used when new composition
-            files (list): list of absolute paths to import and
-                add as layers
-
-        Returns:
-            (AEItem): object with id of created folder, all imported images
-        """
-        res = self.websocketserver.call(self.client.call
-                                        ('AfterEffects.import_background',
-                                         comp_id=comp_id,
-                                         comp_name=comp_name,
-                                         files=files))
-
-        records = self._to_records(self._handle_return(res))
-        if records:
-            return records.pop()
-
-    def reload_background(self, comp_id, comp_name, files):
-        """
-        Reloads background images into an existing composition.
-
-        It actually deletes the complete folder with imported images and
-        the created composition, for safety.
-
-        Args:
-            comp_id (int): id of existing composition to be overwritten
-            comp_name (str): new name of composition (could be same as old
-                if version up only)
-            files (list): list of absolute paths to import and
-                add as layers
-        Returns:
-            (AEItem): object with id of created folder, all imported images
-        """
-        res = self.websocketserver.call(self.client.call
-                                        ('AfterEffects.reload_background',
-                                         comp_id=comp_id,
-                                         comp_name=comp_name,
-                                         files=files))
-
-        records = self._to_records(self._handle_return(res))
-        if records:
-            return records.pop()
-
-    def add_item_as_layer(self, comp_id, item_id):
-        """
-        Adds an already imported FootageItem ('item_id') as a new
-        layer to composition ('comp_id').
-
-        Args:
-            comp_id (int): id of target composition
-            item_id (int): FootageItem.id - comp already found previously
-        """
-        res = self.websocketserver.call(self.client.call
-                                        ('AfterEffects.add_item_as_layer',
-                                         comp_id=comp_id,
-                                         item_id=item_id))
-
-        records = self._to_records(self._handle_return(res))
-        if records:
-            return records.pop()
-
-    def add_item_instead_placeholder(self, placeholder_item_id, item_id):
-        """
-        Adds 'item_id' to layers where 'placeholder_item_id' is present.
-
-        One placeholder could result in multiple loaded containers (eg. items)
-
-        Args:
-            placeholder_item_id (int): id of placeholder item
-            item_id (int): loaded FootageItem id
-        """
-        res = self.websocketserver.call(self.client.call
-                                        ('AfterEffects.add_item_instead_placeholder',  # noqa
-                                         placeholder_item_id=placeholder_item_id,  # noqa
-                                         item_id=item_id))
-
-        return self._handle_return(res)
-
-    def add_placeholder(self, name, width, height, fps, duration):
-        """
-        Adds a new FootageItem as a placeholder for the workfile builder.
-
-        Placeholder requires width etc.; currently probably only hardcoded
-        values.
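-
-        Example (illustrative; mirrors the call used by the load
-        placeholder plugin above):
-            stub.add_placeholder("LOADERPLACEHOLDER", 1920, 1060, 25, 10)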
- - Args: - name (str) - width (int) - height (int) - fps (float) - duration (int) - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.add_placeholder', - name=name, - width=width, - height=height, - fps=fps, - duration=duration)) - - return self._handle_return(res) - - def render(self, folder_url, comp_id): - """ - Render all renderqueueitem to 'folder_url' - Args: - folder_url(string): local folder path for collecting - Returns: None - """ - res = self.websocketserver.call(self.client.call - ('AfterEffects.render', - folder_url=folder_url, - comp_id=comp_id)) - return self._handle_return(res) - - def get_extension_version(self): - """Returns version number of installed extension.""" - res = self.websocketserver.call(self.client.call( - 'AfterEffects.get_extension_version')) - - return self._handle_return(res) - - def get_app_version(self): - """Returns version number of installed application (17.5...).""" - res = self.websocketserver.call(self.client.call( - 'AfterEffects.get_app_version')) - - return self._handle_return(res) - - def close(self): - res = self.websocketserver.call(self.client.call('AfterEffects.close')) - - return self._handle_return(res) - - def print_msg(self, msg): - """Triggers Javascript alert dialog.""" - self.websocketserver.call(self.client.call - ('AfterEffects.print_msg', - msg=msg)) - - def _handle_return(self, res): - """Wraps return, throws ValueError if 'error' key is present.""" - if res and isinstance(res, str) and res != "undefined": - try: - parsed = json.loads(res) - except json.decoder.JSONDecodeError: - raise ValueError("Received broken JSON {}".format(res)) - - if not parsed: # empty list - return parsed - - first_item = parsed - if isinstance(parsed, list): - first_item = parsed[0] - - if first_item: - if first_item.get("error"): - raise ValueError(first_item["error"]) - # singular values (file name etc) - if first_item.get("result") is not None: - return first_item["result"] - return parsed # parsed - return res - - def _to_records(self, payload): - """ - Converts string json representation into list of AEItem - dot notation access to work. - Returns: - payload(dict): - dictionary from json representation, expected to - come from _handle_return - """ - if not payload: - return [] - - if isinstance(payload, str): # safety fallback - try: - payload = json.loads(payload) - except json.decoder.JSONDecodeError: - raise ValueError("Received broken JSON {}".format(payload)) - - if isinstance(payload, dict): - payload = [payload] - - ret = [] - # convert to AEItem to use dot donation - for d in payload: - if not d: - continue - # currently implemented and expected fields - item = AEItem(d.get('id'), - d.get('name'), - d.get('type'), - d.get('members'), - d.get('frameStart'), - d.get('framesDuration'), - d.get('frameRate'), - d.get('file_name'), - d.get("instance_id"), - d.get("width"), - d.get("height"), - d.get("is_placeholder"), - d.get("uuid"), - d.get("path"), - d.get("containing_comps"),) - - ret.append(item) - return ret - - -def get_stub(): - """ - Convenience function to get server RPC stub to call methods directed - for host (Photoshop). - It expects already created connection, started from client. 
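-
-    Example (illustrative):
-        stub = get_stub()  # raises ConnectionNotEstablishedYet otherwise
-        print(stub.get_app_version())
-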
- Currently created when panel is opened (PS: Window>Extensions>Avalon) - :return: where functions could be called from - """ - ae_stub = AfterEffectsServerStub() - if not ae_stub.client: - raise ConnectionNotEstablishedYet("Connection is not created yet") - - return ae_stub diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py deleted file mode 100644 index fadfc0c206..0000000000 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ /dev/null @@ -1,242 +0,0 @@ -import re - -from openpype import resources -from openpype.lib import BoolDef, UISeparatorDef -from openpype.hosts.aftereffects import api -from openpype.pipeline import ( - Creator, - CreatedInstance, - CreatorError -) -from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances -from openpype.hosts.aftereffects.api.lib import set_settings -from openpype.lib import prepare_template_data -from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS - - -class RenderCreator(Creator): - """Creates 'render' instance for publishing. - - Result of 'render' instance is video or sequence of images for particular - composition based of configuration in its RenderQueue. - """ - identifier = "render" - label = "Render" - family = "render" - description = "Render creator" - - create_allow_context_change = True - - # Settings - mark_for_review = True - - def create(self, subset_name_from_ui, data, pre_create_data): - stub = api.get_stub() # only after After Effects is up - - try: - _ = stub.get_active_document_full_name() - except ValueError: - raise CreatorError( - "Please save workfile via Workfile app first!" - ) - - if pre_create_data.get("use_selection"): - comps = stub.get_selected_items( - comps=True, folders=False, footages=False - ) - else: - comps = stub.get_items(comps=True, folders=False, footages=False) - - if not comps: - raise CreatorError( - "Nothing to create. Select composition in Project Bin if " - "'Use selection' is toggled or create at least " - "one composition." - ) - use_composition_name = (pre_create_data.get("use_composition_name") or - len(comps) > 1) - for comp in comps: - composition_name = re.sub( - "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), - "", - comp.name - ) - if use_composition_name: - if "{composition}" not in subset_name_from_ui.lower(): - subset_name_from_ui += "{Composition}" - - dynamic_fill = prepare_template_data({"composition": - composition_name}) - subset_name = subset_name_from_ui.format(**dynamic_fill) - data["composition_name"] = composition_name - else: - subset_name = subset_name_from_ui - subset_name = re.sub(r"\{composition\}", '', subset_name, - flags=re.IGNORECASE) - - for inst in self.create_context.instances: - if subset_name == inst.subset_name: - raise CreatorError("{} already exists".format( - inst.subset_name)) - - data["members"] = [comp.id] - data["orig_comp_name"] = composition_name - - new_instance = CreatedInstance(self.family, subset_name, data, - self) - if "farm" in pre_create_data: - use_farm = pre_create_data["farm"] - new_instance.creator_attributes["farm"] = use_farm - - review = pre_create_data["mark_for_review"] - new_instance. 
creator_attributes["mark_for_review"] = review - - api.get_stub().imprint(new_instance.id, - new_instance.data_to_store()) - self._add_instance_to_context(new_instance) - - stub.rename_item(comp.id, subset_name) - set_settings(True, True, [comp.id], print_msg=False) - - def get_pre_create_attr_defs(self): - output = [ - BoolDef("use_selection", - tooltip="Composition for publishable instance should be " - "selected by default.", - default=True, label="Use selection"), - BoolDef("use_composition_name", - label="Use composition name in subset"), - UISeparatorDef(), - BoolDef("farm", label="Render on farm"), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - return output - - def get_instance_attr_defs(self): - return [ - BoolDef("farm", label="Render on farm"), - BoolDef( - "mark_for_review", - label="Review", - default=False - ) - ] - - def get_icon(self): - return resources.get_openpype_splash_filepath() - - def collect_instances(self): - for instance_data in cache_and_get_instances(self): - # legacy instances have family=='render' or 'renderLocal', use them - creator_id = (instance_data.get("creator_identifier") or - instance_data.get("family", '').replace("Local", '')) - if creator_id == self.identifier: - instance_data = self._handle_legacy(instance_data) - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - for created_inst, _changes in update_list: - api.get_stub().imprint(created_inst.get("instance_id"), - created_inst.data_to_store()) - subset_change = _changes.get("subset") - if subset_change: - api.get_stub().rename_item(created_inst.data["members"][0], - subset_change.new_value) - - def remove_instances(self, instances): - """Removes metadata and renames to original comp name if available.""" - for instance in instances: - self._remove_instance_from_context(instance) - self.host.remove_instance(instance) - - comp_id = instance.data["members"][0] - comp = api.get_stub().get_item(comp_id) - orig_comp_name = instance.data.get("orig_comp_name") - if comp: - if orig_comp_name: - new_comp_name = orig_comp_name - else: - new_comp_name = "dummyCompName" - api.get_stub().rename_item(comp_id, - new_comp_name) - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["aftereffects"]["create"]["RenderCreator"] - ) - - self.mark_for_review = plugin_settings["mark_for_review"] - self.default_variants = plugin_settings.get( - "default_variants", - plugin_settings.get("defaults") or [] - ) - - def get_detail_description(self): - return """Creator for Render instances - - Main publishable item in AfterEffects will be of `render` family. - Result of this item (instance) is picture sequence or video that could - be a final delivery product or loaded and used in another DCCs. - - Select single composition and create instance of 'render' family or - turn off 'Use selection' to create instance for all compositions. - - 'Use composition name in subset' allows to explicitly add composition - name into created subset name. - - Position of composition name could be set in - `project_settings/global/tools/creator/subset_name_profiles` with some - form of '{composition}' placeholder. - - Composition name will be used implicitly if multiple composition should - be handled at same time. 
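-
-        For example (illustrative values): template 'render{Composition}'
-        with composition 'bgMain' would yield subset 'renderBgMain'.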
- - If {composition} placeholder is not us 'subset_name_profiles' - composition name will be capitalized and set at the end of subset name - if necessary. - - If composition name should be used, it will be cleaned up of characters - that would cause an issue in published file names. - """ - - def get_dynamic_data(self, variant, task_name, asset_doc, - project_name, host_name, instance): - dynamic_data = {} - if instance is not None: - composition_name = instance.get("composition_name") - if composition_name: - dynamic_data["composition"] = composition_name - else: - dynamic_data["composition"] = "{composition}" - - return dynamic_data - - def _handle_legacy(self, instance_data): - """Converts old instances to new format.""" - if not instance_data.get("members"): - instance_data["members"] = [instance_data.get("uuid")] - - if instance_data.get("uuid"): - # uuid not needed, replaced with unique instance_id - api.get_stub().remove_instance(instance_data.get("uuid")) - instance_data.pop("uuid") - - if not instance_data.get("task"): - instance_data["task"] = self.create_context.get_current_task_name() - - if not instance_data.get("creator_attributes"): - is_old_farm = instance_data["family"] != "renderLocal" - instance_data["creator_attributes"] = {"farm": is_old_farm} - instance_data["family"] = self.family - - if instance_data["creator_attributes"].get("mark_for_review") is None: - instance_data["creator_attributes"]["mark_for_review"] = True - - return instance_data diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py deleted file mode 100644 index 5dc3d6592d..0000000000 --- a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py +++ /dev/null @@ -1,97 +0,0 @@ -from openpype import AYON_SERVER_ENABLED -import openpype.hosts.aftereffects.api as api -from openpype.client import get_asset_by_name -from openpype.pipeline import ( - AutoCreator, - CreatedInstance -) -from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances - - -class AEWorkfileCreator(AutoCreator): - identifier = "workfile" - family = "workfile" - - default_variant = "Main" - - def get_instance_attr_defs(self): - return [] - - def collect_instances(self): - for instance_data in cache_and_get_instances(self): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - subset_name = instance_data["subset"] - instance = CreatedInstance( - self.family, subset_name, instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - # nothing to change on workfiles - pass - - def create(self, options=None): - existing_instance = None - for instance in self.create_context.instances: - if instance.family == self.family: - existing_instance = instance - break - - context = self.create_context - project_name = context.get_current_project_name() - asset_name = context.get_current_asset_name() - task_name = context.get_current_task_name() - host_name = context.host_name - - existing_asset_name = None - if existing_instance is not None: - if AYON_SERVER_ENABLED: - existing_asset_name = existing_instance.get("folderPath") - - if existing_asset_name is None: - existing_asset_name = existing_instance["asset"] - - if existing_instance is None: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, task_name, asset_doc, - project_name, host_name - ) - data = { - "task": task_name, - 
"variant": self.default_variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - data.update(self.get_dynamic_data( - self.default_variant, task_name, asset_doc, - project_name, host_name, None - )) - - new_instance = CreatedInstance( - self.family, subset_name, data, self - ) - self._add_instance_to_context(new_instance) - - api.get_stub().imprint(new_instance.get("instance_id"), - new_instance.data_to_store()) - - elif ( - existing_asset_name != asset_name - or existing_instance["task"] != task_name - ): - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, task_name, asset_doc, - project_name, host_name - ) - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - existing_instance["asset"] = asset_name - - existing_instance["task"] = task_name - existing_instance["subset"] = subset_name diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py deleted file mode 100644 index 16f45074aa..0000000000 --- a/openpype/hosts/aftereffects/plugins/load/load_background.py +++ /dev/null @@ -1,108 +0,0 @@ -import re - -from openpype.pipeline import get_representation_path -from openpype.hosts.aftereffects import api - -from openpype.hosts.aftereffects.api.lib import ( - get_background_layers, - get_unique_layer_name, -) - - -class BackgroundLoader(api.AfterEffectsLoader): - """ - Load images from Background family - Creates for each background separate folder with all imported images - from background json AND automatically created composition with layers, - each layer for separate image. - - For each load container is created and stored in project (.aep) - metadata - """ - label = "Load JSON Background" - families = ["background"] - representations = ["json"] - - def load(self, context, name=None, namespace=None, data=None): - stub = self.get_stub() - items = stub.get_items(comps=True) - existing_items = [layer.name.replace(stub.LOADED_ICON, '') - for layer in items] - - comp_name = get_unique_layer_name( - existing_items, - "{}_{}".format(context["asset"]["name"], name)) - - path = self.filepath_from_context(context) - layers = get_background_layers(path) - if not layers: - raise ValueError("No layers found in {}".format(path)) - - comp = stub.import_background(None, stub.LOADED_ICON + comp_name, - layers) - - if not comp: - raise ValueError("Import background failed. " - "Please contact support") - - self[:] = [comp] - namespace = namespace or comp_name - - return api.containerise( - name, - namespace, - comp, - context, - self.__class__.__name__ - ) - - def update(self, container, representation): - """ Switch asset or change version """ - stub = self.get_stub() - context = representation.get("context", {}) - _ = container.pop("layer") - - # without iterator number (_001, 002...) 
- namespace_from_container = re.sub(r'_\d{3}$', '', - container["namespace"]) - comp_name = "{}_{}".format(context["asset"], context["subset"]) - - # switching assets - if namespace_from_container != comp_name: - items = stub.get_items(comps=True) - existing_items = [layer.name for layer in items] - comp_name = get_unique_layer_name( - existing_items, - "{}_{}".format(context["asset"], context["subset"])) - else: # switching version - keep same name - comp_name = container["namespace"] - - path = get_representation_path(representation) - - layers = get_background_layers(path) - comp = stub.reload_background(container["members"][1], - stub.LOADED_ICON + comp_name, - layers) - - # update container - container["representation"] = str(representation["_id"]) - container["name"] = context["subset"] - container["namespace"] = comp_name - container["members"] = comp.members - - stub.imprint(comp.id, container) - - def remove(self, container): - """ - Removes element from scene: deletes layer + removes from file - metadata. - Args: - container (dict): container to be removed - used to get layer_id - """ - stub = self.get_stub() - layer = container.pop("layer") - stub.imprint(layer.id, {}) - stub.delete_item(layer.id) - - def switch(self, container, representation): - self.update(container, representation) diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py b/openpype/hosts/aftereffects/plugins/publish/collect_audio.py deleted file mode 100644 index 8647ba498b..0000000000 --- a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py +++ /dev/null @@ -1,27 +0,0 @@ -import os - -import pyblish.api - -from openpype.hosts.aftereffects.api import get_stub - - -class CollectAudio(pyblish.api.ContextPlugin): - """Inject audio file url for rendered composition into context. - Needs to run AFTER 'collect_render'. 
Use collected comp_id to check
-    if there is an AVLayer in this composition.
-    """
-
-    order = pyblish.api.CollectorOrder + 0.499
-    label = "Collect Audio"
-    hosts = ["aftereffects"]
-
-    def process(self, context):
-        for instance in context:
-            if 'render.farm' in instance.data.get("families", []):
-                comp_id = instance.data["comp_id"]
-                if not comp_id:
-                    self.log.debug("No comp_id filled in instance")
-                    continue
-                context.data["audioFile"] = os.path.normpath(
-                    get_stub().get_audio_url(comp_id)
-                ).replace("\\", "/")
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_current_file.py b/openpype/hosts/aftereffects/plugins/publish/collect_current_file.py
deleted file mode 100644
index a6a6260a6b..0000000000
--- a/openpype/hosts/aftereffects/plugins/publish/collect_current_file.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-
-import pyblish.api
-
-from openpype.hosts.aftereffects.api import get_stub
-
-
-class CollectCurrentFile(pyblish.api.ContextPlugin):
-    """Inject the current working file into context"""
-
-    order = pyblish.api.CollectorOrder - 0.49
-    label = "Current File"
-    hosts = ["aftereffects"]
-
-    def process(self, context):
-        context.data["currentFile"] = os.path.normpath(
-            get_stub().get_active_document_full_name()
-        ).replace("\\", "/")
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_extension_version.py b/openpype/hosts/aftereffects/plugins/publish/collect_extension_version.py
deleted file mode 100644
index ef9a81b7fd..0000000000
--- a/openpype/hosts/aftereffects/plugins/publish/collect_extension_version.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import os
-import re
-import pyblish.api
-
-from openpype.hosts.aftereffects.api import (
-    get_stub,
-    get_extension_manifest_path
-)
-
-
-class CollectExtensionVersion(pyblish.api.ContextPlugin):
-    """Pulls and compares the version of the installed extension.
-
-    It is recommended to use the same extension as provided with OpenPype.
-
-    Please use Anastasiy's Extension Manager or ZXPInstaller to update
-    the extension in case of an error.
-
-    You can locate extension.zxp in your installed OpenPype code in
-    `repos/avalon-core/avalon/aftereffects`
-    """
-    # This technically should be a validator, but other collectors might be
-    # impacted by use of an obsolete extension, so a collector that runs first
-    # was chosen
-    order = pyblish.api.CollectorOrder - 0.5
-    label = "Collect extension version"
-    hosts = ["aftereffects"]
-
-    optional = True
-    active = True
-
-    def process(self, context):
-        installed_version = get_stub().get_extension_version()
-
-        if not installed_version:
-            raise ValueError("Unknown version, probably old extension")
-
-        manifest_url = get_extension_manifest_path()
-
-        if not os.path.exists(manifest_url):
-            self.log.debug("Unable to locate extension manifest, not checking")
-            return
-
-        expected_version = None
-        with open(manifest_url) as fp:
-            content = fp.read()
-            found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
-                               content)
-            if found:
-                expected_version = found[0][1]
-
-        if expected_version != installed_version:
-            msg = (
-                "Expected version '{}' found '{}'\n Please update"
-                " your installed extension, it might not work properly."
- ).format(expected_version, installed_version) - - raise ValueError(msg) diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py deleted file mode 100644 index 49874d6cff..0000000000 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import re -import tempfile -import attr - -import pyblish.api - -from openpype.settings import get_project_settings -from openpype.pipeline import publish -from openpype.pipeline.publish import RenderInstance - -from openpype.hosts.aftereffects.api import get_stub - - -@attr.s -class AERenderInstance(RenderInstance): - # extend generic, composition name is needed - comp_name = attr.ib(default=None) - comp_id = attr.ib(default=None) - fps = attr.ib(default=None) - projectEntity = attr.ib(default=None) - stagingDir = attr.ib(default=None) - app_version = attr.ib(default=None) - publish_attributes = attr.ib(default={}) - file_names = attr.ib(default=[]) - - -class CollectAERender(publish.AbstractCollectRender): - - order = pyblish.api.CollectorOrder + 0.405 - label = "Collect After Effects Render Layers" - hosts = ["aftereffects"] - - padding_width = 6 - rendered_extension = 'png' - - _stub = None - - @classmethod - def get_stub(cls): - if not cls._stub: - cls._stub = get_stub() - return cls._stub - - def get_instances(self, context): - instances = [] - instances_to_remove = [] - - app_version = CollectAERender.get_stub().get_app_version() - app_version = app_version[0:4] - - current_file = context.data["currentFile"] - version = context.data["version"] - - project_entity = context.data["projectEntity"] - - compositions = CollectAERender.get_stub().get_items(True) - compositions_by_id = {item.id: item for item in compositions} - for inst in context: - if not inst.data.get("active", True): - continue - - family = inst.data["family"] - if family not in ["render", "renderLocal"]: # legacy - continue - - comp_id = int(inst.data["members"][0]) - - comp_info = CollectAERender.get_stub().get_comp_properties( - comp_id) - - if not comp_info: - self.log.warning("Orphaned instance, deleting metadata") - inst_id = inst.data.get("instance_id") or str(comp_id) - CollectAERender.get_stub().remove_instance(inst_id) - continue - - frame_start = comp_info.frameStart - frame_end = round(comp_info.frameStart + - comp_info.framesDuration) - 1 - fps = comp_info.frameRate - # TODO add resolution when supported by extension - - task_name = inst.data.get("task") # legacy - - render_q = CollectAERender.get_stub().get_render_info(comp_id) - if not render_q: - raise ValueError("No file extension set in Render Queue") - render_item = render_q[0] - - instance_families = inst.data.get("families", []) - subset_name = inst.data["subset"] - instance = AERenderInstance( - family="render", - families=instance_families, - version=version, - time="", - source=current_file, - label="{} - {}".format(subset_name, family), - subset=subset_name, - asset=inst.data["asset"], - task=task_name, - attachTo=False, - setMembers='', - publish=True, - name=subset_name, - resolutionWidth=render_item.width, - resolutionHeight=render_item.height, - pixelAspect=1, - tileRendering=False, - tilesX=0, - tilesY=0, - review="review" in instance_families, - frameStart=frame_start, - frameEnd=frame_end, - frameStep=1, - fps=fps, - app_version=app_version, - publish_attributes=inst.data.get("publish_attributes", {}), - file_names=[item.file_name for item in render_q] - ) - - comp = 
compositions_by_id.get(comp_id) - if not comp: - raise ValueError("There is no composition for item {}". - format(comp_id)) - instance.outputDir = self._get_output_dir(instance) - instance.comp_name = comp.name - instance.comp_id = comp_id - - is_local = "renderLocal" in inst.data["family"] # legacy - if inst.data.get("creator_attributes"): - is_local = not inst.data["creator_attributes"].get("farm") - if is_local: - # for local renders - instance = self._update_for_local(instance, project_entity) - else: - fam = "render.farm" - if fam not in instance.families: - instance.families.append(fam) - instance.renderer = "aerender" - instance.farm = True # to skip integrate - if "review" in instance.families: - # to skip ExtractReview locally - instance.families.remove("review") - - instances.append(instance) - instances_to_remove.append(inst) - - for instance in instances_to_remove: - context.remove(instance) - return instances - - def get_expected_files(self, render_instance): - """ - Returns list of rendered files that should be created by - Deadline. These are not published directly, they are source - for later 'submit_publish_job'. - - Args: - render_instance (RenderInstance): to pull anatomy and parts used - in url - - Returns: - (list) of absolute urls to rendered file - """ - start = render_instance.frameStart - end = render_instance.frameEnd - - base_dir = self._get_output_dir(render_instance) - expected_files = [] - for file_name in render_instance.file_names: - _, ext = os.path.splitext(os.path.basename(file_name)) - ext = ext.replace('.', '') - version_str = "v{:03d}".format(render_instance.version) - if "#" not in file_name: # single frame (mov)W - path = os.path.join(base_dir, "{}_{}_{}.{}".format( - render_instance.asset, - render_instance.subset, - version_str, - ext - )) - expected_files.append(path) - else: - for frame in range(start, end + 1): - path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format( - render_instance.asset, - render_instance.subset, - version_str, - str(frame).zfill(self.padding_width), - ext - )) - expected_files.append(path) - return expected_files - - def _get_output_dir(self, render_instance): - """ - Returns dir path of rendered files, used in submit_publish_job - for metadata.json location. - Should be in separate folder inside of work area. 
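A minimal standalone sketch of the frame-path naming used by `get_expected_files` above; every name and value below is illustrative, not part of the plugin:

```python
import os

def expected_frame_paths(base_dir, asset, subset, version,
                         frame_start, frame_end, ext="png", padding=6):
    """Mirror the multi-frame branch of get_expected_files above."""
    version_str = "v{:03d}".format(version)
    return [
        os.path.join(base_dir, "{}_{}_{}.{}.{}".format(
            asset, subset, version_str, str(frame).zfill(padding), ext))
        for frame in range(frame_start, frame_end + 1)
    ]

# expected_frame_paths("/work/renders", "sh010", "renderMain", 3, 1001, 1002)
# -> ['/work/renders/sh010_renderMain_v003.001001.png',
#     '/work/renders/sh010_renderMain_v003.001002.png']
```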
- - Args: - render_instance (RenderInstance): - - Returns: - (str): absolute path to rendered files - """ - # render to folder of workfile - base_dir = os.path.dirname(render_instance.source) - file_name, _ = os.path.splitext( - os.path.basename(render_instance.source)) - base_dir = os.path.join(base_dir, 'renders', 'aftereffects', file_name) - - # for submit_publish_job - return base_dir - - def _update_for_local(self, instance, project_entity): - """Update old saved instances to current publishing format""" - instance.stagingDir = tempfile.mkdtemp() - instance.projectEntity = project_entity - fam = "render.local" - if fam not in instance.families: - instance.families.append(fam) - - return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py deleted file mode 100644 index 58d2757840..0000000000 --- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,100 +0,0 @@ -import os - -import pyblish.api - -from openpype.client import get_asset_name_identifier -from openpype.pipeline.create import get_subset_name - - -class CollectWorkfile(pyblish.api.ContextPlugin): - """ Adds the AE render instances """ - - label = "Collect After Effects Workfile Instance" - order = pyblish.api.CollectorOrder + 0.1 - - default_variant = "Main" - - def process(self, context): - existing_instance = None - for instance in context: - if instance.data["family"] == "workfile": - self.log.debug("Workfile instance found, won't create new") - existing_instance = instance - break - - current_file = context.data["currentFile"] - staging_dir = os.path.dirname(current_file) - scene_file = os.path.basename(current_file) - if existing_instance is None: # old publish - instance = self._get_new_instance(context, scene_file) - else: - instance = existing_instance - - # creating representation - representation = { - 'name': 'aep', - 'ext': 'aep', - 'files': scene_file, - "stagingDir": staging_dir, - } - - if not instance.data.get("representations"): - instance.data["representations"] = [] - instance.data["representations"].append(representation) - - instance.data["publish"] = instance.data["active"] # for DL - - def _get_new_instance(self, context, scene_file): - task = context.data["task"] - version = context.data["version"] - asset_entity = context.data["assetEntity"] - project_entity = context.data["projectEntity"] - - asset_name = get_asset_name_identifier(asset_entity) - - instance_data = { - "active": True, - "asset": asset_name, - "task": task, - "frameStart": context.data['frameStart'], - "frameEnd": context.data['frameEnd'], - "handleStart": context.data['handleStart'], - "handleEnd": context.data['handleEnd'], - "fps": asset_entity["data"]["fps"], - "resolutionWidth": asset_entity["data"].get( - "resolutionWidth", - project_entity["data"]["resolutionWidth"]), - "resolutionHeight": asset_entity["data"].get( - "resolutionHeight", - project_entity["data"]["resolutionHeight"]), - "pixelAspect": 1, - "step": 1, - "version": version - } - - # workfile instance - family = "workfile" - subset = get_subset_name( - family, - self.default_variant, - context.data["anatomyData"]["task"]["name"], - context.data["assetEntity"], - context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"], - project_settings=context.data["project_settings"] - ) - # Create instance - instance = context.create_instance(subset) - - # creating instance data - instance.data.update({ - "subset": subset, 
- "label": scene_file, - "family": family, - "families": [family], - "representations": list() - }) - - instance.data.update(instance_data) - - return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py deleted file mode 100644 index 343838eb49..0000000000 --- a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py +++ /dev/null @@ -1,16 +0,0 @@ -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.aftereffects.api import get_stub - - -class ExtractSaveScene(pyblish.api.ContextPlugin): - """Save scene before extraction.""" - - order = publish.Extractor.order - 0.48 - label = "Extract Save Scene" - hosts = ["aftereffects"] - - def process(self, context): - stub = get_stub() - stub.save() diff --git a/openpype/hosts/aftereffects/plugins/publish/increment_workfile.py b/openpype/hosts/aftereffects/plugins/publish/increment_workfile.py deleted file mode 100644 index d8f6ef5d27..0000000000 --- a/openpype/hosts/aftereffects/plugins/publish/increment_workfile.py +++ /dev/null @@ -1,30 +0,0 @@ -import pyblish.api -from openpype.lib import version_up -from openpype.pipeline.publish import get_errored_plugins_from_context - -from openpype.hosts.aftereffects.api import get_stub - - -class IncrementWorkfile(pyblish.api.InstancePlugin): - """Increment the current workfile. - - Saves the current scene with an increased version number. - """ - - label = "Increment Workfile" - order = pyblish.api.IntegratorOrder + 9.0 - hosts = ["aftereffects"] - families = ["workfile"] - optional = True - - def process(self, instance): - errored_plugins = get_errored_plugins_from_context(instance.context) - if errored_plugins: - raise RuntimeError( - "Skipping incrementing current file because publishing failed." - ) - - scene_path = version_up(instance.context.data["currentFile"]) - get_stub().saveAs(scene_path, True) - - self.log.info("Incremented workfile to: {}".format(scene_path)) diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py deleted file mode 100644 index 36f6035d23..0000000000 --- a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py +++ /dev/null @@ -1,64 +0,0 @@ -import pyblish.api - -from openpype.pipeline import get_current_asset_name -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, -) -from openpype.hosts.aftereffects.api import get_stub - - -class ValidateInstanceAssetRepair(pyblish.api.Action): - """Repair the instance asset with value from Context.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - - # Get the errored instances - failed = [] - for result in context.data["results"]: - if (result["error"] is not None and result["instance"] is not None - and result["instance"] not in failed): - failed.append(result["instance"]) - - # Apply pyblish.logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(failed, plugin) - stub = get_stub() - for instance in instances: - data = stub.read(instance[0]) - - data["asset"] = get_current_asset_name() - stub.imprint(instance[0].instance_id, data) - - -class ValidateInstanceAsset(pyblish.api.InstancePlugin): - """Validate the instance asset is the current selected context asset. 
- - As it might happen that multiple worfiles are opened at same time, - switching between them would mess with selected context. (From Launcher - or Ftrack). - - In that case outputs might be output under wrong asset! - - Repair action will use Context asset value (from Workfiles or Launcher) - Closing and reopening with Workfiles will refresh Context value. - """ - - label = "Validate Instance Asset" - hosts = ["aftereffects"] - actions = [ValidateInstanceAssetRepair] - order = ValidateContentsOrder - - def process(self, instance): - instance_asset = instance.data["asset"] - current_asset = get_current_asset_name() - msg = ( - f"Instance asset {instance_asset} is not the same " - f"as current context {current_asset}." - ) - - if instance_asset != current_asset: - raise PublishXmlValidationError(self, msg) diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py deleted file mode 100644 index 78f98d7445..0000000000 --- a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validate scene settings. -Requires: - instance -> assetEntity - instance -> anatomyData -""" -import os -import re - -import pyblish.api - -from openpype.pipeline import ( - PublishXmlValidationError, - OptionalPyblishPluginMixin -) -from openpype.hosts.aftereffects.api import get_asset_settings - - -class ValidateSceneSettings(OptionalPyblishPluginMixin, - pyblish.api.InstancePlugin): - """ - Ensures that Composition Settings (right mouse on comp) are same as - in FTrack on task. - - By default checks only duration - how many frames should be rendered. - Compares: - Frame start - Frame end + 1 from FTrack - against - Duration in Composition Settings. - - If this complains: - Check error message where is discrepancy. - Check FTrack task 'pype' section of task attributes for expected - values. - Check/modify rendered Composition Settings. - - If you know what you are doing run publishing again, uncheck this - validation before Validation phase. 
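The duration comparison described above reduces to a one-line calculation; a sketch with hypothetical frame values:

```python
def expected_duration(frame_start, frame_end, handle_start=0, handle_end=0):
    # Frame count the validator expects in Composition Settings;
    # handles widen the range on both ends.
    return (frame_end + handle_end) - (frame_start - handle_start) + 1

assert expected_duration(1001, 1100) == 100
assert expected_duration(1001, 1100, handle_start=10, handle_end=10) == 120
```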
- """ - - """ - Dev docu: - Could be configured by 'presets/plugins/aftereffects/publish' - - skip_timelines_check - fill task name for which skip validation of - frameStart - frameEnd - fps - handleStart - handleEnd - skip_resolution_check - fill entity type ('asset') to skip validation - resolutionWidth - resolutionHeight - TODO support in extension is missing for now - - By defaults validates duration (how many frames should be published) - """ - - order = pyblish.api.ValidatorOrder - label = "Validate Scene Settings" - families = ["render.farm", "render.local", "render"] - hosts = ["aftereffects"] - optional = True - - skip_timelines_check = [".*"] # * >> skip for all - skip_resolution_check = [".*"] - - def process(self, instance): - """Plugin entry point.""" - # Skip the instance if is not active by data on the instance - if not self.is_active(instance.data): - return - - asset_doc = instance.data["assetEntity"] - expected_settings = get_asset_settings(asset_doc) - self.log.info("config from DB::{}".format(expected_settings)) - - task_name = instance.data["anatomyData"]["task"]["name"] - if any(re.search(pattern, task_name) - for pattern in self.skip_resolution_check): - expected_settings.pop("resolutionWidth") - expected_settings.pop("resolutionHeight") - - if any(re.search(pattern, task_name) - for pattern in self.skip_timelines_check): - expected_settings.pop('fps', None) - expected_settings.pop('frameStart', None) - expected_settings.pop('frameEnd', None) - expected_settings.pop('handleStart', None) - expected_settings.pop('handleEnd', None) - - # handle case where ftrack uses only two decimal places - # 23.976023976023978 vs. 23.98 - fps = instance.data.get("fps") - if fps: - if isinstance(fps, float): - fps = float( - "{:.2f}".format(fps)) - expected_settings["fps"] = fps - - duration = instance.data.get("frameEndHandle") - \ - instance.data.get("frameStartHandle") + 1 - - self.log.debug("validated items::{}".format(expected_settings)) - - current_settings = { - "fps": fps, - "frameStart": instance.data.get("frameStart"), - "frameEnd": instance.data.get("frameEnd"), - "handleStart": instance.data.get("handleStart"), - "handleEnd": instance.data.get("handleEnd"), - "frameStartHandle": instance.data.get("frameStartHandle"), - "frameEndHandle": instance.data.get("frameEndHandle"), - "resolutionWidth": instance.data.get("resolutionWidth"), - "resolutionHeight": instance.data.get("resolutionHeight"), - "duration": duration - } - self.log.info("current_settings:: {}".format(current_settings)) - - invalid_settings = [] - invalid_keys = set() - for key, value in expected_settings.items(): - if value != current_settings[key]: - msg = "'{}' expected: '{}' found: '{}'".format( - key, value, current_settings[key]) - - if key == "duration" and expected_settings.get("handleStart"): - msg += "Handles included in calculation. Remove " \ - "handles in DB or extend frame range in " \ - "Composition Setting." - - invalid_settings.append(msg) - invalid_keys.add(key) - - if invalid_settings: - msg = "Found invalid settings:\n{}".format( - "\n".join(invalid_settings) - ) - - invalid_keys_str = ",".join(invalid_keys) - break_str = "
" - invalid_setting_str = "Found invalid settings:
{}".\ - format(break_str.join(invalid_settings)) - - formatting_data = { - "invalid_setting_str": invalid_setting_str, - "invalid_keys_str": invalid_keys_str - } - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) - - if not os.path.exists(instance.data.get("source")): - scene_url = instance.data.get("source") - msg = "Scene file {} not found (saved under wrong name)".format( - scene_url - ) - formatting_data = { - "scene_url": scene_url - } - raise PublishXmlValidationError(self, msg, key="file_not_found", - formatting_data=formatting_data) diff --git a/openpype/hosts/blender/addon.py b/openpype/hosts/blender/addon.py deleted file mode 100644 index f1da9b808c..0000000000 --- a/openpype/hosts/blender/addon.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -BLENDER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class BlenderAddon(OpenPypeModule, IHostAddon): - name = "blender" - host_name = "blender" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - """Modify environments to contain all required for implementation.""" - # Prepare path to implementation script - implementation_user_script_path = os.path.join( - BLENDER_ROOT_DIR, - "blender_addon" - ) - - # Add blender implementation script path to PYTHONPATH - python_path = env.get("PYTHONPATH") or "" - python_path_parts = [ - path - for path in python_path.split(os.pathsep) - if path - ] - python_path_parts.insert(0, implementation_user_script_path) - env["PYTHONPATH"] = os.pathsep.join(python_path_parts) - - # Modify Blender user scripts path - previous_user_scripts = set() - # Implementation path is added to set for easier paths check inside - # loops - will be removed at the end - previous_user_scripts.add(implementation_user_script_path) - - openpype_blender_user_scripts = ( - env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or "" - ) - for path in openpype_blender_user_scripts.split(os.pathsep): - if path: - previous_user_scripts.add(os.path.normpath(path)) - - blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or "" - for path in blender_user_scripts.split(os.pathsep): - if path: - previous_user_scripts.add(os.path.normpath(path)) - - # Remove implementation path from user script paths as is set to - # `BLENDER_USER_SCRIPTS` - previous_user_scripts.remove(implementation_user_script_path) - env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path - - # Set custom user scripts env - env["OPENPYPE_BLENDER_USER_SCRIPTS"] = os.pathsep.join( - previous_user_scripts - ) - - # Define Qt binding if not defined - if not env.get("QT_PREFERRED_BINDING"): - env["QT_PREFERRED_BINDING"] = "PySide2" - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(BLENDER_ROOT_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".blend"] diff --git a/openpype/hosts/blender/api/action.py b/openpype/hosts/blender/api/action.py deleted file mode 100644 index dc49d6d9ae..0000000000 --- a/openpype/hosts/blender/api/action.py +++ /dev/null @@ -1,47 +0,0 @@ -import bpy - -import pyblish.api - -from openpype.pipeline.publish import get_errored_instances_from_context - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid objects in Blender when a publish plug-in failed.""" - label = "Select Invalid" - on = "failed" - icon = "search" - - def process(self, context, plugin): - errored_instances = 
get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes...") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning( - "Failed plug-in doesn't have any selectable objects." - ) - - bpy.ops.object.select_all(action='DESELECT') - - # Make sure every node is only processed once - invalid = list(set(invalid)) - if not invalid: - self.log.info("No invalid nodes found.") - return - - invalid_names = [obj.name for obj in invalid] - self.log.info( - "Selecting invalid objects: %s", ", ".join(invalid_names) - ) - # Select the objects and also make the last one the active object. - for obj in invalid: - obj.select_set(True) - - bpy.context.view_layer.objects.active = invalid[-1] diff --git a/openpype/hosts/blender/api/lib.py b/openpype/hosts/blender/api/lib.py deleted file mode 100644 index e80ed61bc8..0000000000 --- a/openpype/hosts/blender/api/lib.py +++ /dev/null @@ -1,367 +0,0 @@ -import os -import traceback -import importlib -import contextlib -from typing import Dict, List, Union - -import bpy -import addon_utils -from openpype.lib import Logger - -from . import pipeline - -log = Logger.get_logger(__name__) - - -def load_scripts(paths): - """Copy of `load_scripts` from Blender's implementation. - - It is possible that this function will be changed in future and usage will - be based on Blender version. - """ - import bpy_types - - loaded_modules = set() - - previous_classes = [ - cls - for cls in bpy.types.bpy_struct.__subclasses__() - ] - - def register_module_call(mod): - register = getattr(mod, "register", None) - if register: - try: - register() - except: - traceback.print_exc() - else: - print("\nWarning! '%s' has no register function, " - "this is now a requirement for registerable scripts" % - mod.__file__) - - def unregister_module_call(mod): - unregister = getattr(mod, "unregister", None) - if unregister: - try: - unregister() - except: - traceback.print_exc() - - def test_reload(mod): - # reloading this causes internal errors - # because the classes from this module are stored internally - # possibly to refresh internal references too but for now, best not to. - if mod == bpy_types: - return mod - - try: - return importlib.reload(mod) - except: - traceback.print_exc() - - def test_register(mod): - if mod: - register_module_call(mod) - bpy.utils._global_loaded_modules.append(mod.__name__) - - from bpy_restrict_state import RestrictBlend - - with RestrictBlend(): - for base_path in paths: - for path_subdir in bpy.utils._script_module_dirs: - path = os.path.join(base_path, path_subdir) - if not os.path.isdir(path): - continue - - bpy.utils._sys_path_ensure_prepend(path) - - # Only add to 'sys.modules' unless this is 'startup'. 
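The register-function requirement enforced above follows Blender's own convention for startup scripts; a minimal script that `load_scripts` would accept might look like this (the operator is invented for the example):

```python
import bpy

class WM_OT_openpype_hello(bpy.types.Operator):
    bl_idname = "wm.openpype_hello"
    bl_label = "OpenPype Hello"

    def execute(self, context):
        self.report({'INFO'}, "Hello from a user script")
        return {'FINISHED'}

def register():
    bpy.utils.register_class(WM_OT_openpype_hello)

def unregister():
    bpy.utils.unregister_class(WM_OT_openpype_hello)
```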
- if path_subdir != "startup": - continue - for mod in bpy.utils.modules_from_path(path, loaded_modules): - test_register(mod) - - addons_paths = [] - for base_path in paths: - addons_path = os.path.join(base_path, "addons") - if not os.path.exists(addons_path): - continue - addons_paths.append(addons_path) - addons_module_path = os.path.join(addons_path, "modules") - if os.path.exists(addons_module_path): - bpy.utils._sys_path_ensure_prepend(addons_module_path) - - if addons_paths: - # Fake addons - origin_paths = addon_utils.paths - - def new_paths(): - paths = origin_paths() + addons_paths - return paths - - addon_utils.paths = new_paths - addon_utils.modules_refresh() - - # load template (if set) - if any(bpy.utils.app_template_paths()): - import bl_app_template_utils - bl_app_template_utils.reset(reload_scripts=False) - del bl_app_template_utils - - for cls in bpy.types.bpy_struct.__subclasses__(): - if cls in previous_classes: - continue - if not getattr(cls, "is_registered", False): - continue - for subcls in cls.__subclasses__(): - if not subcls.is_registered: - print( - "Warning, unregistered class: %s(%s)" % - (subcls.__name__, cls.__name__) - ) - - -def append_user_scripts(): - user_scripts = os.environ.get("OPENPYPE_BLENDER_USER_SCRIPTS") - if not user_scripts: - return - - try: - load_scripts(user_scripts.split(os.pathsep)) - except Exception: - print("Couldn't load user scripts \"{}\"".format(user_scripts)) - traceback.print_exc() - - -def set_app_templates_path(): - # Blender requires the app templates to be in `BLENDER_USER_SCRIPTS`. - # After running Blender, we set that variable to our custom path, so - # that the user can use their custom app templates. - - # We look among the scripts paths for one of the paths that contains - # the app templates. The path must contain the subfolder - # `startup/bl_app_templates_user`. - paths = os.environ.get("OPENPYPE_BLENDER_USER_SCRIPTS").split(os.pathsep) - - app_templates_path = None - for path in paths: - if os.path.isdir( - os.path.join(path, "startup", "bl_app_templates_user")): - app_templates_path = path - break - - if app_templates_path and os.path.isdir(app_templates_path): - os.environ["BLENDER_USER_SCRIPTS"] = app_templates_path - - -def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict): - r"""Write `data` to `node` as userDefined attributes - - Arguments: - node: Long name of node - data: Dictionary of key/value pairs - - Example: - >>> import bpy - >>> def compute(): - ... return 6 - ... - >>> bpy.ops.mesh.primitive_cube_add() - >>> cube = bpy.context.view_layer.objects.active - >>> imprint(cube, { - ... "regularString": "myFamily", - ... "computedValue": lambda: compute() - ... }) - ... - >>> cube['avalon']['computedValue'] - 6 - """ - - imprint_data = dict() - - for key, value in data.items(): - if value is None: - continue - - if callable(value): - # Support values evaluated at imprint - value = value() - - if not isinstance(value, (int, float, bool, str, list, dict)): - raise TypeError(f"Unsupported type: {type(value)}") - - imprint_data[key] = value - - pipeline.metadata_update(node, imprint_data) - - -def lsattr(attr: str, - value: Union[str, int, bool, List, Dict, None] = None) -> List: - r"""Return nodes matching `attr` and `value` - - Arguments: - attr: Name of Blender property - value: Value of attribute. If none - is provided, return all nodes with this attribute. - - Example: - >>> lsattr("id", "myId") - ... [bpy.data.objects["myNode"] - >>> lsattr("id") - ... 
[bpy.data.objects["myNode"], bpy.data.objects["myOtherNode"]] - - Returns: - list - """ - - return lsattrs({attr: value}) - - -def lsattrs(attrs: Dict) -> List: - r"""Return nodes with the given attribute(s). - - Arguments: - attrs: Name and value pairs of expected matches - - Example: - >>> lsattrs({"age": 5}) # Return nodes with an `age` of 5 - # Return nodes with both `age` and `color` of 5 and blue - >>> lsattrs({"age": 5, "color": "blue"}) - - Returns a list. - - """ - - # For now return all objects, not filtered by scene/collection/view_layer. - matches = set() - for coll in dir(bpy.data): - if not isinstance( - getattr(bpy.data, coll), - bpy.types.bpy_prop_collection, - ): - continue - for node in getattr(bpy.data, coll): - for attr, value in attrs.items(): - avalon_prop = node.get(pipeline.AVALON_PROPERTY) - if not avalon_prop: - continue - if (avalon_prop.get(attr) - and (value is None or avalon_prop.get(attr) == value)): - matches.add(node) - return list(matches) - - -def read(node: bpy.types.bpy_struct_meta_idprop): - """Return user-defined attributes from `node`""" - - data = dict(node.get(pipeline.AVALON_PROPERTY, {})) - - # Ignore hidden/internal data - data = { - key: value - for key, value in data.items() if not key.startswith("_") - } - - return data - - -def get_selected_collections(): - """ - Returns a list of the currently selected collections in the outliner. - - Raises: - RuntimeError: If the outliner cannot be found in the main Blender - window. - - Returns: - list: A list of `bpy.types.Collection` objects that are currently - selected in the outliner. - """ - window = bpy.context.window or bpy.context.window_manager.windows[0] - - try: - area = next( - area for area in window.screen.areas - if area.type == 'OUTLINER') - region = next( - region for region in area.regions - if region.type == 'WINDOW') - except StopIteration as e: - raise RuntimeError("Could not find outliner. An outliner space " - "must be in the main Blender window.") from e - - with bpy.context.temp_override( - window=window, - area=area, - region=region, - screen=window.screen - ): - ids = bpy.context.selected_ids - - return [id for id in ids if isinstance(id, bpy.types.Collection)] - - -def get_selection(include_collections: bool = False) -> List[bpy.types.Object]: - """ - Returns a list of selected objects in the current Blender scene. - - Args: - include_collections (bool, optional): Whether to include selected - collections in the result. Defaults to False. - - Returns: - List[bpy.types.Object]: A list of selected objects. - """ - selection = [obj for obj in bpy.context.scene.objects if obj.select_get()] - - if include_collections: - selection.extend(get_selected_collections()) - - return selection - - -@contextlib.contextmanager -def maintained_selection(): - r"""Maintain selection during context - - Example: - >>> with maintained_selection(): - ... # Modify selection - ... bpy.ops.object.select_all(action='DESELECT') - >>> # Selection restored - """ - - previous_selection = get_selection() - previous_active = bpy.context.view_layer.objects.active - try: - yield - finally: - # Clear the selection - for node in get_selection(): - node.select_set(state=False) - if previous_selection: - for node in previous_selection: - try: - node.select_set(state=True) - except ReferenceError: - # This could happen if a selected node was deleted during - # the context. 
- log.exception("Failed to reselect") - continue - try: - bpy.context.view_layer.objects.active = previous_active - except ReferenceError: - # This could happen if the active node was deleted during the - # context. - log.exception("Failed to set active object.") - - -@contextlib.contextmanager -def maintained_time(): - """Maintain current frame during context.""" - current_time = bpy.context.scene.frame_current - try: - yield - finally: - bpy.context.scene.frame_current = current_time diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py deleted file mode 100644 index b386dd49d3..0000000000 --- a/openpype/hosts/blender/api/pipeline.py +++ /dev/null @@ -1,575 +0,0 @@ -import os -import sys -import traceback -from typing import Callable, Dict, Iterator, List, Optional - -import bpy - -from . import lib -from . import ops - -import pyblish.api - -from openpype.host import ( - HostBase, - IWorkfileHost, - IPublishHost, - ILoadHost -) -from openpype.client import get_asset_by_name -from openpype.pipeline import ( - schema, - legacy_io, - get_current_project_name, - get_current_asset_name, - register_loader_plugin_path, - register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.lib import ( - Logger, - register_event_callback, - emit_event -) -import openpype.hosts.blender -from openpype.settings import get_project_settings -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root, -) - - -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - -ORIGINAL_EXCEPTHOOK = sys.excepthook - -AVALON_INSTANCES = "AVALON_INSTANCES" -AVALON_CONTAINERS = "AVALON_CONTAINERS" -AVALON_PROPERTY = 'avalon' -IS_HEADLESS = bpy.app.background - -log = Logger.get_logger(__name__) - - -class BlenderHost(HostBase, IWorkfileHost, IPublishHost, ILoadHost): - name = "blender" - - def install(self): - """Override install method from HostBase. - Install Blender host functionality.""" - install() - - def get_containers(self) -> Iterator: - """List containers from active Blender scene.""" - return ls() - - def get_workfile_extensions(self) -> List[str]: - """Override get_workfile_extensions method from IWorkfileHost. - Get workfile possible extensions. - - Returns: - List[str]: Workfile extensions. - """ - return file_extensions() - - def save_workfile(self, dst_path: str = None): - """Override save_workfile method from IWorkfileHost. - Save currently opened workfile. - - Args: - dst_path (str): Where the current scene should be saved. Or use - current path if `None` is passed. - """ - save_file(dst_path if dst_path else bpy.data.filepath) - - def open_workfile(self, filepath: str): - """Override open_workfile method from IWorkfileHost. - Open workfile at specified filepath in the host. - - Args: - filepath (str): Path to workfile. - """ - open_file(filepath) - - def get_current_workfile(self) -> str: - """Override get_current_workfile method from IWorkfileHost. - Retrieve currently opened workfile path. - - Returns: - str: Path to currently opened workfile. - """ - return current_file() - - def workfile_has_unsaved_changes(self) -> bool: - """Override wokfile_has_unsaved_changes method from IWorkfileHost. 
- Returns True if opened workfile has no unsaved changes. - - Returns: - bool: True if scene is saved and False if it has unsaved - modifications. - """ - return has_unsaved_changes() - - def work_root(self, session) -> str: - """Override work_root method from IWorkfileHost. - Modify workdir per host. - - Args: - session (dict): Session context data. - - Returns: - str: Path to new workdir. - """ - return work_root(session) - - def get_context_data(self) -> dict: - """Override abstract method from IPublishHost. - Get global data related to creation-publishing from workfile. - - Returns: - dict: Context data stored using 'update_context_data'. - """ - property = bpy.context.scene.get(AVALON_PROPERTY) - if property: - return property.to_dict() - return {} - - def update_context_data(self, data: dict, changes: dict): - """Override abstract method from IPublishHost. - Store global context data to workfile. - - Args: - data (dict): New data as are. - changes (dict): Only data that has been changed. Each value has - tuple with '(, )' value. - """ - bpy.context.scene[AVALON_PROPERTY] = data - - -def pype_excepthook_handler(*args): - traceback.print_exception(*args) - - -def install(): - """Install Blender configuration for Avalon.""" - sys.excepthook = pype_excepthook_handler - - pyblish.api.register_host("blender") - pyblish.api.register_plugin_path(str(PUBLISH_PATH)) - - register_loader_plugin_path(str(LOAD_PATH)) - register_creator_plugin_path(str(CREATE_PATH)) - - lib.append_user_scripts() - lib.set_app_templates_path() - - register_event_callback("new", on_new) - register_event_callback("open", on_open) - - _register_callbacks() - _register_events() - - if not IS_HEADLESS: - ops.register() - - -def uninstall(): - """Uninstall Blender configuration for Avalon.""" - sys.excepthook = ORIGINAL_EXCEPTHOOK - - pyblish.api.deregister_host("blender") - pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) - - deregister_loader_plugin_path(str(LOAD_PATH)) - deregister_creator_plugin_path(str(CREATE_PATH)) - - if not IS_HEADLESS: - ops.unregister() - - -def show_message(title, message): - from openpype.widgets.message_window import Window - from .ops import BlenderApplication - - BlenderApplication.get_app() - - Window( - parent=None, - title=title, - message=message, - level="warning") - - -def message_window(title, message): - from .ops import ( - MainThreadItem, - execute_in_main_thread, - _process_app_events - ) - - mti = MainThreadItem(show_message, title, message) - execute_in_main_thread(mti) - _process_app_events() - - -def get_asset_data(): - project_name = get_current_project_name() - asset_name = get_current_asset_name() - asset_doc = get_asset_by_name(project_name, asset_name) - - return asset_doc.get("data") - - -def set_frame_range(data): - scene = bpy.context.scene - - # Default scene settings - frameStart = scene.frame_start - frameEnd = scene.frame_end - fps = scene.render.fps / scene.render.fps_base - - if not data: - return - - if data.get("frameStart"): - frameStart = data.get("frameStart") - if data.get("frameEnd"): - frameEnd = data.get("frameEnd") - if data.get("fps"): - fps = data.get("fps") - - scene.frame_start = frameStart - scene.frame_end = frameEnd - scene.render.fps = round(fps) - scene.render.fps_base = round(fps) / fps - - -def set_resolution(data): - scene = bpy.context.scene - - # Default scene settings - resolution_x = scene.render.resolution_x - resolution_y = scene.render.resolution_y - - if not data: - return - - if data.get("resolutionWidth"): - resolution_x = 
data.get("resolutionWidth") - if data.get("resolutionHeight"): - resolution_y = data.get("resolutionHeight") - - scene.render.resolution_x = resolution_x - scene.render.resolution_y = resolution_y - - -def on_new(): - project = os.environ.get("AVALON_PROJECT") - settings = get_project_settings(project).get("blender") - - set_resolution_startup = settings.get("set_resolution_startup") - set_frames_startup = settings.get("set_frames_startup") - - data = get_asset_data() - - if set_resolution_startup: - set_resolution(data) - if set_frames_startup: - set_frame_range(data) - - unit_scale_settings = settings.get("unit_scale_settings") - unit_scale_enabled = unit_scale_settings.get("enabled") - if unit_scale_enabled: - unit_scale = unit_scale_settings.get("base_file_unit_scale") - bpy.context.scene.unit_settings.scale_length = unit_scale - - -def on_open(): - project = os.environ.get("AVALON_PROJECT") - settings = get_project_settings(project).get("blender") - - set_resolution_startup = settings.get("set_resolution_startup") - set_frames_startup = settings.get("set_frames_startup") - - data = get_asset_data() - - if set_resolution_startup: - set_resolution(data) - if set_frames_startup: - set_frame_range(data) - - unit_scale_settings = settings.get("unit_scale_settings") - unit_scale_enabled = unit_scale_settings.get("enabled") - apply_on_opening = unit_scale_settings.get("apply_on_opening") - if unit_scale_enabled and apply_on_opening: - unit_scale = unit_scale_settings.get("base_file_unit_scale") - prev_unit_scale = bpy.context.scene.unit_settings.scale_length - - if unit_scale != prev_unit_scale: - bpy.context.scene.unit_settings.scale_length = unit_scale - - message_window( - "Base file unit scale changed", - "Base file unit scale changed to match the project settings.") - - -@bpy.app.handlers.persistent -def _on_save_pre(*args): - emit_event("before.save") - - -@bpy.app.handlers.persistent -def _on_save_post(*args): - emit_event("save") - - -@bpy.app.handlers.persistent -def _on_load_post(*args): - # Detect new file or opening an existing file - if bpy.data.filepath: - # Likely this was an open operation since it has a filepath - emit_event("open") - else: - emit_event("new") - - ops.OpenFileCacher.post_load() - - -def _register_callbacks(): - """Register callbacks for certain events.""" - def _remove_handler(handlers: List, callback: Callable): - """Remove the callback from the given handler list.""" - - try: - handlers.remove(callback) - except ValueError: - pass - - # TODO (jasper): implement on_init callback? - - # Be sure to remove existig ones first. - _remove_handler(bpy.app.handlers.save_pre, _on_save_pre) - _remove_handler(bpy.app.handlers.save_post, _on_save_post) - _remove_handler(bpy.app.handlers.load_post, _on_load_post) - - bpy.app.handlers.save_pre.append(_on_save_pre) - bpy.app.handlers.save_post.append(_on_save_post) - bpy.app.handlers.load_post.append(_on_load_post) - - log.info("Installed event handler _on_save_pre...") - log.info("Installed event handler _on_save_post...") - log.info("Installed event handler _on_load_post...") - - -def _on_task_changed(): - """Callback for when the task in the context is changed.""" - - # TODO (jasper): Blender has no concept of projects or workspace. - # It would be nice to override 'bpy.ops.wm.open_mainfile' so it takes the - # workdir as starting directory. But I don't know if that is possible. 
- # Another option would be to create a custom 'File Selector' and add the - # `directory` attribute, so it opens in that directory (does it?). - # https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector - # https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add - workdir = legacy_io.Session["AVALON_WORKDIR"] - log.debug("New working directory: %s", workdir) - - -def _register_events(): - """Install callbacks for specific events.""" - - register_event_callback("taskChanged", _on_task_changed) - log.info("Installed event callback for 'taskChanged'...") - - -def _discover_gui() -> Optional[Callable]: - """Return the most desirable of the currently registered GUIs""" - - # Prefer last registered - guis = reversed(pyblish.api.registered_guis()) - - for gui in guis: - try: - gui = __import__(gui).show - except (ImportError, AttributeError): - continue - else: - return gui - - return None - - -def add_to_avalon_container(container: bpy.types.Collection): - """Add the container to the Avalon container.""" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - - # Link the container to the scene so it's easily visible to the artist - # and can be managed easily. Otherwise it's only found in "Blender - # File" view and it will be removed by Blenders garbage collection, - # unless you set a 'fake user'. - bpy.context.scene.collection.children.link(avalon_container) - - avalon_container.children.link(container) - - # Disable Avalon containers for the view layers. - for view_layer in bpy.context.scene.view_layers: - for child in view_layer.layer_collection.children: - if child.collection == avalon_container: - child.exclude = True - - -def metadata_update(node: bpy.types.bpy_struct_meta_idprop, data: Dict): - """Imprint the node with metadata. - - Existing metadata will be updated. - """ - - if not node.get(AVALON_PROPERTY): - node[AVALON_PROPERTY] = dict() - for key, value in data.items(): - if value is None: - continue - node[AVALON_PROPERTY][key] = value - - -def containerise(name: str, - namespace: str, - nodes: List, - context: Dict, - loader: Optional[str] = None, - suffix: Optional[str] = "CON") -> bpy.types.Collection: - """Bundle `nodes` into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name: Name of resulting assembly - namespace: Namespace under which to host container - nodes: Long names of nodes to containerise - context: Asset information - loader: Name of loader used to produce this container. - suffix: Suffix of container, defaults to `_CON`. 
- - Returns: - The container assembly - - """ - - node_name = f"{context['asset']['name']}_{name}" - if namespace: - node_name = f"{namespace}:{node_name}" - if suffix: - node_name = f"{node_name}_{suffix}" - container = bpy.data.collections.new(name=node_name) - # Link the children nodes - for obj in nodes: - container.objects.link(obj) - - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - } - - metadata_update(container, data) - add_to_avalon_container(container) - - return container - - -def containerise_existing( - container: bpy.types.Collection, - name: str, - namespace: str, - context: Dict, - loader: Optional[str] = None, - suffix: Optional[str] = "CON") -> bpy.types.Collection: - """Imprint or update container with metadata. - - Arguments: - name: Name of resulting assembly - namespace: Namespace under which to host container - context: Asset information - loader: Name of loader used to produce this container. - suffix: Suffix of container, defaults to `_CON`. - - Returns: - The container assembly - """ - - node_name = container.name - if suffix: - node_name = f"{node_name}_{suffix}" - container.name = node_name - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - } - - metadata_update(container, data) - add_to_avalon_container(container) - - return container - - -def parse_container(container: bpy.types.Collection, - validate: bool = True) -> Dict: - """Return the container node's full container data. - - Args: - container: A container node name. - validate: turn the validation for the container on or off - - Returns: - The container schema data for this container node. - - """ - - data = lib.read(container) - - # Append transient data - data["objectName"] = container.name - - if validate: - schema.validate(data) - - return data - - -def ls() -> Iterator: - """List containers from active Blender scene. - - This is the host-equivalent of api.ls(), but instead of listing assets on - disk, it lists assets already loaded in Blender; once loaded they are - called containers. 
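A consumer of this generator only ever sees plain dictionaries; a usage sketch (the printed keys come from the metadata imprinted in `containerise` above):

```python
from openpype.hosts.blender.api.pipeline import ls

for container_data in ls():
    print(container_data["name"],
          container_data["namespace"],
          container_data["representation"])
```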
- """ - - for container in lib.lsattr("id", AVALON_CONTAINER_ID): - yield parse_container(container) - - -def publish(): - """Shorthand to publish from within host.""" - - return pyblish.util.publish() diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py deleted file mode 100644 index b1ff3e4a09..0000000000 --- a/openpype/hosts/blender/api/plugin.py +++ /dev/null @@ -1,533 +0,0 @@ -"""Shared functionality for pipeline plugins for Blender.""" - -import itertools -from pathlib import Path -from typing import Dict, List, Optional - -import bpy - -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import ( - Creator, - CreatedInstance, - LoaderPlugin, -) -from openpype.lib import BoolDef - -from .pipeline import ( - AVALON_CONTAINERS, - AVALON_INSTANCES, - AVALON_PROPERTY, -) -from .ops import ( - MainThreadItem, - execute_in_main_thread -) -from .lib import imprint - -VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx"] - - -def prepare_scene_name( - asset: str, subset: str, namespace: Optional[str] = None -) -> str: - """Return a consistent name for an asset.""" - name = f"{asset}" - if namespace: - name = f"{name}_{namespace}" - name = f"{name}_{subset}" - - # Blender name for a collection or object cannot be longer than 63 - # characters. If the name is longer, it will raise an error. - if len(name) > 63: - raise ValueError(f"Scene name '{name}' would be too long.") - - return name - - -def get_unique_number( - asset: str, subset: str -) -> str: - """Return a unique number based on the asset name.""" - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - return "01" - # Check the names of both object and collection containers - obj_asset_groups = avalon_container.objects - obj_group_names = { - c.name for c in obj_asset_groups - if c.type == 'EMPTY' and c.get(AVALON_PROPERTY)} - coll_asset_groups = avalon_container.children - coll_group_names = { - c.name for c in coll_asset_groups - if c.get(AVALON_PROPERTY)} - container_names = obj_group_names.union(coll_group_names) - count = 1 - name = f"{asset}_{count:0>2}_{subset}" - while name in container_names: - count += 1 - name = f"{asset}_{count:0>2}_{subset}" - return f"{count:0>2}" - - -def prepare_data(data, container_name=None): - name = data.name - local_data = data.make_local() - if container_name: - local_data.name = f"{container_name}:{name}" - else: - local_data.name = f"{name}" - return local_data - - -def create_blender_context(active: Optional[bpy.types.Object] = None, - selected: Optional[bpy.types.Object] = None, - window: Optional[bpy.types.Window] = None): - """Create a new Blender context. If an object is passed as - parameter, it is set as selected and active. 
- """ - - if not isinstance(selected, list): - selected = [selected] - - override_context = bpy.context.copy() - - windows = [window] if window else bpy.context.window_manager.windows - - for win in windows: - for area in win.screen.areas: - if area.type == 'VIEW_3D': - for region in area.regions: - if region.type == 'WINDOW': - override_context['window'] = win - override_context['screen'] = win.screen - override_context['area'] = area - override_context['region'] = region - override_context['scene'] = bpy.context.scene - override_context['active_object'] = active - override_context['selected_objects'] = selected - return override_context - raise Exception("Could not create a custom Blender context.") - - -def get_parent_collection(collection): - """Get the parent of the input collection""" - check_list = [bpy.context.scene.collection] - - for c in check_list: - if collection.name in c.children.keys(): - return c - check_list.extend(c.children) - - return None - - -def get_local_collection_with_name(name): - for collection in bpy.data.collections: - if collection.name == name and collection.library is None: - return collection - return None - - -def deselect_all(): - """Deselect all objects in the scene. - - Blender gives context error if trying to deselect object that it isn't - in object mode. - """ - modes = [] - active = bpy.context.view_layer.objects.active - - for obj in bpy.data.objects: - if obj.mode != 'OBJECT': - modes.append((obj, obj.mode)) - bpy.context.view_layer.objects.active = obj - bpy.ops.object.mode_set(mode='OBJECT') - - bpy.ops.object.select_all(action='DESELECT') - - for p in modes: - bpy.context.view_layer.objects.active = p[0] - bpy.ops.object.mode_set(mode=p[1]) - - bpy.context.view_layer.objects.active = active - - -class BaseCreator(Creator): - """Base class for Blender Creator plug-ins.""" - defaults = ['Main'] - - create_as_asset_group = False - - @staticmethod - def cache_subsets(shared_data): - """Cache instances for Creators shared data. - - Create `blender_cached_subsets` key when needed in shared data and - fill it with all collected instances from the scene under its - respective creator identifiers. - - If legacy instances are detected in the scene, create - `blender_cached_legacy_subsets` key and fill it with - all legacy subsets from this family as a value. # key or value? - - Args: - shared_data(Dict[str, Any]): Shared data. - - Return: - Dict[str, Any]: Shared data with cached subsets. - """ - if not shared_data.get('blender_cached_subsets'): - cache = {} - cache_legacy = {} - - avalon_instances = bpy.data.collections.get(AVALON_INSTANCES) - avalon_instance_objs = ( - avalon_instances.objects if avalon_instances else [] - ) - - for obj_or_col in itertools.chain( - avalon_instance_objs, - bpy.data.collections - ): - avalon_prop = obj_or_col.get(AVALON_PROPERTY, {}) - if not avalon_prop: - continue - - if avalon_prop.get('id') != 'pyblish.avalon.instance': - continue - - creator_id = avalon_prop.get('creator_identifier') - if creator_id: - # Creator instance - cache.setdefault(creator_id, []).append(obj_or_col) - else: - family = avalon_prop.get('family') - if family: - # Legacy creator instance - cache_legacy.setdefault(family, []).append(obj_or_col) - - shared_data["blender_cached_subsets"] = cache - shared_data["blender_cached_legacy_subsets"] = cache_legacy - - return shared_data - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - """Override abstract method from Creator. - Create new instance and store it. 
- - Args: - subset_name(str): Subset name of created instance. - instance_data(dict): Instance base data. - pre_create_data(dict): Data based on pre creation attributes. - Those may affect how creator works. - """ - # Get Instance Container or create it if it does not exist - instances = bpy.data.collections.get(AVALON_INSTANCES) - if not instances: - instances = bpy.data.collections.new(name=AVALON_INSTANCES) - bpy.context.scene.collection.children.link(instances) - - # Create asset group - if AYON_SERVER_ENABLED: - asset_name = instance_data["folderPath"].split("/")[-1] - else: - asset_name = instance_data["asset"] - - name = prepare_scene_name(asset_name, subset_name) - if self.create_as_asset_group: - # Create instance as empty - instance_node = bpy.data.objects.new(name=name, object_data=None) - instance_node.empty_display_type = 'SINGLE_ARROW' - instances.objects.link(instance_node) - else: - # Create instance collection - instance_node = bpy.data.collections.new(name=name) - instances.children.link(instance_node) - - self.set_instance_data(subset_name, instance_data) - - instance = CreatedInstance( - self.family, subset_name, instance_data, self - ) - instance.transient_data["instance_node"] = instance_node - self._add_instance_to_context(instance) - - imprint(instance_node, instance_data) - - return instance_node - - def collect_instances(self): - """Override abstract method from BaseCreator. - Collect existing instances related to this creator plugin.""" - - # Cache subsets in shared data - self.cache_subsets(self.collection_shared_data) - - # Get cached subsets - cached_subsets = self.collection_shared_data.get( - "blender_cached_subsets" - ) - if not cached_subsets: - return - - # Process only instances that were created by this creator - for instance_node in cached_subsets.get(self.identifier, []): - property = instance_node.get(AVALON_PROPERTY) - # Create instance object from existing data - instance = CreatedInstance.from_existing( - instance_data=property.to_dict(), - creator=self - ) - instance.transient_data["instance_node"] = instance_node - - # Add instance to create context - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - """Override abstract method from BaseCreator. - Store changes of existing instances so they can be recollected. - - Args: - update_list(List[UpdateData]): Changed instances - and their changes, as a list of tuples. - """ - - if AYON_SERVER_ENABLED: - asset_name_key = "folderPath" - else: - asset_name_key = "asset" - - for created_instance, changes in update_list: - data = created_instance.data_to_store() - node = created_instance.transient_data["instance_node"] - if not node: - # We can't update if we don't know the node - self.log.error( - f"Unable to update instance {created_instance} " - f"without instance node." - ) - return - - # Rename the instance node in the scene if subset or asset changed. - # Do not rename the instance if the family is workfile, as the - # workfile instance is included in the AVALON_CONTAINER collection. 
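Condensed, the rename rule implemented just below amounts to the following predicate (helper name invented; `folderPath` replaces `asset` when running against an AYON server, as handled above):

```python
def needs_node_rename(changed_keys, family, name_keys=("subset", "asset")):
    # Rename the scene node when subset or asset changed, except for
    # workfile instances, which sit in the AVALON_CONTAINER collection.
    return (family != "workfile"
            and any(key in changed_keys for key in name_keys))
```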
- if ( - "subset" in changes.changed_keys - or asset_name_key in changes.changed_keys - ) and created_instance.family != "workfile": - asset_name = data[asset_name_key] - if AYON_SERVER_ENABLED: - asset_name = asset_name.split("/")[-1] - name = prepare_scene_name( - asset=asset_name, subset=data["subset"] - ) - node.name = name - - imprint(node, data) - - def remove_instances(self, instances: List[CreatedInstance]): - - for instance in instances: - node = instance.transient_data["instance_node"] - - if isinstance(node, bpy.types.Collection): - for children in node.children_recursive: - if isinstance(children, bpy.types.Collection): - bpy.data.collections.remove(children) - else: - bpy.data.objects.remove(children) - - bpy.data.collections.remove(node) - elif isinstance(node, bpy.types.Object): - bpy.data.objects.remove(node) - - self._remove_instance_from_context(instance) - - def set_instance_data( - self, - subset_name: str, - instance_data: dict - ): - """Fill instance data with required items. - - Args: - subset_name(str): Subset name of created instance. - instance_data(dict): Instance base data. - instance_node(bpy.types.ID): Instance node in blender scene. - """ - if not instance_data: - instance_data = {} - - instance_data.update( - { - "id": "pyblish.avalon.instance", - "creator_identifier": self.identifier, - "subset": subset_name, - } - ) - - def get_pre_create_attr_defs(self): - return [ - BoolDef("use_selection", - label="Use selection", - default=True) - ] - - -class Loader(LoaderPlugin): - """Base class for Loader plug-ins.""" - - hosts = ["blender"] - - -class AssetLoader(LoaderPlugin): - """A basic AssetLoader for Blender - - This will implement the basic logic for linking/appending assets - into another Blender scene. - - The `update` method should be implemented by a sub-class, because - it's different for different types (e.g. model, rig, animation, - etc.). - """ - - @staticmethod - def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]: - """Get the 'instance empty' that holds the collection instance.""" - for node in nodes: - if not isinstance(node, bpy.types.Object): - continue - if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION' - and node.instance_collection and node.name == instance_name): - return node - return None - - @staticmethod - def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]: - """Get the 'instance collection' (container) for this asset.""" - for node in nodes: - if not isinstance(node, bpy.types.Collection): - continue - if node.name == instance_name: - return node - return None - - @staticmethod - def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library: - """Find the library file from the container. - - It traverses the objects from this collection, checks if there is only - 1 library from which the objects come from and returns the library. - - Warning: - No nested collections are supported at the moment! - """ - assert not container.children, "Nested collections are not supported." - assert container.objects, "The collection doesn't contain any objects." - libraries = set() - for obj in container.objects: - assert obj.library, f"'{obj.name}' is not linked." - libraries.add(obj.library) - - assert len( - libraries) == 1, "'{container.name}' contains objects from more then 1 library." 
-
-        return list(libraries)[0]
-
-    def process_asset(self,
-                      context: dict,
-                      name: str,
-                      namespace: Optional[str] = None,
-                      options: Optional[Dict] = None):
-        """Must be implemented by a sub-class"""
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def load(self,
-             context: dict,
-             name: Optional[str] = None,
-             namespace: Optional[str] = None,
-             options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
-        """Run the loader on the Blender main thread."""
-        mti = MainThreadItem(self._load, context, name, namespace, options)
-        execute_in_main_thread(mti)
-
-    def _load(self,
-              context: dict,
-              name: Optional[str] = None,
-              namespace: Optional[str] = None,
-              options: Optional[Dict] = None
-              ) -> Optional[bpy.types.Collection]:
-        """Load asset via database
-
-        Arguments:
-            context: Full parenthood of representation to load
-            name: Use pre-defined name
-            namespace: Use pre-defined namespace
-            options: Additional settings dictionary
-        """
-        # TODO: make it possible to add the asset several times by
-        # just re-using the collection
-        filepath = self.filepath_from_context(context)
-        assert Path(filepath).exists(), f"{filepath} doesn't exist."
-
-        asset = context["asset"]["name"]
-        subset = context["subset"]["name"]
-        unique_number = get_unique_number(
-            asset, subset
-        )
-        namespace = namespace or f"{asset}_{unique_number}"
-        name = name or prepare_scene_name(
-            asset, subset, unique_number
-        )
-
-        nodes = self.process_asset(
-            context=context,
-            name=name,
-            namespace=namespace,
-            options=options,
-        )
-
-        # Only containerise if anything was loaded by the Loader.
-        if not nodes:
-            return None
-
-        # Only containerise if it's not already a collection from a .blend file.
-        # representation = context["representation"]["name"]
-        # if representation != "blend":
-        #     from openpype.hosts.blender.api.pipeline import containerise
-        #     return containerise(
-        #         name=name,
-        #         namespace=namespace,
-        #         nodes=nodes,
-        #         context=context,
-        #         loader=self.__class__.__name__,
-        #     )
-
-        # asset = context["asset"]["name"]
-        # subset = context["subset"]["name"]
-        # instance_name = prepare_scene_name(
-        #     asset, subset, unique_number
-        # ) + '_CON'
-
-        # return self._get_instance_collection(instance_name, nodes)
-
-    def exec_update(self, container: Dict, representation: Dict):
-        """Must be implemented by a sub-class"""
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def update(self, container: Dict, representation: Dict):
-        """Run the update on the Blender main thread."""
-        mti = MainThreadItem(self.exec_update, container, representation)
-        execute_in_main_thread(mti)
-
-    def exec_remove(self, container: Dict) -> bool:
-        """Must be implemented by a sub-class"""
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def remove(self, container: Dict) -> bool:
-        """Run the remove on the Blender main thread."""
-        mti = MainThreadItem(self.exec_remove, container)
-        execute_in_main_thread(mti)
diff --git a/openpype/hosts/blender/blender_addon/startup/init.py b/openpype/hosts/blender/blender_addon/startup/init.py
deleted file mode 100644
index 603691675d..0000000000
--- a/openpype/hosts/blender/blender_addon/startup/init.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from openpype.pipeline import install_host
-from openpype.hosts.blender.api import BlenderHost
-
-
-def register():
-    install_host(BlenderHost())
-
-
-def unregister():
-    pass
diff --git a/openpype/hosts/blender/hooks/pre_pyside_install.py b/openpype/hosts/blender/hooks/pre_pyside_install.py
deleted file mode 100644
index 2aa3a5e49a..0000000000
--- a/openpype/hosts/blender/hooks/pre_pyside_install.py
+++ /dev/null
@@ -1,231 +0,0 @@
-import os
-import re
-import subprocess
-from platform import system
-from openpype.lib.applications import PreLaunchHook, LaunchTypes
-
-
-class InstallPySideToBlender(PreLaunchHook):
-    """Install Qt binding to blender's python packages.
-
-    Prelaunch hook does 2 things:
-    1.) Blender's python packages are pushed to the beginning of PYTHONPATH.
-    2.) Checks if blender has PySide2 installed, and tries to install it
-        if not.
-
-    The pipeline implementation requires a Qt binding to be installed in
-    blender's python packages.
-    """
-
-    app_groups = {"blender"}
-    launch_types = {LaunchTypes.local}
-
-    def execute(self):
-        # Prelaunch hook is not crucial
-        try:
-            self.inner_execute()
-        except Exception:
-            self.log.warning(
-                "Processing of {} crashed.".format(self.__class__.__name__),
-                exc_info=True
-            )
-
-    def inner_execute(self):
-        # Get blender's python directory
-        version_regex = re.compile(r"^[2-4]\.[0-9]+$")
-
-        platform = system().lower()
-        executable = self.launch_context.executable.executable_path
-        expected_executable = "blender"
-        if platform == "windows":
-            expected_executable += ".exe"
-
-        if os.path.basename(executable).lower() != expected_executable:
-            self.log.info((
-                f"Executable does not lead to {expected_executable} file. "
-                "Can't determine blender's python to check/install PySide2."
-            ))
-            return
-
-        versions_dir = os.path.dirname(executable)
-        if platform == "darwin":
-            versions_dir = os.path.join(
-                os.path.dirname(versions_dir), "Resources"
-            )
-        version_subfolders = []
-        for dir_entry in os.scandir(versions_dir):
-            if dir_entry.is_dir() and version_regex.match(dir_entry.name):
-                version_subfolders.append(dir_entry.name)
-
-        if not version_subfolders:
-            self.log.info(
-                "Didn't find version subfolder next to Blender executable"
-            )
-            return
-
-        if len(version_subfolders) > 1:
-            self.log.info((
-                "Found more than one version subfolder next"
-                " to blender executable. {}"
-            ).format(", ".join([
-                '"./{}"'.format(name)
-                for name in version_subfolders
-            ])))
-            return
-
-        version_subfolder = version_subfolders[0]
-
-        python_dir = os.path.join(versions_dir, version_subfolder, "python")
-        python_lib = os.path.join(python_dir, "lib")
-        python_version = "python"
-
-        if platform != "windows":
-            for dir_entry in os.scandir(python_lib):
-                if dir_entry.is_dir() and dir_entry.name.startswith("python"):
-                    python_lib = dir_entry.path
-                    python_version = dir_entry.name
-                    break
-
-        # Change PYTHONPATH to contain blender's packages as first
-        python_paths = [
-            python_lib,
-            os.path.join(python_lib, "site-packages"),
-        ]
-        python_path = self.launch_context.env.get("PYTHONPATH") or ""
-        for path in python_path.split(os.pathsep):
-            if path:
-                python_paths.append(path)
-
-        self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths)
-
-        # Get blender's python executable
-        python_bin = os.path.join(python_dir, "bin")
-        if platform == "windows":
-            python_executable = os.path.join(python_bin, "python.exe")
-        else:
-            python_executable = os.path.join(python_bin, python_version)
-            # Check for python with enabled 'pymalloc'
-            if not os.path.exists(python_executable):
-                python_executable += "m"
-
-        if not os.path.exists(python_executable):
-            self.log.warning(
-                "Couldn't find python executable for blender. {}".format(
-                    executable
-                )
-            )
-            return
-
-        # Check if PySide2 is installed and skip if yes
-        if self.is_pyside_installed(python_executable):
-            self.log.debug("Blender has already installed PySide2.")
-            return
-
-        # Install PySide2 in blender's python
-        if platform == "windows":
-            result = self.install_pyside_windows(python_executable)
-        else:
-            result = self.install_pyside(python_executable)
-
-        if result:
-            self.log.info("Successfully installed PySide2 module to blender.")
-        else:
-            self.log.warning("Failed to install PySide2 module to blender.")
-
-    def install_pyside_windows(self, python_executable):
-        """Install PySide2 python module to blender's python.
-
-        Installation requires administrator rights, which is why the
-        "pywin32" module is used to execute the command and request
-        administrator rights.
-        """
-        try:
-            import win32api
-            import win32con
-            import win32process
-            import win32event
-            import pywintypes
-            from win32comext.shell.shell import ShellExecuteEx
-            from win32comext.shell import shellcon
-        except Exception:
-            self.log.warning("Couldn't import \"pywin32\" modules")
-            return
-
-        try:
-            # Parameters
-            # - use "-m pip" as module pip to install PySide2 and argument
-            #   "--ignore-installed" is to force install module to blender's
-            #   site-packages and make sure it is binary compatible
-            parameters = "-m pip install --ignore-installed PySide2"
-
-            # Execute command and ask for administrator's rights
-            process_info = ShellExecuteEx(
-                nShow=win32con.SW_SHOWNORMAL,
-                fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
-                lpVerb="runas",
-                lpFile=python_executable,
-                lpParameters=parameters,
-                lpDirectory=os.path.dirname(python_executable)
-            )
-            process_handle = process_info["hProcess"]
-            win32event.WaitForSingleObject(process_handle, win32event.INFINITE)
-            returncode = win32process.GetExitCodeProcess(process_handle)
-            return returncode == 0
-        except pywintypes.error:
-            pass
-
-    def install_pyside(self, python_executable):
-        """Install PySide2 python module to blender's python."""
-        try:
-            # Parameters
-            # - use "-m pip" as module pip to install PySide2 and argument
-            #   "--ignore-installed" is to force install module to blender's
-            #   site-packages and make sure it is binary compatible
-            args = [
-                python_executable,
-                "-m",
-                "pip",
-                "install",
-                "--ignore-installed",
-                "PySide2",
-            ]
-            process = subprocess.Popen(
-                args, stdout=subprocess.PIPE, universal_newlines=True
-            )
-            process.communicate()
-            return process.returncode == 0
-        except PermissionError:
-            self.log.warning(
-                "Permission denied with command: "
-                "\"{}\".".format(" ".join(args))
-            )
-        except OSError as error:
-            self.log.warning(f"OS error has occurred: \"{error}\".")
-        except subprocess.SubprocessError:
-            pass
-
-    def is_pyside_installed(self, python_executable):
-        """Check if PySide2 module is in blender's pip list.
-
-        Check that PySide2 is installed directly in blender's site-packages.
-        It is possible that it is installed in user's site-packages but that
-        may be incompatible with blender's python.
-        """
-        # Get pip list from blender's python executable
-        args = [python_executable, "-m", "pip", "list"]
-        process = subprocess.Popen(args, stdout=subprocess.PIPE)
-        stdout, _ = process.communicate()
-        lines = stdout.decode().split(os.linesep)
-        # The second line contains dashes that define the maximum length of
-        # the module name; the second column of dashes defines the maximum
-        # length of the module version.
-        package_dashes, *_ = lines[1].split(" ")
-        package_len = len(package_dashes)
-
-        # Go through the printed lines, starting at line 3
-        for idx in range(2, len(lines)):
-            line = lines[idx]
-            if not line:
-                continue
-            package_name = line[0:package_len].strip()
-            if package_name.lower() == "pyside2":
-                return True
-        return False
diff --git a/openpype/hosts/blender/plugins/create/convert_legacy.py b/openpype/hosts/blender/plugins/create/convert_legacy.py
deleted file mode 100644
index f05a6b1f5a..0000000000
--- a/openpype/hosts/blender/plugins/create/convert_legacy.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Converter for legacy Blender subsets."""
-from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
-from openpype.hosts.blender.api.lib import imprint
-
-
-class BlenderLegacyConvertor(SubsetConvertorPlugin):
-    """Find and convert any legacy subsets in the scene.
-
-    This Converter will find all legacy subsets in the scene and will
-    transform them to the current system. Since the old subsets don't
-    retain any information about their original creators, the only mapping
-    we can do is based on their families.
-
-    Its limitation is that you can have multiple creators creating subsets
-    of the same family and there is no way to handle it. This code should
-    nevertheless cover all creators that came with OpenPype.
-
-    """
-    identifier = "io.openpype.creators.blender.legacy"
-    family_to_id = {
-        "action": "io.openpype.creators.blender.action",
-        "camera": "io.openpype.creators.blender.camera",
-        "animation": "io.openpype.creators.blender.animation",
-        "blendScene": "io.openpype.creators.blender.blendscene",
-        "layout": "io.openpype.creators.blender.layout",
-        "model": "io.openpype.creators.blender.model",
-        "pointcache": "io.openpype.creators.blender.pointcache",
-        "render": "io.openpype.creators.blender.render",
-        "review": "io.openpype.creators.blender.review",
-        "rig": "io.openpype.creators.blender.rig",
-    }
-
-    def __init__(self, *args, **kwargs):
-        super(BlenderLegacyConvertor, self).__init__(*args, **kwargs)
-        self.legacy_subsets = {}
-
-    def find_instances(self):
-        """Find legacy subsets in the scene.
-
-        Legacy subsets are the ones that don't have the `creator_identifier`
-        parameter on them.
-
-        This is using cached entries done in
-        :py:meth:`~BaseCreator.cache_subsets()`
-
-        """
-        self.legacy_subsets = self.collection_shared_data.get(
-            "blender_cached_legacy_subsets")
-        if not self.legacy_subsets:
-            return
-        self.add_convertor_item(
-            "Found {} incompatible subset{}".format(
-                len(self.legacy_subsets),
-                "s" if len(self.legacy_subsets) > 1 else ""
-            )
-        )
-
-    def convert(self):
-        """Convert all legacy subsets to current.
-
-        It is enough to add `creator_identifier` and `instance_node`.
- - """ - if not self.legacy_subsets: - return - - for family, instance_nodes in self.legacy_subsets.items(): - if family in self.family_to_id: - for instance_node in instance_nodes: - creator_identifier = self.family_to_id[family] - self.log.info( - "Converting {} to {}".format(instance_node.name, - creator_identifier) - ) - imprint(instance_node, data={ - "creator_identifier": creator_identifier - }) diff --git a/openpype/hosts/blender/plugins/create/create_animation.py b/openpype/hosts/blender/plugins/create/create_animation.py deleted file mode 100644 index 3a91b2d5ff..0000000000 --- a/openpype/hosts/blender/plugins/create/create_animation.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Create an animation asset.""" - -from openpype.hosts.blender.api import plugin, lib - - -class CreateAnimation(plugin.BaseCreator): - """Animation output for character rigs.""" - - identifier = "io.openpype.creators.blender.animation" - label = "Animation" - family = "animation" - icon = "male" - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - # Run parent create method - collection = super().create( - subset_name, instance_data, pre_create_data - ) - - if pre_create_data.get("use_selection"): - selected = lib.get_selection() - for obj in selected: - collection.objects.link(obj) - elif pre_create_data.get("asset_group"): - # Use for Load Blend automated creation of animation instances - # upon loading rig files - obj = pre_create_data.get("asset_group") - collection.objects.link(obj) - - return collection diff --git a/openpype/hosts/blender/plugins/create/create_camera.py b/openpype/hosts/blender/plugins/create/create_camera.py deleted file mode 100644 index 2e2e6cec22..0000000000 --- a/openpype/hosts/blender/plugins/create/create_camera.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Create a camera asset.""" - -import bpy - -from openpype.hosts.blender.api import plugin, lib -from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES - - -class CreateCamera(plugin.BaseCreator): - """Polygonal static geometry.""" - - identifier = "io.openpype.creators.blender.camera" - label = "Camera" - family = "camera" - icon = "video-camera" - - create_as_asset_group = True - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - - asset_group = super().create(subset_name, - instance_data, - pre_create_data) - - bpy.context.view_layer.objects.active = asset_group - if pre_create_data.get("use_selection"): - for obj in lib.get_selection(): - obj.parent = asset_group - else: - plugin.deselect_all() - camera = bpy.data.cameras.new(subset_name) - camera_obj = bpy.data.objects.new(subset_name, camera) - - instances = bpy.data.collections.get(AVALON_INSTANCES) - instances.objects.link(camera_obj) - - bpy.context.view_layer.objects.active = asset_group - camera_obj.parent = asset_group - - return asset_group diff --git a/openpype/hosts/blender/plugins/create/create_layout.py b/openpype/hosts/blender/plugins/create/create_layout.py deleted file mode 100644 index 16d227e50e..0000000000 --- a/openpype/hosts/blender/plugins/create/create_layout.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Create a layout asset.""" - -import bpy - -from openpype.hosts.blender.api import plugin, lib - - -class CreateLayout(plugin.BaseCreator): - """Layout output for character rigs.""" - - identifier = "io.openpype.creators.blender.layout" - label = "Layout" - family = "layout" - icon = "cubes" - - create_as_asset_group = True - - def create( - self, subset_name: str, instance_data: dict, 
pre_create_data: dict - ): - - asset_group = super().create(subset_name, - instance_data, - pre_create_data) - - # Add selected objects to instance - if pre_create_data.get("use_selection"): - bpy.context.view_layer.objects.active = asset_group - for obj in lib.get_selection(): - obj.parent = asset_group - - return asset_group diff --git a/openpype/hosts/blender/plugins/create/create_model.py b/openpype/hosts/blender/plugins/create/create_model.py deleted file mode 100644 index 2f3f61728b..0000000000 --- a/openpype/hosts/blender/plugins/create/create_model.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Create a model asset.""" - -import bpy - -from openpype.hosts.blender.api import plugin, lib - - -class CreateModel(plugin.BaseCreator): - """Polygonal static geometry.""" - - identifier = "io.openpype.creators.blender.model" - label = "Model" - family = "model" - icon = "cube" - - create_as_asset_group = True - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - asset_group = super().create(subset_name, - instance_data, - pre_create_data) - - # Add selected objects to instance - if pre_create_data.get("use_selection"): - bpy.context.view_layer.objects.active = asset_group - for obj in lib.get_selection(): - obj.parent = asset_group - - return asset_group diff --git a/openpype/hosts/blender/plugins/create/create_pointcache.py b/openpype/hosts/blender/plugins/create/create_pointcache.py deleted file mode 100644 index b3329bcb3b..0000000000 --- a/openpype/hosts/blender/plugins/create/create_pointcache.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Create a pointcache asset.""" - -from openpype.hosts.blender.api import plugin, lib - - -class CreatePointcache(plugin.BaseCreator): - """Polygonal static geometry.""" - - identifier = "io.openpype.creators.blender.pointcache" - label = "Point Cache" - family = "pointcache" - icon = "gears" - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - # Run parent create method - collection = super().create( - subset_name, instance_data, pre_create_data - ) - - if pre_create_data.get("use_selection"): - objects = lib.get_selection() - for obj in objects: - collection.objects.link(obj) - if obj.type == 'EMPTY': - objects.extend(obj.children) - - return collection diff --git a/openpype/hosts/blender/plugins/create/create_render.py b/openpype/hosts/blender/plugins/create/create_render.py deleted file mode 100644 index 7fb3e5eb00..0000000000 --- a/openpype/hosts/blender/plugins/create/create_render.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Create render.""" -import bpy - -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.render_lib import prepare_rendering - - -class CreateRenderlayer(plugin.BaseCreator): - """Single baked camera.""" - - identifier = "io.openpype.creators.blender.render" - label = "Render" - family = "render" - icon = "eye" - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - try: - # Run parent create method - collection = super().create( - subset_name, instance_data, pre_create_data - ) - - prepare_rendering(collection) - except Exception: - # Remove the instance if there was an error - bpy.data.collections.remove(collection) - raise - - # TODO: this is undesiderable, but it's the only way to be sure that - # the file is saved before the render starts. - # Blender, by design, doesn't set the file as dirty if modifications - # happen by script. 
So, when creating the instance and setting the - # render settings, the file is not marked as dirty. This means that - # there is the risk of sending to deadline a file without the right - # settings. Even the validator to check that the file is saved will - # detect the file as saved, even if it isn't. The only solution for - # now it is to force the file to be saved. - bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath) - - return collection diff --git a/openpype/hosts/blender/plugins/create/create_review.py b/openpype/hosts/blender/plugins/create/create_review.py deleted file mode 100644 index 940bcbea22..0000000000 --- a/openpype/hosts/blender/plugins/create/create_review.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Create review.""" - -from openpype.hosts.blender.api import plugin, lib - - -class CreateReview(plugin.BaseCreator): - """Single baked camera.""" - - identifier = "io.openpype.creators.blender.review" - label = "Review" - family = "review" - icon = "video-camera" - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - # Run parent create method - collection = super().create( - subset_name, instance_data, pre_create_data - ) - - if pre_create_data.get("use_selection"): - selected = lib.get_selection() - for obj in selected: - collection.objects.link(obj) - - return collection diff --git a/openpype/hosts/blender/plugins/create/create_rig.py b/openpype/hosts/blender/plugins/create/create_rig.py deleted file mode 100644 index d63b8d56ff..0000000000 --- a/openpype/hosts/blender/plugins/create/create_rig.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Create a rig asset.""" - -import bpy - -from openpype.hosts.blender.api import plugin, lib - - -class CreateRig(plugin.BaseCreator): - """Artist-friendly rig with controls to direct motion.""" - - identifier = "io.openpype.creators.blender.rig" - label = "Rig" - family = "rig" - icon = "wheelchair" - - create_as_asset_group = True - - def create( - self, subset_name: str, instance_data: dict, pre_create_data: dict - ): - asset_group = super().create(subset_name, - instance_data, - pre_create_data) - - # Add selected objects to instance - if pre_create_data.get("use_selection"): - bpy.context.view_layer.objects.active = asset_group - for obj in lib.get_selection(): - obj.parent = asset_group - - return asset_group diff --git a/openpype/hosts/blender/plugins/create/create_workfile.py b/openpype/hosts/blender/plugins/create/create_workfile.py deleted file mode 100644 index 6b168f4c84..0000000000 --- a/openpype/hosts/blender/plugins/create/create_workfile.py +++ /dev/null @@ -1,125 +0,0 @@ -import bpy - -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import CreatedInstance, AutoCreator -from openpype.client import get_asset_by_name -from openpype.hosts.blender.api.plugin import BaseCreator -from openpype.hosts.blender.api.pipeline import ( - AVALON_PROPERTY, - AVALON_CONTAINERS -) - - -class CreateWorkfile(BaseCreator, AutoCreator): - """Workfile auto-creator. - - The workfile instance stores its data on the `AVALON_CONTAINERS` collection - as custom attributes, because unlike other instances it doesn't have an - instance node of its own. 
- - """ - identifier = "io.openpype.creators.blender.workfile" - label = "Workfile" - family = "workfile" - icon = "fa5.file" - - def create(self): - """Create workfile instances.""" - workfile_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), - None, - ) - - project_name = self.project_name - asset_name = self.create_context.get_current_asset_name() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - existing_asset_name = None - if workfile_instance is not None: - if AYON_SERVER_ENABLED: - existing_asset_name = workfile_instance.get("folderPath") - - if existing_asset_name is None: - existing_asset_name = workfile_instance["asset"] - - if not workfile_instance: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - task_name, task_name, asset_doc, project_name, host_name - ) - data = { - "task": task_name, - "variant": task_name, - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - data.update( - self.get_dynamic_data( - task_name, - task_name, - asset_doc, - project_name, - host_name, - workfile_instance, - ) - ) - self.log.info("Auto-creating workfile instance...") - workfile_instance = CreatedInstance( - self.family, subset_name, data, self - ) - self._add_instance_to_context(workfile_instance) - - elif ( - existing_asset_name != asset_name - or workfile_instance["task"] != task_name - ): - # Update instance context if it's different - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - task_name, task_name, asset_doc, project_name, host_name - ) - if AYON_SERVER_ENABLED: - workfile_instance["folderPath"] = asset_name - else: - workfile_instance["asset"] = asset_name - - workfile_instance["task"] = task_name - workfile_instance["subset"] = subset_name - - instance_node = bpy.data.collections.get(AVALON_CONTAINERS) - if not instance_node: - instance_node = bpy.data.collections.new(name=AVALON_CONTAINERS) - workfile_instance.transient_data["instance_node"] = instance_node - - def collect_instances(self): - - instance_node = bpy.data.collections.get(AVALON_CONTAINERS) - if not instance_node: - return - - property = instance_node.get(AVALON_PROPERTY) - if not property: - return - - # Create instance object from existing data - instance = CreatedInstance.from_existing( - instance_data=property.to_dict(), - creator=self - ) - instance.transient_data["instance_node"] = instance_node - - # Add instance to create context - self._add_instance_to_context(instance) - - def remove_instances(self, instances): - for instance in instances: - node = instance.transient_data["instance_node"] - del node[AVALON_PROPERTY] - - self._remove_instance_from_context(instance) diff --git a/openpype/hosts/blender/plugins/load/load_animation.py b/openpype/hosts/blender/plugins/load/load_animation.py deleted file mode 100644 index fd087553f0..0000000000 --- a/openpype/hosts/blender/plugins/load/load_animation.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Load an animation in Blender.""" - -from typing import Dict, List, Optional - -import bpy - -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY - - -class BlendAnimationLoader(plugin.AssetLoader): - """Load animations from a .blend file. - - Warning: - Loading the same asset more then once is not properly supported at the - moment. 
- """ - - families = ["animation"] - representations = ["blend"] - - label = "Link Animation" - icon = "code-fork" - color = "orange" - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.filepath_from_context(context) - - with bpy.data.libraries.load( - libpath, link=True, relative=False - ) as (data_from, data_to): - data_to.objects = data_from.objects - data_to.actions = data_from.actions - - container = data_to.objects[0] - - assert container, "No asset group found" - - target_namespace = container.get(AVALON_PROPERTY).get('namespace') - - action = data_to.actions[0].make_local().copy() - - for obj in bpy.data.objects: - if obj.get(AVALON_PROPERTY) and obj.get(AVALON_PROPERTY).get( - 'namespace') == target_namespace: - if obj.children[0]: - if not obj.children[0].animation_data: - obj.children[0].animation_data_create() - obj.children[0].animation_data.action = action - break - - bpy.data.objects.remove(container) - - filename = bpy.path.basename(libpath) - # Blender has a limit of 63 characters for any data name. - # If the filename is longer, it will be truncated. - if len(filename) > 63: - filename = filename[:63] - library = bpy.data.libraries.get(filename) - bpy.data.libraries.remove(library) diff --git a/openpype/hosts/blender/plugins/load/load_audio.py b/openpype/hosts/blender/plugins/load/load_audio.py deleted file mode 100644 index 367fff03f0..0000000000 --- a/openpype/hosts/blender/plugins/load/load_audio.py +++ /dev/null @@ -1,224 +0,0 @@ -"""Load audio in Blender.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import bpy - -from openpype.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class AudioLoader(plugin.AssetLoader): - """Load audio in Blender.""" - - families = ["audio"] - representations = ["wav"] - - label = "Load Audio" - icon = "volume-up" - color = "orange" - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.filepath_from_context(context) - asset = context["asset"]["name"] - subset = context["subset"]["name"] - - asset_name = plugin.prepare_scene_name(asset, subset) - unique_number = plugin.get_unique_number(asset, subset) - group_name = plugin.prepare_scene_name(asset, subset, unique_number) - namespace = namespace or f"{asset}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) - - # Blender needs the Sequence Editor in the current window, to be able - # to load the audio. 
We take one of the areas in the window, save its - # type, and switch to the Sequence Editor. After loading the audio, - # we switch back to the previous area. - window_manager = bpy.context.window_manager - old_type = window_manager.windows[-1].screen.areas[0].type - window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR" - - # We override the context to load the audio in the sequence editor. - oc = bpy.context.copy() - oc["area"] = window_manager.windows[-1].screen.areas[0] - - with bpy.context.temp_override(**oc): - bpy.ops.sequencer.sound_strip_add(filepath=libpath, frame_start=1) - - window_manager.windows[-1].screen.areas[0].type = old_type - - p = Path(libpath) - audio = p.name - - asset_group[AVALON_PROPERTY] = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(self.__class__.__name__), - "representation": str(context["representation"]["_id"]), - "libpath": libpath, - "asset_name": asset_name, - "parent": str(context["representation"]["parent"]), - "family": context["representation"]["context"]["family"], - "objectName": group_name, - "audio": audio - } - - objects = [] - self[:] = objects - return [objects] - - def exec_update(self, container: Dict, representation: Dict): - """Update an audio strip in the sequence editor. - - Arguments: - container (openpype:container-1.0): Container to update, - from `host.ls()`. - representation (openpype:representation-1.0): Representation to - update, from `host.ls()`. - """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - libpath = Path(get_representation_path(representation)) - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(representation, indent=2), - ) - - assert asset_group, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert libpath, ( - "No existing library file found for {container['objectName']}" - ) - assert libpath.is_file(), ( - f"The file doesn't exist: {libpath}" - ) - - metadata = asset_group.get(AVALON_PROPERTY) - group_libpath = metadata["libpath"] - - normalized_group_libpath = ( - str(Path(bpy.path.abspath(group_libpath)).resolve()) - ) - normalized_libpath = ( - str(Path(bpy.path.abspath(str(libpath))).resolve()) - ) - self.log.debug( - "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_group_libpath, - normalized_libpath, - ) - if normalized_group_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - old_audio = container["audio"] - p = Path(libpath) - new_audio = p.name - - # Blender needs the Sequence Editor in the current window, to be able - # to update the audio. We take one of the areas in the window, save its - # type, and switch to the Sequence Editor. After updating the audio, - # we switch back to the previous area. - window_manager = bpy.context.window_manager - old_type = window_manager.windows[-1].screen.areas[0].type - window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR" - - # We override the context to load the audio in the sequence editor. - oc = bpy.context.copy() - oc["area"] = window_manager.windows[-1].screen.areas[0] - - with bpy.context.temp_override(**oc): - # We deselect all sequencer strips, and then select the one we - # need to remove. 
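-            # Note that the strip is looked up by the file name stored in
-            # the container metadata (e.g. a hypothetical "sh010_audio.wav"),
-            # so a strip that was renamed by hand in the sequencer would not
-            # be found here.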
-            bpy.ops.sequencer.select_all(action='DESELECT')
-            scene = bpy.context.scene
-            scene.sequence_editor.sequences_all[old_audio].select = True
-
-            bpy.ops.sequencer.delete()
-            bpy.data.sounds.remove(bpy.data.sounds[old_audio])
-
-            bpy.ops.sequencer.sound_strip_add(
-                filepath=str(libpath), frame_start=1)
-
-        window_manager.windows[-1].screen.areas[0].type = old_type
-
-        metadata["libpath"] = str(libpath)
-        metadata["representation"] = str(representation["_id"])
-        metadata["parent"] = str(representation["parent"])
-        metadata["audio"] = new_audio
-
-    def exec_remove(self, container: Dict) -> bool:
-        """Remove an audio strip from the sequence editor and the container.
-
-        Arguments:
-            container (openpype:container-1.0): Container to remove,
-                from `host.ls()`.
-
-        Returns:
-            bool: Whether the container was deleted.
-        """
-        object_name = container["objectName"]
-        asset_group = bpy.data.objects.get(object_name)
-
-        if not asset_group:
-            return False
-
-        audio = container["audio"]
-
-        # Blender needs the Sequence Editor in the current window to be able
-        # to remove the audio. We take one of the areas in the window, save
-        # its type, and switch to the Sequence Editor. After removing the
-        # audio, we switch back to the previous area.
-        window_manager = bpy.context.window_manager
-        old_type = window_manager.windows[-1].screen.areas[0].type
-        window_manager.windows[-1].screen.areas[0].type = "SEQUENCE_EDITOR"
-
-        # We override the context to load the audio in the sequence editor.
-        oc = bpy.context.copy()
-        oc["area"] = window_manager.windows[-1].screen.areas[0]
-
-        with bpy.context.temp_override(**oc):
-            # We deselect all sequencer strips, and then select the one we
-            # need to remove.
-            bpy.ops.sequencer.select_all(action='DESELECT')
-            scene = bpy.context.scene
-            scene.sequence_editor.sequences_all[audio].select = True
-            bpy.ops.sequencer.delete()
-
-        window_manager.windows[-1].screen.areas[0].type = old_type
-
-        bpy.data.sounds.remove(bpy.data.sounds[audio])
-
-        bpy.data.objects.remove(asset_group)
-
-        return True
diff --git a/openpype/hosts/blender/plugins/load/load_camera_abc.py b/openpype/hosts/blender/plugins/load/load_camera_abc.py
deleted file mode 100644
index ecd6bb98f1..0000000000
--- a/openpype/hosts/blender/plugins/load/load_camera_abc.py
+++ /dev/null
@@ -1,211 +0,0 @@
-"""Load an asset in Blender from an Alembic file."""
-
-from pathlib import Path
-from pprint import pformat
-from typing import Dict, List, Optional
-
-import bpy
-
-from openpype.pipeline import (
-    get_representation_path,
-    AVALON_CONTAINER_ID,
-)
-from openpype.hosts.blender.api import plugin, lib
-from openpype.hosts.blender.api.pipeline import (
-    AVALON_CONTAINERS,
-    AVALON_PROPERTY,
-)
-
-
-class AbcCameraLoader(plugin.AssetLoader):
-    """Load a camera from an Alembic file.
-
-    Stores the imported asset in an empty named after the asset.
- """ - - families = ["camera"] - representations = ["abc"] - - label = "Load Camera (ABC)" - icon = "code-fork" - color = "orange" - - def _remove(self, asset_group): - objects = list(asset_group.children) - - for obj in objects: - if obj.type == "CAMERA": - bpy.data.cameras.remove(obj.data) - elif obj.type == "EMPTY": - objects.extend(obj.children) - bpy.data.objects.remove(obj) - - def _process(self, libpath, asset_group, group_name): - plugin.deselect_all() - - bpy.ops.wm.alembic_import(filepath=libpath) - - objects = lib.get_selection() - - for obj in objects: - obj.parent = asset_group - - for obj in objects: - name = obj.name - obj.name = f"{group_name}:{name}" - if obj.type != "EMPTY": - name_data = obj.data.name - obj.data.name = f"{group_name}:{name_data}" - - if not obj.get(AVALON_PROPERTY): - obj[AVALON_PROPERTY] = dict() - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": group_name}) - - plugin.deselect_all() - - return objects - - def process_asset( - self, - context: dict, - name: str, - namespace: Optional[str] = None, - options: Optional[Dict] = None, - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - - libpath = self.filepath_from_context(context) - - asset = context["asset"]["name"] - subset = context["subset"]["name"] - - asset_name = plugin.prepare_scene_name(asset, subset) - unique_number = plugin.get_unique_number(asset, subset) - group_name = plugin.prepare_scene_name(asset, subset, unique_number) - namespace = namespace or f"{asset}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) - - self._process(libpath, asset_group, group_name) - - objects = [] - nodes = list(asset_group.children) - - for obj in nodes: - objects.append(obj) - nodes.extend(list(obj.children)) - - bpy.context.scene.collection.objects.link(asset_group) - - asset_group[AVALON_PROPERTY] = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or "", - "loader": str(self.__class__.__name__), - "representation": str(context["representation"]["_id"]), - "libpath": libpath, - "asset_name": asset_name, - "parent": str(context["representation"]["parent"]), - "family": context["representation"]["context"]["family"], - "objectName": group_name, - } - - self[:] = objects - return objects - - def exec_update(self, container: Dict, representation: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! 
- """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - libpath = Path(get_representation_path(representation)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(representation, indent=2), - ) - - assert asset_group, ( - f"The asset is not loaded: {container['objectName']}") - assert libpath, ( - f"No existing library file found for {container['objectName']}") - assert libpath.is_file(), f"The file doesn't exist: {libpath}" - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}") - - metadata = asset_group.get(AVALON_PROPERTY) - group_libpath = metadata["libpath"] - - normalized_group_libpath = str( - Path(bpy.path.abspath(group_libpath)).resolve()) - normalized_libpath = str( - Path(bpy.path.abspath(str(libpath))).resolve()) - self.log.debug( - "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_group_libpath, - normalized_libpath, - ) - if normalized_group_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - mat = asset_group.matrix_basis.copy() - - self._remove(asset_group) - self._process(str(libpath), asset_group, object_name) - - asset_group.matrix_basis = mat - - metadata["libpath"] = str(libpath) - metadata["representation"] = str(representation["_id"]) - - def exec_remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. - - Warning: - No nested collections are supported at the moment! - """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/openpype/hosts/blender/plugins/load/load_camera_fbx.py b/openpype/hosts/blender/plugins/load/load_camera_fbx.py deleted file mode 100644 index 2d53d3e573..0000000000 --- a/openpype/hosts/blender/plugins/load/load_camera_fbx.py +++ /dev/null @@ -1,221 +0,0 @@ -"""Load an asset in Blender from an Alembic file.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import bpy - -from openpype.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from openpype.hosts.blender.api import plugin, lib -from openpype.hosts.blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class FbxCameraLoader(plugin.AssetLoader): - """Load a camera from FBX. - - Stores the imported asset in an empty named after the asset. 
- """ - - families = ["camera"] - representations = ["fbx"] - - label = "Load Camera (FBX)" - icon = "code-fork" - color = "orange" - - def _remove(self, asset_group): - objects = list(asset_group.children) - - for obj in objects: - if obj.type == 'CAMERA': - bpy.data.cameras.remove(obj.data) - elif obj.type == 'EMPTY': - objects.extend(obj.children) - bpy.data.objects.remove(obj) - - def _process(self, libpath, asset_group, group_name): - plugin.deselect_all() - - collection = bpy.context.view_layer.active_layer_collection.collection - - bpy.ops.import_scene.fbx(filepath=libpath) - - parent = bpy.context.scene.collection - - objects = lib.get_selection() - - for obj in objects: - obj.parent = asset_group - - for obj in objects: - parent.objects.link(obj) - collection.objects.unlink(obj) - - for obj in objects: - name = obj.name - obj.name = f"{group_name}:{name}" - if obj.type != 'EMPTY': - name_data = obj.data.name - obj.data.name = f"{group_name}:{name_data}" - - if not obj.get(AVALON_PROPERTY): - obj[AVALON_PROPERTY] = dict() - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": group_name}) - - plugin.deselect_all() - - return objects - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.filepath_from_context(context) - asset = context["asset"]["name"] - subset = context["subset"]["name"] - - asset_name = plugin.prepare_scene_name(asset, subset) - unique_number = plugin.get_unique_number(asset, subset) - group_name = plugin.prepare_scene_name(asset, subset, unique_number) - namespace = namespace or f"{asset}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) - - self._process(libpath, asset_group, group_name) - - objects = [] - nodes = list(asset_group.children) - - for obj in nodes: - objects.append(obj) - nodes.extend(list(obj.children)) - - bpy.context.scene.collection.objects.link(asset_group) - - asset_group[AVALON_PROPERTY] = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(self.__class__.__name__), - "representation": str(context["representation"]["_id"]), - "libpath": libpath, - "asset_name": asset_name, - "parent": str(context["representation"]["parent"]), - "family": context["representation"]["context"]["family"], - "objectName": group_name - } - - self[:] = objects - return objects - - def exec_update(self, container: Dict, representation: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! 
- """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - libpath = Path(get_representation_path(representation)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(representation, indent=2), - ) - - assert asset_group, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert libpath, ( - "No existing library file found for {container['objectName']}" - ) - assert libpath.is_file(), ( - f"The file doesn't exist: {libpath}" - ) - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}" - ) - - metadata = asset_group.get(AVALON_PROPERTY) - group_libpath = metadata["libpath"] - - normalized_group_libpath = ( - str(Path(bpy.path.abspath(group_libpath)).resolve()) - ) - normalized_libpath = ( - str(Path(bpy.path.abspath(str(libpath))).resolve()) - ) - self.log.debug( - "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_group_libpath, - normalized_libpath, - ) - if normalized_group_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - mat = asset_group.matrix_basis.copy() - - self._remove(asset_group) - self._process(str(libpath), asset_group, object_name) - - asset_group.matrix_basis = mat - - metadata["libpath"] = str(libpath) - metadata["representation"] = str(representation["_id"]) - - def exec_remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. - - Warning: - No nested collections are supported at the moment! - """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py deleted file mode 100644 index 8fce53a5d5..0000000000 --- a/openpype/hosts/blender/plugins/load/load_fbx.py +++ /dev/null @@ -1,276 +0,0 @@ -"""Load an asset in Blender from an Alembic file.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import bpy - -from openpype.pipeline import ( - get_representation_path, - AVALON_CONTAINER_ID, -) -from openpype.hosts.blender.api import plugin, lib -from openpype.hosts.blender.api.pipeline import ( - AVALON_CONTAINERS, - AVALON_PROPERTY, -) - - -class FbxModelLoader(plugin.AssetLoader): - """Load FBX models. - - Stores the imported asset in an empty named after the asset. 
- """ - - families = ["model", "rig"] - representations = ["fbx"] - - label = "Load FBX" - icon = "code-fork" - color = "orange" - - def _remove(self, asset_group): - objects = list(asset_group.children) - - for obj in objects: - if obj.type == 'MESH': - for material_slot in list(obj.material_slots): - if material_slot.material: - bpy.data.materials.remove(material_slot.material) - bpy.data.meshes.remove(obj.data) - elif obj.type == 'ARMATURE': - objects.extend(obj.children) - bpy.data.armatures.remove(obj.data) - elif obj.type == 'CURVE': - bpy.data.curves.remove(obj.data) - elif obj.type == 'EMPTY': - objects.extend(obj.children) - bpy.data.objects.remove(obj) - - def _process(self, libpath, asset_group, group_name, action): - plugin.deselect_all() - - collection = bpy.context.view_layer.active_layer_collection.collection - - bpy.ops.import_scene.fbx(filepath=libpath) - - parent = bpy.context.scene.collection - - imported = lib.get_selection() - - empties = [obj for obj in imported if obj.type == 'EMPTY'] - - container = None - - for empty in empties: - if not empty.parent: - container = empty - break - - assert container, "No asset group found" - - # Children must be linked before parents, - # otherwise the hierarchy will break - objects = [] - nodes = list(container.children) - - for obj in nodes: - obj.parent = asset_group - - bpy.data.objects.remove(container) - - for obj in nodes: - objects.append(obj) - nodes.extend(list(obj.children)) - - objects.reverse() - - for obj in objects: - parent.objects.link(obj) - collection.objects.unlink(obj) - - for obj in objects: - name = obj.name - obj.name = f"{group_name}:{name}" - if obj.type != 'EMPTY': - name_data = obj.data.name - obj.data.name = f"{group_name}:{name_data}" - - if obj.type == 'MESH': - for material_slot in obj.material_slots: - name_mat = material_slot.material.name - material_slot.material.name = f"{group_name}:{name_mat}" - elif obj.type == 'ARMATURE': - anim_data = obj.animation_data - if action is not None: - anim_data.action = action - elif anim_data.action is not None: - name_action = anim_data.action.name - anim_data.action.name = f"{group_name}:{name_action}" - - if not obj.get(AVALON_PROPERTY): - obj[AVALON_PROPERTY] = dict() - - avalon_info = obj[AVALON_PROPERTY] - avalon_info.update({"container_name": group_name}) - - plugin.deselect_all() - - return objects - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - libpath = self.filepath_from_context(context) - asset = context["asset"]["name"] - subset = context["subset"]["name"] - - asset_name = plugin.prepare_scene_name(asset, subset) - unique_number = plugin.get_unique_number(asset, subset) - group_name = plugin.prepare_scene_name(asset, subset, unique_number) - namespace = namespace or f"{asset}_{unique_number}" - - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) - - asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) - - objects = self._process(libpath, asset_group, group_name, None) - - objects = [] - nodes = list(asset_group.children) - - for obj in 
nodes: - objects.append(obj) - nodes.extend(list(obj.children)) - - bpy.context.scene.collection.objects.link(asset_group) - - asset_group[AVALON_PROPERTY] = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or '', - "loader": str(self.__class__.__name__), - "representation": str(context["representation"]["_id"]), - "libpath": libpath, - "asset_name": asset_name, - "parent": str(context["representation"]["parent"]), - "family": context["representation"]["context"]["family"], - "objectName": group_name - } - - self[:] = objects - return objects - - def exec_update(self, container: Dict, representation: Dict): - """Update the loaded asset. - - This will remove all objects of the current collection, load the new - ones and add them to the collection. - If the objects of the collection are used in another collection they - will not be removed, only unlinked. Normally this should not be the - case though. - - Warning: - No nested collections are supported at the moment! - """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - libpath = Path(get_representation_path(representation)) - extension = libpath.suffix.lower() - - self.log.info( - "Container: %s\nRepresentation: %s", - pformat(container, indent=2), - pformat(representation, indent=2), - ) - - assert asset_group, ( - f"The asset is not loaded: {container['objectName']}" - ) - assert libpath, ( - "No existing library file found for {container['objectName']}" - ) - assert libpath.is_file(), ( - f"The file doesn't exist: {libpath}" - ) - assert extension in plugin.VALID_EXTENSIONS, ( - f"Unsupported file: {libpath}" - ) - - metadata = asset_group.get(AVALON_PROPERTY) - group_libpath = metadata["libpath"] - - normalized_group_libpath = ( - str(Path(bpy.path.abspath(group_libpath)).resolve()) - ) - normalized_libpath = ( - str(Path(bpy.path.abspath(str(libpath))).resolve()) - ) - self.log.debug( - "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", - normalized_group_libpath, - normalized_libpath, - ) - if normalized_group_libpath == normalized_libpath: - self.log.info("Library already loaded, not updating...") - return - - # Get the armature of the rig - objects = asset_group.children - armatures = [obj for obj in objects if obj.type == 'ARMATURE'] - action = None - - if armatures: - armature = armatures[0] - - if armature.animation_data and armature.animation_data.action: - action = armature.animation_data.action - - mat = asset_group.matrix_basis.copy() - self._remove(asset_group) - - self._process(str(libpath), asset_group, object_name, action) - - asset_group.matrix_basis = mat - - metadata["libpath"] = str(libpath) - metadata["representation"] = str(representation["_id"]) - - def exec_remove(self, container: Dict) -> bool: - """Remove an existing container from a Blender scene. - - Arguments: - container (openpype:container-1.0): Container to remove, - from `host.ls()`. - - Returns: - bool: Whether the container was deleted. - - Warning: - No nested collections are supported at the moment! 
- """ - object_name = container["objectName"] - asset_group = bpy.data.objects.get(object_name) - - if not asset_group: - return False - - self._remove(asset_group) - - bpy.data.objects.remove(asset_group) - - return True diff --git a/openpype/hosts/blender/plugins/load/load_look.py b/openpype/hosts/blender/plugins/load/load_look.py deleted file mode 100644 index 8d3118d83b..0000000000 --- a/openpype/hosts/blender/plugins/load/load_look.py +++ /dev/null @@ -1,222 +0,0 @@ -"""Load a model asset in Blender.""" - -from pathlib import Path -from pprint import pformat -from typing import Dict, List, Optional - -import os -import json -import bpy - -from openpype.pipeline import get_representation_path -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.pipeline import ( - containerise_existing, - AVALON_PROPERTY -) - - -class BlendLookLoader(plugin.AssetLoader): - """Load models from a .blend file. - - Because they come from a .blend file we can simply link the collection that - contains the model. There is no further need to 'containerise' it. - """ - - families = ["look"] - representations = ["json"] - - label = "Load Look" - icon = "code-fork" - color = "orange" - - def get_all_children(self, obj): - children = list(obj.children) - - for child in children: - children.extend(child.children) - - return children - - def _process(self, libpath, container_name, objects): - with open(libpath, "r") as fp: - data = json.load(fp) - - path = os.path.dirname(libpath) - materials_path = f"{path}/resources" - - materials = [] - - for entry in data: - file = entry.get('fbx_filename') - if file is None: - continue - - bpy.ops.import_scene.fbx(filepath=f"{materials_path}/{file}") - - mesh = [o for o in bpy.context.scene.objects if o.select_get()][0] - material = mesh.data.materials[0] - material.name = f"{material.name}:{container_name}" - - texture_file = entry.get('tga_filename') - if texture_file: - node_tree = material.node_tree - pbsdf = node_tree.nodes['Principled BSDF'] - base_color = pbsdf.inputs[0] - tex_node = base_color.links[0].from_node - tex_node.image.filepath = f"{materials_path}/{texture_file}" - - materials.append(material) - - for obj in objects: - for child in self.get_all_children(obj): - mesh_name = child.name.split(':')[0] - if mesh_name == material.name.split(':')[0]: - child.data.materials.clear() - child.data.materials.append(material) - break - - bpy.data.objects.remove(mesh) - - return materials, objects - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - - libpath = self.filepath_from_context(context) - asset = context["asset"]["name"] - subset = context["subset"]["name"] - - lib_container = plugin.prepare_scene_name( - asset, subset - ) - unique_number = plugin.get_unique_number( - asset, subset - ) - namespace = namespace or f"{asset}_{unique_number}" - container_name = plugin.prepare_scene_name( - asset, subset, unique_number - ) - - container = bpy.data.collections.new(lib_container) - container.name = container_name - containerise_existing( - container, - name, - namespace, - context, - self.__class__.__name__, - ) - - metadata = container.get(AVALON_PROPERTY) - - metadata["libpath"] = libpath - metadata["lib_container"] = lib_container - - selected = [o for o in 
bpy.context.scene.objects if o.select_get()]
-
-        materials, objects = self._process(libpath, container_name, selected)
-
-        # Save the list of imported materials in the metadata container
-        metadata["objects"] = objects
-        metadata["materials"] = materials
-
-        metadata["parent"] = str(context["representation"]["parent"])
-        metadata["family"] = context["representation"]["context"]["family"]
-
-        nodes = list(container.objects)
-        nodes.append(container)
-        self[:] = nodes
-        return nodes
-
-    def update(self, container: Dict, representation: Dict):
-        collection = bpy.data.collections.get(container["objectName"])
-        libpath = Path(get_representation_path(representation))
-        extension = libpath.suffix.lower()
-
-        self.log.info(
-            "Container: %s\nRepresentation: %s",
-            pformat(container, indent=2),
-            pformat(representation, indent=2),
-        )
-
-        assert collection, (
-            f"The asset is not loaded: {container['objectName']}"
-        )
-        assert not (collection.children), (
-            "Nested collections are not supported."
-        )
-        assert libpath, (
-            f"No existing library file found for {container['objectName']}"
-        )
-        assert libpath.is_file(), (
-            f"The file doesn't exist: {libpath}"
-        )
-        assert extension in plugin.VALID_EXTENSIONS, (
-            f"Unsupported file: {libpath}"
-        )
-
-        collection_metadata = collection.get(AVALON_PROPERTY)
-        collection_libpath = collection_metadata["libpath"]
-
-        normalized_collection_libpath = (
-            str(Path(bpy.path.abspath(collection_libpath)).resolve())
-        )
-        normalized_libpath = (
-            str(Path(bpy.path.abspath(str(libpath))).resolve())
-        )
-        self.log.debug(
-            "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
-            normalized_collection_libpath,
-            normalized_libpath,
-        )
-        if normalized_collection_libpath == normalized_libpath:
-            self.log.info("Library already loaded, not updating...")
-            return
-
-        for obj in collection_metadata['objects']:
-            for child in self.get_all_children(obj):
-                child.data.materials.clear()
-
-        for material in collection_metadata['materials']:
-            bpy.data.materials.remove(material)
-
-        namespace = collection_metadata['namespace']
-        name = collection_metadata['name']
-
-        container_name = f"{namespace}_{name}"
-
-        materials, objects = self._process(
-            libpath, container_name, collection_metadata['objects'])
-
-        collection_metadata["objects"] = objects
-        collection_metadata["materials"] = materials
-        collection_metadata["libpath"] = str(libpath)
-        collection_metadata["representation"] = str(representation["_id"])
-
-    def remove(self, container: Dict) -> bool:
-        collection = bpy.data.collections.get(container["objectName"])
-        if not collection:
-            return False
-
-        collection_metadata = collection.get(AVALON_PROPERTY)
-
-        for obj in collection_metadata['objects']:
-            for child in self.get_all_children(obj):
-                child.data.materials.clear()
-
-        for material in collection_metadata['materials']:
-            bpy.data.materials.remove(material)
-
-        bpy.data.collections.remove(collection)
-
-        return True
diff --git a/openpype/hosts/blender/plugins/publish/collect_current_file.py b/openpype/hosts/blender/plugins/publish/collect_current_file.py
deleted file mode 100644
index 91c88f2e28..0000000000
--- a/openpype/hosts/blender/plugins/publish/collect_current_file.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import pyblish.api
-from openpype.hosts.blender.api import workio
-
-
-class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""
-
-    order = pyblish.api.CollectorOrder - 0.5
-    label = "Blender Current File"
-    hosts = ["blender"]
-
-    def process(self, context):
-        """Inject the current working file"""
-        current_file = workio.current_file()
-        context.data["currentFile"] = current_file
diff --git a/openpype/hosts/blender/plugins/publish/collect_render.py b/openpype/hosts/blender/plugins/publish/collect_render.py
deleted file mode 100644
index da02f99052..0000000000
--- a/openpype/hosts/blender/plugins/publish/collect_render.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Collect render data."""
-
-import os
-import re
-
-import bpy
-
-from openpype.hosts.blender.api import colorspace
-import pyblish.api
-
-
-class CollectBlenderRender(pyblish.api.InstancePlugin):
-    """Gather all publishable render instances."""
-
-    order = pyblish.api.CollectorOrder + 0.01
-    hosts = ["blender"]
-    families = ["render"]
-    label = "Collect Render"
-    sync_workfile_version = False
-
-    @staticmethod
-    def generate_expected_beauty(
-        render_product, frame_start, frame_end, frame_step, ext
-    ):
-        """
-        Generate the expected files for the render product for the beauty
-        render. This returns a list of files that should be rendered. It
-        replaces the sequence of `#` with the frame number.
-        """
-        path = os.path.dirname(render_product)
-        file = os.path.basename(render_product)
-
-        expected_files = []
-
-        for frame in range(frame_start, frame_end + 1, frame_step):
-            frame_str = str(frame).rjust(4, "0")
-            filename = re.sub("#+", frame_str, file)
-            expected_file = f"{os.path.join(path, filename)}.{ext}"
-            expected_files.append(expected_file.replace("\\", "/"))
-
-        return {
-            "beauty": expected_files
-        }
-
-    @staticmethod
-    def generate_expected_aovs(
-        aov_file_product, frame_start, frame_end, frame_step, ext
-    ):
-        """
-        Generate the expected files for each AOV render product. This
-        returns a dictionary mapping each AOV name to the list of files
-        that should be rendered for it. It replaces the sequence of `#`
-        with the frame number.
-        """
-        expected_files = {}
-
-        for aov_name, aov_file in aov_file_product:
-            path = os.path.dirname(aov_file)
-            file = os.path.basename(aov_file)
-
-            aov_files = []
-
-            for frame in range(frame_start, frame_end + 1, frame_step):
-                frame_str = str(frame).rjust(4, "0")
-                filename = re.sub("#+", frame_str, file)
-                expected_file = f"{os.path.join(path, filename)}.{ext}"
-                aov_files.append(expected_file.replace("\\", "/"))
-
-            expected_files[aov_name] = aov_files
-
-        return expected_files
-
-    def process(self, instance):
-        context = instance.context
-
-        instance_node = instance.data["transientData"]["instance_node"]
-        render_data = instance_node.get("render_data")
-
-        assert render_data, "No render data found."
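For context on the collector above: the expected-file expansion is nothing more than substituting the run of `#` padding in the render product's basename for each frame in the range. A minimal standalone sketch of that logic follows, using made-up product path and frame values (none of these names come from the plugin itself):

import os
import re

# Hypothetical render product using '#' padding, plus example range values.
render_product = "/renders/sh010/beauty.####"
frame_start, frame_end, frame_step, ext = 1001, 1005, 2, "exr"

path = os.path.dirname(render_product)
file = os.path.basename(render_product)

expected_files = []
for frame in range(frame_start, frame_end + 1, frame_step):
    # Replace the run of '#' with the zero-padded frame number.
    frame_str = str(frame).rjust(4, "0")
    filename = re.sub("#+", frame_str, file)
    expected_files.append(
        f"{os.path.join(path, filename)}.{ext}".replace("\\", "/"))

print(expected_files)
# ['/renders/sh010/beauty.1001.exr',
#  '/renders/sh010/beauty.1003.exr',
#  '/renders/sh010/beauty.1005.exr']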
- - render_product = render_data.get("render_product") - aov_file_product = render_data.get("aov_file_product") - ext = render_data.get("image_format") - multilayer = render_data.get("multilayer_exr") - - frame_start = context.data["frameStart"] - frame_end = context.data["frameEnd"] - frame_handle_start = context.data["frameStartHandle"] - frame_handle_end = context.data["frameEndHandle"] - - expected_beauty = self.generate_expected_beauty( - render_product, int(frame_start), int(frame_end), - int(bpy.context.scene.frame_step), ext) - - expected_aovs = self.generate_expected_aovs( - aov_file_product, int(frame_start), int(frame_end), - int(bpy.context.scene.frame_step), ext) - - expected_files = expected_beauty | expected_aovs - - instance.data.update({ - "families": ["render", "render.farm"], - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartHandle": frame_handle_start, - "frameEndHandle": frame_handle_end, - "fps": context.data["fps"], - "byFrameStep": bpy.context.scene.frame_step, - "review": render_data.get("review", False), - "multipartExr": ext == "exr" and multilayer, - "farm": True, - "expectedFiles": [expected_files], - # OCIO not currently implemented in Blender, but the following - # settings are required by the schema, so it is hardcoded. - # TODO: Implement OCIO in Blender - "colorspaceConfig": "", - "colorspaceDisplay": "sRGB", - "colorspaceView": "ACES 1.0 SDR-video", - "renderProducts": colorspace.ARenderProduct(), - }) diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx.py b/openpype/hosts/blender/plugins/publish/extract_fbx.py deleted file mode 100644 index aed6df1d3d..0000000000 --- a/openpype/hosts/blender/plugins/publish/extract_fbx.py +++ /dev/null @@ -1,92 +0,0 @@ -import os - -import bpy - -from openpype.pipeline import publish -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY - - -class ExtractFBX(publish.Extractor, publish.OptionalPyblishPluginMixin): - """Extract as FBX.""" - - label = "Extract FBX" - hosts = ["blender"] - families = ["model", "rig"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - asset_name = instance.data["assetEntity"]["name"] - subset = instance.data["subset"] - instance_name = f"{asset_name}_{subset}" - filename = f"{instance_name}.fbx" - filepath = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction..") - - plugin.deselect_all() - - asset_group = instance.data["transientData"]["instance_node"] - - selected = [] - for obj in instance: - obj.select_set(True) - selected.append(obj) - - context = plugin.create_blender_context( - active=asset_group, selected=selected) - - new_materials = [] - new_materials_objs = [] - objects = list(asset_group.children) - - for obj in objects: - objects.extend(obj.children) - if obj.type == 'MESH' and len(obj.data.materials) == 0: - mat = bpy.data.materials.new(obj.name) - obj.data.materials.append(mat) - new_materials.append(mat) - new_materials_objs.append(obj) - - scale_length = bpy.context.scene.unit_settings.scale_length - bpy.context.scene.unit_settings.scale_length = 0.01 - - with bpy.context.temp_override(**context): - # We export the fbx - bpy.ops.export_scene.fbx( - filepath=filepath, - use_active_collection=False, - use_selection=True, - mesh_smooth_type='FACE', - add_leaf_bones=False - ) - - 
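The FBX extractor above runs the exporter inside `bpy.context.temp_override()` so the operator only sees the asset group's selection. Below is a schematic sketch of that pattern, assuming Blender 3.2+; OpenPype's `plugin.create_blender_context()` builds a richer context dict, so the plain `bpy.context.copy()` here is a simplified stand-in, and the object name and output path are made-up examples:

import bpy

# Hypothetical asset group; in the plugin this comes from instance data.
asset_group = bpy.data.objects["asset_GRP"]
selected = list(asset_group.children)

# Simplified stand-in for plugin.create_blender_context().
override = bpy.context.copy()
override["active_object"] = asset_group
override["selected_objects"] = selected

with bpy.context.temp_override(**override):
    # The operator now only "sees" the overridden selection.
    bpy.ops.export_scene.fbx(
        filepath="/tmp/asset.fbx",
        use_selection=True,
        mesh_smooth_type='FACE',
        add_leaf_bones=False,
    )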
bpy.context.scene.unit_settings.scale_length = scale_length - - plugin.deselect_all() - - for mat in new_materials: - bpy.data.materials.remove(mat) - - for obj in new_materials_objs: - obj.data.materials.pop() - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, representation) diff --git a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py b/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py deleted file mode 100644 index 1cb8dac0cf..0000000000 --- a/openpype/hosts/blender/plugins/publish/extract_fbx_animation.py +++ /dev/null @@ -1,227 +0,0 @@ -import os -import json - -import bpy -import bpy_extras -import bpy_extras.anim_utils - -from openpype.pipeline import publish -from openpype.hosts.blender.api import plugin -from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY - - -def get_all_parents(obj): - """Get all recursive parents of object""" - result = [] - while True: - obj = obj.parent - if not obj: - break - result.append(obj) - return result - - -def get_highest_root(objects): - # Get the highest object that is also in the collection - included_objects = {obj.name_full for obj in objects} - num_parents_to_obj = {} - for obj in objects: - if isinstance(obj, bpy.types.Object): - parents = get_all_parents(obj) - # included parents - parents = [parent for parent in parents if - parent.name_full in included_objects] - if not parents: - # A node without parents must be a highest root - return obj - - num_parents_to_obj.setdefault(len(parents), obj) - - minimum_parent = min(num_parents_to_obj) - return num_parents_to_obj[minimum_parent] - - -class ExtractAnimationFBX( - publish.Extractor, - publish.OptionalPyblishPluginMixin, -): - """Extract as animation.""" - - label = "Extract FBX" - hosts = ["blender"] - families = ["animation"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - - # Perform extraction - self.log.debug("Performing extraction..") - - asset_group = instance.data["transientData"]["instance_node"] - - # Get objects in this collection (but not in children collections) - # and for those objects include the children hierarchy - # TODO: Would it make more sense for the Collect Instance collector - # to also always retrieve all the children? 
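The `get_highest_root` helper above can be exercised without Blender: it counts, for each object, how many of its ancestors are also inside the collection, and picks the object with the fewest such ancestors. A small self-contained sketch of the same idea, with a stand-in Node class instead of bpy.types.Object (the isinstance check from the original is omitted here):

class Node:
    """Stand-in for bpy.types.Object (name_full + parent only)."""
    def __init__(self, name, parent=None):
        self.name_full = name
        self.parent = parent

def get_all_parents(obj):
    result = []
    while obj.parent:
        obj = obj.parent
        result.append(obj)
    return result

def get_highest_root(objects):
    included = {obj.name_full for obj in objects}
    num_parents_to_obj = {}
    for obj in objects:
        # Only count parents that are themselves part of the collection.
        parents = [p for p in get_all_parents(obj) if p.name_full in included]
        if not parents:
            return obj  # no included parents -> this is already a root
        num_parents_to_obj.setdefault(len(parents), obj)
    return num_parents_to_obj[min(num_parents_to_obj)]

root = Node("rig_GRP")
armature = Node("rig_GRP:armature", parent=root)
mesh = Node("rig_GRP:mesh", parent=armature)
assert get_highest_root([root, armature, mesh]) is root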
-        objects = set(asset_group.objects)
-
-        # From the direct children of the collection find the 'root' node
-        # that we want to export - it is the 'highest' node in a hierarchy
-        root = get_highest_root(objects)
-
-        for obj in list(objects):
-            objects.update(obj.children_recursive)
-
-        # Find all armatures among the objects, assume to find only one
-        armatures = [obj for obj in objects if obj.type == "ARMATURE"]
-        if not armatures:
-            raise RuntimeError(
-                f"Unable to find ARMATURE in collection: "
-                f"{asset_group.name}"
-            )
-        elif len(armatures) > 1:
-            self.log.warning(
-                "Found more than one ARMATURE, using "
-                f"only first of: {armatures}"
-            )
-        armature = armatures[0]
-
-        object_action_pairs = []
-        original_actions = []
-
-        starting_frames = []
-        ending_frames = []
-
-        # For each armature, we make a copy of the current action
-        if armature.animation_data and armature.animation_data.action:
-            curr_action = armature.animation_data.action
-            copy_action = curr_action.copy()
-
-            curr_frame_range = curr_action.frame_range
-
-            starting_frames.append(curr_frame_range[0])
-            ending_frames.append(curr_frame_range[1])
-        else:
-            self.log.info(
-                f"Armature '{armature.name}' has no animation, "
-                f"skipping FBX animation extraction for {instance}."
-            )
-            return
-
-        asset_group_name = asset_group.name
-        asset_name = asset_group.get(AVALON_PROPERTY).get("asset_name")
-        if asset_name:
-            # Rename for the export; this data is only present when loaded
-            # from a JSON Layout (layout family)
-            asset_group.name = asset_name
-
-        # Remove : from the armature name for the export
-        armature_name = armature.name
-        original_name = armature_name.split(':')[1]
-        armature.name = original_name
-
-        object_action_pairs.append((armature, copy_action))
-        original_actions.append(curr_action)
-
-        # We compute the overall frame range to bake
-        min_frame = min(starting_frames)
-        max_frame = max(ending_frames)
-
-        # We bake the copy of the current action for each object
-        bpy_extras.anim_utils.bake_action_objects(
-            object_action_pairs,
-            frames=range(int(min_frame), int(max_frame)),
-            do_object=False,
-            do_clean=False
-        )
-
-        for obj in bpy.data.objects:
-            obj.select_set(False)
-
-        root.select_set(True)
-        armature.select_set(True)
-        asset_name = instance.data["assetEntity"]["name"]
-        subset = instance.data["subset"]
-        instance_name = f"{asset_name}_{subset}"
-        fbx_filename = f"{instance_name}_{armature.name}.fbx"
-        filepath = os.path.join(stagingdir, fbx_filename)
-
-        override = plugin.create_blender_context(
-            active=root, selected=[root, armature])
-
-        with bpy.context.temp_override(**override):
-            # We export the fbx
-            bpy.ops.export_scene.fbx(
-                filepath=filepath,
-                use_active_collection=False,
-                use_selection=True,
-                bake_anim_use_nla_strips=False,
-                bake_anim_use_all_actions=False,
-                add_leaf_bones=False,
-                armature_nodetype='ROOT',
-                object_types={'EMPTY', 'ARMATURE'}
-            )
-
-        armature.name = armature_name
-        asset_group.name = asset_group_name
-        root.select_set(True)
-        armature.select_set(False)
-
-        # We delete the baked action and set the original one back
-        for i in range(0, len(object_action_pairs)):
-            pair = object_action_pairs[i]
-            action = original_actions[i]
-
-            if action:
-                pair[0].animation_data.action = action
-
-            if pair[1]:
-                pair[1].user_clear()
-                bpy.data.actions.remove(pair[1])
-
-        json_filename = f"{instance_name}.json"
-        json_path = os.path.join(stagingdir, json_filename)
-
-        json_dict = {
-            "instance_name": asset_group.get(AVALON_PROPERTY).get("objectName")
-        }
-
-        # collection = instance.data.get("name")
-        # container = None
-        # for obj in bpy.data.collections[collection].objects:
-        #     if obj.type == "ARMATURE":
-        #         container_name = obj.get("avalon").get("container_name")
-        #         container = bpy.data.collections[container_name]
-        # if container:
-        #     json_dict = {
-        #         "instance_name": container.get("avalon").get("instance_name")
-        #     }
-
-        with open(json_path, "w+") as file:
-            json.dump(json_dict, fp=file, indent=2)
-
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        fbx_representation = {
-            'name': 'fbx',
-            'ext': 'fbx',
-            'files': fbx_filename,
-            "stagingDir": stagingdir,
-        }
-        json_representation = {
-            'name': 'json',
-            'ext': 'json',
-            'files': json_filename,
-            "stagingDir": stagingdir,
-        }
-        instance.data["representations"].append(fbx_representation)
-        instance.data["representations"].append(json_representation)
-
-        self.log.debug("Extracted instance '{}' to: {}".format(
-            instance.name, fbx_representation))
diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py
deleted file mode 100644
index 383c3bdcc5..0000000000
--- a/openpype/hosts/blender/plugins/publish/extract_layout.py
+++ /dev/null
@@ -1,266 +0,0 @@
-import os
-import json
-
-import bpy
-import bpy_extras
-import bpy_extras.anim_utils
-
-from openpype.client import get_representation_by_name
-from openpype.pipeline import publish
-from openpype.hosts.blender.api import plugin
-from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
-
-
-class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
-    """Extract a layout."""
-
-    label = "Extract Layout (JSON)"
-    hosts = ["blender"]
-    families = ["layout"]
-    optional = True
-
-    def _export_animation(self, asset, instance, stagingdir, fbx_count):
-        n = fbx_count
-
-        for obj in asset.children:
-            if obj.type != "ARMATURE":
-                continue
-
-            object_action_pairs = []
-            original_actions = []
-
-            starting_frames = []
-            ending_frames = []
-
-            # For each armature, we make a copy of the current action
-            curr_action = None
-            copy_action = None
-
-            if obj.animation_data and obj.animation_data.action:
-                curr_action = obj.animation_data.action
-                copy_action = curr_action.copy()
-
-                curr_frame_range = curr_action.frame_range
-
-                starting_frames.append(curr_frame_range[0])
-                ending_frames.append(curr_frame_range[1])
-            else:
-                self.log.info("Object has no animation.")
-                continue
-
-            asset_group_name = asset.name
-            asset.name = asset.get(AVALON_PROPERTY).get("asset_name")
-
-            armature_name = obj.name
-            original_name = armature_name.split(':')[1]
-            obj.name = original_name
-
-            object_action_pairs.append((obj, copy_action))
-            original_actions.append(curr_action)
-
-            # We compute the overall frame range to bake
-            min_frame = min(starting_frames)
-            max_frame = max(ending_frames)
-
-            # We bake the copy of the current action for each object
-            bpy_extras.anim_utils.bake_action_objects(
-                object_action_pairs,
-                frames=range(int(min_frame), int(max_frame)),
-                do_object=False,
-                do_clean=False
-            )
-
-            for o in bpy.data.objects:
-                o.select_set(False)
-
-            asset.select_set(True)
-            obj.select_set(True)
-            fbx_filename = f"{n:03d}.fbx"
-            filepath = os.path.join(stagingdir, fbx_filename)
-
-            override = plugin.create_blender_context(
-                active=asset, selected=[asset, obj])
-            with bpy.context.temp_override(**override):
-                # We export the fbx
-                bpy.ops.export_scene.fbx(
-                    filepath=filepath,
-                    use_active_collection=False,
-                    use_selection=True,
-                    bake_anim_use_nla_strips=False,
-
bake_anim_use_all_actions=False, - add_leaf_bones=False, - armature_nodetype='ROOT', - object_types={'EMPTY', 'ARMATURE'} - ) - obj.name = armature_name - asset.name = asset_group_name - asset.select_set(False) - obj.select_set(False) - - # We delete the baked action and set the original one back - for i in range(0, len(object_action_pairs)): - pair = object_action_pairs[i] - action = original_actions[i] - - if action: - pair[0].animation_data.action = action - - if pair[1]: - pair[1].user_clear() - bpy.data.actions.remove(pair[1]) - - return fbx_filename, n + 1 - - return None, n - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Define extract output file path - stagingdir = self.staging_dir(instance) - - # Perform extraction - self.log.debug("Performing extraction..") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - json_data = [] - fbx_files = [] - - asset_group = instance.data["transientData"]["instance_node"] - - fbx_count = 0 - - project_name = instance.context.data["projectEntity"]["name"] - for asset in asset_group.children: - metadata = asset.get(AVALON_PROPERTY) - if not metadata: - # Avoid raising error directly if there's just invalid data - # inside the instance; better to log it to the artist - # TODO: This should actually be validated in a validator - self.log.warning( - f"Found content in layout that is not a loaded " - f"asset, skipping: {asset.name_full}" - ) - continue - - version_id = metadata["parent"] - family = metadata["family"] - - self.log.debug("Parent: {}".format(version_id)) - # Get blend reference - blend = get_representation_by_name( - project_name, "blend", version_id, fields=["_id"] - ) - blend_id = None - if blend: - blend_id = blend["_id"] - # Get fbx reference - fbx = get_representation_by_name( - project_name, "fbx", version_id, fields=["_id"] - ) - fbx_id = None - if fbx: - fbx_id = fbx["_id"] - # Get abc reference - abc = get_representation_by_name( - project_name, "abc", version_id, fields=["_id"] - ) - abc_id = None - if abc: - abc_id = abc["_id"] - - json_element = {} - if blend_id: - json_element["reference"] = str(blend_id) - if fbx_id: - json_element["reference_fbx"] = str(fbx_id) - if abc_id: - json_element["reference_abc"] = str(abc_id) - json_element["family"] = family - json_element["instance_name"] = asset.name - json_element["asset_name"] = metadata["asset_name"] - json_element["file_path"] = metadata["libpath"] - - json_element["transform"] = { - "translation": { - "x": asset.location.x, - "y": asset.location.y, - "z": asset.location.z - }, - "rotation": { - "x": asset.rotation_euler.x, - "y": asset.rotation_euler.y, - "z": asset.rotation_euler.z - }, - "scale": { - "x": asset.scale.x, - "y": asset.scale.y, - "z": asset.scale.z - } - } - - json_element["transform_matrix"] = [] - - for row in list(asset.matrix_world.transposed()): - json_element["transform_matrix"].append(list(row)) - - json_element["basis"] = [ - [1, 0, 0, 0], - [0, -1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1] - ] - - # Extract the animation as well - if family == "rig": - f, n = self._export_animation( - asset, instance, stagingdir, fbx_count) - if f: - fbx_files.append(f) - json_element["animation"] = f - fbx_count = n - - json_data.append(json_element) - - asset_name = instance.data["assetEntity"]["name"] - subset = instance.data["subset"] - instance_name = f"{asset_name}_{subset}" - json_filename = f"{instance_name}.json" - - json_path = os.path.join(stagingdir, json_filename) - - with 
open(json_path, "w+") as file: - json.dump(json_data, fp=file, indent=2) - - json_representation = { - 'name': 'json', - 'ext': 'json', - 'files': json_filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(json_representation) - - self.log.debug(fbx_files) - - if len(fbx_files) == 1: - fbx_representation = { - 'name': 'fbx', - 'ext': '000.fbx', - 'files': fbx_files[0], - "stagingDir": stagingdir, - } - instance.data["representations"].append(fbx_representation) - elif len(fbx_files) > 1: - fbx_representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': fbx_files, - "stagingDir": stagingdir, - } - instance.data["representations"].append(fbx_representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, json_representation) diff --git a/openpype/hosts/blender/plugins/publish/extract_playblast.py b/openpype/hosts/blender/plugins/publish/extract_playblast.py deleted file mode 100644 index a78aa14138..0000000000 --- a/openpype/hosts/blender/plugins/publish/extract_playblast.py +++ /dev/null @@ -1,124 +0,0 @@ -import os -import clique - -import bpy - -import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.blender.api import capture -from openpype.hosts.blender.api.lib import maintained_time - - -class ExtractPlayblast(publish.Extractor, publish.OptionalPyblishPluginMixin): - """ - Extract viewport playblast. - - Takes review camera and creates review Quicktime video based on viewport - capture. - """ - - label = "Extract Playblast" - hosts = ["blender"] - families = ["review"] - optional = True - order = pyblish.api.ExtractorOrder + 0.01 - - def process(self, instance): - if not self.is_active(instance.data): - return - - # get scene fps - fps = instance.data.get("fps") - if fps is None: - fps = bpy.context.scene.render.fps - instance.data["fps"] = fps - - self.log.debug(f"fps: {fps}") - - # If start and end frames cannot be determined, - # get them from Blender timeline. - start = instance.data.get("frameStart", bpy.context.scene.frame_start) - end = instance.data.get("frameEnd", bpy.context.scene.frame_end) - - self.log.debug(f"start: {start}, end: {end}") - assert end > start, "Invalid time range !" 
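A note on the frame-collection step used further down in this extractor: clique groups the numbered playblast PNGs into a single collection, which then becomes the representation's file list. A rough sketch with made-up file names (output comments are approximate):

import clique

files = [
    "sh010_reviewMain.0001.png",
    "sh010_reviewMain.0002.png",
    "sh010_reviewMain.0003.png",
    "notes.txt",
]
collections, remainder = clique.assemble(
    files,
    patterns=[f"sh010_reviewMain\\.{clique.DIGITS_PATTERN}\\.png$"],
)

print(collections[0])        # roughly: sh010_reviewMain.%04d.png [1-3]
print(list(collections[0]))  # the three frame file names, in order
print(remainder)             # ['notes.txt']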
- - # get cameras - camera = instance.data("review_camera", None) - - # get isolate objects list - isolate = instance.data("isolate", None) - - # get output path - stagingdir = self.staging_dir(instance) - asset_name = instance.data["assetEntity"]["name"] - subset = instance.data["subset"] - filename = f"{asset_name}_{subset}" - - path = os.path.join(stagingdir, filename) - - self.log.debug(f"Outputting images to {path}") - - project_settings = instance.context.data["project_settings"]["blender"] - presets = project_settings["publish"]["ExtractPlayblast"]["presets"] - preset = presets.get("default") - preset.update({ - "camera": camera, - "start_frame": start, - "end_frame": end, - "filename": path, - "overwrite": True, - "isolate": isolate, - }) - preset.setdefault( - "image_settings", - { - "file_format": "PNG", - "color_mode": "RGB", - "color_depth": "8", - "compression": 15, - }, - ) - - with maintained_time(): - path = capture(**preset) - - self.log.debug(f"playblast path {path}") - - collected_files = os.listdir(stagingdir) - collections, remainder = clique.assemble( - collected_files, - patterns=[f"{filename}\\.{clique.DIGITS_PATTERN}\\.png$"], - ) - - if len(collections) > 1: - raise RuntimeError( - f"More than one collection found in stagingdir: {stagingdir}" - ) - elif len(collections) == 0: - raise RuntimeError( - f"No collection found in stagingdir: {stagingdir}" - ) - - frame_collection = collections[0] - - self.log.debug(f"Found collection of interest {frame_collection}") - - instance.data.setdefault("representations", []) - - tags = ["review"] - if not instance.data.get("keepImages"): - tags.append("delete") - - representation = { - "name": "png", - "ext": "png", - "files": list(frame_collection), - "stagingDir": stagingdir, - "frameStart": start, - "frameEnd": end, - "fps": fps, - "tags": tags, - "camera_name": camera - } - instance.data["representations"].append(representation) diff --git a/openpype/hosts/blender/plugins/publish/extract_thumbnail.py b/openpype/hosts/blender/plugins/publish/extract_thumbnail.py deleted file mode 100644 index e593e0de27..0000000000 --- a/openpype/hosts/blender/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import glob - -import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.blender.api import capture -from openpype.hosts.blender.api.lib import maintained_time - -import bpy - - -class ExtractThumbnail(publish.Extractor): - """Extract viewport thumbnail. - - Takes review camera and creates a thumbnail based on viewport - capture. 
- - """ - - label = "Extract Thumbnail" - hosts = ["blender"] - families = ["review"] - order = pyblish.api.ExtractorOrder + 0.01 - presets = {} - - def process(self, instance): - self.log.debug("Extracting capture..") - - if instance.data.get("thumbnailSource"): - self.log.debug("Thumbnail source found, skipping...") - return - - stagingdir = self.staging_dir(instance) - asset_name = instance.data["assetEntity"]["name"] - subset = instance.data["subset"] - filename = f"{asset_name}_{subset}" - - path = os.path.join(stagingdir, filename) - - self.log.debug(f"Outputting images to {path}") - - camera = instance.data.get("review_camera", "AUTO") - start = instance.data.get("frameStart", bpy.context.scene.frame_start) - family = instance.data.get("family") - isolate = instance.data("isolate", None) - - preset = self.presets.get(family, {}) - - preset.update({ - "camera": camera, - "start_frame": start, - "end_frame": start, - "filename": path, - "overwrite": True, - "isolate": isolate, - }) - preset.setdefault( - "image_settings", - { - "file_format": "JPEG", - "color_mode": "RGB", - "quality": 100, - }, - ) - - with maintained_time(): - path = capture(**preset) - - thumbnail = os.path.basename(self._fix_output_path(path)) - - self.log.debug(f"thumbnail: {thumbnail}") - - instance.data.setdefault("representations", []) - - representation = { - "name": "thumbnail", - "ext": "jpg", - "files": thumbnail, - "stagingDir": stagingdir, - "thumbnail": True - } - instance.data["representations"].append(representation) - - def _fix_output_path(self, filepath): - """"Workaround to return correct filepath. - - To workaround this we just glob.glob() for any file extensions and - assume the latest modified file is the correct file and return it. - - """ - # Catch cancelled playblast - if filepath is None: - self.log.warning( - "Playblast did not result in output path. " - "Playblast is probably interrupted." 
- ) - return None - - if not os.path.exists(filepath): - files = glob.glob(f"{filepath}.*.jpg") - - if not files: - raise RuntimeError(f"Couldn't find playblast from: {filepath}") - filepath = max(files, key=os.path.getmtime) - - return filepath diff --git a/openpype/hosts/blender/plugins/publish/increment_workfile_version.py b/openpype/hosts/blender/plugins/publish/increment_workfile_version.py deleted file mode 100644 index 9f8d20aedc..0000000000 --- a/openpype/hosts/blender/plugins/publish/increment_workfile_version.py +++ /dev/null @@ -1,32 +0,0 @@ -import pyblish.api -from openpype.pipeline.publish import OptionalPyblishPluginMixin -from openpype.hosts.blender.api.workio import save_file - - -class IncrementWorkfileVersion( - pyblish.api.ContextPlugin, - OptionalPyblishPluginMixin -): - """Increment current workfile version.""" - - order = pyblish.api.IntegratorOrder + 0.9 - label = "Increment Workfile Version" - optional = True - hosts = ["blender"] - families = ["animation", "model", "rig", "action", "layout", "blendScene", - "pointcache", "render.farm"] - - def process(self, context): - if not self.is_active(context.data): - return - - assert all(result["success"] for result in context.data["results"]), ( - "Publishing not successful so version is not increased.") - - from openpype.lib import version_up - path = context.data["currentFile"] - filepath = version_up(path) - - save_file(filepath, copy=False) - - self.log.debug('Incrementing blender workfile version') diff --git a/openpype/hosts/blender/plugins/publish/validate_deadline_publish.py b/openpype/hosts/blender/plugins/publish/validate_deadline_publish.py deleted file mode 100644 index bb243f08cc..0000000000 --- a/openpype/hosts/blender/plugins/publish/validate_deadline_publish.py +++ /dev/null @@ -1,47 +0,0 @@ -import os - -import bpy - -import pyblish.api -from openpype.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) -from openpype.hosts.blender.api.render_lib import prepare_rendering - - -class ValidateDeadlinePublish(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validates Render File Directory is - not the same in every submission - """ - - order = ValidateContentsOrder - families = ["render"] - hosts = ["blender"] - label = "Validate Render Output for Deadline" - optional = True - actions = [RepairAction] - - def process(self, instance): - if not self.is_active(instance.data): - return - filepath = bpy.data.filepath - file = os.path.basename(filepath) - filename, ext = os.path.splitext(file) - if filename not in bpy.context.scene.render.filepath: - raise PublishValidationError( - "Render output folder " - "doesn't match the blender scene name! " - "Use Repair action to " - "fix the folder file path." 
- ) - - @classmethod - def repair(cls, instance): - container = instance.data["transientData"]["instance_node"] - prepare_rendering(container) - bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath) - cls.log.debug("Reset the render output folder...") diff --git a/openpype/hosts/blender/plugins/publish/validate_file_saved.py b/openpype/hosts/blender/plugins/publish/validate_file_saved.py deleted file mode 100644 index 442f856e05..0000000000 --- a/openpype/hosts/blender/plugins/publish/validate_file_saved.py +++ /dev/null @@ -1,61 +0,0 @@ -import bpy - -import pyblish.api - -from openpype.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError -) - - -class SaveWorkfileAction(pyblish.api.Action): - """Save Workfile.""" - label = "Save Workfile" - on = "failed" - icon = "save" - - def process(self, context, plugin): - bpy.ops.wm.avalon_workfiles() - - -class ValidateFileSaved(pyblish.api.ContextPlugin, - OptionalPyblishPluginMixin): - """Validate that the workfile has been saved.""" - - order = pyblish.api.ValidatorOrder - 0.01 - hosts = ["blender"] - label = "Validate File Saved" - optional = False - exclude_families = [] - actions = [SaveWorkfileAction] - - def process(self, context): - if not self.is_active(context.data): - return - - if not context.data["currentFile"]: - # File has not been saved at all and has no filename - raise PublishValidationError( - "Current file is empty. Save the file before continuing." - ) - - # Do not validate workfile has unsaved changes if only instances - # present of families that should be excluded - families = { - instance.data["family"] for instance in context - # Consider only enabled instances - if instance.data.get("publish", True) - and instance.data.get("active", True) - } - - def is_excluded(family): - return any(family in exclude_family - for exclude_family in self.exclude_families) - - if all(is_excluded(family) for family in families): - self.log.debug("Only excluded families found, skipping workfile " - "unsaved changes validation..") - return - - if bpy.data.is_dirty: - raise PublishValidationError("Workfile has unsaved changes.") diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py deleted file mode 100644 index 060bccbd04..0000000000 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py +++ /dev/null @@ -1,66 +0,0 @@ -from typing import List - -import bpy - -import pyblish.api - -from openpype.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) -import openpype.hosts.blender.api.action - - -class ValidateMeshHasUvs( - pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin, -): - """Validate that the current mesh has UV's.""" - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model"] - label = "Mesh Has UVs" - actions = [openpype.hosts.blender.api.action.SelectInvalidAction] - optional = True - - @staticmethod - def has_uvs(obj: bpy.types.Object) -> bool: - """Check if an object has uv's.""" - if not obj.data.uv_layers: - return False - for uv_layer in obj.data.uv_layers: - for polygon in obj.data.polygons: - for loop_index in polygon.loop_indices: - if ( - loop_index >= len(uv_layer.data) - or not uv_layer.data[loop_index].uv - ): - return False - - return True - - @classmethod - def get_invalid(cls, instance) -> List: - invalid = [] - for obj in instance: - if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': - if obj.mode != 
"OBJECT": - cls.log.warning( - f"Mesh object {obj.name} should be in 'OBJECT' mode" - " to be properly checked." - ) - if not cls.has_uvs(obj): - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - f"Meshes found in instance without valid UV's: {invalid}" - ) diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py deleted file mode 100644 index 7f77bbe38c..0000000000 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import List - -import bpy - -import pyblish.api - -from openpype.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) -import openpype.hosts.blender.api.action - - -class ValidateMeshNoNegativeScale(pyblish.api.Validator, - OptionalPyblishPluginMixin): - """Ensure that meshes don't have a negative scale.""" - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model"] - label = "Mesh No Negative Scale" - actions = [openpype.hosts.blender.api.action.SelectInvalidAction] - - @staticmethod - def get_invalid(instance) -> List: - invalid = [] - for obj in instance: - if isinstance(obj, bpy.types.Object) and obj.type == 'MESH': - if any(v < 0 for v in obj.scale): - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - names = ", ".join(obj.name for obj in invalid) - raise PublishValidationError( - f"Meshes found in instance with negative scale: {names}" - ) diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py deleted file mode 100644 index 1fb9535ee4..0000000000 --- a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import List - -import mathutils -import bpy - -import pyblish.api - -import openpype.hosts.blender.api.action -from openpype.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) - - -class ValidateTransformZero(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Transforms can't have any values - - To solve this issue, try freezing the transforms. So long - as the transforms, rotation and scale values are zero, - you're all good. 
- - """ - - order = ValidateContentsOrder - hosts = ["blender"] - families = ["model"] - label = "Transform Zero" - actions = [openpype.hosts.blender.api.action.SelectInvalidAction] - - _identity = mathutils.Matrix() - - @classmethod - def get_invalid(cls, instance) -> List: - invalid = [] - for obj in instance: - if ( - isinstance(obj, bpy.types.Object) - and obj.matrix_basis != cls._identity - ): - invalid.append(obj) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - names = ", ".join(obj.name for obj in invalid) - raise PublishValidationError( - "Objects found in instance which do not" - f" have transform set to zero: {names}" - ) diff --git a/openpype/hosts/celaction/addon.py b/openpype/hosts/celaction/addon.py deleted file mode 100644 index 9158010011..0000000000 --- a/openpype/hosts/celaction/addon.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class CelactionAddon(OpenPypeModule, IHostAddon): - name = "celaction" - host_name = "celaction" - - def initialize(self, module_settings): - self.enabled = True - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(CELACTION_ROOT_DIR, "hooks") - ] - - def add_implementation_envs(self, env, _app): - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_workfile_extensions(self): - return [".scn"] diff --git a/openpype/hosts/celaction/scripts/publish_cli.py b/openpype/hosts/celaction/scripts/publish_cli.py deleted file mode 100644 index 39d3f1a94d..0000000000 --- a/openpype/hosts/celaction/scripts/publish_cli.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import sys - -import pyblish.api -import pyblish.util - -import openpype.hosts.celaction -from openpype.lib import Logger -from openpype.tools.utils import host_tools -from openpype.pipeline import install_openpype_plugins - - -log = Logger.get_logger("celaction") - -PUBLISH_HOST = "celaction" -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") - - -def main(): - # Registers pype's Global pyblish plugins - install_openpype_plugins() - - if os.path.exists(PUBLISH_PATH): - log.info(f"Registering path: {PUBLISH_PATH}") - pyblish.api.register_plugin_path(PUBLISH_PATH) - - pyblish.api.register_host(PUBLISH_HOST) - pyblish.api.register_target("local") - - return host_tools.show_publish() - - -if __name__ == "__main__": - result = main() - sys.exit(not bool(result)) diff --git a/openpype/hosts/flame/addon.py b/openpype/hosts/flame/addon.py deleted file mode 100644 index d9359fc5bf..0000000000 --- a/openpype/hosts/flame/addon.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class FlameAddon(OpenPypeModule, IHostAddon): - name = "flame" - host_name = "flame" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - # Add requirements to DL_PYTHON_HOOK_PATH - env["DL_PYTHON_HOOK_PATH"] = os.path.join(HOST_DIR, "startup") - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - - # Set 
default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(HOST_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".otoc"] diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py deleted file mode 100644 index ab713aed84..0000000000 --- a/openpype/hosts/flame/api/lib.py +++ /dev/null @@ -1,1272 +0,0 @@ -import sys -import os -import re -import json -import pickle -import clique -import tempfile -import traceback -import itertools -import contextlib -import xml.etree.cElementTree as cET -from copy import deepcopy, copy -from xml.etree import ElementTree as ET -from pprint import pformat - -from openpype.lib import Logger, run_subprocess - -from .constants import ( - MARKER_COLOR, - MARKER_DURATION, - MARKER_NAME, - COLOR_MAP, - MARKER_PUBLISH_DEFAULT -) - -log = Logger.get_logger(__name__) - -FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") - - -class CTX: - # singleton used for passing data between api modules - app_framework = None - flame_apps = [] - selection = None - - -@contextlib.contextmanager -def io_preferences_file(klass, filepath, write=False): - try: - flag = "w" if write else "r" - yield open(filepath, flag) - - except IOError as _error: - klass.log.info("Unable to work with preferences `{}`: {}".format( - filepath, _error)) - - -class FlameAppFramework(object): - # flameAppFramework class takes care of preferences - - class prefs_dict(dict): - - def __init__(self, master, name, **kwargs): - self.name = name - self.master = master - if not self.master.get(self.name): - self.master[self.name] = {} - self.master[self.name].__init__() - - def __getitem__(self, k): - return self.master[self.name].__getitem__(k) - - def __setitem__(self, k, v): - return self.master[self.name].__setitem__(k, v) - - def __delitem__(self, k): - return self.master[self.name].__delitem__(k) - - def get(self, k, default=None): - return self.master[self.name].get(k, default) - - def setdefault(self, k, default=None): - return self.master[self.name].setdefault(k, default) - - def pop(self, *args, **kwargs): - return self.master[self.name].pop(*args, **kwargs) - - def update(self, mapping=(), **kwargs): - self.master[self.name].update(mapping, **kwargs) - - def __contains__(self, k): - return self.master[self.name].__contains__(k) - - def copy(self): # don"t delegate w/ super - dict.copy() -> dict :( - return type(self)(self) - - def keys(self): - return self.master[self.name].keys() - - @classmethod - def fromkeys(cls, keys, v=None): - return cls.master[cls.name].fromkeys(keys, v) - - def __repr__(self): - return "{0}({1})".format( - type(self).__name__, self.master[self.name].__repr__()) - - def master_keys(self): - return self.master.keys() - - def __init__(self): - self.name = self.__class__.__name__ - self.bundle_name = "OpenPypeFlame" - # self.prefs scope is limited to flame project and user - self.prefs = {} - self.prefs_user = {} - self.prefs_global = {} - self.log = log - - try: - import flame - self.flame = flame - self.flame_project_name = self.flame.project.current_project.name - self.flame_user_name = flame.users.current_user.name - except Exception: - self.flame = None - self.flame_project_name = None - self.flame_user_name = None - - import socket - self.hostname = socket.gethostname() - - if sys.platform == "darwin": - 
self.prefs_folder = os.path.join( - os.path.expanduser("~"), - "Library", - "Caches", - "OpenPype", - self.bundle_name - ) - elif sys.platform.startswith("linux"): - self.prefs_folder = os.path.join( - os.path.expanduser("~"), - ".OpenPype", - self.bundle_name) - - self.prefs_folder = os.path.join( - self.prefs_folder, - self.hostname, - ) - - self.log.info("[{}] waking up".format(self.__class__.__name__)) - - try: - self.load_prefs() - except RuntimeError: - self.save_prefs() - - # menu auto-refresh defaults - if not self.prefs_global.get("menu_auto_refresh"): - self.prefs_global["menu_auto_refresh"] = { - "media_panel": True, - "batch": True, - "main_menu": True, - "timeline_menu": True - } - - self.apps = [] - - def get_pref_file_paths(self): - - prefix = self.prefs_folder + os.path.sep + self.bundle_name - prefs_file_path = "_".join([ - prefix, self.flame_user_name, - self.flame_project_name]) + ".prefs" - prefs_user_file_path = "_".join([ - prefix, self.flame_user_name]) + ".prefs" - prefs_global_file_path = prefix + ".prefs" - - return (prefs_file_path, prefs_user_file_path, prefs_global_file_path) - - def load_prefs(self): - - (proj_pref_path, user_pref_path, - glob_pref_path) = self.get_pref_file_paths() - - with io_preferences_file(self, proj_pref_path) as prefs_file: - self.prefs = pickle.load(prefs_file) - self.log.info( - "Project - preferences contents:\n{}".format( - pformat(self.prefs) - )) - - with io_preferences_file(self, user_pref_path) as prefs_file: - self.prefs_user = pickle.load(prefs_file) - self.log.info( - "User - preferences contents:\n{}".format( - pformat(self.prefs_user) - )) - - with io_preferences_file(self, glob_pref_path) as prefs_file: - self.prefs_global = pickle.load(prefs_file) - self.log.info( - "Global - preferences contents:\n{}".format( - pformat(self.prefs_global) - )) - - return True - - def save_prefs(self): - # make sure the preference folder is available - if not os.path.isdir(self.prefs_folder): - try: - os.makedirs(self.prefs_folder) - except Exception: - self.log.info("Unable to create folder {}".format( - self.prefs_folder)) - return False - - # get all pref file paths - (proj_pref_path, user_pref_path, - glob_pref_path) = self.get_pref_file_paths() - - with io_preferences_file(self, proj_pref_path, True) as prefs_file: - pickle.dump(self.prefs, prefs_file) - self.log.info( - "Project - preferences contents:\n{}".format( - pformat(self.prefs) - )) - - with io_preferences_file(self, user_pref_path, True) as prefs_file: - pickle.dump(self.prefs_user, prefs_file) - self.log.info( - "User - preferences contents:\n{}".format( - pformat(self.prefs_user) - )) - - with io_preferences_file(self, glob_pref_path, True) as prefs_file: - pickle.dump(self.prefs_global, prefs_file) - self.log.info( - "Global - preferences contents:\n{}".format( - pformat(self.prefs_global) - )) - - return True - - -def get_current_project(): - import flame - return flame.project.current_project - - -def get_current_sequence(selection): - import flame - - def segment_to_sequence(_segment): - track = _segment.parent - version = track.parent - return version.parent - - process_timeline = None - - if len(selection) == 1: - if isinstance(selection[0], flame.PySequence): - process_timeline = selection[0] - if isinstance(selection[0], flame.PySegment): - process_timeline = segment_to_sequence(selection[0]) - else: - for segment in selection: - if isinstance(segment, flame.PySegment): - process_timeline = segment_to_sequence(segment) - break - - return process_timeline - - -def 
rescan_hooks(): - import flame - try: - flame.execute_shortcut("Rescan Python Hooks") - except Exception: - pass - - -def get_metadata(project_name, _log=None): - # TODO: can be replaced by MediaInfoFile class method - from adsk.libwiretapPythonClientAPI import ( - WireTapClient, - WireTapServerHandle, - WireTapNodeHandle, - WireTapStr - ) - - class GetProjectColorPolicy(object): - def __init__(self, host_name=None, _log=None): - # Create a connection to the Backburner manager using the Wiretap - # python API. - # - self.log = _log or log - self.host_name = host_name or "localhost" - self._wiretap_client = WireTapClient() - if not self._wiretap_client.init(): - raise Exception("Could not initialize Wiretap Client") - self._server = WireTapServerHandle( - "{}:IFFFS".format(self.host_name)) - - def process(self, project_name): - policy_node_handle = WireTapNodeHandle( - self._server, - "/projects/{}/syncolor/policy".format(project_name) - ) - self.log.info(policy_node_handle) - - policy = WireTapStr() - if not policy_node_handle.getNodeTypeStr(policy): - self.log.warning( - "Could not retrieve policy of '%s': %s" % ( - policy_node_handle.getNodeId().id(), - policy_node_handle.lastError() - ) - ) - - return policy.c_str() - - policy_wiretap = GetProjectColorPolicy(_log=_log) - return policy_wiretap.process(project_name) - - -def get_segment_data_marker(segment, with_marker=None): - """ - Get openpype track item tag created by creator or loader plugin. - - Attributes: - segment (flame.PySegment): flame api object - with_marker (bool)[optional]: if true it will return also marker object - - Returns: - dict: openpype tag data - - Returns(with_marker=True): - flame.PyMarker, dict - """ - for marker in segment.markers: - comment = marker.comment.get_value() - color = marker.colour.get_value() - name = marker.name.get_value() - - if (name == MARKER_NAME) and ( - color == COLOR_MAP[MARKER_COLOR]): - if not with_marker: - return json.loads(comment) - else: - return marker, json.loads(comment) - - -def set_segment_data_marker(segment, data=None): - """ - Set openpype track item tag to input segment. - - Attributes: - segment (flame.PySegment): flame api object - - Returns: - dict: json loaded data - """ - data = data or dict() - - marker_data = get_segment_data_marker(segment, True) - - if marker_data: - # get available openpype tag if any - marker, tag_data = marker_data - # update tag data with new data - tag_data.update(data) - # update marker with tag data - marker.comment = json.dumps(tag_data) - else: - # update tag data with new data - marker = create_segment_data_marker(segment) - # add tag data to marker's comment - marker.comment = json.dumps(data) - - -def set_publish_attribute(segment, value): - """ Set Publish attribute in input Tag object - - Attribute: - segment (flame.PySegment)): flame api object - value (bool): True or False - """ - tag_data = get_segment_data_marker(segment) - tag_data["publish"] = value - - # set data to the publish attribute - set_segment_data_marker(segment, tag_data) - - -def get_publish_attribute(segment): - """ Get Publish attribute from input Tag object - - Attribute: - segment (flame.PySegment)): flame api object - - Returns: - bool: True or False - """ - tag_data = get_segment_data_marker(segment) - - if not tag_data: - set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT) - return MARKER_PUBLISH_DEFAULT - - return tag_data["publish"] - - -def create_segment_data_marker(segment): - """ Create openpype marker on a segment. 
- - Attributes: - segment (flame.PySegment): flame api object - - Returns: - flame.PyMarker: flame api object - """ - # get duration of segment - duration = segment.record_duration.relative_frame - # calculate start frame of the new marker - start_frame = int(segment.record_in.relative_frame) + int(duration / 2) - # create marker - marker = segment.create_marker(start_frame) - # set marker name - marker.name = MARKER_NAME - # set duration - marker.duration = MARKER_DURATION - # set colour - marker.colour = COLOR_MAP[MARKER_COLOR] # Red - - return marker - - -def get_sequence_segments(sequence, selected=False): - segments = [] - # loop versions in sequence - for ver in sequence.versions: - # loop track in versions - for track in ver.tracks: - # ignore all empty tracks and hidden too - if len(track.segments) == 0 and track.hidden: - continue - # loop all segment in remaining tracks - for segment in track.segments: - if segment.name.get_value() == "": - continue - if segment.hidden.get_value() is True: - continue - if ( - selected is True - and segment.selected.get_value() is not True - ): - continue - # add it to original selection - segments.append(segment) - return segments - - -@contextlib.contextmanager -def maintained_segment_selection(sequence): - """Maintain selection during context - - Attributes: - sequence (flame.PySequence): python api object - - Yield: - list of flame.PySegment - - Example: - >>> with maintained_segment_selection(sequence) as selected_segments: - ... for segment in selected_segments: - ... segment.selected = False - >>> print(segment.selected) - True - """ - selected_segments = get_sequence_segments(sequence, True) - try: - # do the operation on selected segments - yield selected_segments - finally: - # reset all selected clips - reset_segment_selection(sequence) - # select only original selection of segments - for segment in selected_segments: - segment.selected = True - - -def reset_segment_selection(sequence): - """Deselect all selected nodes - """ - for ver in sequence.versions: - for track in ver.tracks: - if len(track.segments) == 0 and track.hidden: - continue - for segment in track.segments: - segment.selected = False - - -def _get_shot_tokens_values(clip, tokens): - old_value = None - output = {} - - if not clip.shot_name: - return output - - old_value = clip.shot_name.get_value() - - for token in tokens: - clip.shot_name.set_value(token) - _key = str(re.sub("[<>]", "", token)).replace(" ", "_") - - try: - output[_key] = int(clip.shot_name.get_value()) - except ValueError: - output[_key] = clip.shot_name.get_value() - - clip.shot_name.set_value(old_value) - - return output - - -def get_segment_attributes(segment): - if segment.name.get_value() == "": - return None - - # Add timeline segment to tree - clip_data = { - "shot_name": segment.shot_name.get_value(), - "segment_name": segment.name.get_value(), - "segment_comment": segment.comment.get_value(), - "tape_name": segment.tape_name, - "source_name": segment.source_name, - "fpath": segment.file_path, - "PySegment": segment - } - - # head and tail with forward compatibility - if segment.head: - # `infinite` can be also returned - if isinstance(segment.head, str): - clip_data["segment_head"] = 0 - else: - clip_data["segment_head"] = int(segment.head) - if segment.tail: - # `infinite` can be also returned - if isinstance(segment.tail, str): - clip_data["segment_tail"] = 0 - else: - clip_data["segment_tail"] = int(segment.tail) - - # add all available shot tokens - shot_tokens = 
_get_shot_tokens_values(segment, [ - "", "", "", "", "", - "", "" - ]) - clip_data.update(shot_tokens) - - # populate shot source metadata - segment_attrs = [ - "record_duration", "record_in", "record_out", - "source_duration", "source_in", "source_out" - ] - segment_attrs_data = {} - for attr_name in segment_attrs: - if not hasattr(segment, attr_name): - continue - attr = getattr(segment, attr_name) - segment_attrs_data[attr_name] = str(attr).replace("+", ":") - - if attr_name in ["record_in", "record_out"]: - clip_data[attr_name] = attr.relative_frame - else: - clip_data[attr_name] = attr.frame - - clip_data["segment_timecodes"] = segment_attrs_data - - return clip_data - - -def get_clips_in_reels(project): - output_clips = [] - project_desktop = project.current_workspace.desktop - - for reel_group in project_desktop.reel_groups: - for reel in reel_group.reels: - for clip in reel.clips: - clip_data = { - "PyClip": clip, - "fps": float(str(clip.frame_rate)[:-4]) - } - - attrs = [ - "name", "width", "height", - "ratio", "sample_rate", "bit_depth" - ] - - for attr in attrs: - val = getattr(clip, attr) - clip_data[attr] = val - - version = clip.versions[-1] - track = version.tracks[-1] - for segment in track.segments: - segment_data = get_segment_attributes(segment) - clip_data.update(segment_data) - - output_clips.append(clip_data) - - return output_clips - - -def get_reformated_filename(filename, padded=True): - """ - Return fixed python expression path - - Args: - filename (str): file name - - Returns: - type: string with reformated path - - Example: - get_reformated_filename("plate.1001.exr") > plate.%04d.exr - - """ - found = FRAME_PATTERN.search(filename) - - if not found: - log.info("File name is not sequence: {}".format(filename)) - return filename - - padding = get_padding_from_filename(filename) - - replacement = "%0{}d".format(padding) if padded else "%d" - start_idx, end_idx = found.span(1) - - return replacement.join( - [filename[:start_idx], filename[end_idx:]] - ) - - -def get_padding_from_filename(filename): - """ - Return padding number from Flame path style - - Args: - filename (str): file name - - Returns: - int: padding number - - Example: - get_padding_from_filename("plate.0001.exr") > 4 - - """ - found = get_frame_from_filename(filename) - - return len(found) if found else None - - -def get_frame_from_filename(filename): - """ - Return sequence number from Flame path style - - Args: - filename (str): file name - - Returns: - int: sequence frame number - - Example: - def get_frame_from_filename(path): - ("plate.0001.exr") > 0001 - - """ - - found = re.findall(FRAME_PATTERN, filename) - - return found.pop() if found else None - - -@contextlib.contextmanager -def maintained_object_duplication(item): - """Maintain input item duplication - - Attributes: - item (any flame.PyObject): python api object - - Yield: - duplicate input PyObject type - """ - import flame - # Duplicate the clip to avoid modifying the original clip - duplicate = flame.duplicate(item) - - try: - # do the operation on selected segments - yield duplicate - finally: - # delete the item at the end - flame.delete(duplicate) - - -@contextlib.contextmanager -def maintained_temp_file_path(suffix=None): - _suffix = suffix or "" - - try: - # Store dumped json to temporary file - temporary_file = tempfile.mktemp( - suffix=_suffix, prefix="flame_maintained_") - yield temporary_file.replace("\\", "/") - - except IOError as _error: - raise IOError( - "Not able to create temp json file: {}".format(_error)) - - 
finally: - # Remove the temporary json - os.remove(temporary_file) - - -def get_clip_segment(flame_clip): - name = flame_clip.name.get_value() - version = flame_clip.versions[0] - track = version.tracks[0] - segments = track.segments - - if len(segments) < 1: - raise ValueError("Clip `{}` has no segments!".format(name)) - - if len(segments) > 1: - raise ValueError("Clip `{}` has too many segments!".format(name)) - - return segments[0] - - -def get_batch_group_from_desktop(name): - project = get_current_project() - project_desktop = project.current_workspace.desktop - - for bgroup in project_desktop.batch_groups: - if bgroup.name.get_value() in name: - return bgroup - - -class MediaInfoFile(object): - """Class to get media info file clip data - - Raises: - IOError: MEDIA_SCRIPT_PATH path doesn't exists - TypeError: Not able to generate clip xml data file - ET.ParseError: Missing clip in xml clip data - IOError: Not able to save xml clip data to file - - Attributes: - str: `MEDIA_SCRIPT_PATH` path to flame binary - logging.Logger: `log` logger - - TODO: add method for getting metadata to dict - """ - MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info" - - log = log - - _clip_data = None - _start_frame = None - _fps = None - _drop_mode = None - _file_pattern = None - - def __init__(self, path, logger=None): - - # replace log if any - if logger: - self.log = logger - - # test if `dl_get_media_info` path exists - self._validate_media_script_path() - - # derivate other feed variables - feed_basename = os.path.basename(path) - feed_dir = os.path.dirname(path) - feed_ext = os.path.splitext(feed_basename)[1][1:].lower() - - with maintained_temp_file_path(".clip") as tmp_path: - self.log.info("Temp File: {}".format(tmp_path)) - self._generate_media_info_file(tmp_path, feed_ext, feed_dir) - - # get collection containing feed_basename from path - self.file_pattern = self._get_collection( - feed_basename, feed_dir, feed_ext) - - if ( - not self.file_pattern - and os.path.exists(os.path.join(feed_dir, feed_basename)) - ): - self.file_pattern = feed_basename - - # get clip data and make them single if there is multiple - # clips data - xml_data = self._make_single_clip_media_info( - tmp_path, feed_basename, self.file_pattern) - self.log.debug("xml_data: {}".format(xml_data)) - self.log.debug("type: {}".format(type(xml_data))) - - # get all time related data and assign them - self._get_time_info_from_origin(xml_data) - self.log.debug("start_frame: {}".format(self.start_frame)) - self.log.debug("fps: {}".format(self.fps)) - self.log.debug("drop frame: {}".format(self.drop_mode)) - self.clip_data = xml_data - - def _get_collection(self, feed_basename, feed_dir, feed_ext): - """ Get collection string - - Args: - feed_basename (str): file base name - feed_dir (str): file's directory - feed_ext (str): file extension - - Raises: - AttributeError: feed_ext is not matching feed_basename - - Returns: - str: collection basename with range of sequence - """ - partialname = self._separate_file_head(feed_basename, feed_ext) - self.log.debug("__ partialname: {}".format(partialname)) - - # make sure partial input basename is having correct extensoon - if not partialname: - raise AttributeError( - "Wrong input attributes. 
Basename - {}, Ext - {}".format( - feed_basename, feed_ext - ) - ) - - # get all related files - files = [ - f for f in os.listdir(feed_dir) - if partialname == self._separate_file_head(f, feed_ext) - ] - - # ignore reminders as we dont need them - collections = clique.assemble(files)[0] - - # in case no collection found return None - # it is probably just single file - if not collections: - return - - # we expect only one collection - collection = collections[0] - - self.log.debug("__ collection: {}".format(collection)) - - if collection.is_contiguous(): - return self._format_collection(collection) - - # add `[` in front to make sure it want capture - # shot name with the same number - number_from_path = self._separate_number(feed_basename, feed_ext) - search_number_pattern = "[" + number_from_path - # convert to multiple collections - _continues_colls = collection.separate() - for _coll in _continues_colls: - coll_to_text = self._format_collection( - _coll, len(number_from_path)) - self.log.debug("__ coll_to_text: {}".format(coll_to_text)) - if search_number_pattern in coll_to_text: - return coll_to_text - - @staticmethod - def _format_collection(collection, padding=None): - padding = padding or collection.padding - # if no holes then return collection - head = collection.format("{head}") - tail = collection.format("{tail}") - range_template = "[{{:0{0}d}}-{{:0{0}d}}]".format( - padding) - ranges = range_template.format( - min(collection.indexes), - max(collection.indexes) - ) - # if no holes then return collection - return "{}{}{}".format(head, ranges, tail) - - def _separate_file_head(self, basename, extension): - """ Get only head with out sequence and extension - - Args: - basename (str): file base name - extension (str): file extension - - Returns: - str: file head - """ - # in case sequence file - found = re.findall( - r"(.*)[._][\d]*(?=.{})".format(extension), - basename, - ) - if found: - return found.pop() - - # in case single file - name, ext = os.path.splitext(basename) - - if extension == ext[1:]: - return name - - def _separate_number(self, basename, extension): - """ Get only sequence number as string - - Args: - basename (str): file base name - extension (str): file extension - - Returns: - str: number with padding - """ - # in case sequence file - found = re.findall( - r"[._]([\d]*)(?=.{})".format(extension), - basename, - ) - if found: - return found.pop() - - @property - def clip_data(self): - """Clip's xml clip data - - Returns: - xml.etree.ElementTree: xml data - """ - return self._clip_data - - @clip_data.setter - def clip_data(self, data): - self._clip_data = data - - @property - def start_frame(self): - """ Clip's starting frame found in timecode - - Returns: - int: number of frames - """ - return self._start_frame - - @start_frame.setter - def start_frame(self, number): - self._start_frame = int(number) - - @property - def fps(self): - """ Clip's frame rate - - Returns: - float: frame rate - """ - return self._fps - - @fps.setter - def fps(self, fl_number): - self._fps = float(fl_number) - - @property - def drop_mode(self): - """ Clip's drop frame mode - - Returns: - str: drop frame flag - """ - return self._drop_mode - - @drop_mode.setter - def drop_mode(self, text): - self._drop_mode = str(text) - - @property - def file_pattern(self): - """Clips file patter - - Returns: - str: file pattern. ex. 
file.[1-2].exr
-        """
-        return self._file_pattern
-
-    @file_pattern.setter
-    def file_pattern(self, fpattern):
-        self._file_pattern = fpattern
-
-    def _validate_media_script_path(self):
-        if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
-            raise IOError("Media Script does not exist: `{}`".format(
-                self.MEDIA_SCRIPT_PATH))
-
-    def _generate_media_info_file(self, fpath, feed_ext, feed_dir):
-        """ Generate media info xml .clip file
-
-        Args:
-            fpath (str): .clip file path
-            feed_ext (str): file extension to be filtered
-            feed_dir (str): look up directory
-
-        Raises:
-            TypeError: Type error if it fails
-        """
-        # create cmd arguments for getting the xml media info file
-        cmd_args = [
-            self.MEDIA_SCRIPT_PATH,
-            "-e", feed_ext,
-            "-o", fpath,
-            feed_dir
-        ]
-
-        try:
-            # execute creation of clip xml template data
-            run_subprocess(cmd_args)
-        except TypeError as error:
-            raise TypeError(
-                "Error creating `{}` due: {}".format(fpath, error))
-
-    def _make_single_clip_media_info(self, fpath, feed_basename, path_pattern):
-        """ Separate out only the relevant clip object from the .clip file
-
-        Args:
-            fpath (str): clip file path
-            feed_basename (str): search basename
-            path_pattern (str): search file pattern (file.[1-2].exr)
-
-        Raises:
-            ET.ParseError: if nothing found
-
-        Returns:
-            ET.Element: xml element data of matching clip
-        """
-        with open(fpath) as f:
-            lines = f.readlines()
-            _added_root = itertools.chain(
-                "<root>", deepcopy(lines)[1:], "</root>")
-            new_root = ET.fromstringlist(_added_root)
-
-        # find the clip which is matching the input name
-        xml_clips = new_root.findall("clip")
-        matching_clip = None
-        for xml_clip in xml_clips:
-            clip_name = xml_clip.find("name").text
-            self.log.debug("__ clip_name: `{}`".format(clip_name))
-            if clip_name not in feed_basename:
-                continue
-
-            # test path pattern
-            for out_track in xml_clip.iter("track"):
-                for out_feed in out_track.iter("feed"):
-                    for span in out_feed.iter("span"):
-                        # start frame
-                        span_path = span.find("path")
-                        self.log.debug(
-                            "__ span_path.text: {}, path_pattern: {}".format(
-                                span_path.text, path_pattern
-                            )
-                        )
-                        if path_pattern in span_path.text:
-                            matching_clip = xml_clip
-
-        if matching_clip is None:
-            # return warning there is missing clip
-            raise ET.ParseError(
-                "Missing clip in `{}`. 
Available clips {}".format( - feed_basename, [ - xml_clip.find("name").text - for xml_clip in xml_clips - ] - )) - - return matching_clip - - def _get_time_info_from_origin(self, xml_data): - """Set time info to class attributes - - Args: - xml_data (ET.Element): clip data - """ - try: - for out_track in xml_data.iter("track"): - for out_feed in out_track.iter("feed"): - # start frame - out_feed_nb_ticks_obj = out_feed.find( - "startTimecode/nbTicks") - self.start_frame = out_feed_nb_ticks_obj.text - - # fps - out_feed_fps_obj = out_feed.find( - "startTimecode/rate") - self.fps = out_feed_fps_obj.text - - # drop frame mode - out_feed_drop_mode_obj = out_feed.find( - "startTimecode/dropMode") - self.drop_mode = out_feed_drop_mode_obj.text - break - except Exception as msg: - self.log.warning(msg) - - @staticmethod - def write_clip_data_to_file(fpath, xml_element_data): - """ Write xml element of clip data to file - - Args: - fpath (string): file path - xml_element_data (xml.etree.ElementTree.Element): xml data - - Raises: - IOError: If data could not be written to file - """ - try: - # save it as new file - tree = cET.ElementTree(xml_element_data) - tree.write( - fpath, xml_declaration=True, - method="xml", encoding="UTF-8" - ) - except IOError as error: - raise IOError( - "Not able to write data to file: {}".format(error)) - - -class TimeEffectMetadata(object): - log = log - _data = {} - _retime_modes = { - 0: "speed", - 1: "timewarp", - 2: "duration" - } - - def __init__(self, segment, logger=None): - if logger: - self.log = logger - - self._data = self._get_metadata(segment) - - @property - def data(self): - """ Returns timewarp effect data - - Returns: - dict: retime data - """ - return self._data - - def _get_metadata(self, segment): - effects = segment.effects or [] - for effect in effects: - if effect.type == "Timewarp": - with maintained_temp_file_path(".timewarp_node") as tmp_path: - self.log.info("Temp File: {}".format(tmp_path)) - effect.save_setup(tmp_path) - return self._get_attributes_from_xml(tmp_path) - - return {} - - def _get_attributes_from_xml(self, tmp_path): - with open(tmp_path, "r") as tw_setup_file: - tw_setup_string = tw_setup_file.read() - tw_setup_file.close() - - tw_setup_xml = ET.fromstring(tw_setup_string) - tw_setup = self._dictify(tw_setup_xml) - # pprint(tw_setup) - try: - tw_setup_state = tw_setup["Setup"]["State"][0] - mode = int( - tw_setup_state["TW_RetimerMode"][0]["_text"] - ) - r_data = { - "type": self._retime_modes[mode], - "effectStart": int( - tw_setup["Setup"]["Base"][0]["Range"][0]["Start"]), - "effectEnd": int( - tw_setup["Setup"]["Base"][0]["Range"][0]["End"]) - } - - if mode == 0: # speed - r_data[self._retime_modes[mode]] = float( - tw_setup_state["TW_Speed"] - [0]["Channel"][0]["Value"][0]["_text"] - ) / 100 - elif mode == 1: # timewarp - print("timing") - r_data[self._retime_modes[mode]] = self._get_anim_keys( - tw_setup_state["TW_Timing"] - ) - elif mode == 2: # duration - r_data[self._retime_modes[mode]] = { - "start": { - "source": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][0]["Value"][0]["_text"] - ), - "timeline": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][0]["Frame"][0]["_text"] - ) - }, - "end": { - "source": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][1]["Value"][0]["_text"] - ), - "timeline": int( - tw_setup_state["TW_DurationTiming"][0]["Channel"] - [0]["KFrames"][0]["Key"][1]["Frame"][0]["_text"] - ) - } - } 
- except Exception: - lines = traceback.format_exception(*sys.exc_info()) - self.log.error("\n".join(lines)) - return - - return r_data - - def _get_anim_keys(self, setup_cat, index=None): - return_data = { - "extrapolation": ( - setup_cat[0]["Channel"][0]["Extrap"][0]["_text"] - ), - "animKeys": [] - } - for key in setup_cat[0]["Channel"][0]["KFrames"][0]["Key"]: - if index and int(key["Index"]) != index: - continue - key_data = { - "source": float(key["Value"][0]["_text"]), - "timeline": float(key["Frame"][0]["_text"]), - "index": int(key["Index"]), - "curveMode": key["CurveMode"][0]["_text"], - "curveOrder": key["CurveOrder"][0]["_text"] - } - if key.get("TangentMode"): - key_data["tangentMode"] = key["TangentMode"][0]["_text"] - - return_data["animKeys"].append(key_data) - - return return_data - - def _dictify(self, xml_, root=True): - """ Convert xml object to dictionary - - Args: - xml_ (xml.etree.ElementTree.Element): xml data - root (bool, optional): is root available. Defaults to True. - - Returns: - dict: dictionarized xml - """ - - if root: - return {xml_.tag: self._dictify(xml_, False)} - - d = copy(xml_.attrib) - if xml_.text: - d["_text"] = xml_.text - - for x in xml_.findall("./*"): - if x.tag not in d: - d[x.tag] = [] - d[x.tag].append(self._dictify(x, False)) - return d diff --git a/openpype/hosts/flame/api/menu.py b/openpype/hosts/flame/api/menu.py deleted file mode 100644 index e8bdf32ebd..0000000000 --- a/openpype/hosts/flame/api/menu.py +++ /dev/null @@ -1,256 +0,0 @@ -from copy import deepcopy -from pprint import pformat - -from qtpy import QtWidgets - -from openpype.pipeline import get_current_project_name -from openpype.tools.utils.host_tools import HostToolsHelper - -menu_group_name = 'OpenPype' - -default_flame_export_presets = { - 'Publish': { - 'PresetVisibility': 2, - 'PresetType': 0, - 'PresetFile': 'OpenEXR/OpenEXR (16-bit fp PIZ).xml' - }, - 'Preview': { - 'PresetVisibility': 3, - 'PresetType': 2, - 'PresetFile': 'Generate Preview.xml' - }, - 'Thumbnail': { - 'PresetVisibility': 3, - 'PresetType': 0, - 'PresetFile': 'Generate Thumbnail.xml' - } -} - - -def callback_selection(selection, function): - import openpype.hosts.flame.api as opfapi - opfapi.CTX.selection = selection - print("Hook Selection: \n\t{}".format( - pformat({ - index: (type(item), item.name) - for index, item in enumerate(opfapi.CTX.selection)}) - )) - function() - - -class _FlameMenuApp(object): - def __init__(self, framework): - self.name = self.__class__.__name__ - self.framework = framework - self.log = framework.log - self.menu_group_name = menu_group_name - self.dynamic_menu_data = {} - - # flame module is only available when a - # flame project is loaded and initialized - self.flame = None - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - self.flame_project_name = flame.project.current_project.name - self.prefs = self.framework.prefs_dict(self.framework.prefs, self.name) - self.prefs_user = self.framework.prefs_dict( - self.framework.prefs_user, self.name) - self.prefs_global = self.framework.prefs_dict( - self.framework.prefs_global, self.name) - - self.mbox = QtWidgets.QMessageBox() - project_name = get_current_project_name() - self.menu = { - "actions": [{ - 'name': project_name or "project", - 'isEnabled': False - }], - "name": self.menu_group_name - } - self.tools_helper = HostToolsHelper() - - def __getattr__(self, name): - def method(*args, **kwargs): - print('calling %s' % name) - return method - - def rescan(self, *args, **kwargs): - if 
not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') - - -class FlameMenuProjectConnect(_FlameMenuApp): - - # flameMenuProjectconnect app takes care of the preferences dialog as well - - def __init__(self, framework): - _FlameMenuApp.__init__(self, framework) - - def __getattr__(self, name): - def method(*args, **kwargs): - project = self.dynamic_menu_data.get(name) - if project: - self.link_project(project) - return method - - def build_menu(self): - if not self.flame: - return [] - - menu = deepcopy(self.menu) - - menu['actions'].append({ - "name": "Workfiles...", - "execute": lambda x: self.tools_helper.show_workfiles() - }) - menu['actions'].append({ - "name": "Load...", - "execute": lambda x: self.tools_helper.show_loader() - }) - menu['actions'].append({ - "name": "Manage...", - "execute": lambda x: self.tools_helper.show_scene_inventory() - }) - menu['actions'].append({ - "name": "Library...", - "execute": lambda x: self.tools_helper.show_library_loader() - }) - return menu - - def refresh(self, *args, **kwargs): - self.rescan() - - def rescan(self, *args, **kwargs): - if not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') - - -class FlameMenuTimeline(_FlameMenuApp): - - # flameMenuProjectconnect app takes care of the preferences dialog as well - - def __init__(self, framework): - _FlameMenuApp.__init__(self, framework) - - def __getattr__(self, name): - def method(*args, **kwargs): - project = self.dynamic_menu_data.get(name) - if project: - self.link_project(project) - return method - - def build_menu(self): - if not self.flame: - return [] - - menu = deepcopy(self.menu) - - menu['actions'].append({ - "name": "Create...", - "execute": lambda x: callback_selection( - x, self.tools_helper.show_creator) - }) - menu['actions'].append({ - "name": "Publish...", - "execute": lambda x: callback_selection( - x, self.tools_helper.show_publish) - }) - menu['actions'].append({ - "name": "Load...", - "execute": lambda x: self.tools_helper.show_loader() - }) - menu['actions'].append({ - "name": "Manage...", - "execute": lambda x: self.tools_helper.show_scene_inventory() - }) - menu['actions'].append({ - "name": "Library...", - "execute": lambda x: self.tools_helper.show_library_loader() - }) - return menu - - def refresh(self, *args, **kwargs): - self.rescan() - - def rescan(self, *args, **kwargs): - if not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') - - -class FlameMenuUniversal(_FlameMenuApp): - - # flameMenuProjectconnect app takes care of the preferences dialog as well - - def __init__(self, framework): - _FlameMenuApp.__init__(self, framework) - - def __getattr__(self, name): - def method(*args, **kwargs): - project = self.dynamic_menu_data.get(name) - if project: - self.link_project(project) - return method - - def build_menu(self): - if not self.flame: - return [] - - menu = deepcopy(self.menu) - - menu['actions'].append({ - "name": "Load...", - "execute": lambda x: callback_selection( - x, self.tools_helper.show_loader) - }) - menu['actions'].append({ - "name": "Manage...", - "execute": lambda x: 
self.tools_helper.show_scene_inventory() - }) - menu['actions'].append({ - "name": "Library...", - "execute": lambda x: self.tools_helper.show_library_loader() - }) - return menu - - def refresh(self, *args, **kwargs): - self.rescan() - - def rescan(self, *args, **kwargs): - if not self.flame: - try: - import flame - self.flame = flame - except ImportError: - self.flame = None - - if self.flame: - self.flame.execute_shortcut('Rescan Python Hooks') - self.log.info('Rescan Python Hooks') diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py deleted file mode 100644 index d6fbf750ba..0000000000 --- a/openpype/hosts/flame/api/pipeline.py +++ /dev/null @@ -1,179 +0,0 @@ -""" -Basic avalon integration -""" -import os -import contextlib -from pyblish import api as pyblish - -from openpype.lib import Logger -from openpype.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from .lib import ( - set_segment_data_marker, - set_publish_attribute, - maintained_segment_selection, - get_current_sequence, - reset_segment_selection -) -from .. import HOST_DIR - -API_DIR = os.path.join(HOST_DIR, "api") -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - -AVALON_CONTAINERS = "AVALON_CONTAINERS" - -log = Logger.get_logger(__name__) - - -def install(): - pyblish.register_host("flame") - pyblish.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - log.info("OpenPype Flame plug-ins registered ...") - - # register callback for switching publishable - pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) - - log.info("OpenPype Flame host installed ...") - - -def uninstall(): - pyblish.deregister_host("flame") - - log.info("Deregistering Flame plug-ins..") - pyblish.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - - # register callback for switching publishable - pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) - - log.info("OpenPype Flame host uninstalled ...") - - -def containerise(flame_clip_segment, - name, - namespace, - context, - loader=None, - data=None): - - data_imprint = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": str(name), - "namespace": str(namespace), - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - } - - if data: - for k, v in data.items(): - data_imprint[k] = v - - log.debug("_ data_imprint: {}".format(data_imprint)) - - set_segment_data_marker(flame_clip_segment, data_imprint) - - return True - - -def ls(): - """List available containers. - """ - return [] - - -def parse_container(tl_segment, validate=True): - """Return container data from timeline_item's openpype tag. - """ - # TODO: parse_container - pass - - -def update_container(tl_segment, data=None): - """Update container data to input timeline_item's openpype tag. 
- """ - # TODO: update_container - pass - - -def on_pyblish_instance_toggled(instance, old_value, new_value): - """Toggle node passthrough states on instance toggles.""" - - log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( - instance, old_value, new_value)) - - # from openpype.hosts.resolve import ( - # set_publish_attribute - # ) - - # # Whether instances should be passthrough based on new value - # timeline_item = instance.data["item"] - # set_publish_attribute(timeline_item, new_value) - - -def remove_instance(instance): - """Remove instance marker from track item.""" - # TODO: remove_instance - pass - - -def list_instances(): - """List all created instances from current workfile.""" - # TODO: list_instances - pass - - -def imprint(segment, data=None): - """ - Adding openpype data to Flame timeline segment. - - Also including publish attribute into tag. - - Arguments: - segment (flame.PySegment)): flame api object - data (dict): Any data which needst to be imprinted - - Examples: - data = { - 'asset': 'sq020sh0280', - 'family': 'render', - 'subset': 'subsetMain' - } - """ - data = data or {} - - set_segment_data_marker(segment, data) - - # add publish attribute - set_publish_attribute(segment, True) - - -@contextlib.contextmanager -def maintained_selection(): - import flame - from .lib import CTX - - # check if segment is selected - if isinstance(CTX.selection[0], flame.PySegment): - sequence = get_current_sequence(CTX.selection) - - try: - with maintained_segment_selection(sequence) as selected: - yield - finally: - # reset all selected clips - reset_segment_selection(sequence) - # select only original selection of segments - for segment in selected: - segment.selected = True diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py deleted file mode 100644 index 3289187fa0..0000000000 --- a/openpype/hosts/flame/api/plugin.py +++ /dev/null @@ -1,1089 +0,0 @@ -import os -import re -import shutil -from copy import deepcopy -from xml.etree import ElementTree as ET - -import qargparse -from qtpy import QtCore, QtWidgets - -from openpype import style -from openpype.lib import Logger, StringTemplate -from openpype.pipeline import LegacyCreator, LoaderPlugin -from openpype.pipeline.colorspace import get_remapped_colorspace_to_native -from openpype.settings import get_current_project_settings - -from . import constants -from . import lib as flib -from . 
import pipeline as fpipeline - -log = Logger.get_logger(__name__) - - -class CreatorWidget(QtWidgets.QDialog): - - # output items - items = dict() - _results_back = None - - def __init__(self, name, info, ui_inputs, parent=None): - super(CreatorWidget, self).__init__(parent) - - self.setObjectName(name) - - self.setWindowFlags( - QtCore.Qt.Window - | QtCore.Qt.CustomizeWindowHint - | QtCore.Qt.WindowTitleHint - | QtCore.Qt.WindowCloseButtonHint - | QtCore.Qt.WindowStaysOnTopHint - ) - self.setWindowTitle(name or "Pype Creator Input") - self.resize(500, 700) - - # Where inputs and labels are set - self.content_widget = [QtWidgets.QWidget(self)] - top_layout = QtWidgets.QFormLayout(self.content_widget[0]) - top_layout.setObjectName("ContentLayout") - top_layout.addWidget(Spacer(5, self)) - - # first add widget tag line - top_layout.addWidget(QtWidgets.QLabel(info)) - - # main dynamic layout - self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAsNeeded) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOn) - self.scroll_area.setHorizontalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOff) - self.scroll_area.setWidgetResizable(True) - - self.content_widget.append(self.scroll_area) - - scroll_widget = QtWidgets.QWidget(self) - in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) - self.content_layout = [in_scroll_area] - - # add preset data into input widget layout - self.items = self.populate_widgets(ui_inputs) - self.scroll_area.setWidget(scroll_widget) - - # Confirmation buttons - btns_widget = QtWidgets.QWidget(self) - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - - cancel_btn = QtWidgets.QPushButton("Cancel") - btns_layout.addWidget(cancel_btn) - - ok_btn = QtWidgets.QPushButton("Ok") - btns_layout.addWidget(ok_btn) - - # Main layout of the dialog - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(10, 10, 10, 10) - main_layout.setSpacing(0) - - # adding content widget - for w in self.content_widget: - main_layout.addWidget(w) - - main_layout.addWidget(btns_widget) - - ok_btn.clicked.connect(self._on_ok_clicked) - cancel_btn.clicked.connect(self._on_cancel_clicked) - - self.setStyleSheet(style.load_stylesheet()) - - @classmethod - def set_results_back(cls, value): - cls._results_back = value - - @classmethod - def get_results_back(cls): - return cls._results_back - - def _on_ok_clicked(self): - log.debug("ok is clicked: {}".format(self.items)) - results_back = self._values(self.items) - self.set_results_back(results_back) - self.close() - - def _on_cancel_clicked(self): - self.set_results_back(None) - self.close() - - def showEvent(self, event): - self.set_results_back(None) - super(CreatorWidget, self).showEvent(event) - - def _values(self, data, new_data=None): - new_data = new_data or dict() - for k, v in data.items(): - new_data[k] = { - "target": None, - "value": None - } - if v["type"] == "dict": - new_data[k]["target"] = v["target"] - new_data[k]["value"] = self._values(v["value"]) - if v["type"] == "section": - new_data.pop(k) - new_data = self._values(v["value"], new_data) - elif getattr(v["value"], "currentText", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].currentText() - elif getattr(v["value"], "isChecked", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].isChecked() - elif getattr(v["value"], "value", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = 
v["value"].value() - elif getattr(v["value"], "text", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].text() - - return new_data - - def camel_case_split(self, text): - matches = re.finditer( - '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) - return " ".join([str(m.group(0)).capitalize() for m in matches]) - - def create_row(self, layout, type_name, text, **kwargs): - # get type attribute from qwidgets - attr = getattr(QtWidgets, type_name) - - # convert label text to normal capitalized text with spaces - label_text = self.camel_case_split(text) - - # assign the new text to label widget - label = QtWidgets.QLabel(label_text) - label.setObjectName("LineLabel") - - # create attribute name text strip of spaces - attr_name = text.replace(" ", "") - - # create attribute and assign default values - setattr( - self, - attr_name, - attr(parent=self)) - - # assign the created attribute to variable - item = getattr(self, attr_name) - for func, val in kwargs.items(): - if getattr(item, func): - func_attr = getattr(item, func) - func_attr(val) - - # add to layout - layout.addRow(label, item) - - return item - - def populate_widgets(self, data, content_layout=None): - """ - Populate widget from input dict. - - Each plugin has its own set of widget rows defined in dictionary - each row values should have following keys: `type`, `target`, - `label`, `order`, `value` and optionally also `toolTip`. - - Args: - data (dict): widget rows or organized groups defined - by types `dict` or `section` - content_layout (QtWidgets.QFormLayout)[optional]: used when nesting - - Returns: - dict: redefined data dict updated with created widgets - - """ - - content_layout = content_layout or self.content_layout[-1] - # fix order of process by defined order value - ordered_keys = list(data.keys()) - for k, v in data.items(): - try: - # try removing a key from index which should - # be filled with new - ordered_keys.pop(v["order"]) - except IndexError: - pass - # add key into correct order - ordered_keys.insert(v["order"], k) - - # process ordered - for k in ordered_keys: - v = data[k] - tool_tip = v.get("toolTip", "") - if v["type"] == "dict": - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - if v["type"] == "section": - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - 
nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - elif v["type"] == "QLineEdit": - data[k]["value"] = self.create_row( - content_layout, "QLineEdit", v["label"], - setText=v["value"], setToolTip=tool_tip) - elif v["type"] == "QComboBox": - data[k]["value"] = self.create_row( - content_layout, "QComboBox", v["label"], - addItems=v["value"], setToolTip=tool_tip) - elif v["type"] == "QCheckBox": - data[k]["value"] = self.create_row( - content_layout, "QCheckBox", v["label"], - setChecked=v["value"], setToolTip=tool_tip) - elif v["type"] == "QSpinBox": - data[k]["value"] = self.create_row( - content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMinimum=0, - setMaximum=100000, setToolTip=tool_tip) - return data - - -class Spacer(QtWidgets.QWidget): - def __init__(self, height, *args, **kwargs): - super(self.__class__, self).__init__(*args, **kwargs) - - self.setFixedHeight(height) - - real_spacer = QtWidgets.QWidget(self) - real_spacer.setObjectName("Spacer") - real_spacer.setFixedHeight(height) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(real_spacer) - - self.setLayout(layout) - - -class Creator(LegacyCreator): - """Creator class wrapper - """ - clip_color = constants.COLOR_MAP["purple"] - rename_index = None - - def __init__(self, *args, **kwargs): - super(Creator, self).__init__(*args, **kwargs) - self.presets = get_current_project_settings()[ - "flame"]["create"].get(self.__class__.__name__, {}) - - # adding basic current context flame objects - self.project = flib.get_current_project() - self.sequence = flib.get_current_sequence(flib.CTX.selection) - - if (self.options or {}).get("useSelection"): - self.selected = flib.get_sequence_segments(self.sequence, True) - else: - self.selected = flib.get_sequence_segments(self.sequence) - - def create_widget(self, *args, **kwargs): - widget = CreatorWidget(*args, **kwargs) - widget.exec_() - return widget.get_results_back() - - -class PublishableClip: - """ - Convert a segment to publishable instance - - Args: - segment (flame.PySegment): flame api object - kwargs (optional): additional data needed for rename=True (presets) - - Returns: - flame.PySegment: flame api object - """ - vertical_clip_match = {} - marker_data = {} - types = { - "shot": "shot", - "folder": "folder", - "episode": "episode", - "sequence": "sequence", - "track": "sequence", - } - - # parents search pattern - parents_search_pattern = r"\{([a-z]*?)\}" - - # default templates for non-ui use - rename_default = False - hierarchy_default = "{_folder_}/{_sequence_}/{_track_}" - clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" - subset_name_default = "[ track name ]" - review_track_default = "[ none ]" - subset_family_default = "plate" - count_from_default = 10 - count_steps_default = 10 - vertical_sync_default = False - driving_layer_default = "" - index_from_segment_default = False - use_shot_name_default = False - include_handles_default = False - retimed_handles_default = True - retimed_framerange_default = True - - def __init__(self, segment, **kwargs): - self.rename_index = kwargs["rename_index"] - self.family = kwargs["family"] - self.log = kwargs["log"] - - # get main parent objects - self.current_segment = segment - sequence_name = flib.get_current_sequence([segment]).name.get_value() - self.sequence_name = 
str(sequence_name).replace(" ", "_") - - self.clip_data = flib.get_segment_attributes(segment) - # segment (clip) main attributes - self.cs_name = self.clip_data["segment_name"] - self.cs_index = int(self.clip_data["segment"]) - self.shot_name = self.clip_data["shot_name"] - - # get track name and index - self.track_index = int(self.clip_data["track"]) - track_name = self.clip_data["track_name"] - self.track_name = str(track_name).replace(" ", "_").replace( - "*", "noname{}".format(self.track_index)) - - # adding tag.family into tag - if kwargs.get("avalon"): - self.marker_data.update(kwargs["avalon"]) - - # add publish attribute to marker data - self.marker_data.update({"publish": True}) - - # adding ui inputs if any - self.ui_inputs = kwargs.get("ui_inputs", {}) - - self.log.info("Inside of plugin: {}".format( - self.marker_data - )) - # populate default data before we get other attributes - self._populate_segment_default_data() - - # use all populated default data to create all important attributes - self._populate_attributes() - - # create parents with correct types - self._create_parents() - - def convert(self): - - # solve segment data and add them to marker data - self._convert_to_marker_data() - - # if track name is in review track name and also if driving track name - # is not in review track name: skip tag creation - if (self.track_name in self.review_layer) and ( - self.driving_layer not in self.review_layer): - return - - # deal with clip name - new_name = self.marker_data.pop("newClipName") - - if self.rename and not self.use_shot_name: - # rename segment - self.current_segment.name = str(new_name) - self.marker_data["asset"] = str(new_name) - elif self.use_shot_name: - self.marker_data["asset"] = self.shot_name - self.marker_data["hierarchyData"]["shot"] = self.shot_name - else: - self.marker_data["asset"] = self.cs_name - self.marker_data["hierarchyData"]["shot"] = self.cs_name - - if self.marker_data["heroTrack"] and self.review_layer: - self.marker_data["reviewTrack"] = self.review_layer - else: - self.marker_data["reviewTrack"] = None - - # create pype tag on track_item and add data - fpipeline.imprint(self.current_segment, self.marker_data) - - return self.current_segment - - def _populate_segment_default_data(self): - """ Populate default formatting data from segment. """ - - self.current_segment_default_data = { - "_folder_": "shots", - "_sequence_": self.sequence_name, - "_track_": self.track_name, - "_clip_": self.cs_name, - "_trackIndex_": self.track_index, - "_clipIndex_": self.cs_index - } - - def _populate_attributes(self): - """ Populate main object attributes. 
""" - # segment frame range and parent track name for vertical sync check - self.clip_in = int(self.clip_data["record_in"]) - self.clip_out = int(self.clip_data["record_out"]) - - # define ui inputs if non gui mode was used - self.shot_num = self.cs_index - self.log.debug( - "____ self.shot_num: {}".format(self.shot_num)) - - # ui_inputs data or default values if gui was not used - self.rename = self.ui_inputs.get( - "clipRename", {}).get("value") or self.rename_default - self.use_shot_name = self.ui_inputs.get( - "useShotName", {}).get("value") or self.use_shot_name_default - self.clip_name = self.ui_inputs.get( - "clipName", {}).get("value") or self.clip_name_default - self.hierarchy = self.ui_inputs.get( - "hierarchy", {}).get("value") or self.hierarchy_default - self.hierarchy_data = self.ui_inputs.get( - "hierarchyData", {}).get("value") or \ - self.current_segment_default_data.copy() - self.index_from_segment = self.ui_inputs.get( - "segmentIndex", {}).get("value") or self.index_from_segment_default - self.count_from = self.ui_inputs.get( - "countFrom", {}).get("value") or self.count_from_default - self.count_steps = self.ui_inputs.get( - "countSteps", {}).get("value") or self.count_steps_default - self.subset_name = self.ui_inputs.get( - "subsetName", {}).get("value") or self.subset_name_default - self.subset_family = self.ui_inputs.get( - "subsetFamily", {}).get("value") or self.subset_family_default - self.vertical_sync = self.ui_inputs.get( - "vSyncOn", {}).get("value") or self.vertical_sync_default - self.driving_layer = self.ui_inputs.get( - "vSyncTrack", {}).get("value") or self.driving_layer_default - self.review_track = self.ui_inputs.get( - "reviewTrack", {}).get("value") or self.review_track_default - self.audio = self.ui_inputs.get( - "audio", {}).get("value") or False - self.include_handles = self.ui_inputs.get( - "includeHandles", {}).get("value") or self.include_handles_default - self.retimed_handles = ( - self.ui_inputs.get("retimedHandles", {}).get("value") - or self.retimed_handles_default - ) - self.retimed_framerange = ( - self.ui_inputs.get("retimedFramerange", {}).get("value") - or self.retimed_framerange_default - ) - - # build subset name from layer name - if self.subset_name == "[ track name ]": - self.subset_name = self.track_name - - # create subset for publishing - self.subset = self.subset_family + self.subset_name.capitalize() - - def _replace_hash_to_expression(self, name, text): - """ Replace hash with number in correct padding. """ - _spl = text.split("#") - _len = (len(_spl) - 1) - _repl = "{{{0}:0>{1}}}".format(name, _len) - return text.replace(("#" * _len), _repl) - - def _convert_to_marker_data(self): - """ Convert internal data to marker data. 
- - Populating the marker data into internal variable self.marker_data - """ - # define vertical sync attributes - hero_track = True - self.review_layer = "" - if self.vertical_sync and self.track_name not in self.driving_layer: - # if it is not then define vertical sync as None - hero_track = False - - # increasing steps by index of rename iteration - if not self.index_from_segment: - self.count_steps *= self.rename_index - - hierarchy_formatting_data = {} - hierarchy_data = deepcopy(self.hierarchy_data) - _data = self.current_segment_default_data.copy() - if self.ui_inputs: - # adding tag metadata from ui - for _k, _v in self.ui_inputs.items(): - if _v["target"] == "tag": - self.marker_data[_k] = _v["value"] - - # driving layer is set as positive match - if hero_track or self.vertical_sync: - # mark review layer - if self.review_track and ( - self.review_track not in self.review_track_default): - # if review layer is defined and not the same as default - self.review_layer = self.review_track - - # shot num calculate - if self.index_from_segment: - # use clip index from timeline - self.shot_num = self.count_steps * self.cs_index - else: - if self.rename_index == 0: - self.shot_num = self.count_from - else: - self.shot_num = self.count_from + self.count_steps - - # clip name sequence number - _data.update({"shot": self.shot_num}) - - # solve # in test to pythonic expression - for _k, _v in hierarchy_data.items(): - if "#" not in _v["value"]: - continue - hierarchy_data[ - _k]["value"] = self._replace_hash_to_expression( - _k, _v["value"]) - - # fill up pythonic expresisons in hierarchy data - for k, _v in hierarchy_data.items(): - hierarchy_formatting_data[k] = _v["value"].format(**_data) - else: - # if no gui mode then just pass default data - hierarchy_formatting_data = hierarchy_data - - tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formatting_data - ) - - tag_hierarchy_data.update({"heroTrack": True}) - if hero_track and self.vertical_sync: - self.vertical_clip_match.update({ - (self.clip_in, self.clip_out): tag_hierarchy_data - }) - - if not hero_track and self.vertical_sync: - # driving layer is set as negative match - for (_in, _out), hero_data in self.vertical_clip_match.items(): - """ - Since only one instance of hero clip is expected in - `self.vertical_clip_match`, this will loop only once - until none hero clip will be matched with hero clip. - - `tag_hierarchy_data` will be set only once for every - clip which is not hero clip. - """ - _hero_data = deepcopy(hero_data) - _hero_data.update({"heroTrack": False}) - if _in <= self.clip_in and _out >= self.clip_out: - data_subset = hero_data["subset"] - # add track index in case duplicity of names in hero data - if self.subset in data_subset: - _hero_data["subset"] = self.subset + str( - self.track_index) - # in case track name and subset name is the same then add - if self.subset_name == self.track_name: - _hero_data["subset"] = self.subset - # assign data to return hierarchy data to tag - tag_hierarchy_data = _hero_data - break - - # add data to return data dict - self.marker_data.update(tag_hierarchy_data) - - def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): - """ Solve marker data from hierarchy data and templates. 
""" - # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) - clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) - - # remove shot from hierarchy data: is not needed anymore - hierarchy_formatting_data.pop("shot") - - return { - "newClipName": clip_name_filled, - "hierarchy": hierarchy_filled, - "parents": self.parents, - "hierarchyData": hierarchy_formatting_data, - "subset": self.subset, - "family": self.subset_family, - "families": [self.family] - } - - def _convert_to_entity(self, type, template): - """ Converting input key to key with type. """ - # convert to entity type - entity_type = self.types.get(type, None) - - assert entity_type, "Missing entity type for `{}`".format( - type - ) - - # first collect formatting data to use for formatting template - formatting_data = {} - for _k, _v in self.hierarchy_data.items(): - value = _v["value"].format( - **self.current_segment_default_data) - formatting_data[_k] = value - - return { - "entity_type": entity_type, - "entity_name": template.format( - **formatting_data - ) - } - - def _create_parents(self): - """ Create parents and return it in list. """ - self.parents = [] - - pattern = re.compile(self.parents_search_pattern) - - par_split = [(pattern.findall(t).pop(), t) - for t in self.hierarchy.split("/")] - - for type, template in par_split: - parent = self._convert_to_entity(type, template) - self.parents.append(parent) - - -# Publishing plugin functions - -# Loader plugin functions -class ClipLoader(LoaderPlugin): - """A basic clip loader for Flame - - This will implement the basic behavior for a loader to inherit from that - will containerize the reference and will implement the `remove` and - `update` logic. - - """ - log = log - - options = [ - qargparse.Boolean( - "handles", - label="Set handles", - default=0, - help="Also set handles to clip as In/Out marks" - ) - ] - - _mapping = None - _host_settings = None - - def apply_settings(cls, project_settings, system_settings): - - plugin_type_settings = ( - project_settings - .get("flame", {}) - .get("load", {}) - ) - - if not plugin_type_settings: - return - - plugin_name = cls.__name__ - - plugin_settings = None - # Look for plugin settings in host specific settings - if plugin_name in plugin_type_settings: - plugin_settings = plugin_type_settings[plugin_name] - - if not plugin_settings: - return - - print(">>> We have preset for {}".format(plugin_name)) - for option, value in plugin_settings.items(): - if option == "enabled" and value is False: - print(" - is disabled by preset") - elif option == "representations": - continue - else: - print(" - setting `{}`: `{}`".format(option, value)) - setattr(cls, option, value) - - def get_colorspace(self, context): - """Get colorspace name - - Look either to version data or representation data. - - Args: - context (dict): version context data - - Returns: - str: colorspace name or None - """ - version = context['version'] - version_data = version.get("data", {}) - colorspace = version_data.get( - "colorspace", None - ) - - if ( - not colorspace - or colorspace == "Unknown" - ): - colorspace = context["representation"]["data"].get( - "colorspace", None) - - return colorspace - - @classmethod - def get_native_colorspace(cls, input_colorspace): - """Return native colorspace name. 
- - Args: - input_colorspace (str | None): colorspace name - - Returns: - str: native colorspace name defined in mapping or None - """ - # TODO: rewrite to support only pipeline's remapping - if not cls._host_settings: - cls._host_settings = get_current_project_settings()["flame"] - - # [Deprecated] way of remapping - if not cls._mapping: - mapping = ( - cls._host_settings["imageio"]["profilesMapping"]["inputs"]) - cls._mapping = { - input["ocioName"]: input["flameName"] - for input in mapping - } - - native_name = cls._mapping.get(input_colorspace) - - if not native_name: - native_name = get_remapped_colorspace_to_native( - input_colorspace, "flame", cls._host_settings["imageio"]) - - return native_name - - -class OpenClipSolver(flib.MediaInfoFile): - create_new_clip = False - - log = log - - def __init__(self, openclip_file_path, feed_data, logger=None): - self.out_file = openclip_file_path - - # replace log if any - if logger: - self.log = logger - - # new feed variables: - feed_path = feed_data.pop("path") - - # initialize parent class - super(OpenClipSolver, self).__init__( - feed_path, - logger=logger - ) - - # get other metadata - self.feed_version_name = feed_data["version"] - self.feed_colorspace = feed_data.get("colorspace") - self.log.debug("feed_version_name: {}".format(self.feed_version_name)) - - # layer rename variables - self.layer_rename_template = feed_data["layer_rename_template"] - self.layer_rename_patterns = feed_data["layer_rename_patterns"] - self.context_data = feed_data["context_data"] - - # derivate other feed variables - self.feed_basename = os.path.basename(feed_path) - self.feed_dir = os.path.dirname(feed_path) - self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower() - self.log.debug("feed_ext: {}".format(self.feed_ext)) - self.log.debug("out_file: {}".format(self.out_file)) - if not self._is_valid_tmp_file(self.out_file): - self.create_new_clip = True - - def _is_valid_tmp_file(self, file): - # check if file exists - if os.path.isfile(file): - # test also if file is not empty - with open(file) as f: - lines = f.readlines() - - if len(lines) > 2: - return True - - # file is probably corrupted - os.remove(file) - return False - - def make(self): - - if self.create_new_clip: - # New openClip - self._create_new_open_clip() - else: - self._update_open_clip() - - def _clear_handler(self, xml_object): - for handler in xml_object.findall("./handler"): - self.log.info("Handler found") - xml_object.remove(handler) - - def _create_new_open_clip(self): - self.log.info("Building new openClip") - - for tmp_xml_track in self.clip_data.iter("track"): - # solve track (layer) name - self._rename_track_name(tmp_xml_track) - - tmp_xml_feeds = tmp_xml_track.find('feeds') - tmp_xml_feeds.set('currentVersion', self.feed_version_name) - - for tmp_feed in tmp_xml_track.iter("feed"): - tmp_feed.set('vuid', self.feed_version_name) - - # add colorspace if any is set - if self.feed_colorspace: - self._add_colorspace(tmp_feed, self.feed_colorspace) - - self._clear_handler(tmp_feed) - - tmp_xml_versions_obj = self.clip_data.find('versions') - tmp_xml_versions_obj.set('currentVersion', self.feed_version_name) - for xml_new_version in tmp_xml_versions_obj: - xml_new_version.set('uid', self.feed_version_name) - xml_new_version.set('type', 'version') - - self._clear_handler(self.clip_data) - self.log.info("Adding feed version: {}".format(self.feed_basename)) - - self.write_clip_data_to_file(self.out_file, self.clip_data) - - def _get_xml_track_obj_by_uid(self, xml_data, uid): - # 
loop all tracks of input xml data - for xml_track in xml_data.iter("track"): - track_uid = xml_track.get("uid") - self.log.debug( - ">> track_uid:uid: {}:{}".format(track_uid, uid)) - - # get matching uids - if uid == track_uid: - return xml_track - - def _rename_track_name(self, xml_track_data): - layer_uid = xml_track_data.get("uid") - name_obj = xml_track_data.find("name") - layer_name = name_obj.text - - if ( - self.layer_rename_patterns - and not any( - re.search(lp_.lower(), layer_name.lower()) - for lp_ in self.layer_rename_patterns - ) - ): - return - - formatting_data = self._update_formatting_data( - layerName=layer_name, - layerUID=layer_uid - ) - name_obj.text = StringTemplate( - self.layer_rename_template - ).format(formatting_data) - - def _update_formatting_data(self, **kwargs): - """ Updating formatting data for layer rename - - Attributes: - key=value (optional): will be included to formatting data - as {key: value} - Returns: - dict: anatomy context data for formatting - """ - self.log.debug(">> self.clip_data: {}".format(self.clip_data)) - clip_name_obj = self.clip_data.find("name") - data = { - "originalBasename": clip_name_obj.text - } - # include version context data - data.update(self.context_data) - # include input kwargs data - data.update(kwargs) - return data - - def _update_open_clip(self): - self.log.info("Updating openClip ..") - - out_xml = ET.parse(self.out_file) - out_xml = out_xml.getroot() - - self.log.debug(">> out_xml: {}".format(out_xml)) - # loop tmp tracks - updated_any = False - for tmp_xml_track in self.clip_data.iter("track"): - # solve track (layer) name - self._rename_track_name(tmp_xml_track) - - # get tmp track uid - tmp_track_uid = tmp_xml_track.get("uid") - self.log.debug(">> tmp_track_uid: {}".format(tmp_track_uid)) - - # get out data track by uid - out_track_element = self._get_xml_track_obj_by_uid( - out_xml, tmp_track_uid) - self.log.debug( - ">> out_track_element: {}".format(out_track_element)) - - # loop tmp feeds - for tmp_xml_feed in tmp_xml_track.iter("feed"): - new_path_obj = tmp_xml_feed.find( - "spans/span/path") - new_path = new_path_obj.text - - # check if feed path already exists in track's feeds - if ( - out_track_element is not None - and self._feed_exists(out_track_element, new_path) - ): - continue - - # rename versions on feeds - tmp_xml_feed.set('vuid', self.feed_version_name) - self._clear_handler(tmp_xml_feed) - - # update fps from MediaInfoFile class - if self.fps is not None: - tmp_feed_fps_obj = tmp_xml_feed.find( - "startTimecode/rate") - tmp_feed_fps_obj.text = str(self.fps) - - # update start_frame from MediaInfoFile class - if self.start_frame is not None: - tmp_feed_nb_ticks_obj = tmp_xml_feed.find( - "startTimecode/nbTicks") - tmp_feed_nb_ticks_obj.text = str(self.start_frame) - - # update drop_mode from MediaInfoFile class - if self.drop_mode is not None: - tmp_feed_drop_mode_obj = tmp_xml_feed.find( - "startTimecode/dropMode") - tmp_feed_drop_mode_obj.text = str(self.drop_mode) - - # add colorspace if any is set - if self.feed_colorspace is not None: - self._add_colorspace(tmp_xml_feed, self.feed_colorspace) - - # then append/update feed to correct track in output - if out_track_element: - self.log.debug("updating track element ..") - # update already present track - out_feeds = out_track_element.find('feeds') - out_feeds.set('currentVersion', self.feed_version_name) - out_feeds.append(tmp_xml_feed) - - self.log.info( - "Appending new feed: {}".format( - self.feed_version_name)) - else: - 
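# Editor's note: this branch grafts the whole tmp <track> element into the
# output .clip; a rough sketch of the resulting XML (version name invented):
#
#     <tracks>
#       <track uid="...">
#         <feeds currentVersion="v002">
#           <feed vuid="v002">...</feed>
#         </feeds>
#       </track>
#     </tracks>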
self.log.debug("adding new track element ..") - # create new track as it doesnt exists yet - # set current version to feeds on tmp - tmp_xml_feeds = tmp_xml_track.find('feeds') - tmp_xml_feeds.set('currentVersion', self.feed_version_name) - out_tracks = out_xml.find("tracks") - out_tracks.append(tmp_xml_track) - - updated_any = True - - if updated_any: - # Append vUID to versions - out_xml_versions_obj = out_xml.find('versions') - out_xml_versions_obj.set( - 'currentVersion', self.feed_version_name) - new_version_obj = ET.Element( - "version", {"type": "version", "uid": self.feed_version_name}) - out_xml_versions_obj.insert(0, new_version_obj) - - self._clear_handler(out_xml) - - # fist create backup - self._create_openclip_backup_file(self.out_file) - - self.log.info("Adding feed version: {}".format( - self.feed_version_name)) - - self.write_clip_data_to_file(self.out_file, out_xml) - - self.log.debug("OpenClip Updated: {}".format(self.out_file)) - - def _feed_exists(self, xml_data, path): - # loop all available feed paths and check if - # the path is not already in file - for src_path in xml_data.iter('path'): - if path == src_path.text: - self.log.warning( - "Not appending file as it already is in .clip file") - return True - - def _create_openclip_backup_file(self, file): - bck_file = "{}.bak".format(file) - # if backup does not exist - if not os.path.isfile(bck_file): - shutil.copy2(file, bck_file) - else: - # in case it exists and is already multiplied - created = False - for _i in range(1, 99): - bck_file = "{name}.bak.{idx:0>2}".format( - name=file, - idx=_i) - # create numbered backup file - if not os.path.isfile(bck_file): - shutil.copy2(file, bck_file) - created = True - break - # in case numbered does not exists - if not created: - bck_file = "{}.bak.last".format(file) - shutil.copy2(file, bck_file) - - def _add_colorspace(self, feed_obj, profile_name): - feed_storage_obj = feed_obj.find("storageFormat") - feed_clr_obj = feed_storage_obj.find("colourSpace") - if feed_clr_obj is not None: - feed_clr_obj = ET.Element( - "colourSpace", {"type": "string"}) - feed_clr_obj.text = profile_name - feed_storage_obj.append(feed_clr_obj) diff --git a/openpype/hosts/flame/api/utils.py b/openpype/hosts/flame/api/utils.py deleted file mode 100644 index 80a5c47e89..0000000000 --- a/openpype/hosts/flame/api/utils.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -Flame utils for syncing scripts -""" - -import os -import shutil -from openpype.lib import Logger -log = Logger.get_logger(__name__) - - -def _sync_utility_scripts(env=None): - """ Synchronizing basic utlility scripts for flame. - - To be able to run start OpenPype within Flame we have to copy - all utility_scripts and additional FLAME_SCRIPT_DIR into - `/opt/Autodesk/shared/python`. This will be always synchronizing those - folders. - """ - from .. 
diff --git a/openpype/hosts/flame/api/utils.py b/openpype/hosts/flame/api/utils.py
deleted file mode 100644
index 80a5c47e89..0000000000
--- a/openpype/hosts/flame/api/utils.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
-Flame utils for syncing scripts
-"""
-
-import os
-import shutil
-from openpype.lib import Logger
-log = Logger.get_logger(__name__)
-
-
-def _sync_utility_scripts(env=None):
-    """ Synchronize basic utility scripts for Flame.
-
-    To be able to start OpenPype within Flame we have to copy
-    all utility_scripts and any additional FLAME_SCRIPT_DIR into
-    `/opt/Autodesk/shared/python`. These folders are always kept in sync.
-    """
-    from .. import HOST_DIR
-
-    env = env or os.environ
-
-    # initiate inputs
-    scripts = {}
-    fsd_env = env.get("FLAME_SCRIPT_DIRS", "")
-    flame_shared_dir = "/opt/Autodesk/shared/python"
-
-    fsd_paths = [os.path.join(
-        HOST_DIR,
-        "api",
-        "utility_scripts"
-    )]
-
-    # collect script dirs
-    log.info("FLAME_SCRIPT_DIRS: `{fsd_env}`".format(**locals()))
-    log.info("fsd_paths: `{fsd_paths}`".format(**locals()))
-
-    # add application environment setting for FLAME_SCRIPT_DIR
-    # to script path search
-    for _dirpath in fsd_env.split(os.pathsep):
-        if not os.path.isdir(_dirpath):
-            log.warning("Path is not a valid dir: `{_dirpath}`".format(
-                **locals()))
-            continue
-        fsd_paths.append(_dirpath)
-
-    # collect scripts from dirs
-    for path in fsd_paths:
-        scripts.update({path: os.listdir(path)})
-
-    remove_black_list = []
-    for _k, s_list in scripts.items():
-        remove_black_list += s_list
-
-    log.info("remove_black_list: `{remove_black_list}`".format(**locals()))
-    log.info("Additional Flame script paths: `{fsd_paths}`".format(**locals()))
-    log.info("Flame Scripts: `{scripts}`".format(**locals()))
-
-    # remove previously synced scripts from the shared folder
-    if next(iter(os.listdir(flame_shared_dir)), None):
-        for _itm in os.listdir(flame_shared_dir):
-            skip = False
-
-            # skip all scripts and folders which are not maintained
-            if _itm not in remove_black_list:
-                skip = True
-
-            # do not skip if pyc in extension
-            if (
-                not os.path.isdir(os.path.join(flame_shared_dir, _itm))
-                and "pyc" in os.path.splitext(_itm)[-1]
-            ):
-                skip = False
-
-            # continue if skip is true
-            if skip:
-                continue
-
-            path = os.path.join(flame_shared_dir, _itm)
-            log.info("Removing `{path}`...".format(**locals()))
-
-            try:
-                if os.path.isdir(path):
-                    shutil.rmtree(path, onerror=None)
-                else:
-                    os.remove(path)
-            except PermissionError as msg:
-                log.warning(
-                    "Not able to remove: `{}`, Problem with: `{}`".format(
-                        path,
-                        msg
-                    )
-                )
-
-    # copy scripts into Flame's shared scripts dir
-    for dirpath, scriptlist in scripts.items():
-        # directory and scripts list
-        for _script in scriptlist:
-            # script in script list
-            src = os.path.join(dirpath, _script)
-            dst = os.path.join(flame_shared_dir, _script)
-            log.info("Copying `{src}` to `{dst}`...".format(**locals()))
-
-            try:
-                if os.path.isdir(src):
-                    shutil.copytree(
-                        src, dst, symlinks=False,
-                        ignore=None, ignore_dangling_symlinks=False
-                    )
-                else:
-                    shutil.copy2(src, dst)
-            except (PermissionError, FileExistsError) as msg:
-                log.warning(
-                    "Not able to copy to: `{}`, Problem with: `{}`".format(
-                        dst,
-                        msg
-                    )
-                )
-
-
-def setup(env=None):
-    """ Wrapper installer started from
-    `flame/hooks/pre_flame_setup.py`
-    """
-    env = env or os.environ
-
-    # synchronize flame utility scripts
-    _sync_utility_scripts(env)
-
-    log.info("Flame OpenPype wrapper has been installed")
-
-
-def get_flame_version():
-    import flame
-
-    return {
-        "full": flame.get_version(),
-        "major": flame.get_version_major(),
-        "minor": flame.get_version_minor(),
-        "patch": flame.get_version_patch()
-    }
-
-
-def get_flame_install_root():
-    return "/opt/Autodesk"
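The sync above is a remove-then-copy mirror: everything previously synced (the blacklist) is deleted from `/opt/Autodesk/shared/python`, then fresh copies are made. A condensed sketch of the copy step under the same rules (`sync_entry` is an illustrative helper, not a function from the codebase):

```python
import os
import shutil


def sync_entry(src_dir, shared_dir, name):
    """Copy one script or script folder into the shared dir.

    Directories are copied as whole trees, plain files with their
    metadata (copy2), matching the branches in the loop above. Any
    stale destination is removed first so the result mirrors the
    source.
    """
    src = os.path.join(src_dir, name)
    dst = os.path.join(shared_dir, name)
    if os.path.isdir(dst):
        shutil.rmtree(dst)
    elif os.path.exists(dst):
        os.remove(dst)
    if os.path.isdir(src):
        shutil.copytree(src, dst, symlinks=False)
    else:
        shutil.copy2(src, dst)
```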
diff --git a/openpype/hosts/flame/api/workio.py b/openpype/hosts/flame/api/workio.py
deleted file mode 100644
index e49321c75a..0000000000
--- a/openpype/hosts/flame/api/workio.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""Host API required Work Files tool"""
-
-import os
-from openpype.lib import Logger
-# from .. import (
-#     get_project_manager,
-#     get_current_project
-# )
-
-
-log = Logger.get_logger(__name__)
-
-exported_project_ext = ".otoc"
-
-
-def file_extensions():
-    return [exported_project_ext]
-
-
-def has_unsaved_changes():
-    pass
-
-
-def save_file(filepath):
-    pass
-
-
-def open_file(filepath):
-    pass
-
-
-def current_file():
-    pass
-
-
-def work_root(session):
-    return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
diff --git a/openpype/hosts/flame/plugins/create/create_shot_clip.py b/openpype/hosts/flame/plugins/create/create_shot_clip.py
deleted file mode 100644
index b01354c313..0000000000
--- a/openpype/hosts/flame/plugins/create/create_shot_clip.py
+++ /dev/null
@@ -1,307 +0,0 @@
-from copy import deepcopy
-import openpype.hosts.flame.api as opfapi
-
-
-class CreateShotClip(opfapi.Creator):
-    """Publishable clip"""
-
-    label = "Create Publishable Clip"
-    family = "clip"
-    icon = "film"
-    defaults = ["Main"]
-
-    presets = None
-
-    def process(self):
-        # Create a copy of object attributes that are modified
-        # during `process`
-        presets = deepcopy(self.presets)
-        gui_inputs = self.get_gui_inputs()
-
-        # get key pairs from presets and match them against ui inputs
-        for k, v in gui_inputs.items():
-            if v["type"] in ("dict", "section"):
-                # nested dictionary (only one level allowed
-                # for sections and dict)
-                for _k, _v in v["value"].items():
-                    if presets.get(_k) is not None:
-                        gui_inputs[k][
-                            "value"][_k]["value"] = presets[_k]
-
-            if presets.get(k) is not None:
-                gui_inputs[k]["value"] = presets[k]
-
-        # open widget for plugin inputs
-        results_back = self.create_widget(
-            "Pype publish attributes creator",
-            "Define sequential rename and fill hierarchy data.",
-            gui_inputs
-        )
-
-        if len(self.selected) < 1:
-            return
-
-        if not results_back:
-            print("Operation aborted")
-            return
-
-        # get ui output for track name for vertical sync
-        v_sync_track = results_back["vSyncTrack"]["value"]
-
-        # sort selected segments: hero (vSync) track items first
-        sorted_selected_segments = []
-        unsorted_selected_segments = []
-        for _segment in self.selected:
-            if _segment.parent.name.get_value() in v_sync_track:
-                sorted_selected_segments.append(_segment)
-            else:
-                unsorted_selected_segments.append(_segment)
-
-        sorted_selected_segments.extend(unsorted_selected_segments)
-
-        kwargs = {
-            "log": self.log,
-            "ui_inputs": results_back,
-            "avalon": self.data,
-            "family": self.data["family"]
-        }
-
-        for i, segment in enumerate(sorted_selected_segments):
-            kwargs["rename_index"] = i
-            # convert the segment to a publishable clip
-            opfapi.PublishableClip(segment, **kwargs).convert()
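The preset overlay in `process` above supports exactly one level of nesting: `section`/`dict` entries keep their children under `"value"`, and a preset key may match either a top-level input or one of those children. A standalone sketch of the same merge rule (`apply_presets` is an illustrative name, not part of the codebase):

```python
from copy import deepcopy


def apply_presets(gui_inputs, presets):
    """Overlay flat ``presets`` onto one-level-nested gui inputs.

    Same rule as the loop above: "dict"/"section" entries carry their
    children under ["value"], and only one nesting level is supported.
    """
    merged = deepcopy(gui_inputs)
    for key, item in merged.items():
        if item["type"] in ("dict", "section"):
            for child_key, child in item["value"].items():
                if presets.get(child_key) is not None:
                    child["value"] = presets[child_key]
        if presets.get(key) is not None:
            item["value"] = presets[key]
    return merged


# e.g. a {"countFrom": 1001} preset would end up in
# merged["renameHierarchy"]["value"]["countFrom"]["value"]
```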
- "label": "Clip Name Template", - "target": "ui", - "toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa - "order": 3}, - "segmentIndex": { - "value": True, - "type": "QCheckBox", - "label": "Segment index", - "target": "ui", - "toolTip": "Take number from segment index", # noqa - "order": 4}, - "countFrom": { - "value": 10, - "type": "QSpinBox", - "label": "Count sequence from", - "target": "ui", - "toolTip": "Set when the sequence number stafrom", # noqa - "order": 5}, - "countSteps": { - "value": 10, - "type": "QSpinBox", - "label": "Stepping number", - "target": "ui", - "toolTip": "What number is adding every new step", # noqa - "order": 6}, - } - }, - "hierarchyData": { - "type": "dict", - "label": "Shot Template Keywords", - "target": "tag", - "order": 1, - "value": { - "folder": { - "value": "shots", - "type": "QLineEdit", - "label": "{folder}", - "target": "tag", - "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 0}, - "episode": { - "value": "ep01", - "type": "QLineEdit", - "label": "{episode}", - "target": "tag", - "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 1}, - "sequence": { - "value": "sq01", - "type": "QLineEdit", - "label": "{sequence}", - "target": "tag", - "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 2}, - "track": { - "value": "{_track_}", - "type": "QLineEdit", - "label": "{track}", - "target": "tag", - "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 3}, - "shot": { - "value": "sh###", - "type": "QLineEdit", - "label": "{shot}", - "target": "tag", - "toolTip": "Name of shot. `#` is converted to paded number. 
-    def _get_video_track_names(self, sequence):
-        track_names = []
-        for ver in sequence.versions:
-            for track in ver.tracks:
-                track_names.append(track.name.get_value())
-
-        return track_names
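For vertical sync, `process` above orders the selection so segments on the hero (vSyncTrack) track are converted first and can drive the attributes of the clips stacked above or below them. The same ordering can be sketched in isolation; `track_name_of` stands in for reading `segment.parent.name.get_value()` on a real Flame segment:

```python
def sort_hero_first(segments, hero_track_name, track_name_of):
    """Order segments so those on the hero (vSync) track come first."""
    hero = [s for s in segments if track_name_of(s) == hero_track_name]
    rest = [s for s in segments if track_name_of(s) != hero_track_name]
    return hero + rest


# sorted() with a boolean key is a stable one-liner alternative:
# sorted(segments, key=lambda s: track_name_of(s) != hero_track_name)
```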
diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py
deleted file mode 100644
index ca4eab0f63..0000000000
--- a/openpype/hosts/flame/plugins/load/load_clip.py
+++ /dev/null
@@ -1,275 +0,0 @@
-from copy import deepcopy
-import os
-import flame
-from pprint import pformat
-import openpype.hosts.flame.api as opfapi
-from openpype.lib import StringTemplate
-from openpype.lib.transcoding import (
-    VIDEO_EXTENSIONS,
-    IMAGE_EXTENSIONS
-)
-
-
-class LoadClip(opfapi.ClipLoader):
-    """Load a subset to the timeline as a clip
-
-    Places the clip on the timeline at the original timings of its
-    asset, collected while conforming the project.
-    """
-
-    families = ["render2d", "source", "plate", "render", "review"]
-    representations = ["*"]
-    extensions = set(
-        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
-    )
-
-    label = "Load as clip"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    # settings
-    reel_group_name = "OpenPype_Reels"
-    reel_name = "Loaded"
-    clip_name_template = "{asset}_{subset}<_{output}>"
-
-    """ Anatomy keys from version context data and dynamically added:
-    - {layerName} - original layer name token
-    - {layerUID} - original layer UID token
-    - {originalBasename} - original clip name taken from file
-    """
-    layer_rename_template = "{asset}_{subset}<_{output}>"
-    layer_rename_patterns = []
-
-    def load(self, context, name, namespace, options):
-
-        # get flame objects
-        fproject = flame.project.current_project
-        self.fpd = fproject.current_workspace.desktop
-
-        # load clip to timeline and get main variables
-        version = context['version']
-        version_data = version.get("data", {})
-        version_name = version.get("name", None)
-        colorspace = self.get_colorspace(context)
-
-        # in case output is not in context, fall back to the
-        # representation key
-        if not context["representation"]["context"].get("output"):
-            self.clip_name_template = self.clip_name_template.replace(
-                "output", "representation")
-            self.layer_rename_template = self.layer_rename_template.replace(
-                "output", "representation")
-
-        formatting_data = deepcopy(context["representation"]["context"])
-        clip_name = StringTemplate(self.clip_name_template).format(
-            formatting_data)
-
-        # convert colorspace with ocio to flame mapping
-        # in imageio flame section
-        colorspace = self.get_native_colorspace(colorspace)
-        self.log.info("Loading with colorspace: `{}`".format(colorspace))
-
-        # create workfile path
-        workfile_dir = os.environ["AVALON_WORKDIR"]
-        openclip_dir = os.path.join(
-            workfile_dir, clip_name
-        )
-        openclip_path = os.path.join(
-            openclip_dir, clip_name + ".clip"
-        )
-        if not os.path.exists(openclip_dir):
-            os.makedirs(openclip_dir)
-
-        # prepare clip data from context and send it to openClipLoader
-        path = self.filepath_from_context(context)
-        loading_context = {
-            "path": path.replace("\\", "/"),
-            "colorspace": colorspace,
-            "version": "v{:0>3}".format(version_name),
-            "layer_rename_template": self.layer_rename_template,
-            "layer_rename_patterns": self.layer_rename_patterns,
-            "context_data": formatting_data
-        }
-        self.log.debug(pformat(
-            loading_context
-        ))
-        self.log.debug(openclip_path)
-
-        # make openpype clip file
-        opfapi.OpenClipSolver(
-            openclip_path, loading_context, logger=self.log).make()
-
-        # get the clip from the prepared reel group on the
-        # current desktop
-        opc = self._get_clip(
-            clip_name,
-            openclip_path
-        )
-
-        # add additional metadata from the version to imprint
-        # into Avalon data
-        add_keys = [
-            "frameStart", "frameEnd", "source", "author",
-            "fps", "handleStart", "handleEnd"
-        ]
-
-        # move all version data keys to tag data
-        data_imprint = {}
-        for key in add_keys:
-            data_imprint.update({
-                key:
version_data.get(key, str(None)) - }) - - # add variables related to version context - data_imprint.update({ - "version": version_name, - "colorspace": colorspace, - "objectName": clip_name - }) - - # TODO: finish the containerisation - # opc_segment = opfapi.get_clip_segment(opc) - - # return opfapi.containerise( - # opc_segment, - # name, namespace, context, - # self.__class__.__name__, - # data_imprint) - - return opc - - def _get_clip(self, name, clip_path): - reel = self._get_reel() - # with maintained openclip as opc - matching_clip = [cl for cl in reel.clips - if cl.name.get_value() == name] - if matching_clip: - return matching_clip.pop() - else: - created_clips = flame.import_clips(str(clip_path), reel) - return created_clips.pop() - - def _get_reel(self): - - matching_rgroup = [ - rg for rg in self.fpd.reel_groups - if rg.name.get_value() == self.reel_group_name - ] - - if not matching_rgroup: - reel_group = self.fpd.create_reel_group(str(self.reel_group_name)) - for _r in reel_group.reels: - if "reel" not in _r.name.get_value().lower(): - continue - self.log.debug("Removing: {}".format(_r.name)) - flame.delete(_r) - else: - reel_group = matching_rgroup.pop() - - matching_reel = [ - re for re in reel_group.reels - if re.name.get_value() == self.reel_name - ] - - if not matching_reel: - reel_group = reel_group.create_reel(str(self.reel_name)) - else: - reel_group = matching_reel.pop() - - return reel_group - - def _get_segment_from_clip(self, clip): - # unwrapping segment from input clip - pass - - # def switch(self, container, representation): - # self.update(container, representation) - - # def update(self, container, representation): - # """ Updating previously loaded clips - # """ - - # # load clip to timeline and get main variables - # name = container['name'] - # namespace = container['namespace'] - # track_item = phiero.get_track_items( - # track_item_name=namespace) - # version = io.find_one({ - # "type": "version", - # "_id": representation["parent"] - # }) - # version_data = version.get("data", {}) - # version_name = version.get("name", None) - # colorspace = version_data.get("colorspace", None) - # object_name = "{}_{}".format(name, namespace) - # file = get_representation_path(representation).replace("\\", "/") - # clip = track_item.source() - - # # reconnect media to new path - # clip.reconnectMedia(file) - - # # set colorspace - # if colorspace: - # clip.setSourceMediaColourTransform(colorspace) - - # # add additional metadata from the version to imprint Avalon knob - # add_keys = [ - # "frameStart", "frameEnd", "source", "author", - # "fps", "handleStart", "handleEnd" - # ] - - # # move all version data keys to tag data - # data_imprint = {} - # for key in add_keys: - # data_imprint.update({ - # key: version_data.get(key, str(None)) - # }) - - # # add variables related to version context - # data_imprint.update({ - # "representation": str(representation["_id"]), - # "version": version_name, - # "colorspace": colorspace, - # "objectName": object_name - # }) - - # # update color of clip regarding the version order - # self.set_item_color(track_item, version) - - # return phiero.update_container(track_item, data_imprint) - - # def remove(self, container): - # """ Removing previously loaded clips - # """ - # # load clip to timeline and get main variables - # namespace = container['namespace'] - # track_item = phiero.get_track_items( - # track_item_name=namespace) - # track = track_item.parent() - - # # remove track item from track - # track.removeItem(track_item) - - # 
@classmethod - # def multiselection(cls, track_item): - # if not cls.track: - # cls.track = track_item.parent() - # cls.sequence = cls.track.parent() - - # @classmethod - # def set_item_color(cls, track_item, version): - - # clip = track_item.source() - # # define version name - # version_name = version.get("name", None) - # # get all versions in list - # versions = io.find({ - # "type": "version", - # "parent": version["parent"] - # }).distinct('name') - - # max_version = max(versions) - - # # set clip colour - # if version_name == max_version: - # clip.binItem().setColor(cls.clip_color_last) - # else: - # clip.binItem().setColor(cls.clip_color) diff --git a/openpype/hosts/flame/plugins/publish/extract_otio_file.py b/openpype/hosts/flame/plugins/publish/extract_otio_file.py deleted file mode 100644 index e5bfa42ce6..0000000000 --- a/openpype/hosts/flame/plugins/publish/extract_otio_file.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import pyblish.api -import opentimelineio as otio -from openpype.pipeline import publish - - -class ExtractOTIOFile(publish.Extractor): - """ - Extractor export OTIO file - """ - - label = "Extract OTIO file" - order = pyblish.api.ExtractorOrder - 0.45 - families = ["workfile"] - hosts = ["flame"] - - def process(self, instance): - # create representation data - if "representations" not in instance.data: - instance.data["representations"] = [] - - name = instance.data["name"] - staging_dir = self.staging_dir(instance) - - otio_timeline = instance.context.data["otioTimeline"] - # create otio timeline representation - otio_file_name = name + ".otio" - otio_file_path = os.path.join(staging_dir, otio_file_name) - - # export otio file to temp dir - otio.adapters.write_to_file(otio_timeline, otio_file_path) - - representation_otio = { - 'name': "otio", - 'ext': "otio", - 'files': otio_file_name, - "stagingDir": staging_dir, - } - - instance.data["representations"].append(representation_otio) - - self.log.info("Added OTIO file representation: {}".format( - representation_otio)) diff --git a/openpype/hosts/fusion/addon.py b/openpype/hosts/fusion/addon.py deleted file mode 100644 index 8343f3c79d..0000000000 --- a/openpype/hosts/fusion/addon.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -import re -from openpype.modules import OpenPypeModule, IHostAddon -from openpype.lib import Logger - -FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - -# FUSION_VERSIONS_DICT is used by the pre-launch hooks -# The keys correspond to all currently supported Fusion versions -# Each value is a list of corresponding Python home variables and a profile -# number, which is used by the profile hook to set Fusion profile variables. -FUSION_VERSIONS_DICT = { - 9: ("FUSION_PYTHON36_HOME", 9), - 16: ("FUSION16_PYTHON36_HOME", 16), - 17: ("FUSION16_PYTHON36_HOME", 16), - 18: ("FUSION_PYTHON3_HOME", 16), -} - - -def get_fusion_version(app_name): - """ - The function is triggered by the prelaunch hooks to get the fusion version. - - `app_name` is obtained by prelaunch hooks from the - `launch_context.env.get("AVALON_APP_NAME")`. - - To get a correct Fusion version, a version number should be present - in the `applications/fusion/variants` key - of the Blackmagic Fusion Application Settings. 
- """ - - log = Logger.get_logger(__name__) - - if not app_name: - return - - app_version_candidates = re.findall(r"\d+", app_name) - if not app_version_candidates: - return - for app_version in app_version_candidates: - if int(app_version) in FUSION_VERSIONS_DICT: - return int(app_version) - else: - log.info( - "Unsupported Fusion version: {app_version}".format( - app_version=app_version - ) - ) - - -class FusionAddon(OpenPypeModule, IHostAddon): - name = "fusion" - host_name = "fusion" - - def initialize(self, module_settings): - self.enabled = True - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [os.path.join(FUSION_HOST_DIR, "hooks")] - - def add_implementation_envs(self, env, app): - # Set default values if are not already set via settings - - defaults = {"OPENPYPE_LOG_NO_COLORS": "Yes"} - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_workfile_extensions(self): - return [".comp"] diff --git a/openpype/hosts/fusion/api/action.py b/openpype/hosts/fusion/api/action.py deleted file mode 100644 index 66b787c2f1..0000000000 --- a/openpype/hosts/fusion/api/action.py +++ /dev/null @@ -1,60 +0,0 @@ -import pyblish.api - - -from openpype.hosts.fusion.api.lib import get_current_comp -from openpype.pipeline.publish import get_errored_instances_from_context - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid nodes in Fusion when plug-in failed. - - To retrieve the invalid nodes this assumes a static `get_invalid()` - method is available on the plugin. - - """ - - label = "Select invalid" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Awesome Icon - - def process(self, context, plugin): - errored_instances = get_errored_instances_from_context( - context, - plugin=plugin, - ) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes..") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning( - "Plug-in returned to be invalid, " - "but has no selectable nodes." 
- ) - - if not invalid: - # Assume relevant comp is current comp and clear selection - self.log.info("No invalid tools found.") - comp = get_current_comp() - flow = comp.CurrentFrame.FlowView - flow.Select() # No args equals clearing selection - return - - # Assume a single comp - first_tool = invalid[0] - comp = first_tool.Comp() - flow = comp.CurrentFrame.FlowView - flow.Select() # No args equals clearing selection - names = set() - for tool in invalid: - flow.Select(tool, True) - comp.SetActiveTool(tool) - names.add(tool.Name) - self.log.info( - "Selecting invalid tools: %s" % ", ".join(sorted(names)) - ) diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py deleted file mode 100644 index 85f9c54a73..0000000000 --- a/openpype/hosts/fusion/api/lib.py +++ /dev/null @@ -1,295 +0,0 @@ -import os -import sys -import re -import contextlib - -from openpype.lib import Logger -from openpype.client import ( - get_asset_by_name, - get_subset_by_name, - get_last_version_by_subset_id, - get_representation_by_id, - get_representation_by_name, - get_representation_parents, -) -from openpype.pipeline import ( - switch_container, - get_current_project_name, -) -from openpype.pipeline.context_tools import get_current_project_asset - -self = sys.modules[__name__] -self._project = None - - -def update_frame_range(start, end, comp=None, set_render_range=True, - handle_start=0, handle_end=0): - """Set Fusion comp's start and end frame range - - Args: - start (float, int): start frame - end (float, int): end frame - comp (object, Optional): comp object from fusion - set_render_range (bool, Optional): When True this will also set the - composition's render start and end frame. - handle_start (float, int, Optional): frame handles before start frame - handle_end (float, int, Optional): frame handles after end frame - - Returns: - None - - """ - - if not comp: - comp = get_current_comp() - - # Convert any potential none type to zero - handle_start = handle_start or 0 - handle_end = handle_end or 0 - - attrs = { - "COMPN_GlobalStart": start - handle_start, - "COMPN_GlobalEnd": end + handle_end - } - - # set frame range - if set_render_range: - attrs.update({ - "COMPN_RenderStart": start, - "COMPN_RenderEnd": end - }) - - with comp_lock_and_undo_chunk(comp): - comp.SetAttrs(attrs) - - -def set_asset_framerange(): - """Set Comp's frame range based on current asset""" - asset_doc = get_current_project_asset() - start = asset_doc["data"]["frameStart"] - end = asset_doc["data"]["frameEnd"] - handle_start = asset_doc["data"]["handleStart"] - handle_end = asset_doc["data"]["handleEnd"] - update_frame_range(start, end, set_render_range=True, - handle_start=handle_start, - handle_end=handle_end) - - -def set_asset_resolution(): - """Set Comp's resolution width x height default based on current asset""" - asset_doc = get_current_project_asset() - width = asset_doc["data"]["resolutionWidth"] - height = asset_doc["data"]["resolutionHeight"] - comp = get_current_comp() - - print("Setting comp frame format resolution to {}x{}".format(width, - height)) - comp.SetPrefs({ - "Comp.FrameFormat.Width": width, - "Comp.FrameFormat.Height": height, - }) - - -def validate_comp_prefs(comp=None, force_repair=False): - """Validate current comp defaults with asset settings. - - Validates fps, resolutionWidth, resolutionHeight, aspectRatio. - - This does *not* validate frameStart, frameEnd, handleStart and handleEnd. 
- """ - - if comp is None: - comp = get_current_comp() - - log = Logger.get_logger("validate_comp_prefs") - - fields = [ - "name", - "data.fps", - "data.resolutionWidth", - "data.resolutionHeight", - "data.pixelAspect" - ] - asset_doc = get_current_project_asset(fields=fields) - asset_data = asset_doc["data"] - - comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat") - - # Pixel aspect ratio in Fusion is set as AspectX and AspectY so we convert - # the data to something that is more sensible to Fusion - asset_data["pixelAspectX"] = asset_data.pop("pixelAspect") - asset_data["pixelAspectY"] = 1.0 - - validations = [ - ("fps", "Rate", "FPS"), - ("resolutionWidth", "Width", "Resolution Width"), - ("resolutionHeight", "Height", "Resolution Height"), - ("pixelAspectX", "AspectX", "Pixel Aspect Ratio X"), - ("pixelAspectY", "AspectY", "Pixel Aspect Ratio Y") - ] - - invalid = [] - for key, comp_key, label in validations: - asset_value = asset_data[key] - comp_value = comp_frame_format_prefs.get(comp_key) - if asset_value != comp_value: - invalid_msg = "{} {} should be {}".format(label, - comp_value, - asset_value) - invalid.append(invalid_msg) - - if not force_repair: - # Do not log warning if we force repair anyway - log.warning( - "Comp {pref} {value} does not match asset " - "'{asset_name}' {pref} {asset_value}".format( - pref=label, - value=comp_value, - asset_name=asset_doc["name"], - asset_value=asset_value) - ) - - if invalid: - - def _on_repair(): - attributes = dict() - for key, comp_key, _label in validations: - value = asset_data[key] - comp_key_full = "Comp.FrameFormat.{}".format(comp_key) - attributes[comp_key_full] = value - comp.SetPrefs(attributes) - - if force_repair: - log.info("Applying default Comp preferences..") - _on_repair() - return - - from . 
import menu - from openpype.widgets import popup - from openpype.style import load_stylesheet - dialog = popup.Popup(parent=menu.menu) - dialog.setWindowTitle("Fusion comp has invalid configuration") - - msg = "Comp preferences mismatches '{}'".format(asset_doc["name"]) - msg += "\n" + "\n".join(invalid) - dialog.setMessage(msg) - dialog.setButtonText("Repair") - dialog.on_clicked.connect(_on_repair) - dialog.show() - dialog.raise_() - dialog.activateWindow() - dialog.setStyleSheet(load_stylesheet()) - - -@contextlib.contextmanager -def maintained_selection(comp=None): - """Reset comp selection from before the context after the context""" - if comp is None: - comp = get_current_comp() - - previous_selection = comp.GetToolList(True).values() - try: - yield - finally: - flow = comp.CurrentFrame.FlowView - flow.Select() # No args equals clearing selection - if previous_selection: - for tool in previous_selection: - flow.Select(tool, True) - - -@contextlib.contextmanager -def maintained_comp_range(comp=None, - global_start=True, - global_end=True, - render_start=True, - render_end=True): - """Reset comp frame ranges from before the context after the context""" - if comp is None: - comp = get_current_comp() - - comp_attrs = comp.GetAttrs() - preserve_attrs = {} - if global_start: - preserve_attrs["COMPN_GlobalStart"] = comp_attrs["COMPN_GlobalStart"] - if global_end: - preserve_attrs["COMPN_GlobalEnd"] = comp_attrs["COMPN_GlobalEnd"] - if render_start: - preserve_attrs["COMPN_RenderStart"] = comp_attrs["COMPN_RenderStart"] - if render_end: - preserve_attrs["COMPN_RenderEnd"] = comp_attrs["COMPN_RenderEnd"] - - try: - yield - finally: - comp.SetAttrs(preserve_attrs) - - -def get_frame_path(path): - """Get filename for the Fusion Saver with padded number as '#' - - >>> get_frame_path("C:/test.exr") - ('C:/test', 4, '.exr') - - >>> get_frame_path("filename.00.tif") - ('filename.', 2, '.tif') - - >>> get_frame_path("foobar35.tif") - ('foobar', 2, '.tif') - - Args: - path (str): The path to render to. 
- - Returns: - tuple: head, padding, tail (extension) - - """ - filename, ext = os.path.splitext(path) - - # Find a final number group - match = re.match('.*?([0-9]+)$', filename) - if match: - padding = len(match.group(1)) - # remove number from end since fusion - # will swap it with the frame number - filename = filename[:-padding] - else: - padding = 4 # default Fusion padding - - return filename, padding, ext - - -def get_fusion_module(): - """Get current Fusion instance""" - fusion = getattr(sys.modules["__main__"], "fusion", None) - return fusion - - -def get_bmd_library(): - """Get bmd library""" - bmd = getattr(sys.modules["__main__"], "bmd", None) - return bmd - - -def get_current_comp(): - """Get current comp in this session""" - fusion = get_fusion_module() - if fusion is not None: - comp = fusion.CurrentComp - return comp - - -@contextlib.contextmanager -def comp_lock_and_undo_chunk( - comp, - undo_queue_name="Script CMD", - keep_undo=True, -): - """Lock comp and open an undo chunk during the context""" - try: - comp.Lock() - comp.StartUndo(undo_queue_name) - yield - finally: - comp.Unlock() - comp.EndUndo(keep_undo) diff --git a/openpype/hosts/fusion/api/menu.py b/openpype/hosts/fusion/api/menu.py deleted file mode 100644 index 0b9ad1a43b..0000000000 --- a/openpype/hosts/fusion/api/menu.py +++ /dev/null @@ -1,189 +0,0 @@ -import os -import sys - -from qtpy import QtWidgets, QtCore, QtGui - -from openpype.tools.utils import host_tools -from openpype.style import load_stylesheet -from openpype.lib import register_event_callback -from openpype.hosts.fusion.scripts import ( - duplicate_with_inputs, -) -from openpype.hosts.fusion.api.lib import ( - set_asset_framerange, - set_asset_resolution, -) -from openpype.pipeline import get_current_asset_name -from openpype.resources import get_openpype_icon_filepath - -from .pipeline import FusionEventHandler -from .pulse import FusionPulse - - -MENU_LABEL = os.environ["AVALON_LABEL"] - - -self = sys.modules[__name__] -self.menu = None - - -class OpenPypeMenu(QtWidgets.QWidget): - def __init__(self, *args, **kwargs): - super(OpenPypeMenu, self).__init__(*args, **kwargs) - - self.setObjectName(f"{MENU_LABEL}Menu") - - icon_path = get_openpype_icon_filepath() - icon = QtGui.QIcon(icon_path) - self.setWindowIcon(icon) - - self.setWindowFlags( - QtCore.Qt.Window - | QtCore.Qt.CustomizeWindowHint - | QtCore.Qt.WindowTitleHint - | QtCore.Qt.WindowMinimizeButtonHint - | QtCore.Qt.WindowCloseButtonHint - | QtCore.Qt.WindowStaysOnTopHint - ) - self.render_mode_widget = None - self.setWindowTitle(MENU_LABEL) - - asset_label = QtWidgets.QLabel("Context", self) - asset_label.setStyleSheet( - """QLabel { - font-size: 14px; - font-weight: 600; - color: #5f9fb8; - }""" - ) - asset_label.setAlignment(QtCore.Qt.AlignHCenter) - - workfiles_btn = QtWidgets.QPushButton("Workfiles...", self) - create_btn = QtWidgets.QPushButton("Create...", self) - load_btn = QtWidgets.QPushButton("Load...", self) - publish_btn = QtWidgets.QPushButton("Publish...", self) - manager_btn = QtWidgets.QPushButton("Manage...", self) - libload_btn = QtWidgets.QPushButton("Library...", self) - set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self) - set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self) - duplicate_with_inputs_btn = QtWidgets.QPushButton( - "Duplicate with input connections", self - ) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(10, 20, 10, 20) - - layout.addWidget(asset_label) - - layout.addSpacing(20) - - 
layout.addWidget(workfiles_btn) - - layout.addSpacing(20) - - layout.addWidget(create_btn) - layout.addWidget(load_btn) - layout.addWidget(publish_btn) - layout.addWidget(manager_btn) - - layout.addSpacing(20) - - layout.addWidget(libload_btn) - - layout.addSpacing(20) - - layout.addWidget(set_framerange_btn) - layout.addWidget(set_resolution_btn) - - layout.addSpacing(20) - - layout.addWidget(duplicate_with_inputs_btn) - - self.setLayout(layout) - - # Store reference so we can update the label - self.asset_label = asset_label - - workfiles_btn.clicked.connect(self.on_workfile_clicked) - create_btn.clicked.connect(self.on_create_clicked) - publish_btn.clicked.connect(self.on_publish_clicked) - load_btn.clicked.connect(self.on_load_clicked) - manager_btn.clicked.connect(self.on_manager_clicked) - libload_btn.clicked.connect(self.on_libload_clicked) - duplicate_with_inputs_btn.clicked.connect( - self.on_duplicate_with_inputs_clicked - ) - set_resolution_btn.clicked.connect(self.on_set_resolution_clicked) - set_framerange_btn.clicked.connect(self.on_set_framerange_clicked) - - self._callbacks = [] - self.register_callback("taskChanged", self.on_task_changed) - self.on_task_changed() - - # Force close current process if Fusion is closed - self._pulse = FusionPulse(parent=self) - self._pulse.start() - - # Detect Fusion events as OpenPype events - self._event_handler = FusionEventHandler(parent=self) - self._event_handler.start() - - def on_task_changed(self): - # Update current context label - label = get_current_asset_name() - self.asset_label.setText(label) - - def register_callback(self, name, fn): - # Create a wrapper callback that we only store - # for as long as we want it to persist as callback - def _callback(*args): - fn() - - self._callbacks.append(_callback) - register_event_callback(name, _callback) - - def deregister_all_callbacks(self): - self._callbacks[:] = [] - - def on_workfile_clicked(self): - host_tools.show_workfiles() - - def on_create_clicked(self): - host_tools.show_publisher(tab="create") - - def on_publish_clicked(self): - host_tools.show_publisher(tab="publish") - - def on_load_clicked(self): - host_tools.show_loader(use_context=True) - - def on_manager_clicked(self): - host_tools.show_scene_inventory() - - def on_libload_clicked(self): - host_tools.show_library_loader() - - def on_duplicate_with_inputs_clicked(self): - duplicate_with_inputs.duplicate_with_input_connections() - - def on_set_resolution_clicked(self): - set_asset_resolution() - - def on_set_framerange_clicked(self): - set_asset_framerange() - - -def launch_openpype_menu(): - app = QtWidgets.QApplication(sys.argv) - - pype_menu = OpenPypeMenu() - - stylesheet = load_stylesheet() - pype_menu.setStyleSheet(stylesheet) - - pype_menu.show() - self.menu = pype_menu - - result = app.exec_() - print("Shutting down..") - sys.exit(result) diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py deleted file mode 100644 index a886086758..0000000000 --- a/openpype/hosts/fusion/api/pipeline.py +++ /dev/null @@ -1,399 +0,0 @@ -""" -Basic avalon integration -""" -import os -import sys -import logging -import contextlib - -import pyblish.api -from qtpy import QtCore - -from openpype.lib import ( - Logger, - register_event_callback, - emit_event -) -from openpype.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - register_inventory_action_path, - AVALON_CONTAINER_ID, -) -from openpype.pipeline.load import any_outdated_containers -from 
openpype.hosts.fusion import FUSION_HOST_DIR -from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -from openpype.tools.utils import host_tools - - -from .lib import ( - get_current_comp, - comp_lock_and_undo_chunk, - validate_comp_prefs -) - -log = Logger.get_logger(__name__) - -PLUGINS_DIR = os.path.join(FUSION_HOST_DIR, "plugins") - -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -class FusionLogHandler(logging.Handler): - # Keep a reference to fusion's Print function (Remote Object) - _print = None - - @property - def print(self): - if self._print is not None: - # Use cached - return self._print - - _print = getattr(sys.modules["__main__"], "fusion").Print - if _print is None: - # Backwards compatibility: Print method on Fusion instance was - # added around Fusion 17.4 and wasn't available on PyRemote Object - # before - _print = get_current_comp().Print - self._print = _print - return _print - - def emit(self, record): - entry = self.format(record) - self.print(entry) - - -class FusionHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "fusion" - - def install(self): - """Install fusion-specific functionality of OpenPype. - - This is where you install menus and register families, data - and loaders into fusion. - - It is called automatically when installing via - `openpype.pipeline.install_host(openpype.hosts.fusion.api)` - - See the Maya equivalent for inspiration on how to implement this. - - """ - # Remove all handlers associated with the root logger object, because - # that one always logs as "warnings" incorrectly. - for handler in logging.root.handlers[:]: - logging.root.removeHandler(handler) - - # Attach default logging handler that prints to active comp - logger = logging.getLogger() - formatter = logging.Formatter(fmt="%(message)s\n") - handler = FusionLogHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(logging.DEBUG) - - pyblish.api.register_host("fusion") - pyblish.api.register_plugin_path(PUBLISH_PATH) - log.info("Registering Fusion plug-ins..") - - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - - # Register events - register_event_callback("open", on_after_open) - register_event_callback("save", on_save) - register_event_callback("new", on_new) - - # region workfile io api - def has_unsaved_changes(self): - comp = get_current_comp() - return comp.GetAttrs()["COMPB_Modified"] - - def get_workfile_extensions(self): - return [".comp"] - - def save_workfile(self, dst_path=None): - comp = get_current_comp() - comp.Save(dst_path) - - def open_workfile(self, filepath): - # Hack to get fusion, see - # openpype.hosts.fusion.api.pipeline.get_current_comp() - fusion = getattr(sys.modules["__main__"], "fusion", None) - - return fusion.LoadComp(filepath) - - def get_current_workfile(self): - comp = get_current_comp() - current_filepath = comp.GetAttrs()["COMPS_FileName"] - if not current_filepath: - return None - - return current_filepath - - def work_root(self, session): - work_dir = session["AVALON_WORKDIR"] - scene_dir = session.get("AVALON_SCENEDIR") - if scene_dir: - return os.path.join(work_dir, scene_dir) - else: - return work_dir - # endregion - - @contextlib.contextmanager - def maintained_selection(self): - from .lib import maintained_selection - 
return maintained_selection() - - def get_containers(self): - return ls() - - def update_context_data(self, data, changes): - comp = get_current_comp() - comp.SetData("openpype", data) - - def get_context_data(self): - comp = get_current_comp() - return comp.GetData("openpype") or {} - - -def on_new(event): - comp = event["Rets"]["comp"] - validate_comp_prefs(comp, force_repair=True) - - -def on_save(event): - comp = event["sender"] - validate_comp_prefs(comp) - - -def on_after_open(event): - comp = event["sender"] - validate_comp_prefs(comp) - - if any_outdated_containers(): - log.warning("Scene has outdated content.") - - # Find OpenPype menu to attach to - from . import menu - - def _on_show_scene_inventory(): - # ensure that comp is active - frame = comp.CurrentFrame - if not frame: - print("Comp is closed, skipping show scene inventory") - return - frame.ActivateFrame() # raise comp window - host_tools.show_scene_inventory() - - from openpype.widgets import popup - from openpype.style import load_stylesheet - dialog = popup.Popup(parent=menu.menu) - dialog.setWindowTitle("Fusion comp has outdated content") - dialog.setMessage("There are outdated containers in " - "your Fusion comp.") - dialog.on_clicked.connect(_on_show_scene_inventory) - dialog.show() - dialog.raise_() - dialog.activateWindow() - dialog.setStyleSheet(load_stylesheet()) - - -def ls(): - """List containers from active Fusion scene - - This is the host-equivalent of api.ls(), but instead of listing - assets on disk, it lists assets already loaded in Fusion; once loaded - they are called 'containers' - - Yields: - dict: container - - """ - - comp = get_current_comp() - tools = comp.GetToolList(False).values() - - for tool in tools: - container = parse_container(tool) - if container: - yield container - - -def imprint_container(tool, - name, - namespace, - context, - loader=None): - """Imprint a Loader with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - tool (object): The node in Fusion to imprint as container, usually a - Loader. - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - - Returns: - None - - """ - - data = [ - ("schema", "openpype:container-2.0"), - ("id", AVALON_CONTAINER_ID), - ("name", str(name)), - ("namespace", str(namespace)), - ("loader", str(loader)), - ("representation", str(context["representation"]["_id"])), - ] - - for key, value in data: - tool.SetData("avalon.{}".format(key), value) - - -def parse_container(tool): - """Returns imprinted container data of a tool - - This reads the imprinted data from `imprint_container`. - - """ - - data = tool.GetData('avalon') - if not isinstance(data, dict): - return - - # If not all required data return the empty container - required = ['schema', 'id', 'name', - 'namespace', 'loader', 'representation'] - if not all(key in data for key in required): - return - - container = {key: data[key] for key in required} - - # Store the tool's name - container["objectName"] = tool.Name - - # Store reference to the tool object - container["_tool"] = tool - - return container - - -class FusionEventThread(QtCore.QThread): - """QThread which will periodically ping Fusion app for any events. 
-    The fusion.UIManager must be set up to be notified of events before
-    they'll be reported by this thread, for example:
-        fusion.UIManager.AddNotify("Comp_Save", None)
-
-    """
-
-    on_event = QtCore.Signal(dict)
-
-    def run(self):
-
-        app = getattr(sys.modules["__main__"], "app", None)
-        if app is None:
-            # No Fusion app found
-            return
-
-        # As optimization store the GetEvent method directly because every
-        # getattr of UIManager.GetEvent tries to resolve the Remote Function
-        # through the PyRemoteObject
-        get_event = app.UIManager.GetEvent
-        delay = int(os.environ.get("OPENPYPE_FUSION_CALLBACK_INTERVAL", 1000))
-        while True:
-            if self.isInterruptionRequested():
-                return
-
-            # Process all events that have been queued up until now
-            while True:
-                event = get_event(False)
-                if not event:
-                    break
-                self.on_event.emit(event)
-
-            # Wait some time before processing events again
-            # to not keep blocking the UI
-            self.msleep(delay)
-
-
-class FusionEventHandler(QtCore.QObject):
-    """Emits OpenPype events based on Fusion events captured in a QThread.
-
-    This will emit the following OpenPype events based on Fusion actions:
-        save: Comp_Save, Comp_SaveAs
-        open: Comp_Opened
-        new: Comp_New
-
-    To use this you can attach it to your Qt UI so it runs in the background.
-    E.g.
-        >>> handler = FusionEventHandler(parent=window)
-        >>> handler.start()
-
-    """
-    ACTION_IDS = [
-        "Comp_Save",
-        "Comp_SaveAs",
-        "Comp_New",
-        "Comp_Opened"
-    ]
-
-    def __init__(self, parent=None):
-        super(FusionEventHandler, self).__init__(parent=parent)
-
-        # Set up Fusion event callbacks
-        fusion = getattr(sys.modules["__main__"], "fusion", None)
-        ui = fusion.UIManager
-
-        # Add notifications for the ones we want to listen to
-        notifiers = []
-        for action_id in self.ACTION_IDS:
-            notifier = ui.AddNotify(action_id, None)
-            notifiers.append(notifier)
-
-        # TODO: Not entirely sure whether these must be kept to avoid
-        # garbage collection
-        self._notifiers = notifiers
-
-        self._event_thread = FusionEventThread(parent=self)
-        self._event_thread.on_event.connect(self._on_event)
-
-    def start(self):
-        self._event_thread.start()
-
-    def stop(self):
-        # QThread has no stop(); ask the polling loop in run() to exit
-        self._event_thread.requestInterruption()
-
-    def _on_event(self, event):
-        """Handle Fusion events to emit OpenPype events"""
-        if not event:
-            return
-
-        what = event["what"]
-
-        # Comp Save
-        if what in {"Comp_Save", "Comp_SaveAs"}:
-            if not event["Rets"].get("success"):
-                # If the Save action is cancelled it will still emit an
-                # event but with "success": False so we ignore those cases
-                return
-            # Comp was saved
-            emit_event("save", data=event)
-            return
-
-        # Comp New
-        elif what in {"Comp_New"}:
-            emit_event("new", data=event)
-
-        # Comp Opened
-        elif what in {"Comp_Opened"}:
-            emit_event("open", data=event)
diff --git a/openpype/hosts/fusion/api/plugin.py b/openpype/hosts/fusion/api/plugin.py
deleted file mode 100644
index 63a74fbdb5..0000000000
--- a/openpype/hosts/fusion/api/plugin.py
+++ /dev/null
@@ -1,221 +0,0 @@
-from copy import deepcopy
-import os
-
-from openpype.hosts.fusion.api import (
-    get_current_comp,
-    comp_lock_and_undo_chunk,
-)
-
-from openpype.lib import (
-    BoolDef,
-    EnumDef,
-)
-from openpype.pipeline import (
-    legacy_io,
-    Creator,
-    CreatedInstance
-)
-
-
-class GenericCreateSaver(Creator):
-    default_variants = ["Main", "Mask"]
-    description = "Fusion Saver to generate image sequence"
-    icon = "fa5.eye"
-
-    instance_attributes = [
-        "reviewable"
-    ]
-
-    settings_category = "fusion"
-
-    image_format = "exr"
-
-    # TODO: This should be renamed together with Nuke so it
is aligned - temp_rendering_path_template = ( - "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}") - - def create(self, subset_name, instance_data, pre_create_data): - self.pass_pre_attributes_to_instance(instance_data, pre_create_data) - - instance = CreatedInstance( - family=self.family, - subset_name=subset_name, - data=instance_data, - creator=self, - ) - data = instance.data_to_store() - comp = get_current_comp() - with comp_lock_and_undo_chunk(comp): - args = (-32768, -32768) # Magical position numbers - saver = comp.AddTool("Saver", *args) - - self._update_tool_with_data(saver, data=data) - - # Register the CreatedInstance - self._imprint(saver, data) - - # Insert the transient data - instance.transient_data["tool"] = saver - - self._add_instance_to_context(instance) - - return instance - - def collect_instances(self): - comp = get_current_comp() - tools = comp.GetToolList(False, "Saver").values() - for tool in tools: - data = self.get_managed_tool_data(tool) - if not data: - continue - - # Add instance - created_instance = CreatedInstance.from_existing(data, self) - - # Collect transient data - created_instance.transient_data["tool"] = tool - - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, _changes in update_list: - new_data = created_inst.data_to_store() - tool = created_inst.transient_data["tool"] - self._update_tool_with_data(tool, new_data) - self._imprint(tool, new_data) - - def remove_instances(self, instances): - for instance in instances: - # Remove the tool from the scene - - tool = instance.transient_data["tool"] - if tool: - tool.Delete() - - # Remove the collected CreatedInstance to remove from UI directly - self._remove_instance_from_context(instance) - - def _imprint(self, tool, data): - # Save all data in a "openpype.{key}" = value data - - # Instance id is the tool's name so we don't need to imprint as data - data.pop("instance_id", None) - - active = data.pop("active", None) - if active is not None: - # Use active value to set the passthrough state - tool.SetAttrs({"TOOLB_PassThrough": not active}) - - for key, value in data.items(): - tool.SetData(f"openpype.{key}", value) - - def _update_tool_with_data(self, tool, data): - """Update tool node name and output path based on subset data""" - if "subset" not in data: - return - - original_subset = tool.GetData("openpype.subset") - original_format = tool.GetData( - "openpype.creator_attributes.image_format" - ) - - subset = data["subset"] - if ( - original_subset != subset - or original_format != data["creator_attributes"]["image_format"] - ): - self._configure_saver_tool(data, tool, subset) - - def _configure_saver_tool(self, data, tool, subset): - formatting_data = deepcopy(data) - - # get frame padding from anatomy templates - frame_padding = self.project_anatomy.templates["frame_padding"] - - # get output format - ext = data["creator_attributes"]["image_format"] - - # Subset change detected - workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"]) - formatting_data.update({ - "workdir": workdir, - "frame": "0" * frame_padding, - "ext": ext, - "product": { - "name": formatting_data["subset"], - "type": formatting_data["family"], - }, - }) - - # build file path to render - filepath = self.temp_rendering_path_template.format(**formatting_data) - - comp = get_current_comp() - tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath)) - - # Rename tool - if tool.Name != subset: - print(f"Renaming {tool.Name} -> {subset}") - 
tool.SetAttrs({"TOOLS_Name": subset}) - - def get_managed_tool_data(self, tool): - """Return data of the tool if it matches creator identifier""" - data = tool.GetData("openpype") - if not isinstance(data, dict): - return - - required = { - "id": "pyblish.avalon.instance", - "creator_identifier": self.identifier, - } - for key, value in required.items(): - if key not in data or data[key] != value: - return - - # Get active state from the actual tool state - attrs = tool.GetAttrs() - passthrough = attrs["TOOLB_PassThrough"] - data["active"] = not passthrough - - # Override publisher's UUID generation because tool names are - # already unique in Fusion in a comp - data["instance_id"] = tool.Name - - return data - - def get_instance_attr_defs(self): - """Settings for publish page""" - return self.get_pre_create_attr_defs() - - def pass_pre_attributes_to_instance(self, instance_data, pre_create_data): - creator_attrs = instance_data["creator_attributes"] = {} - for pass_key in pre_create_data.keys(): - creator_attrs[pass_key] = pre_create_data[pass_key] - - def _get_render_target_enum(self): - rendering_targets = { - "local": "Local machine rendering", - "frames": "Use existing frames", - } - if "farm_rendering" in self.instance_attributes: - rendering_targets["farm"] = "Farm rendering" - - return EnumDef( - "render_target", items=rendering_targets, label="Render target" - ) - - def _get_reviewable_bool(self): - return BoolDef( - "review", - default=("reviewable" in self.instance_attributes), - label="Review", - ) - - def _get_image_format_enum(self): - image_format_options = ["exr", "tga", "tif", "png", "jpg"] - return EnumDef( - "image_format", - items=image_format_options, - default=self.image_format, - label="Output Image Format", - ) diff --git a/openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs b/openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs deleted file mode 100644 index 9c67af7db9..0000000000 --- a/openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs +++ /dev/null @@ -1,19 +0,0 @@ -{ -Locked = true, -Global = { - Paths = { - Map = { - ["AYON:"] = "$(OPENPYPE_FUSION)/deploy/ayon", - ["Config:"] = "UserPaths:Config;AYON:Config", - ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts", - }, - }, - Script = { - PythonVersion = 3, - Python3Forced = true - }, - UserInterface = { - Language = "en_US" - }, - }, -} diff --git a/openpype/hosts/fusion/deploy/openpype/Config/menu.fu b/openpype/hosts/fusion/deploy/openpype/Config/menu.fu deleted file mode 100644 index 85134d2c62..0000000000 --- a/openpype/hosts/fusion/deploy/openpype/Config/menu.fu +++ /dev/null @@ -1,60 +0,0 @@ -{ - Action - { - ID = "OpenPype_Menu", - Category = "OpenPype", - Name = "OpenPype Menu", - - Targets = - { - Composition = - { - Execute = _Lua [=[ - local scriptPath = app:MapPath("OpenPype:../MenuScripts/launch_menu.py") - if bmd.fileexists(scriptPath) == false then - print("[OpenPype Error] Can't run file: " .. scriptPath) - else - target:RunScript(scriptPath) - end - ]=], - }, - }, - }, - Action - { - ID = "OpenPype_Install_PySide2", - Category = "OpenPype", - Name = "Install PySide2", - - Targets = - { - Composition = - { - Execute = _Lua [=[ - local scriptPath = app:MapPath("OpenPype:../MenuScripts/install_pyside2.py") - if bmd.fileexists(scriptPath) == false then - print("[OpenPype Error] Can't run file: " .. 
scriptPath) - else - target:RunScript(scriptPath) - end - ]=], - }, - }, - }, - Menus - { - Target = "ChildFrame", - - Before "Help" - { - Sub "OpenPype" - { - "OpenPype_Menu{}", - "_", - Sub "Admin" { - "OpenPype_Install_PySide2{}" - } - } - }, - }, -} diff --git a/openpype/hosts/fusion/deploy/openpype/fusion_shared.prefs b/openpype/hosts/fusion/deploy/openpype/fusion_shared.prefs deleted file mode 100644 index 0035a38990..0000000000 --- a/openpype/hosts/fusion/deploy/openpype/fusion_shared.prefs +++ /dev/null @@ -1,19 +0,0 @@ -{ -Locked = true, -Global = { - Paths = { - Map = { - ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy/openpype", - ["Config:"] = "UserPaths:Config;OpenPype:Config", - ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts", - }, - }, - Script = { - PythonVersion = 3, - Python3Forced = true - }, - UserInterface = { - Language = "en_US" - }, - }, -} diff --git a/openpype/hosts/fusion/hooks/pre_pyside_install.py b/openpype/hosts/fusion/hooks/pre_pyside_install.py deleted file mode 100644 index f98aeda233..0000000000 --- a/openpype/hosts/fusion/hooks/pre_pyside_install.py +++ /dev/null @@ -1,186 +0,0 @@ -import os -import subprocess -import platform -import uuid - -from openpype.lib.applications import PreLaunchHook, LaunchTypes - - -class InstallPySideToFusion(PreLaunchHook): - """Automatically installs Qt binding to fusion's python packages. - - Check if fusion has installed PySide2 and will try to install if not. - - For pipeline implementation is required to have Qt binding installed in - fusion's python packages. - """ - - app_groups = {"fusion"} - order = 2 - launch_types = {LaunchTypes.local} - - def execute(self): - # Prelaunch hook is not crucial - try: - settings = self.data["project_settings"][self.host_name] - if not settings["hooks"]["InstallPySideToFusion"]["enabled"]: - return - self.inner_execute() - except Exception: - self.log.warning( - "Processing of {} crashed.".format(self.__class__.__name__), - exc_info=True - ) - - def inner_execute(self): - self.log.debug("Check for PySide2 installation.") - - fusion_python3_home = self.data.get("fusion_python3_home") - if not fusion_python3_home: - self.log.warning("'fusion_python3_home' was not provided. " - "Installation of PySide2 not possible") - return - - if platform.system().lower() == "windows": - exe_filenames = ["python.exe"] - else: - exe_filenames = ["python3", "python"] - - for exe_filename in exe_filenames: - python_executable = os.path.join(fusion_python3_home, exe_filename) - if os.path.exists(python_executable): - break - - if not os.path.exists(python_executable): - self.log.warning( - "Couldn't find python executable for fusion. {}".format( - python_executable - ) - ) - return - - # Check if PySide2 is installed and skip if yes - if self._is_pyside_installed(python_executable): - self.log.debug("Fusion has already installed PySide2.") - return - - self.log.debug("Installing PySide2.") - # Install PySide2 in fusion's python - if self._windows_require_permissions( - os.path.dirname(python_executable)): - result = self._install_pyside_windows(python_executable) - else: - result = self._install_pyside(python_executable) - - if result: - self.log.info("Successfully installed PySide2 module to fusion.") - else: - self.log.warning("Failed to install PySide2 module to fusion.") - - def _install_pyside_windows(self, python_executable): - """Install PySide2 python module to fusion's python. 
-
-        Installation requires administration rights; that is why the
-        "pywin32" module is used, since it can execute commands and ask
-        for administration rights.
-        """
-        try:
-            import win32api
-            import win32con
-            import win32process
-            import win32event
-            import pywintypes
-            from win32comext.shell.shell import ShellExecuteEx
-            from win32comext.shell import shellcon
-        except Exception:
-            self.log.warning("Couldn't import \"pywin32\" modules")
-            return False
-
-        try:
-            # Parameters
-            # - use "-m pip" as module pip to install PySide2 and argument
-            #   "--ignore-installed" to force install the module to fusion's
-            #   site-packages and make sure it is binary compatible
-            parameters = "-m pip install --ignore-installed PySide2"
-
-            # Execute command and ask for administrator's rights
-            process_info = ShellExecuteEx(
-                nShow=win32con.SW_SHOWNORMAL,
-                fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
-                lpVerb="runas",
-                lpFile=python_executable,
-                lpParameters=parameters,
-                lpDirectory=os.path.dirname(python_executable)
-            )
-            process_handle = process_info["hProcess"]
-            win32event.WaitForSingleObject(process_handle,
-                                           win32event.INFINITE)
-            returncode = win32process.GetExitCodeProcess(process_handle)
-            return returncode == 0
-        except pywintypes.error:
-            return False
-
-    def _install_pyside(self, python_executable):
-        """Install PySide2 python module to fusion's python."""
-        try:
-            # Parameters
-            # - use "-m pip" as module pip to install PySide2 and argument
-            #   "--ignore-installed" to force install the module to fusion's
-            #   site-packages and make sure it is binary compatible
-            env = dict(os.environ)
-            env.pop("PYTHONPATH", None)  # may not be set
-            args = [
-                python_executable,
-                "-m",
-                "pip",
-                "install",
-                "--ignore-installed",
-                "PySide2",
-            ]
-            process = subprocess.Popen(
-                args, stdout=subprocess.PIPE, universal_newlines=True,
-                env=env
-            )
-            process.communicate()
-            return process.returncode == 0
-        except PermissionError:
-            self.log.warning(
-                "Permission denied with command:"
-                "\"{}\".".format(" ".join(args))
-            )
-        except OSError as error:
-            self.log.warning(f"OS error has occurred: \"{error}\".")
-        except subprocess.SubprocessError:
-            pass
-
-    def _is_pyside_installed(self, python_executable):
-        """Check if PySide2 is importable from fusion's python (via qtpy)."""
-        args = [python_executable, "-c", "from qtpy import QtWidgets"]
-        process = subprocess.Popen(args,
-                                   stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE)
-        _, stderr = process.communicate()
-        stderr = stderr.decode()
-        if stderr:
-            return False
-        return True
-
-    def _windows_require_permissions(self, dirpath):
-        if platform.system().lower() != "windows":
-            return False
-
-        try:
-            # Attempt to create a temporary file in the folder
-            temp_file_path = os.path.join(dirpath, uuid.uuid4().hex)
-            with open(temp_file_path, "w"):
-                pass
-            os.remove(temp_file_path)  # Clean up temporary file
-            return False
-
-        except PermissionError:
-            return True
-
-        except BaseException as exc:
-            print(("Failed to determine if root requires permissions."
- "Unexpected error: {}").format(exc)) - return False diff --git a/openpype/hosts/fusion/plugins/create/create_workfile.py b/openpype/hosts/fusion/plugins/create/create_workfile.py deleted file mode 100644 index 4092086ea4..0000000000 --- a/openpype/hosts/fusion/plugins/create/create_workfile.py +++ /dev/null @@ -1,118 +0,0 @@ -from openpype.hosts.fusion.api import ( - get_current_comp -) -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_asset_by_name -from openpype.pipeline import ( - AutoCreator, - CreatedInstance, -) - - -class FusionWorkfileCreator(AutoCreator): - identifier = "workfile" - family = "workfile" - label = "Workfile" - icon = "fa5.file" - - default_variant = "Main" - - create_allow_context_change = False - - data_key = "openpype_workfile" - - def collect_instances(self): - - comp = get_current_comp() - data = comp.GetData(self.data_key) - if not data: - return - - instance = CreatedInstance( - family=self.family, - subset_name=data["subset"], - data=data, - creator=self - ) - instance.transient_data["comp"] = comp - - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - for created_inst, _changes in update_list: - comp = created_inst.transient_data["comp"] - if not hasattr(comp, "SetData"): - # Comp is not alive anymore, likely closed by the user - self.log.error("Workfile comp not found for existing instance." - " Comp might have been closed in the meantime.") - continue - - # Imprint data into the comp - data = created_inst.data_to_store() - comp.SetData(self.data_key, data) - - def create(self, options=None): - - comp = get_current_comp() - if not comp: - self.log.error("Unable to find current comp") - return - - existing_instance = None - for instance in self.create_context.instances: - if instance.family == self.family: - existing_instance = instance - break - - project_name = self.create_context.get_current_project_name() - asset_name = self.create_context.get_current_asset_name() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - if existing_instance is None: - existing_instance_asset = None - elif AYON_SERVER_ENABLED: - existing_instance_asset = existing_instance["folderPath"] - else: - existing_instance_asset = existing_instance["asset"] - - if existing_instance is None: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, task_name, asset_doc, - project_name, host_name - ) - data = { - "task": task_name, - "variant": self.default_variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - data.update(self.get_dynamic_data( - self.default_variant, task_name, asset_doc, - project_name, host_name, None - )) - - new_instance = CreatedInstance( - self.family, subset_name, data, self - ) - new_instance.transient_data["comp"] = comp - self._add_instance_to_context(new_instance) - - elif ( - existing_instance_asset != asset_name - or existing_instance["task"] != task_name - ): - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, task_name, asset_doc, - project_name, host_name - ) - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - existing_instance["asset"] = asset_name - existing_instance["task"] = task_name - existing_instance["subset"] = subset_name diff --git a/openpype/hosts/fusion/plugins/inventory/select_containers.py 
b/openpype/hosts/fusion/plugins/inventory/select_containers.py deleted file mode 100644 index d554b73a5b..0000000000 --- a/openpype/hosts/fusion/plugins/inventory/select_containers.py +++ /dev/null @@ -1,27 +0,0 @@ -from openpype.pipeline import InventoryAction - - -class FusionSelectContainers(InventoryAction): - - label = "Select Containers" - icon = "mouse-pointer" - color = "#d8d8d8" - - def process(self, containers): - from openpype.hosts.fusion.api import ( - get_current_comp, - comp_lock_and_undo_chunk - ) - - tools = [i["_tool"] for i in containers] - - comp = get_current_comp() - flow = comp.CurrentFrame.FlowView - - with comp_lock_and_undo_chunk(comp, self.label): - # Clear selection - flow.Select() - - # Select tool - for tool in tools: - flow.Select(tool) diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py deleted file mode 100644 index 94ba361b50..0000000000 --- a/openpype/hosts/fusion/plugins/load/actions.py +++ /dev/null @@ -1,80 +0,0 @@ -"""A module containing generic loader actions that will display in the Loader. - -""" - -from openpype.pipeline import load - - -class FusionSetFrameRangeLoader(load.LoaderPlugin): - """Set frame range excluding pre- and post-handles""" - - families = ["animation", - "camera", - "imagesequence", - "render", - "yeticache", - "pointcache", - "render"] - representations = ["*"] - extensions = {"*"} - - label = "Set frame range" - order = 11 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - from openpype.hosts.fusion.api import lib - - version = context['version'] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - if start is None or end is None: - print("Skipping setting frame range because start or " - "end frame data is missing..") - return - - lib.update_frame_range(start, end) - - -class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Set frame range including pre- and post-handles""" - - families = ["animation", - "camera", - "imagesequence", - "render", - "yeticache", - "pointcache", - "render"] - representations = ["*"] - - label = "Set frame range (with handles)" - order = 12 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - from openpype.hosts.fusion.api import lib - - version = context['version'] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - if start is None or end is None: - print("Skipping setting frame range because start or " - "end frame data is missing..") - return - - # Include handles - start -= version_data.get("handleStart", 0) - end += version_data.get("handleEnd", 0) - - lib.update_frame_range(start, end) diff --git a/openpype/hosts/fusion/plugins/load/load_alembic.py b/openpype/hosts/fusion/plugins/load/load_alembic.py deleted file mode 100644 index 9b6d1e12b4..0000000000 --- a/openpype/hosts/fusion/plugins/load/load_alembic.py +++ /dev/null @@ -1,71 +0,0 @@ -from openpype.pipeline import ( - load, - get_representation_path, -) -from openpype.hosts.fusion.api import ( - imprint_container, - get_current_comp, - comp_lock_and_undo_chunk -) - - -class FusionLoadAlembicMesh(load.LoaderPlugin): - """Load Alembic mesh into Fusion""" - - families = ["pointcache", "model"] - representations = ["*"] - extensions = {"abc"} - - label = "Load alembic mesh" - order = -10 - icon = "code-fork" - color = 
"orange" - - tool_type = "SurfaceAlembicMesh" - - def load(self, context, name, namespace, data): - # Fallback to asset name when namespace is None - if namespace is None: - namespace = context['asset']['name'] - - # Create the Loader with the filename path set - comp = get_current_comp() - with comp_lock_and_undo_chunk(comp, "Create tool"): - - path = self.filepath_from_context(context) - - args = (-32768, -32768) - tool = comp.AddTool(self.tool_type, *args) - tool["Filename"] = path - - imprint_container(tool, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """Update Alembic path""" - - tool = container["_tool"] - assert tool.ID == self.tool_type, f"Must be {self.tool_type}" - comp = tool.Comp() - - path = get_representation_path(representation) - - with comp_lock_and_undo_chunk(comp, "Update tool"): - tool["Filename"] = path - - # Update the imprinted representation - tool.SetData("avalon.representation", str(representation["_id"])) - - def remove(self, container): - tool = container["_tool"] - assert tool.ID == self.tool_type, f"Must be {self.tool_type}" - comp = tool.Comp() - - with comp_lock_and_undo_chunk(comp, "Remove tool"): - tool.Delete() diff --git a/openpype/hosts/fusion/plugins/load/load_fbx.py b/openpype/hosts/fusion/plugins/load/load_fbx.py deleted file mode 100644 index d15d2c33d7..0000000000 --- a/openpype/hosts/fusion/plugins/load/load_fbx.py +++ /dev/null @@ -1,86 +0,0 @@ -from openpype.pipeline import ( - load, - get_representation_path, -) -from openpype.hosts.fusion.api import ( - imprint_container, - get_current_comp, - comp_lock_and_undo_chunk, -) - - -class FusionLoadFBXMesh(load.LoaderPlugin): - """Load FBX mesh into Fusion""" - - families = ["*"] - representations = ["*"] - extensions = { - "3ds", - "amc", - "aoa", - "asf", - "bvh", - "c3d", - "dae", - "dxf", - "fbx", - "htr", - "mcd", - "obj", - "trc", - } - - label = "Load FBX mesh" - order = -10 - icon = "code-fork" - color = "orange" - - tool_type = "SurfaceFBXMesh" - - def load(self, context, name, namespace, data): - # Fallback to asset name when namespace is None - if namespace is None: - namespace = context["asset"]["name"] - - # Create the Loader with the filename path set - comp = get_current_comp() - with comp_lock_and_undo_chunk(comp, "Create tool"): - path = self.filepath_from_context(context) - - args = (-32768, -32768) - tool = comp.AddTool(self.tool_type, *args) - tool["ImportFile"] = path - - imprint_container( - tool, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__, - ) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """Update path""" - - tool = container["_tool"] - assert tool.ID == self.tool_type, f"Must be {self.tool_type}" - comp = tool.Comp() - - path = get_representation_path(representation) - - with comp_lock_and_undo_chunk(comp, "Update tool"): - tool["ImportFile"] = path - - # Update the imprinted representation - tool.SetData("avalon.representation", str(representation["_id"])) - - def remove(self, container): - tool = container["_tool"] - assert tool.ID == self.tool_type, f"Must be {self.tool_type}" - comp = tool.Comp() - - with comp_lock_and_undo_chunk(comp, "Remove tool"): - tool.Delete() diff --git a/openpype/hosts/fusion/plugins/load/load_workfile.py 
b/openpype/hosts/fusion/plugins/load/load_workfile.py deleted file mode 100644 index 14e36ca8fd..0000000000 --- a/openpype/hosts/fusion/plugins/load/load_workfile.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Import workfiles into your current comp. -As all imported nodes are free floating and will probably be changed there -is no update or reload function added for this plugin -""" - -from openpype.pipeline import load - -from openpype.hosts.fusion.api import ( - get_current_comp, - get_bmd_library, -) - - -class FusionLoadWorkfile(load.LoaderPlugin): - """Load the content of a workfile into Fusion""" - - families = ["workfile"] - representations = ["*"] - extensions = {"comp"} - - label = "Load Workfile" - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, data): - # Get needed elements - bmd = get_bmd_library() - comp = get_current_comp() - path = self.filepath_from_context(context) - - # Paste the content of the file into the current comp - comp.Paste(bmd.readfile(path)) diff --git a/openpype/hosts/fusion/plugins/publish/collect_inputs.py b/openpype/hosts/fusion/plugins/publish/collect_inputs.py deleted file mode 100644 index f23e4d0268..0000000000 --- a/openpype/hosts/fusion/plugins/publish/collect_inputs.py +++ /dev/null @@ -1,116 +0,0 @@ -import pyblish.api - -from openpype.pipeline import registered_host - - -def collect_input_containers(tools): - """Collect containers that contain any of the node in `nodes`. - - This will return any loaded Avalon container that contains at least one of - the nodes. As such, the Avalon container is an input for it. Or in short, - there are member nodes of that container. - - Returns: - list: Input avalon containers - - """ - - # Lookup by node ids - lookup = frozenset([tool.Name for tool in tools]) - - containers = [] - host = registered_host() - for container in host.ls(): - - name = container["_tool"].Name - - # We currently assume no "groups" as containers but just single tools - # like a single "Loader" operator. As such we just check whether the - # Loader is part of the processing queue. - if name in lookup: - containers.append(container) - - return containers - - -def iter_upstream(tool): - """Yields all upstream inputs for the current tool. - - Yields: - tool: The input tools. - - """ - - def get_connected_input_tools(tool): - """Helper function that returns connected input tools for a tool.""" - inputs = [] - - # Filter only to actual types that will have sensible upstream - # connections. So we ignore just "Number" inputs as they can be - # many to iterate, slowing things down quite a bit - and in practice - # they don't have upstream connections. - VALID_INPUT_TYPES = ['Image', 'Particles', 'Mask', 'DataType3D'] - for type_ in VALID_INPUT_TYPES: - for input_ in tool.GetInputList(type_).values(): - output = input_.GetConnectedOutput() - if output: - input_tool = output.GetTool() - inputs.append(input_tool) - - return inputs - - # Initialize process queue with the node's inputs itself - queue = get_connected_input_tools(tool) - - # We keep track of which node names we have processed so far, to ensure we - # don't process the same hierarchy again. We are not pushing the tool - # itself into the set as that doesn't correctly recognize the same tool. - # Since tool names are unique in a comp in Fusion we rely on that. - collected = set(tool.Name for tool in queue) - - # Traverse upstream references for all nodes and yield them as we - # process the queue. 
- while queue: - upstream_tool = queue.pop() - yield upstream_tool - - # Find upstream tools that are not collected yet. - upstream_inputs = get_connected_input_tools(upstream_tool) - upstream_inputs = [t for t in upstream_inputs if - t.Name not in collected] - - queue.extend(upstream_inputs) - collected.update(tool.Name for tool in upstream_inputs) - - -class CollectUpstreamInputs(pyblish.api.InstancePlugin): - """Collect source input containers used for this publish. - - This will include `inputs` data of which loaded publishes were used in the - generation of this publish. This leaves an upstream trace to what was used - as input. - - """ - - label = "Collect Inputs" - order = pyblish.api.CollectorOrder + 0.2 - hosts = ["fusion"] - families = ["render", "image"] - - def process(self, instance): - - # Get all upstream and include itself - if not any(instance[:]): - self.log.debug("No tool found in instance, skipping..") - return - - tool = instance[0] - nodes = list(iter_upstream(tool)) - nodes.append(tool) - - # Collect containers for the given set of nodes - containers = collect_input_containers(nodes) - - inputs = [c["representation"] for c in containers] - instance.data["inputRepresentations"] = inputs - self.log.debug("Collected inputs: %s" % inputs) diff --git a/openpype/hosts/fusion/plugins/publish/collect_render.py b/openpype/hosts/fusion/plugins/publish/collect_render.py deleted file mode 100644 index 366eaa905c..0000000000 --- a/openpype/hosts/fusion/plugins/publish/collect_render.py +++ /dev/null @@ -1,210 +0,0 @@ -import os -import attr -import pyblish.api - -from openpype.pipeline import publish -from openpype.pipeline.publish import RenderInstance -from openpype.hosts.fusion.api.lib import get_frame_path - - -@attr.s -class FusionRenderInstance(RenderInstance): - # extend generic, composition name is needed - fps = attr.ib(default=None) - projectEntity = attr.ib(default=None) - stagingDir = attr.ib(default=None) - app_version = attr.ib(default=None) - tool = attr.ib(default=None) - workfileComp = attr.ib(default=None) - publish_attributes = attr.ib(default={}) - frameStartHandle = attr.ib(default=None) - frameEndHandle = attr.ib(default=None) - - -class CollectFusionRender( - publish.AbstractCollectRender, - publish.ColormanagedPyblishPluginMixin -): - - order = pyblish.api.CollectorOrder + 0.09 - label = "Collect Fusion Render" - hosts = ["fusion"] - - def get_instances(self, context): - - comp = context.data.get("currentComp") - comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat") - aspect_x = comp_frame_format_prefs["AspectX"] - aspect_y = comp_frame_format_prefs["AspectY"] - - instances = [] - instances_to_remove = [] - - current_file = context.data["currentFile"] - version = context.data["version"] - - project_entity = context.data["projectEntity"] - - for inst in context: - if not inst.data.get("active", True): - continue - - family = inst.data["family"] - if family not in ["render", "image"]: - continue - - task_name = context.data["task"] - tool = inst.data["transientData"]["tool"] - - instance_families = inst.data.get("families", []) - subset_name = inst.data["subset"] - instance = FusionRenderInstance( - family=family, - tool=tool, - workfileComp=comp, - families=instance_families, - version=version, - time="", - source=current_file, - label=inst.data["label"], - subset=subset_name, - asset=inst.data["asset"], - task=task_name, - attachTo=False, - setMembers='', - publish=True, - name=subset_name, - resolutionWidth=comp_frame_format_prefs.get("Width"), - 
resolutionHeight=comp_frame_format_prefs.get("Height"), - pixelAspect=aspect_x / aspect_y, - tileRendering=False, - tilesX=0, - tilesY=0, - review="review" in instance_families, - frameStart=inst.data["frameStart"], - frameEnd=inst.data["frameEnd"], - handleStart=inst.data["handleStart"], - handleEnd=inst.data["handleEnd"], - frameStartHandle=inst.data["frameStartHandle"], - frameEndHandle=inst.data["frameEndHandle"], - frameStep=1, - fps=comp_frame_format_prefs.get("Rate"), - app_version=comp.GetApp().Version, - publish_attributes=inst.data.get("publish_attributes", {}) - ) - - render_target = inst.data["creator_attributes"]["render_target"] - - # Add render target family - render_target_family = f"render.{render_target}" - if render_target_family not in instance.families: - instance.families.append(render_target_family) - - # Add render target specific data - if render_target in {"local", "frames"}: - instance.projectEntity = project_entity - - if render_target == "farm": - fam = "render.farm" - if fam not in instance.families: - instance.families.append(fam) - instance.farm = True # to skip integrate - if "review" in instance.families: - # to skip ExtractReview locally - instance.families.remove("review") - - # add new instance to the list and remove the original - # instance since it is not needed anymore - instances.append(instance) - instances_to_remove.append(inst) - - for instance in instances_to_remove: - context.remove(instance) - - return instances - - def post_collecting_action(self): - for instance in self._context: - if "render.frames" in instance.data.get("families", []): - # adding representation data to the instance - self._update_for_frames(instance) - - def get_expected_files(self, render_instance): - """ - Returns list of rendered files that should be created by - Deadline. These are not published directly, they are source - for later 'submit_publish_job'. - - Args: - render_instance (RenderInstance): to pull anatomy and parts used - in url - - Returns: - (list) of absolute urls to rendered file - """ - start = render_instance.frameStart - render_instance.handleStart - end = render_instance.frameEnd + render_instance.handleEnd - - comp = render_instance.workfileComp - path = comp.MapPath( - render_instance.tool["Clip"][ - render_instance.workfileComp.TIME_UNDEFINED - ] - ) - output_dir = os.path.dirname(path) - render_instance.outputDir = output_dir - - basename = os.path.basename(path) - - head, padding, ext = get_frame_path(basename) - - expected_files = [] - for frame in range(start, end + 1): - expected_files.append( - os.path.join( - output_dir, - f"{head}{str(frame).zfill(padding)}{ext}" - ) - ) - - return expected_files - - def _update_for_frames(self, instance): - """Updating instance for render.frames family - - Adding representation data to the instance. Also setting - colorspaceData to the representation based on file rules. 
- """ - - expected_files = instance.data["expectedFiles"] - - start = instance.data["frameStart"] - instance.data["handleStart"] - - path = expected_files[0] - basename = os.path.basename(path) - staging_dir = os.path.dirname(path) - _, padding, ext = get_frame_path(basename) - - repre = { - "name": ext[1:], - "ext": ext[1:], - "frameStart": f"%0{padding}d" % start, - "files": [os.path.basename(f) for f in expected_files], - "stagingDir": staging_dir, - } - - self.set_representation_colorspace( - representation=repre, - context=instance.context, - ) - - # review representation - if instance.data.get("review", False): - repre["tags"] = ["review"] - - # add the repre to the instance - if "representations" not in instance.data: - instance.data["representations"] = [] - instance.data["representations"].append(repre) - - return instance diff --git a/openpype/hosts/fusion/plugins/publish/extract_render_local.py b/openpype/hosts/fusion/plugins/publish/extract_render_local.py deleted file mode 100644 index 068df22c06..0000000000 --- a/openpype/hosts/fusion/plugins/publish/extract_render_local.py +++ /dev/null @@ -1,207 +0,0 @@ -import os -import logging -import contextlib -import collections -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.fusion.api import comp_lock_and_undo_chunk -from openpype.hosts.fusion.api.lib import get_frame_path, maintained_comp_range - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def enabled_savers(comp, savers): - """Enable only the `savers` in Comp during the context. - - Any Saver tool in the passed composition that is not in the savers list - will be set to passthrough during the context. - - Args: - comp (object): Fusion composition object. - savers (list): List of Saver tool objects. - - """ - passthrough_key = "TOOLB_PassThrough" - original_states = {} - enabled_saver_names = {saver.Name for saver in savers} - - all_savers = comp.GetToolList(False, "Saver").values() - savers_by_name = {saver.Name: saver for saver in all_savers} - - try: - for saver in all_savers: - original_state = saver.GetAttrs()[passthrough_key] - original_states[saver.Name] = original_state - - # The passthrough state we want to set (passthrough != enabled) - state = saver.Name not in enabled_saver_names - if state != original_state: - saver.SetAttrs({passthrough_key: state}) - yield - finally: - for saver_name, original_state in original_states.items(): - saver = savers_by_name[saver_name] - saver.SetAttrs({"TOOLB_PassThrough": original_state}) - - -class FusionRenderLocal( - pyblish.api.InstancePlugin, - publish.ColormanagedPyblishPluginMixin -): - """Render the current Fusion composition locally.""" - - order = pyblish.api.ExtractorOrder - 0.2 - label = "Render Local" - hosts = ["fusion"] - families = ["render.local"] - - is_rendered_key = "_fusionrenderlocal_has_rendered" - - def process(self, instance): - - # Start render - result = self.render(instance) - if result is False: - raise RuntimeError(f"Comp render failed for {instance}") - - self._add_representation(instance) - - # Log render status - self.log.info( - "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format( - nm=instance.data["name"], - ast=instance.data["asset"], - tsk=instance.data["task"], - ) - ) - - def render(self, instance): - """Render instance. - - We try to render the minimal amount of times by combining the instances - that have a matching frame range in one Fusion render. 
Then for the - batch of instances we store whether the render succeeded or failed. - - """ - - if self.is_rendered_key in instance.data: - # This instance was already processed in batch with another - # instance, so we just return the render result directly - self.log.debug(f"Instance {instance} was already rendered") - return instance.data[self.is_rendered_key] - - instances_by_frame_range = self.get_render_instances_by_frame_range( - instance.context - ) - - # Render matching batch of instances that share the same frame range - frame_range = self.get_instance_render_frame_range(instance) - render_instances = instances_by_frame_range[frame_range] - - # We initialize render state false to indicate it wasn't successful - # yet to keep track of whether Fusion succeeded. This is for cases - # where an error below this might cause the comp render result not - # to be stored for the instances of this batch - for render_instance in render_instances: - render_instance.data[self.is_rendered_key] = False - - savers_to_render = [inst.data["tool"] for inst in render_instances] - current_comp = instance.context.data["currentComp"] - frame_start, frame_end = frame_range - - self.log.info( - f"Starting Fusion render frame range {frame_start}-{frame_end}" - ) - saver_names = ", ".join(saver.Name for saver in savers_to_render) - self.log.info(f"Rendering tools: {saver_names}") - - with comp_lock_and_undo_chunk(current_comp): - with maintained_comp_range(current_comp): - with enabled_savers(current_comp, savers_to_render): - result = current_comp.Render( - { - "Start": frame_start, - "End": frame_end, - "Wait": True, - } - ) - - # Store the render state for all the rendered instances - for render_instance in render_instances: - render_instance.data[self.is_rendered_key] = bool(result) - - return result - - def _add_representation(self, instance): - """Add representation to instance""" - - expected_files = instance.data["expectedFiles"] - - start = instance.data["frameStart"] - instance.data["handleStart"] - - path = expected_files[0] - _, padding, ext = get_frame_path(path) - - staging_dir = os.path.dirname(path) - - files = [os.path.basename(f) for f in expected_files] - if len(expected_files) == 1: - files = files[0] - - repre = { - "name": ext[1:], - "ext": ext[1:], - "frameStart": f"%0{padding}d" % start, - "files": files, - "stagingDir": staging_dir, - } - - self.set_representation_colorspace( - representation=repre, - context=instance.context, - ) - - # review representation - if instance.data.get("review", False): - repre["tags"] = ["review"] - - # add the repre to the instance - if "representations" not in instance.data: - instance.data["representations"] = [] - instance.data["representations"].append(repre) - - return instance - - def get_render_instances_by_frame_range(self, context): - """Return enabled render.local instances grouped by their frame range. 
- - Arguments: - context (pyblish.Context): The pyblish context - - Returns: - dict: (start, end): instances mapping - - """ - - instances_to_render = [ - instance for instance in context if - # Only active instances - instance.data.get("publish", True) and - # Only render.local instances - "render.local" in instance.data.get("families", []) - ] - - # Instances by frame ranges - instances_by_frame_range = collections.defaultdict(list) - for instance in instances_to_render: - start, end = self.get_instance_render_frame_range(instance) - instances_by_frame_range[(start, end)].append(instance) - - return dict(instances_by_frame_range) - - def get_instance_render_frame_range(self, instance): - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - return start, end diff --git a/openpype/hosts/fusion/plugins/publish/increment_current_file.py b/openpype/hosts/fusion/plugins/publish/increment_current_file.py deleted file mode 100644 index 08a65bf52d..0000000000 --- a/openpype/hosts/fusion/plugins/publish/increment_current_file.py +++ /dev/null @@ -1,44 +0,0 @@ -import pyblish.api - -from openpype.pipeline import OptionalPyblishPluginMixin -from openpype.pipeline import KnownPublishError - - -class FusionIncrementCurrentFile( - pyblish.api.ContextPlugin, OptionalPyblishPluginMixin -): - """Increment the current file. - - Saves the current file with an increased version number. - - """ - - label = "Increment workfile version" - order = pyblish.api.IntegratorOrder + 9.0 - hosts = ["fusion"] - optional = True - - def process(self, context): - if not self.is_active(context.data): - return - - from openpype.lib import version_up - from openpype.pipeline.publish import get_errored_plugins_from_context - - errored_plugins = get_errored_plugins_from_context(context) - if any( - plugin.__name__ == "FusionSubmitDeadline" - for plugin in errored_plugins - ): - raise KnownPublishError( - "Skipping incrementing current file because " - "submission to render farm failed." - ) - - comp = context.data.get("currentComp") - assert comp, "Must have comp" - - current_filepath = context.data["currentFile"] - new_filepath = version_up(current_filepath) - - comp.Save(new_filepath) diff --git a/openpype/hosts/fusion/plugins/publish/validate_unique_subsets.py b/openpype/hosts/fusion/plugins/publish/validate_unique_subsets.py deleted file mode 100644 index d1693ef3dc..0000000000 --- a/openpype/hosts/fusion/plugins/publish/validate_unique_subsets.py +++ /dev/null @@ -1,55 +0,0 @@ -from collections import defaultdict - -import pyblish.api -from openpype.pipeline import PublishValidationError - -from openpype.hosts.fusion.api.action import SelectInvalidAction - - -class ValidateUniqueSubsets(pyblish.api.ContextPlugin): - """Ensure all instances have a unique subset name""" - - order = pyblish.api.ValidatorOrder - label = "Validate Unique Subsets" - families = ["render", "image"] - hosts = ["fusion"] - actions = [SelectInvalidAction] - - @classmethod - def get_invalid(cls, context): - - # Collect instances per subset per asset - instances_per_subset_asset = defaultdict(lambda: defaultdict(list)) - for instance in context: - asset = instance.data.get("asset", context.data.get("asset")) - subset = instance.data.get("subset", context.data.get("subset")) - instances_per_subset_asset[asset][subset].append(instance) - - # Find which asset + subset combination has more than one instance - # Those are considered invalid because they'd integrate to the same - # destination. 
-        invalid = []
-        for asset, instances_per_subset in instances_per_subset_asset.items():
-            for subset, instances in instances_per_subset.items():
-                if len(instances) > 1:
-                    cls.log.warning(
-                        "{asset} > {subset} used by more than "
-                        "one instance: {instances}".format(
-                            asset=asset,
-                            subset=subset,
-                            instances=instances
-                        )
-                    )
-                    invalid.extend(instances)
-
-        # Return tools for the invalid instances so they can be selected
-        invalid = [instance.data["tool"] for instance in invalid]
-
-        return invalid
-
-    def process(self, context):
-        invalid = self.get_invalid(context)
-        if invalid:
-            raise PublishValidationError("Multiple instances are set to "
-                                         "the same asset > subset.",
-                                         title=self.label)
diff --git a/openpype/hosts/harmony/addon.py b/openpype/hosts/harmony/addon.py
deleted file mode 100644
index efef40ab92..0000000000
--- a/openpype/hosts/harmony/addon.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import os
-from openpype.modules import OpenPypeModule, IHostAddon
-
-HARMONY_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-class HarmonyAddon(OpenPypeModule, IHostAddon):
-    name = "harmony"
-    host_name = "harmony"
-
-    def initialize(self, module_settings):
-        self.enabled = True
-
-    def add_implementation_envs(self, env, _app):
-        """Modify environments to contain all required for implementation."""
-        openharmony_path = os.path.join(
-            HARMONY_HOST_DIR, "vendor", "OpenHarmony"
-        )
-        # TODO check if it is already set? What to do if it is already set?
-        env["LIB_OPENHARMONY_PATH"] = openharmony_path
-
-    def get_workfile_extensions(self):
-        return [".zip"]
diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md
deleted file mode 100644
index be3920fe29..0000000000
--- a/openpype/hosts/harmony/api/README.md
+++ /dev/null
@@ -1,655 +0,0 @@
-# Harmony Integration
-
-## Setup
-
-The easiest way to set up Toon Boom Harmony is to use the built-in launch:
-
-```
-python -c "import openpype.hosts.harmony.api as harmony;harmony.launch('path/to/harmony/executable')"
-```
-
-Communication with Harmony happens with a server/client relationship, where the server lives in the Python process and the client in the Harmony process. Messages between Python and Harmony are required to be dictionaries, which are serialized to strings:
-```
-+------------+
-|            |
-|   Python   |
-|   Process  |
-|            |
-| +--------+ |
-| |        | |
-| |  Main  | |
-| | Thread | |
-| |        | |
-| +----^---+ |
-|     ||     |
-|     ||     |
-| +---v----+ |     +---------+
-| |        | |     |         |
-| | Server +-------> Harmony |
-| | Thread <-------+ Process |
-| |        | |     |         |
-| +--------+ |     +---------+
-+------------+
-```
-
-The server/client connection now uses a stricter protocol, which is necessary to have precise control over the data passed between server and client. Each message is prepended with 6 bytes:
-```
-| A | H | 0x00 | 0x00 | 0x00 | 0x00 | ...
-
-```
-The first two bytes are *magic* bytes standing for **A**valon **H**armony. The next four bytes hold the length of the message `...`, encoded as a 32-bit unsigned integer. This way the receiver knows how many bytes to read from the socket, whether it needs more data, or whether it has to parse multiple messages.
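A minimal sketch of this framing, assuming JSON serialization and a big-endian length field (both are assumptions; the text above does not pin either down):

```python
import json
import struct

MAGIC = b"AH"  # **A**valon **H**armony

def frame_message(payload: dict) -> bytes:
    """Serialize a dict and prepend the 2 magic bytes and 4 length bytes."""
    body = json.dumps(payload).encode("utf-8")
    return MAGIC + struct.pack(">I", len(body)) + body  # byte order assumed

def parse_buffer(buffer: bytes):
    """Split a receive buffer into complete messages plus the remainder."""
    messages = []
    while len(buffer) >= 6:
        if buffer[:2] != MAGIC:
            raise ValueError("buffer does not start with magic bytes 'AH'")
        (length,) = struct.unpack(">I", buffer[2:6])
        if len(buffer) < 6 + length:
            break  # incomplete message, need more bytes from the socket
        messages.append(json.loads(buffer[6:6 + length].decode("utf-8")))
        buffer = buffer[6 + length:]
    return messages, buffer
```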
-
-## Usage
-
-The integration creates an `Openpype` menu entry where all related tools are located.
-
-**NOTE: Menu creation can be temperamental. The best way is to launch Harmony and do nothing else until Harmony is fully launched.**
-
-### Work files
-
-Because Harmony projects are directories, this integration uses `.zip` as the work file extension. Internally, the project directories are stored under `[User]/.avalon/harmony`. Whenever the user saves the `.xstage` file, the integration zips up the project directory and moves it to the Avalon project path. Zipping and moving happen in the background.
-
-### Show Workfiles on launch
-
-You can show the Workfiles app when Harmony launches by setting the environment variable `AVALON_HARMONY_WORKFILES_ON_LAUNCH=1`.
-
-## Developing
-
-### Low level messaging
-To send from Python to Harmony you can use the exposed method:
-```python
-import openpype.hosts.harmony.api as harmony
-from uuid import uuid4
-
-
-signature = str(uuid4()).replace("-", "_")
-func = """function %s_hello(person)
-{
-  return ("Hello " + person + "!");
-}
-%s_hello
-""" % (signature, signature)
-print(harmony.send({"function": func, "args": ["Python"]})["result"])
-```
-**NOTE:** It's important to declare the function at the end of the function string. You can have multiple functions within your function string, but the one declared at the end is what gets executed.
-
-To send a function with multiple arguments, it's best to declare the arguments within the function:
-```python
-import openpype.hosts.harmony.api as harmony
-from uuid import uuid4
-
-signature = str(uuid4()).replace("-", "_")
-func = """function %s_hello(args)
-{
-  var greeting = args[0];
-  var person = args[1];
-  return (greeting + " " + person + "!");
-}
-%s_hello
-""" % (signature, signature)
-print(harmony.send({"function": func, "args": ["Hello", "Python"]})["result"])
-```
-
-### Caution
-
-When naming your functions, be aware that they are executed in global scope. They can potentially clash with Harmony's own function and object names. For example, `func` is an already existing Harmony object; if you name your own function `func`, it will overwrite the Harmony one in global scope, causing erratic behavior of Harmony. OpenPype therefore prefixes these function names with a [UUID4](https://docs.python.org/3/library/uuid.html), making the chance of such a clash minimal. See the examples above for how that works; they result in a function named like `38dfcef0_a6d7_4064_8069_51fe99ab276e_hello()`. You can find the list of Harmony objects and functions in the Harmony documentation.
-
-### Higher level (recommended)
-
-Instead of sending functions directly to Harmony, it is more efficient and safer to add your code to `js/PypeHarmony.js` or to utilize the `{"script": "..."}` method.
-
-#### Extending PypeHarmony.js
-
-Add your function to `PypeHarmony.js`. For example:
-
-```javascript
-PypeHarmony.myAwesomeFunction = function() {
-    someCoolStuff();
-};
-```
-Then you can call that JavaScript code from Python like:
-
-```python
-import openpype.hosts.harmony.api as harmony
-
-harmony.send({"function": "PypeHarmony.myAwesomeFunction"})
-```
-
-#### Using the Script method
-
-You can also pass whole scripts into Harmony and call their functions later as needed.
-
-For example, say you have a bunch of JavaScript files:
-
-```javascript
-/* Master.js */
-
-var Master = {
-    Foo: {},
-    Boo: {}
-};
-
-/* FileA.js */
-var Foo = function() {};
-
-Foo.prototype.A = function() {
-    someAStuff();
-};
-
-// This will construct object Foo and add it to the Master namespace.
-Master.Foo = new Foo();
-
-/* FileB.js */
-var Boo = function() {};
-
-Boo.prototype.B = function() {
-    someBStuff();
-};
-
-// This will construct object Boo and add it to the Master namespace.
-Master.Boo = new Boo();
-```
-
-Now in Python, just read all those files and send them to Harmony.
-
-```python
-from pathlib import Path
-import openpype.hosts.harmony.api as harmony
-
-path_to_js = Path('/path/to/my/js')
-script_to_send = ""
-
-for file in path_to_js.iterdir():
-    if file.suffix == ".js":
-        script_to_send += file.read_text()
-
-harmony.send({"script": script_to_send})
-
-# and use your code in Harmony
-harmony.send({"function": "Master.Boo.B"})
-```
-
-### Scene Save
-Instead of sending a request to Harmony with `scene.saveAll`, please use:
-```python
-import openpype.hosts.harmony.api as harmony
-harmony.save_scene()
-```
-
-<details>
-<summary>Click to expand for details on scene save.</summary>
-
-Because OpenPype tools do not deal well with folders representing a single entity like a Harmony scene, this integration uses zip files to encapsulate the Harmony scene folders. Saving the scene in Harmony via the menu or CTRL+S will not produce a zip file; only saving from Workfiles will. This is because the zipping process can take some time, during which we cannot block the user from saving again. If the `.xstage` file is changed while the zipping process runs, it would produce a corrupted zip archive.
-</details>
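To make the timing concern concrete, here is a minimal sketch of a zip-then-rename approach; an editor's illustration with assumed names, not the integration's actual code. Writing to a temporary file and renaming it into place means an interrupted zip can never replace the last good workfile:

```python
import os
import zipfile

def zip_scene_dir(scene_dir: str, zip_path: str) -> None:
    """Zip a Harmony scene folder without risking a corrupt workfile."""
    tmp_path = zip_path + ".tmp"
    with zipfile.ZipFile(tmp_path, "w", zipfile.ZIP_DEFLATED) as zf:
        for root, _dirs, files in os.walk(scene_dir):
            for name in files:
                full_path = os.path.join(root, name)
                # Store paths relative to the scene folder's parent so the
                # archive unzips back into a single scene directory.
                arcname = os.path.relpath(
                    full_path, os.path.dirname(scene_dir)
                )
                zf.write(full_path, arcname)
    # Atomic on the same filesystem: a reader never sees a partial zip.
    os.replace(tmp_path, zip_path)
```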
-
-### Plugin Examples
-These plugins were made with the [polly config](https://github.com/mindbender-studio/config).
-
-#### Creator Plugin
-```python
-import openpype.hosts.harmony.api as harmony
-
-
-class CreateComposite(harmony.Creator):
-    """Composite node for publish."""
-
-    name = "compositeDefault"
-    label = "Composite"
-    family = "mindbender.template"
-
-    def __init__(self, *args, **kwargs):
-        super(CreateComposite, self).__init__(*args, **kwargs)
-```
-
-The creator plugin can be configured to use other node types. For example, here is a write node creator:
-```python
-from uuid import uuid4
-
-import openpype.hosts.harmony.api as harmony
-
-
-class CreateRender(harmony.Creator):
-    """Composite node for publishing renders."""
-
-    name = "writeDefault"
-    label = "Write"
-    family = "mindbender.imagesequence"
-    node_type = "WRITE"
-
-    def __init__(self, *args, **kwargs):
-        super(CreateRender, self).__init__(*args, **kwargs)
-
-    def setup_node(self, node):
-        signature = str(uuid4()).replace("-", "_")
-        func = """function %s_func(args)
-        {
-            node.setTextAttr(args[0], "DRAWING_TYPE", 1, "PNG4");
-        }
-        %s_func
-        """ % (signature, signature)
-        harmony.send(
-            {"function": func, "args": [node]}
-        )
-```
-
-#### Collector Plugin
-```python
-import pyblish.api
-import openpype.hosts.harmony.api as harmony
-
-
-class CollectInstances(pyblish.api.ContextPlugin):
-    """Gather instances by node metadata.
-
-    This collector takes into account assets that are associated with
-    a composite node and marked with a unique identifier.
-
-    Identifier:
-        id (str): "pyblish.avalon.instance"
-    """
-
-    label = "Instances"
-    order = pyblish.api.CollectorOrder
-    hosts = ["harmony"]
-
-    def process(self, context):
-        nodes = harmony.send(
-            {"function": "node.getNodes", "args": [["COMPOSITE"]]}
-        )["result"]
-
-        for node in nodes:
-            data = harmony.read(node)
-
-            # Skip non-tagged nodes.
-            if not data:
-                continue
-
-            # Skip containers.
-            if "container" in data["id"]:
-                continue
-
-            instance = context.create_instance(node.split("/")[-1])
-            instance.append(node)
-            instance.data.update(data)
-
-            # Produce diagnostic message for any graphical
-            # user interface interested in visualising it.
-            self.log.info("Found: \"%s\" " % instance.data["name"])
-```
-
-#### Extractor Plugin
-```python
-import os
-from uuid import uuid4
-
-import pyblish.api
-import openpype.hosts.harmony.api as harmony
-
-import clique
-
-
-class ExtractImage(pyblish.api.InstancePlugin):
-    """Produce a flattened image file from instance.
-
-    This plug-in only takes into account the nodes connected to the composite.
-    """
-    label = "Extract Image Sequence"
-    order = pyblish.api.ExtractorOrder
-    hosts = ["harmony"]
-    families = ["mindbender.imagesequence"]
-
-    def process(self, instance):
-        project_path = harmony.send(
-            {"function": "scene.currentProjectPath"}
-        )["result"]
-
-        # Store reference for integration
-        if "files" not in instance.data:
-            instance.data["files"] = list()
-
-        # Store display source node for later.
- display_node = "Top/Display" - signature = str(uuid4()).replace("-", "_") - func = """function %s_func(display_node) - { - var source_node = null; - if (node.isLinked(display_node, 0)) - { - source_node = node.srcNode(display_node, 0); - node.unlink(display_node, 0); - } - return source_node - } - %s_func - """ % (signature, signature) - display_source_node = harmony.send( - {"function": func, "args": [display_node]} - )["result"] - - # Perform extraction - path = os.path.join( - os.path.normpath( - project_path - ).replace("\\", "/"), - instance.data["name"] - ) - if not os.path.exists(path): - os.makedirs(path) - - render_func = """function frameReady(frame, celImage) - {{ - var path = "{path}/{filename}" + frame + ".png"; - celImage.imageFileAs(path, "", "PNG4"); - }} - function %s_func(composite_node) - {{ - node.link(composite_node, 0, "{display_node}", 0); - render.frameReady.connect(frameReady); - render.setRenderDisplay("{display_node}"); - render.renderSceneAll(); - render.frameReady.disconnect(frameReady); - }} - %s_func - """ % (signature, signature) - restore_func = """function %s_func(args) - { - var display_node = args[0]; - var display_source_node = args[1]; - if (node.isLinked(display_node, 0)) - { - node.unlink(display_node, 0); - } - node.link(display_source_node, 0, display_node, 0); - } - %s_func - """ % (signature, signature) - - with harmony.maintained_selection(): - self.log.info("Extracting %s" % str(list(instance))) - - harmony.send( - { - "function": render_func.format( - path=path.replace("\\", "/"), - filename=os.path.basename(path), - display_node=display_node - ), - "args": [instance[0]] - } - ) - - # Restore display. - if display_source_node: - harmony.send( - { - "function": restore_func, - "args": [display_node, display_source_node] - } - ) - - files = os.listdir(path) - collections, remainder = clique.assemble(files, minimum_items=1) - assert not remainder, ( - "There shouldn't have been a remainder for '%s': " - "%s" % (instance[0], remainder) - ) - assert len(collections) == 1, ( - "There should only be one image sequence in {}. 
Found: {}".format( - path, len(collections) - ) - ) - - data = { - "subset": collections[0].head, - "isSeries": True, - "stagingDir": path, - "files": list(collections[0]), - } - instance.data.update(data) - - self.log.info("Extracted {instance} to {path}".format(**locals())) -``` - -#### Loader Plugin -```python -import os - -import openpype.hosts.harmony.api as harmony - -signature = str(uuid4()).replace("-", "_") -copy_files = """function copyFile(srcFilename, dstFilename) -{ - var srcFile = new PermanentFile(srcFilename); - var dstFile = new PermanentFile(dstFilename); - srcFile.copy(dstFile); -} -""" - -import_files = """function %s_import_files() -{ - var PNGTransparencyMode = 0; // Premultiplied with Black - var TGATransparencyMode = 0; // Premultiplied with Black - var SGITransparencyMode = 0; // Premultiplied with Black - var LayeredPSDTransparencyMode = 1; // Straight - var FlatPSDTransparencyMode = 2; // Premultiplied with White - - function getUniqueColumnName( column_prefix ) - { - var suffix = 0; - // finds if unique name for a column - var column_name = column_prefix; - while(suffix < 2000) - { - if(!column.type(column_name)) - break; - - suffix = suffix + 1; - column_name = column_prefix + "_" + suffix; - } - return column_name; - } - - function import_files(args) - { - var root = args[0]; - var files = args[1]; - var name = args[2]; - var start_frame = args[3]; - - var vectorFormat = null; - var extension = null; - var filename = files[0]; - - var pos = filename.lastIndexOf("."); - if( pos < 0 ) - return null; - - extension = filename.substr(pos+1).toLowerCase(); - - if(extension == "jpeg") - extension = "jpg"; - if(extension == "tvg") - { - vectorFormat = "TVG" - extension ="SCAN"; // element.add() will use this. - } - - var elemId = element.add( - name, - "BW", - scene.numberOfUnitsZ(), - extension.toUpperCase(), - vectorFormat - ); - if (elemId == -1) - { - // hum, unknown file type most likely -- let's skip it. - return null; // no read to add. - } - - var uniqueColumnName = getUniqueColumnName(name); - column.add(uniqueColumnName , "DRAWING"); - column.setElementIdOfDrawing(uniqueColumnName, elemId); - - var read = node.add(root, name, "READ", 0, 0, 0); - var transparencyAttr = node.getAttr( - read, frame.current(), "READ_TRANSPARENCY" - ); - var opacityAttr = node.getAttr(read, frame.current(), "OPACITY"); - transparencyAttr.setValue(true); - opacityAttr.setValue(true); - - var alignmentAttr = node.getAttr(read, frame.current(), "ALIGNMENT_RULE"); - alignmentAttr.setValue("ASIS"); - - var transparencyModeAttr = node.getAttr( - read, frame.current(), "applyMatteToColor" - ); - if (extension == "png") - transparencyModeAttr.setValue(PNGTransparencyMode); - if (extension == "tga") - transparencyModeAttr.setValue(TGATransparencyMode); - if (extension == "sgi") - transparencyModeAttr.setValue(SGITransparencyMode); - if (extension == "psd") - transparencyModeAttr.setValue(FlatPSDTransparencyMode); - - node.linkAttr(read, "DRAWING.ELEMENT", uniqueColumnName); - - // Create a drawing for each file. - for( var i =0; i <= files.length - 1; ++i) - { - timing = start_frame + i - // Create a drawing drawing, 'true' indicate that the file exists. - Drawing.create(elemId, timing, true); - // Get the actual path, in tmp folder. 
-            var drawingFilePath = Drawing.filename(elemId, timing.toString());
-            copyFile( files[i], drawingFilePath );
-
-            column.setEntry(uniqueColumnName, 1, timing, timing.toString());
-        }
-        return read;
-    }
-    return import_files(args);
-}
-%s_import_files
-""" % (signature, signature)
-
-replace_files = """function %s_replace_files(args)
-{
-    var files = args[0];
-    var _node = args[1];
-    var start_frame = args[2];
-
-    var _column = node.linkedColumn(_node, "DRAWING.ELEMENT");
-
-    // Delete existing drawings.
-    var timings = column.getDrawingTimings(_column);
-    for( var i =0; i <= timings.length - 1; ++i)
-    {
-        column.deleteDrawingAt(_column, parseInt(timings[i]));
-    }
-
-    // Create new drawings.
-    for( var i =0; i <= files.length - 1; ++i)
-    {
-        timing = start_frame + i
-        // Create a drawing; 'true' indicates that the file exists.
-        Drawing.create(node.getElementId(_node), timing, true);
-        // Get the actual path, in tmp folder.
-        var drawingFilePath = Drawing.filename(
-            node.getElementId(_node), timing.toString()
-        );
-        copyFile( files[i], drawingFilePath );
-
-        column.setEntry(_column, 1, timing, timing.toString());
-    }
-}
-%s_replace_files
-""" % (signature, signature)
-
-
-class ImageSequenceLoader(load.LoaderPlugin):
-    """Load image sequences.
-
-    Stores the imported asset in a container named after the asset.
-    """
-    families = ["mindbender.imagesequence"]
-    representations = ["*"]
-
-    def load(self, context, name=None, namespace=None, data=None):
-        files = []
-        for f in context["version"]["data"]["files"]:
-            files.append(
-                os.path.join(
-                    context["version"]["data"]["stagingDir"], f
-                ).replace("\\", "/")
-            )
-
-        read_node = harmony.send(
-            {
-                "function": copy_files + import_files,
-                "args": ["Top", files, context["version"]["data"]["subset"], 1]
-            }
-        )["result"]
-
-        self[:] = [read_node]
-
-        return harmony.containerise(
-            name,
-            namespace,
-            read_node,
-            context,
-            self.__class__.__name__
-        )
-
-    def update(self, container, representation):
-        node = container.pop("node")
-
-        project_name = get_current_project_name()
-        version = get_version_by_id(project_name, representation["parent"])
-        files = []
-        for f in version["data"]["files"]:
-            files.append(
-                os.path.join(
-                    version["data"]["stagingDir"], f
-                ).replace("\\", "/")
-            )
-
-        harmony.send(
-            {
-                "function": copy_files + replace_files,
-                "args": [files, node, 1]
-            }
-        )
-
-        harmony.imprint(
-            node, {"representation": str(representation["_id"])}
-        )
-
-    def remove(self, container):
-        node = container.pop("node")
-        signature = str(uuid4()).replace("-", "_")
-        func = """function %s_deleteNode(_node)
-        {
-            node.deleteNode(_node, true, true);
-        }
-        %s_deleteNode
-        """ % (signature, signature)
-        harmony.send(
-            {"function": func, "args": [node]}
-        )
-
-    def switch(self, container, representation):
-        self.update(container, representation)
-```
-
-## Resources
-- https://github.com/diegogarciahuerta/tk-harmony
-- https://github.com/cfourney/OpenHarmony
-- [Toon Boom Discord](https://discord.gg/syAjy4H)
-- [Toon Boom TD](https://discord.gg/yAjyQtZ)
diff --git a/openpype/hosts/harmony/api/lib.py b/openpype/hosts/harmony/api/lib.py
deleted file mode 100644
index b009dabb44..0000000000
--- a/openpype/hosts/harmony/api/lib.py
+++ /dev/null
@@ -1,625 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Utility functions used for Avalon - Harmony integration."""
-import subprocess
-import threading
-import os
-import random
-import zipfile
-import sys
-import filecmp
-import shutil
-import logging
-import contextlib
-import json
-import signal
-import time
-from uuid import
uuid4 -from qtpy import QtWidgets, QtCore, QtGui -import collections - -from .server import Server - -from openpype.tools.stdout_broker.app import StdOutBroker -from openpype.tools.utils import host_tools -from openpype import style -from openpype.lib.applications import get_non_python_host_kwargs - -# Setup logging. -log = logging.getLogger(__name__) -log.setLevel(logging.DEBUG) - - -class ProcessContext: - server = None - pid = None - process = None - application_path = None - callback_queue = collections.deque() - workfile_path = None - port = None - stdout_broker = None - workfile_tool = None - - @classmethod - def execute_in_main_thread(cls, func_to_call_from_main_thread): - cls.callback_queue.append(func_to_call_from_main_thread) - - @classmethod - def main_thread_listen(cls): - if cls.callback_queue: - callback = cls.callback_queue.popleft() - callback() - if cls.process is not None and cls.process.poll() is not None: - log.info("Server is not running, closing") - ProcessContext.stdout_broker.stop() - QtWidgets.QApplication.quit() - - -def signature(postfix="func") -> str: - """Return random ECMA6 compatible function name. - - Args: - postfix (str): name to append to random string. - Returns: - str: random function name. - - """ - return "f{}_{}".format(str(uuid4()).replace("-", "_"), postfix) - - -class _ZipFile(zipfile.ZipFile): - """Extended check for windows invalid characters.""" - - # this is extending default zipfile table for few invalid characters - # that can come from Mac - _windows_illegal_characters = ":<>|\"?*\r\n\x00" - _windows_illegal_name_trans_table = str.maketrans( - _windows_illegal_characters, - "_" * len(_windows_illegal_characters) - ) - - -def main(*subprocess_args): - # coloring in StdOutBroker - os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" - app = QtWidgets.QApplication([]) - app.setQuitOnLastWindowClosed(False) - icon = QtGui.QIcon(style.get_app_icon_path()) - app.setWindowIcon(icon) - - ProcessContext.stdout_broker = StdOutBroker('harmony') - ProcessContext.stdout_broker.start() - launch(*subprocess_args) - - loop_timer = QtCore.QTimer() - loop_timer.setInterval(20) - - loop_timer.timeout.connect(ProcessContext.main_thread_listen) - loop_timer.start() - - sys.exit(app.exec_()) - - -def setup_startup_scripts(): - """Manages installation of avalon's TB_sceneOpened.js for Harmony launch. - - If a studio already has defined "TOONBOOM_GLOBAL_SCRIPT_LOCATION", copies - the TB_sceneOpened.js to that location if the file is different. - Otherwise, will set the env var to point to the avalon/harmony folder. - - Admins should be aware that this will overwrite TB_sceneOpened in the - "TOONBOOM_GLOBAL_SCRIPT_LOCATION", and that if they want to have additional - logic, they will need to one of the following: - * Create a Harmony package to manage startup logic - * Use TB_sceneOpenedUI.js instead to manage startup logic - * Add their startup logic to avalon/harmony/TB_sceneOpened.js - """ - avalon_dcc_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), - "api") - startup_js = "TB_sceneOpened.js" - - if os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"): - - avalon_harmony_startup = os.path.join(avalon_dcc_dir, startup_js) - - env_harmony_startup = os.path.join( - os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"), startup_js) - - if not filecmp.cmp(avalon_harmony_startup, env_harmony_startup): - try: - shutil.copy(avalon_harmony_startup, env_harmony_startup) - except Exception as e: - log.error(e) - log.warning( - "Failed to copy {0} to {1}! 
" - "Defaulting to Avalon TOONBOOM_GLOBAL_SCRIPT_LOCATION." - .format(avalon_harmony_startup, env_harmony_startup)) - - os.environ["TOONBOOM_GLOBAL_SCRIPT_LOCATION"] = avalon_dcc_dir - else: - os.environ["TOONBOOM_GLOBAL_SCRIPT_LOCATION"] = avalon_dcc_dir - - -def check_libs(): - """Check if `OpenHarmony`_ is available. - - Avalon expects either path in `LIB_OPENHARMONY_PATH` or `openHarmony.js` - present in `TOONBOOM_GLOBAL_SCRIPT_LOCATION`. - - Throws: - RuntimeError: If openHarmony is not found. - - .. _OpenHarmony: - https://github.com/cfourney/OpenHarmony - - """ - if not os.getenv("LIB_OPENHARMONY_PATH"): - - if os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"): - if os.path.exists( - os.path.join( - os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION"), - "openHarmony.js")): - - os.environ["LIB_OPENHARMONY_PATH"] = \ - os.getenv("TOONBOOM_GLOBAL_SCRIPT_LOCATION") - return - - else: - log.error(("Cannot find OpenHarmony library. " - "Please set path to it in LIB_OPENHARMONY_PATH " - "environment variable.")) - raise RuntimeError("Missing OpenHarmony library.") - - -def launch(application_path, *args): - """Set Harmony for launch. - - Launches Harmony and the server, then starts listening on the main thread - for callbacks from the server. This is to have Qt applications run in the - main thread. - - Args: - application_path (str): Path to Harmony. - - """ - from openpype.pipeline import install_host - from openpype.hosts.harmony import api as harmony - - install_host(harmony) - - ProcessContext.port = random.randrange(49152, 65535) - os.environ["AVALON_HARMONY_PORT"] = str(ProcessContext.port) - ProcessContext.application_path = application_path - - # Launch Harmony. - setup_startup_scripts() - check_libs() - - if not os.environ.get("AVALON_HARMONY_WORKFILES_ON_LAUNCH", False): - open_empty_workfile() - return - - ProcessContext.workfile_tool = host_tools.get_tool_by_name("workfiles") - host_tools.show_workfiles(save=False) - ProcessContext.execute_in_main_thread(check_workfiles_tool) - - -def check_workfiles_tool(): - if ProcessContext.workfile_tool.isVisible(): - ProcessContext.execute_in_main_thread(check_workfiles_tool) - elif not ProcessContext.workfile_path: - open_empty_workfile() - - -def open_empty_workfile(): - zip_file = os.path.join(os.path.dirname(__file__), "temp.zip") - temp_path = get_local_harmony_path(zip_file) - if os.path.exists(temp_path): - log.info(f"removing existing {temp_path}") - try: - shutil.rmtree(temp_path) - except Exception as e: - log.critical(f"cannot clear {temp_path}") - raise Exception(f"cannot clear {temp_path}") from e - - launch_zip_file(zip_file) - - -def get_local_harmony_path(filepath): - """From the provided path get the equivalent local Harmony path.""" - basename = os.path.splitext(os.path.basename(filepath))[0] - harmony_path = os.path.join(os.path.expanduser("~"), ".avalon", "harmony") - return os.path.join(harmony_path, basename) - - -def launch_zip_file(filepath): - """Launch a Harmony application instance with the provided zip file. - - Args: - filepath (str): Path to file. - """ - print(f"Localizing {filepath}") - - temp_path = get_local_harmony_path(filepath) - scene_name = os.path.basename(temp_path) - if os.path.exists(os.path.join(temp_path, scene_name)): - # unzipped with duplicated scene_name - temp_path = os.path.join(temp_path, scene_name) - - scene_path = os.path.join( - temp_path, scene_name + ".xstage" - ) - - unzip = False - if os.path.exists(scene_path): - # Check remote scene is newer than local. 
- if os.path.getmtime(scene_path) < os.path.getmtime(filepath): - try: - shutil.rmtree(temp_path) - except Exception as e: - log.error(e) - raise Exception("Cannot delete working folder") from e - unzip = True - else: - unzip = True - - if unzip: - with _ZipFile(filepath, "r") as zip_ref: - zip_ref.extractall(temp_path) - - if os.path.exists(os.path.join(temp_path, scene_name)): - # unzipped with duplicated scene_name - temp_path = os.path.join(temp_path, scene_name) - - # Close existing scene. - if ProcessContext.pid: - os.kill(ProcessContext.pid, signal.SIGTERM) - - # Stop server. - if ProcessContext.server: - ProcessContext.server.stop() - - # Launch Avalon server. - ProcessContext.server = Server(ProcessContext.port) - ProcessContext.server.start() - # thread = threading.Thread(target=self.server.start) - # thread.daemon = True - # thread.start() - - # Save workfile path for later. - ProcessContext.workfile_path = filepath - - # find any xstage files is directory, prefer the one with the same name - # as directory (plus extension) - xstage_files = [] - for _, _, files in os.walk(temp_path): - for file in files: - if os.path.splitext(file)[1] == ".xstage": - xstage_files.append(file) - - if not os.path.basename("temp.zip"): - if not xstage_files: - ProcessContext.server.stop() - print("no xstage file was found") - return - - # try to use first available - scene_path = os.path.join( - temp_path, xstage_files[0] - ) - - # prefer the one named as zip file - zip_based_name = "{}.xstage".format( - os.path.splitext(os.path.basename(filepath))[0]) - - if zip_based_name in xstage_files: - scene_path = os.path.join( - temp_path, zip_based_name - ) - - if not os.path.exists(scene_path): - print("error: cannot determine scene file {}".format(scene_path)) - ProcessContext.server.stop() - return - - print("Launching {}".format(scene_path)) - kwargs = get_non_python_host_kwargs({}, False) - process = subprocess.Popen( - [ProcessContext.application_path, scene_path], - **kwargs - ) - ProcessContext.pid = process.pid - ProcessContext.process = process - ProcessContext.stdout_broker.host_connected() - - -def on_file_changed(path, threaded=True): - """Threaded zipping and move of the project directory. - - This method is called when the `.xstage` file is changed. - """ - log.debug("File changed: " + path) - - if ProcessContext.workfile_path is None: - return - - if threaded: - thread = threading.Thread( - target=zip_and_move, - args=(os.path.dirname(path), ProcessContext.workfile_path) - ) - thread.start() - else: - zip_and_move(os.path.dirname(path), ProcessContext.workfile_path) - - -def zip_and_move(source, destination): - """Zip a directory and move to `destination`. - - Args: - source (str): Directory to zip and move to destination. - destination (str): Destination file path to zip file. - - """ - os.chdir(os.path.dirname(source)) - shutil.make_archive(os.path.basename(source), "zip", source) - with _ZipFile(os.path.basename(source) + ".zip") as zr: - if zr.testzip() is not None: - raise Exception("File archive is corrupted.") - shutil.move(os.path.basename(source) + ".zip", destination) - log.debug(f"Saved '{source}' to '{destination}'") - - -def show(tool_name): - """Call show on "module_name". - - This allows to make a QApplication ahead of time and always "exec_" to - prevent crashing. - - Args: - module_name (str): Name of module to call "show" on. - - """ - # Requests often get doubled up when showing tools, so we wait a second for - # requests to be received properly. 
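
The zip-and-verify step in `zip_and_move` above is self-contained enough to sketch with only the standard library; the paths here are hypothetical:

```python
import os
import shutil
import zipfile

def zip_and_verify(source, destination):
    # Archive the folder next to itself, as zip_and_move above does.
    os.chdir(os.path.dirname(source))
    archive = shutil.make_archive(os.path.basename(source), "zip", source)
    # testzip() returns the name of the first corrupt member, or None if OK.
    with zipfile.ZipFile(archive) as zr:
        if zr.testzip() is not None:
            raise RuntimeError("File archive is corrupted.")
    shutil.move(archive, destination)

# zip_and_verify("/tmp/scene_v001", "/projects/show/scene_v001.zip")
```
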
- time.sleep(1) - - kwargs = {} - if tool_name == "loader": - kwargs["use_context"] = True - - ProcessContext.execute_in_main_thread( - lambda: host_tools.show_tool_by_name(tool_name, **kwargs) - ) - - # Required return statement. - return "nothing" - - -def get_scene_data(): - try: - return send( - { - "function": "AvalonHarmony.getSceneData" - })["result"] - except json.decoder.JSONDecodeError: - # Means no scene metadata has been made before. - return {} - except KeyError: - # Means no existing scene metadata has been made. - return {} - - -def set_scene_data(data): - """Write scene data to metadata. - - Args: - data (dict): Data to write. - - """ - # Write scene data. - send( - { - "function": "AvalonHarmony.setSceneData", - "args": data - }) - - -def read(node_id): - """Read object metadata in to a dictionary. - - Args: - node_id (str): Path to node or id of object. - - Returns: - dict - """ - scene_data = get_scene_data() - if node_id in scene_data: - return scene_data[node_id] - - return {} - - -def remove(node_id): - """ - Remove node data from scene metadata. - - Args: - node_id (str): full name (eg. 'Top/renderAnimation') - """ - data = get_scene_data() - del data[node_id] - set_scene_data(data) - - -def delete_node(node): - """ Physically delete node from scene. """ - send( - { - "function": "AvalonHarmony.deleteNode", - "args": node - } - ) - - -def imprint(node_id, data, remove=False): - """Write `data` to the `node` as json. - - Arguments: - node_id (str): Path to node or id of object. - data (dict): Dictionary of key/value pairs. - remove (bool): Removes the data from the scene. - - Example: - >>> from openpype.hosts.harmony.api import lib - >>> node = "Top/Display" - >>> data = {"str": "something", "int": 1, "float": 0.32, "bool": True} - >>> lib.imprint(layer, data) - """ - scene_data = get_scene_data() - - if remove and (node_id in scene_data): - scene_data.pop(node_id, None) - else: - if node_id in scene_data: - scene_data[node_id].update(data) - else: - scene_data[node_id] = data - - set_scene_data(scene_data) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context.""" - - selected_nodes = send( - { - "function": "AvalonHarmony.getSelectedNodes" - })["result"] - - try: - yield selected_nodes - finally: - selected_nodes = send( - { - "function": "AvalonHarmony.selectNodes", - "args": selected_nodes - } - ) - - -def send(request): - """Public method for sending requests to Harmony.""" - return ProcessContext.server.send(request) - - -def select_nodes(nodes): - """ Selects nodes in Node View """ - _ = send( - { - "function": "AvalonHarmony.selectNodes", - "args": nodes - } - ) - - -@contextlib.contextmanager -def maintained_nodes_state(nodes): - """Maintain nodes states during context.""" - # Collect current state. - states = send( - { - "function": "AvalonHarmony.areEnabled", "args": nodes - })["result"] - - # Disable all nodes. - send( - { - "function": "AvalonHarmony.disableNodes", "args": nodes - }) - - try: - yield - finally: - send( - { - "function": "AvalonHarmony.setState", - "args": [nodes, states] - }) - - -def save_scene(): - """Save the Harmony scene safely. - - The built-in (to Avalon) background zip and moving of the Harmony scene - folder, interfers with server/client communication by sending two requests - at the same time. This only happens when sending "scene.saveAll()". This - method prevents this double request and safely saves the scene. 
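
The metadata helpers above (`read`, `imprint`, `remove`) all reduce to edits on one JSON-serializable dict keyed by node id. A minimal sketch of the same merge semantics against a plain dict, detached from the live Harmony session:

```python
scene_data = {}

def imprint_local(node_id, data, remove=False):
    # Same behaviour as imprint() above, but on a local dict.
    if remove:
        scene_data.pop(node_id, None)
    elif node_id in scene_data:
        scene_data[node_id].update(data)
    else:
        scene_data[node_id] = data

imprint_local("Top/renderAnimation", {"family": "render"})
imprint_local("Top/renderAnimation", {"subset": "renderDefault"})
assert scene_data["Top/renderAnimation"]["family"] == "render"
```
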
- - """ - # Need to turn off the background watcher else the communication with - # the server gets spammed with two requests at the same time. - scene_path = send( - {"function": "AvalonHarmony.saveScene"})["result"] - - # Manually update the remote file. - on_file_changed(scene_path, threaded=False) - - # Re-enable the background watcher. - send({"function": "AvalonHarmony.enableFileWather"}) - - -def save_scene_as(filepath): - """Save Harmony scene as `filepath`.""" - scene_dir = os.path.dirname(filepath) - destination = os.path.join( - os.path.dirname(ProcessContext.workfile_path), - os.path.splitext(os.path.basename(filepath))[0] + ".zip" - ) - - if os.path.exists(scene_dir): - try: - shutil.rmtree(scene_dir) - except Exception as e: - log.error(f"Cannot remove {scene_dir}") - raise Exception(f"Cannot remove {scene_dir}") from e - - send( - {"function": "scene.saveAs", "args": [scene_dir]} - )["result"] - - zip_and_move(scene_dir, destination) - - ProcessContext.workfile_path = destination - - send( - {"function": "AvalonHarmony.addPathToWatcher", "args": filepath} - ) - - -def find_node_by_name(name, node_type): - """Find node by its name. - - Args: - name (str): Name of the Node. (without part before '/') - node_type (str): Type of the Node. - 'READ' - for loaded data with Loaders (background) - 'GROUP' - for loaded data with Loaders (templates) - 'WRITE' - render nodes - - Returns: - str: FQ Node name. - - """ - nodes = send( - {"function": "node.getNodes", "args": [[node_type]]} - )["result"] - for node in nodes: - node_name = node.split("/")[-1] - if name == node_name: - return node - - return None diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py deleted file mode 100644 index 285ee806a1..0000000000 --- a/openpype/hosts/harmony/api/pipeline.py +++ /dev/null @@ -1,345 +0,0 @@ -import os -from pathlib import Path -import logging - -import pyblish.api - -from openpype.lib import register_event_callback -from openpype.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.pipeline.load import get_outdated_containers -from openpype.pipeline.context_tools import get_current_project_asset - -from openpype.hosts.harmony import HARMONY_HOST_DIR -import openpype.hosts.harmony.api as harmony - - -log = logging.getLogger("openpype.hosts.harmony") - -PLUGINS_DIR = os.path.join(HARMONY_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -def set_scene_settings(settings): - """Set correct scene settings in Harmony. - - Args: - settings (dict): Scene settings. - - Returns: - dict: Dictionary of settings to set. - - """ - harmony.send( - {"function": "PypeHarmony.setSceneSettings", "args": settings}) - - -def get_asset_settings(): - """Get settings on current asset from database. - - Returns: - dict: Scene data. 
- - """ - - asset_doc = get_current_project_asset() - asset_data = asset_doc["data"] - fps = asset_data.get("fps") - frame_start = asset_data.get("frameStart") - frame_end = asset_data.get("frameEnd") - handle_start = asset_data.get("handleStart") - handle_end = asset_data.get("handleEnd") - resolution_width = asset_data.get("resolutionWidth") - resolution_height = asset_data.get("resolutionHeight") - entity_type = asset_data.get("entityType") - - scene_data = { - "fps": fps, - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": handle_start, - "handleEnd": handle_end, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "entityType": entity_type - } - - return scene_data - - -def ensure_scene_settings(): - """Validate if Harmony scene has valid settings.""" - settings = get_asset_settings() - - invalid_settings = [] - valid_settings = {} - for key, value in settings.items(): - if value is None: - invalid_settings.append(key) - else: - valid_settings[key] = value - - # Warn about missing attributes. - if invalid_settings: - msg = "Missing attributes:" - for item in invalid_settings: - msg += f"\n{item}" - - harmony.send( - {"function": "PypeHarmony.message", "args": msg}) - - set_scene_settings(valid_settings) - - -def check_inventory(): - """Check is scene contains outdated containers. - - If it does it will colorize outdated nodes and display warning message - in Harmony. - """ - - outdated_containers = get_outdated_containers() - if not outdated_containers: - return - - # Colour nodes. - outdated_nodes = [] - for container in outdated_containers: - if container["loader"] == "ImageSequenceLoader": - outdated_nodes.append( - harmony.find_node_by_name(container["name"], "READ") - ) - harmony.send({"function": "PypeHarmony.setColor", "args": outdated_nodes}) - - # Warn about outdated containers. - msg = "There are outdated containers in the scene." - harmony.send({"function": "PypeHarmony.message", "args": msg}) - - -def application_launch(event): - """Event that is executed after Harmony is launched.""" - # fills OPENPYPE_HARMONY_JS - pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js" - pype_harmony_js = pype_harmony_path.read_text() - - # go through js/creators, loaders and publish folders and load all scripts - script = "" - for item in ["creators", "loaders", "publish"]: - dir_to_scan = Path(__file__).parent.parent / "js" / item - for child in dir_to_scan.iterdir(): - script += child.read_text() - - # send scripts to Harmony - harmony.send({"script": pype_harmony_js}) - harmony.send({"script": script}) - inject_avalon_js() - - # ensure_scene_settings() - check_inventory() - - -def export_template(backdrops, nodes, filepath): - """Export Template to file. - - Args: - backdrops (list): List of backdrops to export. - nodes (list): List of nodes to export. - filepath (str): Path where to save Template. - - """ - harmony.send({ - "function": "PypeHarmony.exportTemplate", - "args": [ - backdrops, - nodes, - os.path.basename(filepath), - os.path.dirname(filepath) - ] - }) - - -def install(): - """Install Pype as host config.""" - print("Installing Pype config ...") - - pyblish.api.register_host("harmony") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - log.info(PUBLISH_PATH) - - # Register callbacks. 
- pyblish.api.register_callback( - "instanceToggled", on_pyblish_instance_toggled - ) - - register_event_callback("application.launched", application_launch) - - -def uninstall(): - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - - -def on_pyblish_instance_toggled(instance, old_value, new_value): - """Toggle node enabling on instance toggles.""" - node = None - if instance.data.get("setMembers"): - node = instance.data["setMembers"][0] - - if node: - harmony.send( - { - "function": "PypeHarmony.toggleInstance", - "args": [node, new_value] - } - ) - - -def inject_avalon_js(): - """Inject AvalonHarmony.js into Harmony.""" - avalon_harmony_js = Path(__file__).parent.joinpath("js/AvalonHarmony.js") - script = avalon_harmony_js.read_text() - # send AvalonHarmony.js to Harmony - harmony.send({"script": script}) - - -def ls(): - """Yields containers from Harmony scene. - - This is the host-equivalent of api.ls(), but instead of listing - assets on disk, it lists assets already loaded in Harmony; once loaded - they are called 'containers'. - - Yields: - dict: container - """ - objects = harmony.get_scene_data() or {} - for _, data in objects.items(): - # Skip non-tagged objects. - if not data: - continue - - # Filter to only containers. - if "container" not in data.get("id"): - continue - - if not data.get("objectName"): # backward compatibility - data["objectName"] = data["name"] - yield data - - -def list_instances(remove_orphaned=True): - """ - List all created instances from current workfile which - will be published. - - Pulls from File > File Info - - For SubsetManager, by default it check if instance has matching node - in the scene, if not, instance gets deleted from metadata. - - Returns: - (list) of dictionaries matching instances format - """ - objects = harmony.get_scene_data() or {} - instances = [] - for key, data in objects.items(): - # Skip non-tagged objects. - if not data: - continue - - # Filter out containers. - if "container" in data.get("id"): - continue - - data['uuid'] = key - - if remove_orphaned: - node_name = key.split("/")[-1] - located_node = harmony.find_node_by_name(node_name, 'WRITE') - if not located_node: - print("Removing orphaned instance {}".format(key)) - harmony.remove(key) - continue - - instances.append(data) - - return instances - - -def remove_instance(instance): - """ - Remove instance from current workfile metadata and from scene! - - Updates metadata of current file in File > File Info and removes - icon highlight on group layer. - - For SubsetManager - - Args: - instance (dict): instance representation from subsetmanager model - """ - node = instance.get("uuid") - harmony.remove(node) - harmony.delete_node(node) - - -def select_instance(instance): - """ - Select instance in Node View - - Args: - instance (dict): instance representation from subsetmanager model - """ - harmony.select_nodes([instance.get("uuid")]) - - -def containerise(name, - namespace, - node, - context, - loader=None, - suffix=None, - nodes=None): - """Imprint node with metadata. - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name (str): Name of resulting assembly. - namespace (str): Namespace under which to host container. - node (str): Node to containerise. - context (dict): Asset information. - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. 
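
`ls()` and `list_instances()` above walk the same scene-data dict and split it on whether the stored `id` contains `"container"`. A stand-alone sketch with made-up metadata:

```python
# Hypothetical scene metadata in the shape ls()/list_instances() expect.
objects = {
    "Top/bgLoader": {"id": "openpype:container-2.0", "name": "bgMain"},
    "Top/renderMain": {"id": "pyblish.avalon.instance", "family": "render"},
}

containers = {k: v for k, v in objects.items() if "container" in v.get("id", "")}
instances = {k: v for k, v in objects.items() if "container" not in v.get("id", "")}

assert list(containers) == ["Top/bgLoader"]
assert list(instances) == ["Top/renderMain"]
```
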
- - Returns: - container (str): Path of container assembly. - """ - if not nodes: - nodes = [] - - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - "nodes": nodes - } - - harmony.imprint(node, data) - - return node diff --git a/openpype/hosts/harmony/api/plugin.py b/openpype/hosts/harmony/api/plugin.py deleted file mode 100644 index c55d200d30..0000000000 --- a/openpype/hosts/harmony/api/plugin.py +++ /dev/null @@ -1,70 +0,0 @@ -from openpype.pipeline import LegacyCreator -import openpype.hosts.harmony.api as harmony - - -class Creator(LegacyCreator): - """Creator plugin to create instances in Harmony. - - By default a Composite node is created to support any number of nodes in - an instance, but any node type is supported. - If the selection is used, the selected nodes will be connected to the - created node. - """ - - defaults = ["Main"] - node_type = "COMPOSITE" - - def setup_node(self, node): - """Prepare node as container. - - Args: - node (str): Path to node. - """ - harmony.send( - { - "function": "AvalonHarmony.setupNodeForCreator", - "args": node - } - ) - - def process(self): - """Plugin entry point.""" - existing_node_names = harmony.send( - { - "function": "AvalonHarmony.getNodesNamesByType", - "args": self.node_type - })["result"] - - # Dont allow instances with the same name. - msg = "Instance with name \"{}\" already exists.".format(self.name) - for name in existing_node_names: - if self.name.lower() == name.lower(): - harmony.send( - { - "function": "AvalonHarmony.message", "args": msg - } - ) - return False - - with harmony.maintained_selection() as selection: - node = None - - if (self.options or {}).get("useSelection") and selection: - node = harmony.send( - { - "function": "AvalonHarmony.createContainer", - "args": [self.name, self.node_type, selection[-1]] - } - )["result"] - else: - node = harmony.send( - { - "function": "AvalonHarmony.createContainer", - "args": [self.name, self.node_type] - } - )["result"] - - harmony.imprint(node, self.data) - self.setup_node(node) - - return node diff --git a/openpype/hosts/harmony/plugins/create/create_render.py b/openpype/hosts/harmony/plugins/create/create_render.py deleted file mode 100644 index 4350efbfbe..0000000000 --- a/openpype/hosts/harmony/plugins/create/create_render.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -"""Create render node.""" -import openpype.hosts.harmony.api as harmony -from openpype.hosts.harmony.api import plugin - - -class CreateRender(plugin.Creator): - """Composite node for publishing renders.""" - - name = "renderDefault" - label = "Render" - family = "render" - node_type = "WRITE" - - def __init__(self, *args, **kwargs): - """Constructor.""" - super(CreateRender, self).__init__(*args, **kwargs) - - def setup_node(self, node): - """Set render node.""" - self_name = self.__class__.__name__ - path = "render/{0}/{0}.".format(node.split("/")[-1]) - harmony.send( - { - "function": f"PypeHarmony.Creators.{self_name}.create", - "args": [node, path] - }) diff --git a/openpype/hosts/harmony/plugins/load/load_audio.py b/openpype/hosts/harmony/plugins/load/load_audio.py deleted file mode 100644 index e18a6de097..0000000000 --- a/openpype/hosts/harmony/plugins/load/load_audio.py +++ /dev/null @@ -1,62 +0,0 @@ -from openpype.pipeline import ( - load, - get_representation_path, -) -import openpype.hosts.harmony.api as harmony - -sig = 
harmony.signature() -func = """ -function getUniqueColumnName( column_prefix ) -{ - var suffix = 0; - // finds if unique name for a column - var column_name = column_prefix; - while(suffix < 2000) - { - if(!column.type(column_name)) - break; - - suffix = suffix + 1; - column_name = column_prefix + "_" + suffix; - } - return column_name; -} - -function %s(args) -{ - var uniqueColumnName = getUniqueColumnName(args[0]); - column.add(uniqueColumnName , "SOUND"); - column.importSound(uniqueColumnName, 1, args[1]); -} -%s -""" % (sig, sig) - - -class ImportAudioLoader(load.LoaderPlugin): - """Import audio.""" - - families = ["shot", "audio"] - representations = ["wav"] - label = "Import Audio" - - def load(self, context, name=None, namespace=None, data=None): - wav_file = get_representation_path(context["representation"]) - harmony.send( - {"function": func, "args": [context["subset"]["name"], wav_file]} - ) - - subset_name = context["subset"]["name"] - - return harmony.containerise( - subset_name, - namespace, - subset_name, - context, - self.__class__.__name__ - ) - - def update(self, container, representation): - pass - - def remove(self, container): - pass diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py deleted file mode 100644 index 853d347c2e..0000000000 --- a/openpype/hosts/harmony/plugins/load/load_background.py +++ /dev/null @@ -1,373 +0,0 @@ -import os -import json - -from openpype.pipeline import ( - load, - get_representation_path, -) -from openpype.pipeline.context_tools import is_representation_from_latest -import openpype.hosts.harmony.api as harmony - - -copy_files = """function copyFile(srcFilename, dstFilename) -{ - var srcFile = new PermanentFile(srcFilename); - var dstFile = new PermanentFile(dstFilename); - srcFile.copy(dstFile); -} -""" - -import_files = """var PNGTransparencyMode = 1; //Premultiplied with Black -var TGATransparencyMode = 0; //Premultiplied with Black -var SGITransparencyMode = 0; //Premultiplied with Black -var LayeredPSDTransparencyMode = 1; //Straight -var FlatPSDTransparencyMode = 2; //Premultiplied with White - -function getUniqueColumnName( column_prefix ) -{ - var suffix = 0; - // finds if unique name for a column - var column_name = column_prefix; - while(suffix < 2000) - { - if(!column.type(column_name)) - break; - - suffix = suffix + 1; - column_name = column_prefix + "_" + suffix; - } - return column_name; -} - -function import_files(args) -{ - var root = args[0]; - var files = args[1]; - var name = args[2]; - var start_frame = args[3]; - - var vectorFormat = null; - var extension = null; - var filename = files[0]; - - var pos = filename.lastIndexOf("."); - if( pos < 0 ) - return null; - - extension = filename.substr(pos+1).toLowerCase(); - - if(extension == "jpeg") - extension = "jpg"; - if(extension == "tvg") - { - vectorFormat = "TVG" - extension ="SCAN"; // element.add() will use this. - } - - var elemId = element.add( - name, - "BW", - scene.numberOfUnitsZ(), - extension.toUpperCase(), - vectorFormat - ); - if (elemId == -1) - { - // hum, unknown file type most likely -- let's skip it. - return null; // no read to add. 
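
The `getUniqueColumnName` helper that recurs in these embedded scripts just probes for a free suffix. The same idea in Python, with a hypothetical `exists` predicate standing in for `column.type()`:

```python
def unique_name(prefix, exists, limit=2000):
    # Mirrors getUniqueColumnName: try "name", then "name_1", "name_2", ...
    # until the probe reports a free slot or the limit is reached.
    name = prefix
    suffix = 0
    while suffix < limit and exists(name):
        suffix += 1
        name = "{}_{}".format(prefix, suffix)
    return name

taken = {"BG", "BG_1"}
assert unique_name("BG", taken.__contains__) == "BG_2"
```
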
- } - - var uniqueColumnName = getUniqueColumnName(name); - column.add(uniqueColumnName , "DRAWING"); - column.setElementIdOfDrawing(uniqueColumnName, elemId); - - var read = node.add(root, name, "READ", 0, 0, 0); - var transparencyAttr = node.getAttr( - read, frame.current(), "READ_TRANSPARENCY" - ); - var opacityAttr = node.getAttr(read, frame.current(), "OPACITY"); - transparencyAttr.setValue(true); - opacityAttr.setValue(true); - - var alignmentAttr = node.getAttr(read, frame.current(), "ALIGNMENT_RULE"); - alignmentAttr.setValue("ASIS"); - - var transparencyModeAttr = node.getAttr( - read, frame.current(), "applyMatteToColor" - ); - if (extension == "png") - transparencyModeAttr.setValue(PNGTransparencyMode); - if (extension == "tga") - transparencyModeAttr.setValue(TGATransparencyMode); - if (extension == "sgi") - transparencyModeAttr.setValue(SGITransparencyMode); - if (extension == "psd") - transparencyModeAttr.setValue(FlatPSDTransparencyMode); - if (extension == "jpg") - transparencyModeAttr.setValue(LayeredPSDTransparencyMode); - - node.linkAttr(read, "DRAWING.ELEMENT", uniqueColumnName); - - if (files.length == 1) - { - // Create a drawing drawing, 'true' indicate that the file exists. - Drawing.create(elemId, 1, true); - // Get the actual path, in tmp folder. - var drawingFilePath = Drawing.filename(elemId, "1"); - copyFile(files[0], drawingFilePath); - // Expose the image for the entire frame range. - for( var i =0; i <= frame.numberOf() - 1; ++i) - { - timing = start_frame + i - column.setEntry(uniqueColumnName, 1, timing, "1"); - } - } else { - // Create a drawing for each file. - for( var i =0; i <= files.length - 1; ++i) - { - timing = start_frame + i - // Create a drawing drawing, 'true' indicate that the file exists. - Drawing.create(elemId, timing, true); - // Get the actual path, in tmp folder. - var drawingFilePath = Drawing.filename(elemId, timing.toString()); - copyFile( files[i], drawingFilePath ); - - column.setEntry(uniqueColumnName, 1, timing, timing.toString()); - } - } - - var green_color = new ColorRGBA(0, 255, 0, 255); - node.setColor(read, green_color); - - return read; -} -import_files -""" - -replace_files = """var PNGTransparencyMode = 1; //Premultiplied with Black -var TGATransparencyMode = 0; //Premultiplied with Black -var SGITransparencyMode = 0; //Premultiplied with Black -var LayeredPSDTransparencyMode = 1; //Straight -var FlatPSDTransparencyMode = 2; //Premultiplied with White - -function replace_files(args) -{ - var files = args[0]; - MessageLog.trace(files); - MessageLog.trace(files.length); - var _node = args[1]; - var start_frame = args[2]; - - var _column = node.linkedColumn(_node, "DRAWING.ELEMENT"); - var elemId = column.getElementIdOfDrawing(_column); - - // Delete existing drawings. 
- var timings = column.getDrawingTimings(_column); - for( var i =0; i <= timings.length - 1; ++i) - { - column.deleteDrawingAt(_column, parseInt(timings[i])); - } - - - var filename = files[0]; - var pos = filename.lastIndexOf("."); - if( pos < 0 ) - return null; - var extension = filename.substr(pos+1).toLowerCase(); - - if(extension == "jpeg") - extension = "jpg"; - - var transparencyModeAttr = node.getAttr( - _node, frame.current(), "applyMatteToColor" - ); - if (extension == "png") - transparencyModeAttr.setValue(PNGTransparencyMode); - if (extension == "tga") - transparencyModeAttr.setValue(TGATransparencyMode); - if (extension == "sgi") - transparencyModeAttr.setValue(SGITransparencyMode); - if (extension == "psd") - transparencyModeAttr.setValue(FlatPSDTransparencyMode); - if (extension == "jpg") - transparencyModeAttr.setValue(LayeredPSDTransparencyMode); - - if (files.length == 1) - { - // Create a drawing drawing, 'true' indicate that the file exists. - Drawing.create(elemId, 1, true); - // Get the actual path, in tmp folder. - var drawingFilePath = Drawing.filename(elemId, "1"); - copyFile(files[0], drawingFilePath); - MessageLog.trace(files[0]); - MessageLog.trace(drawingFilePath); - // Expose the image for the entire frame range. - for( var i =0; i <= frame.numberOf() - 1; ++i) - { - timing = start_frame + i - column.setEntry(_column, 1, timing, "1"); - } - } else { - // Create a drawing for each file. - for( var i =0; i <= files.length - 1; ++i) - { - timing = start_frame + i - // Create a drawing drawing, 'true' indicate that the file exists. - Drawing.create(elemId, timing, true); - // Get the actual path, in tmp folder. - var drawingFilePath = Drawing.filename(elemId, timing.toString()); - copyFile( files[i], drawingFilePath ); - - column.setEntry(_column, 1, timing, timing.toString()); - } - } - - var green_color = new ColorRGBA(0, 255, 0, 255); - node.setColor(_node, green_color); -} -replace_files -""" - - -class BackgroundLoader(load.LoaderPlugin): - """Load images - Stores the imported asset in a container named after the asset. 
- """ - families = ["background"] - representations = ["json"] - - def load(self, context, name=None, namespace=None, data=None): - - path = self.filepath_from_context(context) - with open(path) as json_file: - data = json.load(json_file) - - layers = list() - - for child in data['children']: - if child.get("filename"): - layers.append(child["filename"]) - else: - for layer in child['children']: - if layer.get("filename"): - layers.append(layer["filename"]) - - bg_folder = os.path.dirname(path) - - subset_name = context["subset"]["name"] - # read_node_name += "_{}".format(uuid.uuid4()) - container_nodes = [] - - for layer in sorted(layers): - file_to_import = [ - os.path.join(bg_folder, layer).replace("\\", "/") - ] - - read_node = harmony.send( - { - "function": copy_files + import_files, - "args": ["Top", file_to_import, layer, 1] - } - )["result"] - container_nodes.append(read_node) - - return harmony.containerise( - subset_name, - namespace, - subset_name, - context, - self.__class__.__name__, - nodes=container_nodes - ) - - def update(self, container, representation): - path = get_representation_path(representation) - with open(path) as json_file: - data = json.load(json_file) - - layers = list() - - for child in data['children']: - if child.get("filename"): - print(child["filename"]) - layers.append(child["filename"]) - else: - for layer in child['children']: - if layer.get("filename"): - print(layer["filename"]) - layers.append(layer["filename"]) - - bg_folder = os.path.dirname(path) - - print(container) - - is_latest = is_representation_from_latest(representation) - for layer in sorted(layers): - file_to_import = [ - os.path.join(bg_folder, layer).replace("\\", "/") - ] - print(20 * "#") - print(f"FILE TO REPLACE: {file_to_import}") - print(f"LAYER: {layer}") - node = harmony.find_node_by_name(layer, "READ") - print(f"{node}") - - if node in container['nodes']: - harmony.send( - { - "function": copy_files + replace_files, - "args": [file_to_import, node, 1] - } - ) - else: - read_node = harmony.send( - { - "function": copy_files + import_files, - "args": ["Top", file_to_import, layer, 1] - } - )["result"] - container['nodes'].append(read_node) - - # Colour node. 
- sig = harmony.signature("set_color") - func = """function %s(args){ - for( var i =0; i <= args[0].length - 1; ++i) - { - var red_color = new ColorRGBA(255, 0, 0, 255); - var green_color = new ColorRGBA(0, 255, 0, 255); - if (args[1] == "red"){ - node.setColor(args[0], red_color); - } - if (args[1] == "green"){ - node.setColor(args[0], green_color); - } - } - } - %s - """ % (sig, sig) - if is_latest: - harmony.send({"function": func, "args": [node, "green"]}) - else: - harmony.send({"function": func, "args": [node, "red"]}) - - harmony.imprint( - container['name'], {"representation": str(representation["_id"]), - "nodes": container['nodes']} - ) - - def remove(self, container): - for node in container.get("nodes"): - - func = """function deleteNode(_node) - { - node.deleteNode(_node, true, true); - } - deleteNode - """ - harmony.send( - {"function": func, "args": [node]} - ) - harmony.imprint(container['name'], {}, remove=True) - - def switch(self, container, representation): - self.update(container, representation) diff --git a/openpype/hosts/harmony/plugins/publish/collect_current_file.py b/openpype/hosts/harmony/plugins/publish/collect_current_file.py deleted file mode 100644 index 736316fe3c..0000000000 --- a/openpype/hosts/harmony/plugins/publish/collect_current_file.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect information about current file.""" -import os - -import pyblish.api -import openpype.hosts.harmony.api as harmony - - -class CollectCurrentFile(pyblish.api.ContextPlugin): - """Inject the current working file into context.""" - - order = pyblish.api.CollectorOrder - 0.5 - label = "Current File" - hosts = ["harmony"] - - def process(self, context): - """Inject the current working file.""" - self_name = self.__class__.__name__ - - current_file = harmony.send( - {"function": f"PypeHarmony.Publish.{self_name}.collect"})["result"] - context.data["currentFile"] = os.path.normpath(current_file) diff --git a/openpype/hosts/harmony/plugins/publish/collect_instances.py b/openpype/hosts/harmony/plugins/publish/collect_instances.py deleted file mode 100644 index edbec887f5..0000000000 --- a/openpype/hosts/harmony/plugins/publish/collect_instances.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect instances in Harmony.""" -import json - -import pyblish.api -import openpype.hosts.harmony.api as harmony - - -class CollectInstances(pyblish.api.ContextPlugin): - """Gather instances by nodes metadata. - - This collector takes into account assets that are associated with - a composite node and marked with a unique identifier. - - Identifier: - id (str): "pyblish.avalon.instance" - """ - - label = "Instances" - order = pyblish.api.CollectorOrder - hosts = ["harmony"] - families_mapping = { - "render": ["review", "ftrack"], - "harmony.template": [], - "palette": ["palette", "ftrack"] - } - - pair_media = True - - def process(self, context): - """Plugin entry point. - - Args: - context (:class:`pyblish.api.Context`): Context data. - - """ - nodes = harmony.send( - {"function": "node.subNodes", "args": ["Top"]} - )["result"] - - for node in nodes: - data = harmony.read(node) - - # Skip non-tagged nodes. - if not data: - continue - - # Skip containers. 
- if "container" in data["id"]: - continue - - # skip render farm family as it is collected separately - if data["family"] == "renderFarm": - continue - - instance = context.create_instance(node.split("/")[-1]) - instance.data.update(data) - instance.data["setMembers"] = [node] - instance.data["publish"] = harmony.send( - {"function": "node.getEnable", "args": [node]} - )["result"] - instance.data["families"] = self.families_mapping[data["family"]] - - # If set in plugin, pair the scene Version in ftrack with - # thumbnails and review media. - if (self.pair_media and instance.data["family"] == "scene"): - context.data["scene_instance"] = instance - - # Produce diagnostic message for any graphical - # user interface interested in visualising it. - self.log.info( - "Found: \"{0}\": \n{1}".format( - instance.data["name"], json.dumps(instance.data, indent=4) - ) - ) diff --git a/openpype/hosts/harmony/plugins/publish/collect_workfile.py b/openpype/hosts/harmony/plugins/publish/collect_workfile.py deleted file mode 100644 index 4492ab37a5..0000000000 --- a/openpype/hosts/harmony/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect current workfile from Harmony.""" -import os -import pyblish.api - -from openpype.pipeline.create import get_subset_name - - -class CollectWorkfile(pyblish.api.ContextPlugin): - """Collect current script for publish.""" - - order = pyblish.api.CollectorOrder + 0.1 - label = "Collect Workfile" - hosts = ["harmony"] - - def process(self, context): - """Plugin entry point.""" - family = "workfile" - basename = os.path.basename(context.data["currentFile"]) - subset = get_subset_name( - family, - "", - context.data["anatomyData"]["task"]["name"], - context.data["assetEntity"], - context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"], - project_settings=context.data["project_settings"] - ) - - # Create instance - instance = context.create_instance(subset) - instance.data.update({ - "subset": subset, - "label": basename, - "name": basename, - "family": family, - "families": [family], - "representations": [], - "asset": context.data["asset"] - }) diff --git a/openpype/hosts/harmony/plugins/publish/extract_save_scene.py b/openpype/hosts/harmony/plugins/publish/extract_save_scene.py deleted file mode 100644 index 9d37f51391..0000000000 --- a/openpype/hosts/harmony/plugins/publish/extract_save_scene.py +++ /dev/null @@ -1,13 +0,0 @@ -import pyblish.api -import openpype.hosts.harmony.api as harmony - - -class ExtractSaveScene(pyblish.api.ContextPlugin): - """Save scene for extraction.""" - - label = "Extract Save Scene" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["harmony"] - - def process(self, context): - harmony.save_scene() diff --git a/openpype/hosts/harmony/plugins/publish/extract_workfile.py b/openpype/hosts/harmony/plugins/publish/extract_workfile.py deleted file mode 100644 index 9bb3090558..0000000000 --- a/openpype/hosts/harmony/plugins/publish/extract_workfile.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract work file.""" -import os -import shutil -from zipfile import ZipFile - -from openpype.pipeline import publish - - -class ExtractWorkfile(publish.Extractor): - """Extract and zip complete workfile folder into zip.""" - - label = "Extract Workfile" - hosts = ["harmony"] - families = ["workfile"] - - def process(self, instance): - """Plugin entry point.""" - staging_dir = self.staging_dir(instance) - filepath = os.path.join(staging_dir, 
"{}.tpl".format(instance.name)) - src = os.path.dirname(instance.context.data["currentFile"]) - self.log.info("Copying to {}".format(filepath)) - shutil.copytree(src, filepath) - - # Prep representation. - os.chdir(staging_dir) - shutil.make_archive( - f"{instance.name}", - "zip", - os.path.join(staging_dir, f"{instance.name}.tpl") - ) - # Check if archive is ok - with ZipFile(os.path.basename(f"{instance.name}.zip")) as zr: - if zr.testzip() is not None: - raise Exception("File archive is corrupted.") - - representation = { - "name": "tpl", - "ext": "zip", - "files": f"{instance.name}.zip", - "stagingDir": staging_dir - } - instance.data["representations"] = [representation] diff --git a/openpype/hosts/harmony/plugins/publish/increment_workfile.py b/openpype/hosts/harmony/plugins/publish/increment_workfile.py deleted file mode 100644 index 1caf581567..0000000000 --- a/openpype/hosts/harmony/plugins/publish/increment_workfile.py +++ /dev/null @@ -1,37 +0,0 @@ -import os - -import pyblish.api -from openpype.pipeline.publish import get_errored_plugins_from_context -from openpype.lib import version_up -import openpype.hosts.harmony.api as harmony - - -class IncrementWorkfile(pyblish.api.InstancePlugin): - """Increment the current workfile. - - Saves the current scene with an increased version number. - """ - - label = "Increment Workfile" - order = pyblish.api.IntegratorOrder + 9.0 - hosts = ["harmony"] - families = ["workfile"] - optional = True - - def process(self, instance): - errored_plugins = get_errored_plugins_from_context(instance.context) - if errored_plugins: - raise RuntimeError( - "Skipping incrementing current file because publishing failed." - ) - - scene_dir = version_up( - os.path.dirname(instance.context.data["currentFile"]) - ) - scene_path = os.path.join( - scene_dir, os.path.basename(scene_dir) + ".xstage" - ) - - harmony.save_scene_as(scene_path) - - self.log.info("Incremented workfile to: {}".format(scene_path)) diff --git a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py deleted file mode 100644 index 866f12076a..0000000000 --- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validate scene settings.""" -import os -import json -import re - -import pyblish.api - -import openpype.hosts.harmony.api as harmony -from openpype.pipeline import PublishXmlValidationError - - -class ValidateSceneSettingsRepair(pyblish.api.Action): - """Repair the instance.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - """Repair action entry point.""" - expected = harmony.get_asset_settings() - asset_settings = _update_frames(dict.copy(expected)) - asset_settings["frameStart"] = 1 - asset_settings["frameEnd"] = asset_settings["frameEnd"] + \ - asset_settings["handleEnd"] - harmony.set_scene_settings(asset_settings) - if not os.path.exists(context.data["scenePath"]): - self.log.info("correcting scene name") - scene_dir = os.path.dirname(context.data["currentFile"]) - scene_path = os.path.join( - scene_dir, os.path.basename(scene_dir) + ".xstage" - ) - harmony.save_scene_as(scene_path) - - -class ValidateSceneSettings(pyblish.api.InstancePlugin): - """Ensure the scene settings are in sync with database.""" - - order = pyblish.api.ValidatorOrder - label = "Validate Scene Settings" - families = ["workfile"] - hosts = ["harmony"] - actions = [ValidateSceneSettingsRepair] - optional = 
True - - # skip frameEnd check if asset contains any of: - frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"] # regex - - # skip resolution check if Task name matches any of regex patterns - skip_resolution_check = ["render", "Render"] # regex - - # skip frameStart, frameEnd check if Task name matches any of regex patt. - skip_timelines_check = [] # regex - - def process(self, instance): - """Plugin entry point.""" - - # TODO 'get_asset_settings' could expect asset document as argument - # which is available on 'context.data["assetEntity"]' - # - the same approach can be used in 'ValidateSceneSettingsRepair' - expected_settings = harmony.get_asset_settings() - self.log.info("scene settings from DB:{}".format(expected_settings)) - expected_settings.pop("entityType") # not useful for the validation - - expected_settings = _update_frames(dict.copy(expected_settings)) - expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\ - expected_settings["handleEnd"] - - task_name = instance.context.data["task"] - - if (any(re.search(pattern, task_name) - for pattern in self.skip_resolution_check)): - self.log.info("Skipping resolution check because of " - "task name and pattern {}".format( - self.skip_resolution_check)) - expected_settings.pop("resolutionWidth") - expected_settings.pop("resolutionHeight") - - if (any(re.search(pattern, os.getenv('AVALON_TASK')) - for pattern in self.skip_timelines_check)): - self.log.info("Skipping frames check because of " - "task name and pattern {}".format( - self.skip_timelines_check)) - expected_settings.pop('frameStart', None) - expected_settings.pop('frameEnd', None) - expected_settings.pop('frameStartHandle', None) - expected_settings.pop('frameEndHandle', None) - - asset_name = instance.context.data['anatomyData']['asset'] - if any(re.search(pattern, asset_name) - for pattern in self.frame_check_filter): - self.log.info("Skipping frames check because of " - "task name and pattern {}".format( - self.frame_check_filter)) - expected_settings.pop('frameStart', None) - expected_settings.pop('frameEnd', None) - expected_settings.pop('frameStartHandle', None) - expected_settings.pop('frameEndHandle', None) - - # handle case where ftrack uses only two decimal places - # 23.976023976023978 vs. 23.98 - fps = instance.context.data.get("frameRate") - if isinstance(instance.context.data.get("frameRate"), float): - fps = float( - "{:.2f}".format(instance.context.data.get("frameRate"))) - - self.log.debug("filtered settings: {}".format(expected_settings)) - - current_settings = { - "fps": fps, - "frameStart": instance.context.data["frameStart"], - "frameEnd": instance.context.data["frameEnd"], - "handleStart": instance.context.data.get("handleStart"), - "handleEnd": instance.context.data.get("handleEnd"), - "frameStartHandle": instance.context.data.get("frameStartHandle"), - "frameEndHandle": instance.context.data.get("frameEndHandle"), - "resolutionWidth": instance.context.data.get("resolutionWidth"), - "resolutionHeight": instance.context.data.get("resolutionHeight"), - } - self.log.debug("current scene settings {}".format(current_settings)) - - invalid_settings = [] - invalid_keys = set() - for key, value in expected_settings.items(): - if value != current_settings[key]: - invalid_settings.append( - "{} expected: {} found: {}".format(key, value, - current_settings[key])) - invalid_keys.add(key) - - if ((expected_settings["handleStart"] - or expected_settings["handleEnd"]) - and invalid_settings): - msg = "Handles included in calculation. 
Remove handles in DB " +\ - "or extend frame range in timeline." - invalid_settings[-1]["reason"] = msg - - msg = "Found invalid settings:\n{}".format( - json.dumps(invalid_settings, sort_keys=True, indent=4) - ) - - if invalid_settings: - invalid_keys_str = ",".join(invalid_keys) - break_str = "
" - invalid_setting_str = "Found invalid settings:
{}".\ - format(break_str.join(invalid_settings)) - - formatting_data = { - "invalid_setting_str": invalid_setting_str, - "invalid_keys_str": invalid_keys_str - } - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) - - scene_url = instance.context.data.get("scenePath") - if not os.path.exists(scene_url): - msg = "Scene file {} not found (saved under wrong name)".format( - scene_url - ) - formatting_data = { - "scene_url": scene_url - } - raise PublishXmlValidationError(self, msg, key="file_not_found", - formatting_data=formatting_data) - - -def _update_frames(expected_settings): - """ - Calculate proper frame range including handles set in DB. - - Harmony requires rendering from 1, so frame range is always moved - to 1. - Args: - expected_settings (dict): pulled from DB - - Returns: - modified expected_setting (dict) - """ - frames_count = expected_settings["frameEnd"] - \ - expected_settings["frameStart"] + 1 - - expected_settings["frameStart"] = 1.0 + expected_settings["handleStart"] - expected_settings["frameEnd"] = \ - expected_settings["frameStart"] + frames_count - 1 - return expected_settings diff --git a/openpype/hosts/hiero/addon.py b/openpype/hosts/hiero/addon.py deleted file mode 100644 index 1cc7a8637e..0000000000 --- a/openpype/hosts/hiero/addon.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -import platform -from openpype.modules import OpenPypeModule, IHostAddon - -HIERO_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class HieroAddon(OpenPypeModule, IHostAddon): - name = "hiero" - host_name = "hiero" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - # Add requirements to HIERO_PLUGIN_PATH - new_hiero_paths = [ - os.path.join(HIERO_ROOT_DIR, "api", "startup") - ] - old_hiero_path = env.get("HIERO_PLUGIN_PATH") or "" - for path in old_hiero_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_hiero_paths: - new_hiero_paths.append(norm_path) - - env["HIERO_PLUGIN_PATH"] = os.pathsep.join(new_hiero_paths) - # Remove auto screen scale factor for Qt - # - let Hiero decide it's value - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - # Remove tkinter library paths if are set - env.pop("TK_LIBRARY", None) - env.pop("TCL_LIBRARY", None) - - # Add vendor to PYTHONPATH - python_path = env["PYTHONPATH"] - python_path_parts = [] - if python_path: - python_path_parts = python_path.split(os.pathsep) - vendor_path = os.path.join(HIERO_ROOT_DIR, "vendor") - python_path_parts.insert(0, vendor_path) - env["PYTHONPATH"] = os.pathsep.join(python_path_parts) - - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - # Try to add QuickTime to PATH - quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem" - if platform.system() == "windows" and os.path.exists(quick_time_path): - path_value = env.get("PATH") or "" - path_paths = [ - path - for path in path_value.split(os.pathsep) - if path - ] - path_paths.append(quick_time_path) - env["PATH"] = os.pathsep.join(path_paths) - - def get_workfile_extensions(self): - return [".hrox"] diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py deleted file mode 100644 index 862a2607c1..0000000000 --- a/openpype/hosts/hiero/api/events.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -import hiero.core.events -from openpype.lib import Logger, 
register_event_callback -from .lib import ( - sync_avalon_data_to_workfile, - launch_workfiles_app, - selection_changed_timeline, - before_project_save, -) -from .tags import add_tags_to_workfile -from .menu import update_menu_task_label - -log = Logger.get_logger(__name__) - - -def startupCompleted(event): - log.info("startup competed event...") - return - - -def shutDown(event): - log.info("shut down event...") - return - - -def beforeNewProjectCreated(event): - log.info("before new project created event...") - return - - -def afterNewProjectCreated(event): - log.info("after new project created event...") - # sync avalon data to project properties - sync_avalon_data_to_workfile() - - # add tags from preset - add_tags_to_workfile() - - # Workfiles. - if int(os.environ.get("WORKFILES_STARTUP", "0")): - hiero.core.events.sendEvent("kStartWorkfiles", None) - # reset workfiles startup not to open any more in session - os.environ["WORKFILES_STARTUP"] = "0" - - -def beforeProjectLoad(event): - log.info("before project load event...") - return - - -def afterProjectLoad(event): - log.info("after project load event...") - # sync avalon data to project properties - sync_avalon_data_to_workfile() - - # add tags from preset - add_tags_to_workfile() - - -def beforeProjectClosed(event): - log.info("before project closed event...") - return - - -def afterProjectClosed(event): - log.info("after project closed event...") - return - - -def beforeProjectSaved(event): - log.info("before project saved event...") - return - - -def afterProjectSaved(event): - log.info("after project saved event...") - return - - -def register_hiero_events(): - log.info( - "Registering events for: kBeforeNewProjectCreated, " - "kAfterNewProjectCreated, kBeforeProjectLoad, kAfterProjectLoad, " - "kBeforeProjectSave, kAfterProjectSave, kBeforeProjectClose, " - "kAfterProjectClose, kShutdown, kStartup, kSelectionChanged" - ) - - # hiero.core.events.registerInterest( - # "kBeforeNewProjectCreated", beforeNewProjectCreated) - hiero.core.events.registerInterest( - "kAfterNewProjectCreated", afterNewProjectCreated) - - # hiero.core.events.registerInterest( - # "kBeforeProjectLoad", beforeProjectLoad) - hiero.core.events.registerInterest( - "kAfterProjectLoad", afterProjectLoad) - - hiero.core.events.registerInterest( - "kBeforeProjectSave", before_project_save) - # hiero.core.events.registerInterest( - # "kAfterProjectSave", afterProjectSaved) - # - # hiero.core.events.registerInterest( - # "kBeforeProjectClose", beforeProjectClosed) - # hiero.core.events.registerInterest( - # "kAfterProjectClose", afterProjectClosed) - # - # hiero.core.events.registerInterest("kShutdown", shutDown) - # hiero.core.events.registerInterest("kStartup", startupCompleted) - - # INFO: was disabled because it was slowing down timeline operations - # hiero.core.events.registerInterest( - # ("kSelectionChanged", "kTimeline"), selection_changed_timeline) - - # workfiles - try: - hiero.core.events.registerEventType("kStartWorkfiles") - hiero.core.events.registerInterest( - "kStartWorkfiles", launch_workfiles_app) - except RuntimeError: - pass - -def register_events(): - """ - Adding all callbacks. 
- """ - - # if task changed then change notext of hiero - register_event_callback("taskChanged", update_menu_task_label) - log.info("Installed event callback for 'taskChanged'..") diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py deleted file mode 100644 index bf719160d1..0000000000 --- a/openpype/hosts/hiero/api/lib.py +++ /dev/null @@ -1,1319 +0,0 @@ -""" -Host specific functions where host api is connected -""" - -from copy import deepcopy -import os -import re -import platform -import functools -import warnings -import json -import ast -import secrets -import shutil -import hiero - -from qtpy import QtWidgets, QtCore -try: - from PySide import QtXml -except ImportError: - from PySide2 import QtXml - -from openpype.client import get_project -from openpype.settings import get_project_settings -from openpype.pipeline import Anatomy, get_current_project_name -from openpype.pipeline.load import filter_containers -from openpype.lib import Logger -from . import tags -from .constants import ( - OPENPYPE_TAG_NAME, - DEFAULT_SEQUENCE_NAME, - DEFAULT_BIN_NAME -) -from openpype.pipeline.colorspace import ( - get_imageio_config -) - - -class _CTX: - has_been_setup = False - has_menu = False - parent_gui = None - - -class DeprecatedWarning(DeprecationWarning): - pass - - -def deprecated(new_destination): - """Mark functions as deprecated. - - It will result in a warning being emitted when the function is used. - """ - - func = None - if callable(new_destination): - func = new_destination - new_destination = None - - def _decorator(decorated_func): - if new_destination is None: - warning_message = ( - " Please check content of deprecated function to figure out" - " possible replacement." - ) - else: - warning_message = " Please replace your usage with '{}'.".format( - new_destination - ) - - @functools.wraps(decorated_func) - def wrapper(*args, **kwargs): - warnings.simplefilter("always", DeprecatedWarning) - warnings.warn( - ( - "Call to deprecated function '{}'" - "\nFunction was moved or removed.{}" - ).format(decorated_func.__name__, warning_message), - category=DeprecatedWarning, - stacklevel=4 - ) - return decorated_func(*args, **kwargs) - return wrapper - - if func is None: - return _decorator - return _decorator(func) - - -log = Logger.get_logger(__name__) - - -def flatten(list_): - for item_ in list_: - if isinstance(item_, (list, tuple)): - for sub_item in flatten(item_): - yield sub_item - else: - yield item_ - - -def get_current_project(remove_untitled=False): - projects = flatten(hiero.core.projects()) - if not remove_untitled: - return next(iter(projects)) - - # if remove_untitled - for proj in projects: - if "Untitled" in proj.name(): - proj.close() - else: - return proj - - -def get_current_sequence(name=None, new=False): - """ - Get current sequence in context of active project. 
- - Args: - name (str)[optional]: name of sequence we want to return - new (bool)[optional]: if we want to create new one - - Returns: - hiero.core.Sequence: the sequence object - """ - sequence = None - project = get_current_project() - root_bin = project.clipsBin() - - if new: - # create new - name = name or DEFAULT_SEQUENCE_NAME - sequence = hiero.core.Sequence(name) - root_bin.addItem(hiero.core.BinItem(sequence)) - elif name: - # look for sequence by name - sequences = project.sequences() - for _sequence in sequences: - if _sequence.name() == name: - sequence = _sequence - if not sequence: - # if nothing found create new with input name - sequence = get_current_sequence(name, True) - else: - # if name is none and new is False then return current open sequence - sequence = hiero.ui.activeSequence() - - return sequence - - -def get_timeline_selection(): - active_sequence = hiero.ui.activeSequence() - timeline_editor = hiero.ui.getTimelineEditor(active_sequence) - return list(timeline_editor.selection()) - - -def get_current_track(sequence, name, audio=False): - """ - Get current track in context of active project. - - Creates new if none is found. - - Args: - sequence (hiero.core.Sequence): hiero sequene object - name (str): name of track we want to return - audio (bool)[optional]: switch to AudioTrack - - Returns: - hiero.core.Track: the track object - """ - tracks = sequence.videoTracks() - - if audio: - tracks = sequence.audioTracks() - - # get track by name - track = None - for _track in tracks: - if _track.name() == name: - track = _track - - if not track: - if not audio: - track = hiero.core.VideoTrack(name) - else: - track = hiero.core.AudioTrack(name) - - sequence.addTrack(track) - - return track - - -def get_track_items( - selection=False, - sequence_name=None, - track_item_name=None, - track_name=None, - track_type=None, - check_enabled=True, - check_locked=True, - check_tagged=False): - """Get all available current timeline track items. 
-
-    Attributes:
-        selection (list)[optional]: list of selected track items
-        sequence_name (str)[optional]: return only clips from input sequence
-        track_item_name (str)[optional]: return only item with input name
-        track_name (str)[optional]: return only items from track name
-        track_type (str)[optional]: return only items of given type
-            (`audio` or `video`) default is `video`
-        check_enabled (bool)[optional]: ignore disabled items if True
-        check_locked (bool)[optional]: ignore locked tracks if True
-        check_tagged (bool)[optional]: return only tagged items if True
-
-    Return:
-        list: collected track items
-    """
-    track_type = track_type or "video"
-    selection = selection or []
-    return_list = []
-
-    # get selected track items or all in active sequence
-    if selection:
-        try:
-            for track_item in selection:
-                log.info("___ track_item: {}".format(track_item))
-                # make sure only track items are selected
-                if not isinstance(track_item, hiero.core.TrackItem):
-                    continue
-
-                if _validate_all_attributes(
-                    track_item,
-                    track_item_name,
-                    track_name,
-                    track_type,
-                    check_enabled,
-                    check_tagged
-                ):
-                    log.info("___ valid trackitem: {}".format(track_item))
-                    return_list.append(track_item)
-        except AttributeError:
-            pass
-
-    # collect all available active sequence track items
-    if not return_list:
-        sequence = get_current_sequence(name=sequence_name)
-        # get all available tracks from sequence
-        tracks = list(sequence.audioTracks()) + list(sequence.videoTracks())
-        # loop all tracks
-        for track in tracks:
-            if check_locked and track.isLocked():
-                continue
-            if check_enabled and not track.isEnabled():
-                continue
-            # and all items in track
-            for track_item in track.items():
-                # make sure no sub track item is treated as a track item
-                if not isinstance(track_item, hiero.core.TrackItem):
-                    continue
-
-                if _validate_all_attributes(
-                    track_item,
-                    track_item_name,
-                    track_name,
-                    track_type,
-                    check_enabled,
-                    check_tagged
-                ):
-                    return_list.append(track_item)
-
-    return return_list
-
-
-def _validate_all_attributes(
-    track_item,
-    track_item_name,
-    track_name,
-    track_type,
-    check_enabled,
-    check_tagged
-):
-    def _validate_correct_name_track_item():
-        if track_item_name and track_item_name in track_item.name():
-            return True
-        elif not track_item_name:
-            return True
-
-    def _validate_tagged_track_item():
-        if check_tagged and track_item.tags():
-            return True
-        elif not check_tagged:
-            return True
-
-    def _validate_enabled_track_item():
-        if check_enabled and track_item.isEnabled():
-            return True
-        elif not check_enabled:
-            return True
-
-    def _validate_parent_track_item():
-        if track_name and track_name in track_item.parent().name():
-            # filter only items fitting input track name
-            return True
-        elif not track_name:
-            # or add all if no track_name was defined
-            return True
-
-    def _validate_type_track_item():
-        if track_type == "video" and isinstance(
-                track_item.parent(), hiero.core.VideoTrack):
-            # only video track items are allowed
-            return True
-        elif track_type == "audio" and isinstance(
-                track_item.parent(), hiero.core.AudioTrack):
-            # only audio track items are allowed
-            return True
-
-    # run all validators and require each of them to pass
-    return all([
-        _validate_enabled_track_item(),
-        _validate_type_track_item(),
-        _validate_tagged_track_item(),
-        _validate_parent_track_item(),
-        _validate_correct_name_track_item()
-    ])
-
-
-def get_track_item_tags(track_item):
-    """
-    Get track item tags excluding the openpype tag.
-
-    Attributes:
-        track_item (hiero.core.TrackItem): hiero object
-
-    Returns:
-        list: tags found on the track item, without the openpype tag
-    """
-    
returning_tag_data = [] - # get all tags from track item - _tags = track_item.tags() - if not _tags: - return [] - - # collect all tags which are not openpype tag - returning_tag_data.extend( - tag for tag in _tags - if tag.name() != OPENPYPE_TAG_NAME - ) - - return returning_tag_data - - -def _get_tag_unique_hash(): - # sourcery skip: avoid-builtin-shadow - return secrets.token_hex(nbytes=4) - - -def set_track_openpype_tag(track, data=None): - """ - Set openpype track tag to input track object. - - Attributes: - track (hiero.core.VideoTrack): hiero object - - Returns: - hiero.core.Tag - """ - data = data or {} - - # basic Tag's attribute - tag_data = { - "editable": "0", - "note": "OpenPype data container", - "icon": "openpype_icon.png", - "metadata": dict(data.items()) - } - # get available pype tag if any - _tag = get_track_openpype_tag(track) - - if _tag: - # it not tag then create one - tag = tags.update_tag(_tag, tag_data) - else: - # if pype tag available then update with input data - tag = tags.create_tag( - "{}_{}".format( - OPENPYPE_TAG_NAME, - _get_tag_unique_hash() - ), - tag_data - ) - # add it to the input track item - track.addTag(tag) - - return tag - - -def get_track_openpype_tag(track): - """ - Get pype track item tag created by creator or loader plugin. - - Attributes: - trackItem (hiero.core.TrackItem): hiero object - - Returns: - hiero.core.Tag: hierarchy, orig clip attributes - """ - # get all tags from track item - _tags = track.tags() - if not _tags: - return None - for tag in _tags: - # return only correct tag defined by global name - if OPENPYPE_TAG_NAME in tag.name(): - return tag - - -def get_track_openpype_data(track, container_name=None): - """ - Get track's openpype tag data. - - Attributes: - trackItem (hiero.core.VideoTrack): hiero object - - Returns: - dict: data found on pype tag - """ - return_data = {} - # get pype data tag from track item - tag = get_track_openpype_tag(track) - - if not tag: - return None - - # get tag metadata attribute - tag_data = deepcopy(dict(tag.metadata())) - - for obj_name, obj_data in tag_data.items(): - obj_name = obj_name.replace("tag.", "") - - if obj_name in ["applieswhole", "note", "label"]: - continue - return_data[obj_name] = json.loads(obj_data) - - return ( - return_data[container_name] - if container_name - else return_data - ) - - -@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_tag") -def get_track_item_pype_tag(track_item): - # backward compatibility alias - return get_trackitem_openpype_tag(track_item) - - -@deprecated("openpype.hosts.hiero.api.lib.set_trackitem_openpype_tag") -def set_track_item_pype_tag(track_item, data=None): - # backward compatibility alias - return set_trackitem_openpype_tag(track_item, data) - - -@deprecated("openpype.hosts.hiero.api.lib.get_trackitem_openpype_data") -def get_track_item_pype_data(track_item): - # backward compatibility alias - return get_trackitem_openpype_data(track_item) - - -def get_trackitem_openpype_tag(track_item): - """ - Get pype track item tag created by creator or loader plugin. - - Attributes: - trackItem (hiero.core.TrackItem): hiero object - - Returns: - hiero.core.Tag: hierarchy, orig clip attributes - """ - # get all tags from track item - _tags = track_item.tags() - if not _tags: - return None - for tag in _tags: - # return only correct tag defined by global name - if OPENPYPE_TAG_NAME in tag.name(): - return tag - - -def set_trackitem_openpype_tag(track_item, data=None): - """ - Set openpype track tag to input track object. 
-
-    Attributes:
-        track_item (hiero.core.TrackItem): hiero track item object
-
-    Returns:
-        hiero.core.Tag
-    """
-    data = data or {}
-
-    # basic Tag's attribute
-    tag_data = {
-        "editable": "0",
-        "note": "OpenPype data container",
-        "icon": "openpype_icon.png",
-        "metadata": dict(data.items())
-    }
-    # get available pype tag if any
-    _tag = get_trackitem_openpype_tag(track_item)
-    if _tag:
-        # if a pype tag exists then update it with input data
-        tag = tags.update_tag(_tag, tag_data)
-    else:
-        # if there is no tag yet then create one
-        tag = tags.create_tag(
-            "{}_{}".format(
-                OPENPYPE_TAG_NAME,
-                _get_tag_unique_hash()
-            ),
-            tag_data
-        )
-        # add it to the input track item
-        track_item.addTag(tag)
-
-    return tag
-
-
-def get_trackitem_openpype_data(track_item):
-    """
-    Get track item's pype tag data.
-
-    Attributes:
-        track_item (hiero.core.TrackItem): hiero object
-
-    Returns:
-        dict: data found on pype tag
-    """
-    data = {}
-    # get pype data tag from track item
-    tag = get_trackitem_openpype_tag(track_item)
-
-    if not tag:
-        return None
-
-    # get tag metadata attribute
-    tag_data = deepcopy(dict(tag.metadata()))
-    # convert tag metadata to normal key names and values to correct types
-    for k, v in tag_data.items():
-        key = k.replace("tag.", "")
-
-        try:
-            # capture exceptions which are related to strings only
-            if re.match(r"^[\d]+$", v):
-                value = int(v)
-            elif re.match(r"^True$", v):
-                value = True
-            elif re.match(r"^False$", v):
-                value = False
-            elif re.match(r"^None$", v):
-                value = None
-            elif re.match(r"^[\w\d_]+$", v):
-                value = v
-            else:
-                value = ast.literal_eval(v)
-        except (ValueError, SyntaxError) as msg:
-            log.warning(msg)
-            value = v
-
-        data[key] = value
-
-    return data
-
-
-def imprint(track_item, data=None):
-    """
-    Add `Avalon data` into a hiero track item tag.
-
-    Also includes the publish attribute in the tag.
-
-    Arguments:
-        track_item (hiero.core.TrackItem): hiero track item object
-        data (dict): Any data which needs to be imprinted
-
-    Examples:
-        data = {
-            'asset': 'sq020sh0280',
-            'family': 'render',
-            'subset': 'subsetMain'
-        }
-    """
-    data = data or {}
-
-    tag = set_trackitem_openpype_tag(track_item, data)
-
-    # add publish attribute
-    set_publish_attribute(tag, True)
-
-
-def set_publish_attribute(tag, value):
-    """ Set Publish attribute in input Tag object
-
-    Attributes:
-        tag (hiero.core.Tag): a tag object
-        value (bool): True or False
-    """
-    tag_data = tag.metadata()
-    # set data to the publish attribute
-    tag_data.setValue("tag.publish", str(value))
-
-
-def get_publish_attribute(tag):
-    """ Get Publish attribute from input Tag object
-
-    Attributes:
-        tag (hiero.core.Tag): a tag object
-
-    Returns:
-        bool: the stored publish state
-    """
-    tag_data = tag.metadata()
-    # get data from the publish attribute
-    value = tag_data.value("tag.publish")
-    # return value converted to bool. A string is stored in the tag.
- return ast.literal_eval(value) - - -def sync_avalon_data_to_workfile(): - # import session to get project dir - project_name = get_current_project_name() - - anatomy = Anatomy(project_name) - work_template = anatomy.templates["work"]["path"] - work_root = anatomy.root_value_for_template(work_template) - active_project_root = ( - os.path.join(work_root, project_name) - ).replace("\\", "/") - # getting project - project = get_current_project() - - if "Tag Presets" in project.name(): - return - - log.debug("Synchronizing Pype metadata to project: {}".format( - project.name())) - - # set project root with backward compatibility - try: - project.setProjectDirectory(active_project_root) - except Exception: - # old way of setting it - project.setProjectRoot(active_project_root) - - # get project data from avalon db - project_doc = get_project(project_name) - project_data = project_doc["data"] - - log.debug("project_data: {}".format(project_data)) - - # get format and fps property from avalon db on project - width = project_data["resolutionWidth"] - height = project_data["resolutionHeight"] - pixel_aspect = project_data["pixelAspect"] - fps = project_data['fps'] - format_name = project_data['code'] - - # create new format in hiero project - format = hiero.core.Format(width, height, pixel_aspect, format_name) - project.setOutputFormat(format) - - # set fps to hiero project - project.setFramerate(fps) - - # TODO: add auto colorspace set from project drop - log.info("Project property has been synchronised with Avalon db") - - -def launch_workfiles_app(event): - """ - Event for launching workfiles after hiero start - - Args: - event (obj): required but unused - """ - from . import launch_workfiles_app - launch_workfiles_app() - - -def setup(console=False, port=None, menu=True): - """Setup integration - - Registers Pyblish for Hiero plug-ins and appends an item to the File-menu - - Arguments: - console (bool): Display console with GUI - port (int, optional): Port from which to start looking for an - available port to connect with Pyblish QML, default - provided by Pyblish Integration. - menu (bool, optional): Display file menu in Hiero. - """ - - if _CTX.has_been_setup: - teardown() - - add_submission() - - if menu: - add_to_filemenu() - _CTX.has_menu = True - - _CTX.has_been_setup = True - log.debug("pyblish: Loaded successfully.") - - -def teardown(): - """Remove integration""" - if not _CTX.has_been_setup: - return - - if _CTX.has_menu: - remove_from_filemenu() - _CTX.has_menu = False - - _CTX.has_been_setup = False - log.debug("pyblish: Integration torn down successfully") - - -def remove_from_filemenu(): - raise NotImplementedError("Implement me please.") - - -def add_to_filemenu(): - PublishAction() - - -class PyblishSubmission(hiero.exporters.FnSubmission.Submission): - - def __init__(self): - hiero.exporters.FnSubmission.Submission.__init__(self) - - def addToQueue(self): - from . import publish - # Add submission to Hiero module for retrieval in plugins. 
-        hiero.submission = self
-        publish(hiero.ui.mainWindow())
-
-
-def add_submission():
-    registry = hiero.core.taskRegistry
-    registry.addSubmission("Pyblish", PyblishSubmission)
-
-
-class PublishAction(QtWidgets.QAction):
-    """
-    Action which is shown as a menu item
-    """
-
-    def __init__(self):
-        QtWidgets.QAction.__init__(self, "Publish", None)
-        self.triggered.connect(self.publish)
-
-        for interest in ["kShowContextMenu/kTimeline",
-                         "kShowContextMenu/kBin",
-                         "kShowContextMenu/kSpreadsheet"]:
-            hiero.core.events.registerInterest(interest, self.eventHandler)
-
-        self.setShortcut("Ctrl+Alt+P")
-
-    def publish(self):
-        from . import publish
-        # Removing "submission" attribute from hiero module, to prevent tasks
-        # from getting picked up when not using the "Export" dialog.
-        if hasattr(hiero, "submission"):
-            del hiero.submission
-        publish(hiero.ui.mainWindow())
-
-    def eventHandler(self, event):
-        # Add the Menu to the right-click menu
-        event.menu.addAction(self)
-
-
-# def CreateNukeWorkfile(nodes=None,
-#                        nodes_effects=None,
-#                        to_timeline=False,
-#                        **kwargs):
-#     ''' Creating nuke workfile with particular version with given nodes
-#     Also it is creating timeline track items as precomps.
-#
-#     Arguments:
-#         nodes(list of dict): each key in dict is knob order is important
-#         to_timeline(type): will build trackItem with metadata
-#
-#     Returns:
-#         bool: True if done
-#
-#     Raises:
-#         Exception: with traceback
-#
-#     '''
-#     import hiero.core
-#     from openpype.hosts.nuke.api.lib import (
-#         BuildWorkfile,
-#         imprint
-#     )
-#
-#     # check if the file exists if does then Raise "File exists!"
-#     if os.path.exists(filepath):
-#         raise FileExistsError("File already exists: `{}`".format(filepath))
-#
-#     # if no representations matching then
-#     # Raise "no representations to be build"
-#     if len(representations) == 0:
-#         raise AttributeError("Missing list of `representations`")
-#
-#     # check nodes input
-#     if len(nodes) == 0:
-#         log.warning("Missing list of `nodes`")
-#
-#     # create temp nk file
-#     nuke_script = hiero.core.nuke.ScriptWriter()
-#
-#     # create root node and save all metadata
-#     root_node = hiero.core.nuke.RootNode()
-#
-#     anatomy = Anatomy(get_current_project_name())
-#     work_template = anatomy.templates["work"]["path"]
-#     root_path = anatomy.root_value_for_template(work_template)
-#
-#     nuke_script.addNode(root_node)
-#
-#     script_builder = BuildWorkfile(
-#         root_node=root_node,
-#         root_path=root_path,
-#         nodes=nuke_script.getNodes(),
-#         **kwargs
-#     )
-
-
-def create_nuke_workfile_clips(nuke_workfiles, seq=None):
-    '''
-    nuke_workfiles is list of dictionaries like:
-    [{
-        'path': 'P:/Jakub_testy_pipeline/test_v01.nk',
-        'name': 'test',
-        'handleStart': 15, # added asymmetrically to handles
-        'handleEnd': 10, # added asymmetrically to handles
-        "clipIn": 16,
-        "frameStart": 991,
-        "frameEnd": 1023,
-        'task': 'Comp-tracking',
-        'work_dir': 'VFX_PR',
-        'shot': '00010'
-    }]
-    '''
-
-    proj = hiero.core.projects()[-1]
-    root = proj.clipsBin()
-
-    if not seq:
-        seq = hiero.core.Sequence('NewSequences')
-        root.addItem(hiero.core.BinItem(seq))
-    # todo will need to define this better
-    # track = seq[1]  # lazy example to get a destination track
-    clips_lst = []
-    for nk in nuke_workfiles:
-        task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']])
-        bin = create_bin(task_path, proj)
-
-        if nk['task'] not in seq.videoTracks():
-            track = hiero.core.VideoTrack(nk['task'])
-            seq.addTrack(track)
-        else:
-            track = seq.tracks(nk['task'])
-
-        # create clip media
-        media = hiero.core.MediaSource(nk['path'])
-        
media_in = int(media.startTime() or 0) - media_duration = int(media.duration() or 0) - - handle_start = nk.get("handleStart") - handle_end = nk.get("handleEnd") - - if media_in: - source_in = media_in + handle_start - else: - source_in = nk["frameStart"] + handle_start - - if media_duration: - source_out = (media_in + media_duration - 1) - handle_end - else: - source_out = nk["frameEnd"] - handle_end - - source = hiero.core.Clip(media) - - name = os.path.basename(os.path.splitext(nk['path'])[0]) - split_name = split_by_client_version(name)[0] or name - - # add to bin as clip item - items_in_bin = [b.name() for b in bin.items()] - if split_name not in items_in_bin: - binItem = hiero.core.BinItem(source) - bin.addItem(binItem) - - new_source = [ - item for item in bin.items() if split_name in item.name() - ][0].items()[0].item() - - # add to track as clip item - trackItem = hiero.core.TrackItem( - split_name, hiero.core.TrackItem.kVideo) - trackItem.setSource(new_source) - trackItem.setSourceIn(source_in) - trackItem.setSourceOut(source_out) - trackItem.setTimelineIn(nk["clipIn"]) - trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) - track.addTrackItem(trackItem) - clips_lst.append(trackItem) - - return clips_lst - - -def create_bin(path=None, project=None): - ''' - Create bin in project. - If the path is "bin1/bin2/bin3" it will create whole depth - and return `bin3` - - ''' - # get the first loaded project - project = project or get_current_project() - - path = path or DEFAULT_BIN_NAME - - path = path.replace("\\", "/").split("/") - - root_bin = project.clipsBin() - - done_bin_lst = [] - for i, b in enumerate(path): - if i == 0 and len(path) > 1: - if b in [bin.name() for bin in root_bin.bins()]: - bin = [bin for bin in root_bin.bins() if b in bin.name()][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - root_bin.addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i >= 1 and i < len(path) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i == len(path) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - - return done_bin_lst[-1] - - -def split_by_client_version(string): - regex = r"[/_.]v\d+" - try: - matches = re.findall(regex, string, re.IGNORECASE) - return string.split(matches[0]) - except Exception as error: - log.error(error) - return None - - -def get_selected_track_items(sequence=None): - _sequence = sequence or get_current_sequence() - - # Getting selection - timeline_editor = hiero.ui.getTimelineEditor(_sequence) - return timeline_editor.selection() - - -def set_selected_track_items(track_items_list, sequence=None): - _sequence = sequence or get_current_sequence() - - # make sure only trackItems are in list selection - only_track_items = [ - i for i in track_items_list - if isinstance(i, hiero.core.TrackItem)] - - # Getting selection - timeline_editor = hiero.ui.getTimelineEditor(_sequence) - return timeline_editor.setSelection(only_track_items) - - -def _read_doc_from_path(path): - # reading QtXml.QDomDocument from HROX path - hrox_file = 
QtCore.QFile(path)
-    if not hrox_file.open(QtCore.QFile.ReadOnly):
-        raise RuntimeError("Failed to open file for reading")
-    doc = QtXml.QDomDocument()
-    doc.setContent(hrox_file)
-    hrox_file.close()
-    return doc
-
-
-def _write_doc_to_path(doc, path):
-    # write QtXml.QDomDocument to path as HROX
-    hrox_file = QtCore.QFile(path)
-    if not hrox_file.open(QtCore.QFile.WriteOnly):
-        raise RuntimeError("Failed to open file for writing")
-    stream = QtCore.QTextStream(hrox_file)
-    doc.save(stream, 1)
-    hrox_file.close()
-
-
-def _set_hrox_project_knobs(doc, **knobs):
-    # set attributes to Project Tag
-    proj_elem = doc.documentElement().firstChildElement("Project")
-    for k, v in knobs.items():
-        if "ocioconfigpath" in k:
-            paths_to_format = v[platform.system().lower()]
-            for _path in paths_to_format:
-                v = _path.format(**os.environ)
-                if not os.path.exists(v):
-                    continue
-        log.debug("Project colorspace knob `{}` was set to `{}`".format(k, v))
-        if isinstance(v, dict):
-            continue
-        proj_elem.setAttribute(str(k), v)
-
-
-def apply_colorspace_project():
-    project_name = get_current_project_name()
-    # get the active project
-    project = get_current_project(remove_untitled=True)
-    current_file = project.path()
-
-    # close the active project
-    project.close()
-
-    # get presets for hiero
-    imageio = get_project_settings(project_name)["hiero"]["imageio"]
-    presets = imageio.get("workfile")
-
-    # backward compatibility layer
-    # TODO: remove this after some time
-    config_data = get_imageio_config(
-        project_name=get_current_project_name(),
-        host_name="hiero"
-    )
-
-    if config_data:
-        presets.update({
-            "ocioConfigName": "custom"
-        })
-
-    # save the workfile as subversion "comment:_colorspaceChange"
-    split_current_file = os.path.splitext(current_file)
-    copy_current_file = current_file
-
-    if "_colorspaceChange" not in current_file:
-        copy_current_file = (
-            split_current_file[0]
-            + "_colorspaceChange"
-            + split_current_file[1]
-        )
-
-    try:
-        # duplicate the file so the changes are applied only to the copy
-        shutil.copyfile(current_file, copy_current_file)
-    except shutil.Error:
-        # in case the file already exists and we want to copy to the
-        # same file, we need to do this trick
-        # TEMP file name change
-        copy_current_file_tmp = copy_current_file + "_tmp"
-        # create TEMP file
-        shutil.copyfile(current_file, copy_current_file_tmp)
-        # remove original file
-        os.remove(current_file)
-        # copy TEMP back to original name
-        shutil.copyfile(copy_current_file_tmp, copy_current_file)
-        # remove the TEMP file as we don't need it
-        os.remove(copy_current_file_tmp)
-
-    # the code below changes the xml hrox attributes
-    presets.update({"name": os.path.basename(copy_current_file)})
-
-    # read HROX in as QDomDocument
-    doc = _read_doc_from_path(copy_current_file)
-
-    # apply project colorspace properties
-    _set_hrox_project_knobs(doc, **presets)
-
-    # write QDomDocument back as HROX
-    _write_doc_to_path(doc, copy_current_file)
-
-    # open the file as current project
-    hiero.core.openProject(copy_current_file)
-
-
-def apply_colorspace_clips():
-    project_name = get_current_project_name()
-    project = get_current_project(remove_untitled=True)
-    clips = project.clips()
-
-    # get presets for hiero
-    imageio = get_project_settings(project_name)["hiero"]["imageio"]
-
-    presets = imageio.get("regexInputs", {}).get("inputs", {})
-    log.debug("presets: {}".format(presets))
-    for clip in clips:
-        clip_media_source_path = clip.mediaSource().firstpath()
-        clip_name = clip.name()
-        clip_colorspace = clip.sourceMediaColourTransform()
-
-        if "default" in clip_colorspace:
-            continue
-
-        # check if any colorspace preset for reading matches
-        preset_clrsp = None
-        for k in presets:
-            if not bool(re.search(k["regex"], clip_media_source_path)):
-                continue
-            preset_clrsp = k["colorspace"]
-
-        if preset_clrsp:
-            log.debug("Changing clip.path: {}".format(clip_media_source_path))
-            log.info("Changing clip `{}` colorspace {} to {}".format(
-                clip_name, clip_colorspace, preset_clrsp))
-            # set the found preset to the clip
-            clip.setSourceMediaColourTransform(preset_clrsp)
-
-    # save project after all is changed
-    project.save()
-
-
-def is_overlapping(ti_test, ti_original, strict=False):
-    covering_exp = (
-        (ti_test.timelineIn() <= ti_original.timelineIn())
-        and (ti_test.timelineOut() >= ti_original.timelineOut())
-    )
-
-    if strict:
-        return covering_exp
-
-    inside_exp = (
-        (ti_test.timelineIn() >= ti_original.timelineIn())
-        and (ti_test.timelineOut() <= ti_original.timelineOut())
-    )
-    overlaying_right_exp = (
-        (ti_test.timelineIn() < ti_original.timelineOut())
-        and (ti_test.timelineOut() >= ti_original.timelineOut())
-    )
-    overlaying_left_exp = (
-        (ti_test.timelineOut() > ti_original.timelineIn())
-        and (ti_test.timelineIn() <= ti_original.timelineIn())
-    )
-
-    return any((
-        covering_exp,
-        inside_exp,
-        overlaying_right_exp,
-        overlaying_left_exp
-    ))
-
-
-def get_sequence_pattern_and_padding(file):
-    """ Return sequence pattern and padding from file
-
-    Attributes:
-        file (string): basename from path
-
-    Example:
-        Can find file.0001.ext, file.%02d.ext, file.####.ext
-
-    Return:
-        string: any matching sequence pattern
-        int: padding of sequence numbering
-    """
-    foundall = re.findall(
-        r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file)
-    if not foundall:
-        return None, None
-    found = sorted(list(set(foundall[0])))[-1]
-
-    padding = int(
-        re.findall(r"\d+", found)[-1]) if "%" in found else len(found)
-    return found, padding
-
-
-def sync_clip_name_to_data_asset(track_items_list):
-    # loop through all selected clips
-    for track_item in track_items_list:
-        # ignore if parent track is locked or disabled
-        if track_item.parent().isLocked():
-            continue
-        if not track_item.parent().isEnabled():
-            continue
-        # ignore if the track item is disabled
-        if not track_item.isEnabled():
-            continue
-
-        # get name and data
-        ti_name = track_item.name()
-        data = get_trackitem_openpype_data(track_item)
-
-        # ignore if no data on the clip or not publish instance
-        if not data:
-            continue
-        if data.get("id") != "pyblish.avalon.instance":
-            continue
-
-        # fix data if wrong name
-        if data["asset"] != ti_name:
-            data["asset"] = ti_name
-            # remove the original tag
-            tag = get_trackitem_openpype_tag(track_item)
-            track_item.removeTag(tag)
-            # create new tag with updated data
-            set_trackitem_openpype_tag(track_item, data)
-            print("asset was changed in clip: {}".format(ti_name))
-
-
-def set_track_color(track_item, color):
-    track_item.source().binItem().setColor(color)
-
-
-def check_inventory_versions(track_items=None):
-    """
-    Color-code loaded containers by their version state.
-
-    Check all track items and filter only loaded containers.
-    It will get all versions from the database and check whether each
-    container uses the latest version. If not, the item is colored red.
-    """
-    from . 
import parse_container - - track_items = track_items or get_track_items() - # presets - clip_color_last = "green" - clip_color = "red" - - containers = [] - # Find all containers and collect it's node and representation ids - for track_item in track_items: - container = parse_container(track_item) - if container: - containers.append(container) - - # Skip if nothing was found - if not containers: - return - - project_name = get_current_project_name() - filter_result = filter_containers(containers, project_name) - for container in filter_result.latest: - set_track_color(container["_item"], clip_color_last) - - for container in filter_result.outdated: - set_track_color(container["_item"], clip_color) - - -def selection_changed_timeline(event): - """Callback on timeline to check if asset in data is the same as clip name. - - Args: - event (hiero.core.Event): timeline event - """ - timeline_editor = event.sender - selection = timeline_editor.selection() - - track_items = get_track_items( - selection=selection, - track_type="video", - check_enabled=True, - check_locked=True, - check_tagged=True - ) - - # run checking function - sync_clip_name_to_data_asset(track_items) - - -def before_project_save(event): - track_items = get_track_items( - track_type="video", - check_enabled=True, - check_locked=True, - check_tagged=True - ) - - # run checking function - sync_clip_name_to_data_asset(track_items) - - # also mark old versions of loaded containers - check_inventory_versions(track_items) - - -def get_main_window(): - """Acquire Nuke's main window""" - if _CTX.parent_gui is None: - top_widgets = QtWidgets.QApplication.topLevelWidgets() - name = "Foundry::UI::DockMainWindow" - main_window = next(widget for widget in top_widgets if - widget.inherits("QMainWindow") and - widget.metaObject().className() == name) - _CTX.parent_gui = main_window - return _CTX.parent_gui diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py deleted file mode 100644 index ca611570cc..0000000000 --- a/openpype/hosts/hiero/api/menu.py +++ /dev/null @@ -1,175 +0,0 @@ -import os -import sys - -import hiero.core -from hiero.ui import findMenuAction - -from qtpy import QtGui - -from openpype.lib import Logger -from openpype.tools.utils import host_tools -from openpype.settings import get_project_settings -from openpype.pipeline import ( - get_current_project_name, - get_current_asset_name, - get_current_task_name -) - -from . import tags - -log = Logger.get_logger(__name__) - -self = sys.modules[__name__] -self._change_context_menu = None - - -def get_context_label(): - return "{}, {}".format( - get_current_asset_name(), - get_current_task_name() - ) - - -def update_menu_task_label(): - """Update the task label in Avalon menu to current session""" - - object_name = self._change_context_menu - found_menu = findMenuAction(object_name) - - if not found_menu: - log.warning("Can't find menuItem: {}".format(object_name)) - return - - label = get_context_label() - - menu = found_menu.menu() - self._change_context_menu = label - menu.setTitle(label) - - -def menu_install(): - """ - Installing menu into Hiero - - """ - - from . 
import ( - publish, launch_workfiles_app, reload_config, - apply_colorspace_project, apply_colorspace_clips - ) - from .lib import get_main_window - - main_window = get_main_window() - - # here is the best place to add menu - - menu_name = os.environ['AVALON_LABEL'] - - context_label = get_context_label() - - self._change_context_menu = context_label - - try: - check_made_menu = findMenuAction(menu_name) - except Exception: - check_made_menu = None - - if not check_made_menu: - # Grab Hiero's MenuBar - menu = hiero.ui.menuBar().addMenu(menu_name) - else: - menu = check_made_menu.menu() - - context_label_action = menu.addAction(context_label) - context_label_action.setEnabled(False) - - menu.addSeparator() - - workfiles_action = menu.addAction("Work Files...") - workfiles_action.setIcon(QtGui.QIcon("icons:Position.png")) - workfiles_action.triggered.connect(launch_workfiles_app) - - default_tags_action = menu.addAction("Create Default Tags") - default_tags_action.setIcon(QtGui.QIcon("icons:Position.png")) - default_tags_action.triggered.connect(tags.add_tags_to_workfile) - - menu.addSeparator() - - creator_action = menu.addAction("Create...") - creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) - creator_action.triggered.connect( - lambda: host_tools.show_creator(parent=main_window) - ) - - publish_action = menu.addAction("Publish...") - publish_action.setIcon(QtGui.QIcon("icons:Output.png")) - publish_action.triggered.connect( - lambda *args: publish(hiero.ui.mainWindow()) - ) - - loader_action = menu.addAction("Load...") - loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) - loader_action.triggered.connect( - lambda: host_tools.show_loader(parent=main_window) - ) - - sceneinventory_action = menu.addAction("Manage...") - sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) - sceneinventory_action.triggered.connect( - lambda: host_tools.show_scene_inventory(parent=main_window) - ) - - library_action = menu.addAction("Library...") - library_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) - library_action.triggered.connect( - lambda: host_tools.show_library_loader(parent=main_window) - ) - - if os.getenv("OPENPYPE_DEVELOP"): - menu.addSeparator() - reload_action = menu.addAction("Reload pipeline") - reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) - reload_action.triggered.connect(reload_config) - - menu.addSeparator() - apply_colorspace_p_action = menu.addAction("Apply Colorspace Project") - apply_colorspace_p_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) - apply_colorspace_p_action.triggered.connect(apply_colorspace_project) - - apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips") - apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) - apply_colorspace_c_action.triggered.connect(apply_colorspace_clips) - - menu.addSeparator() - - exeprimental_action = menu.addAction("Experimental tools...") - exeprimental_action.triggered.connect( - lambda: host_tools.show_experimental_tools_dialog(parent=main_window) - ) - - -def add_scripts_menu(): - try: - from . import launchforhiero - except ImportError: - - log.warning( - "Skipping studio.menu install, because " - "'scriptsmenu' module seems unavailable." 
- ) - return - - # load configuration of custom menu - project_settings = get_project_settings(get_current_project_name()) - config = project_settings["hiero"]["scriptsmenu"]["definition"] - _menu = project_settings["hiero"]["scriptsmenu"]["name"] - - if not config: - log.warning("Skipping studio menu, no definition found.") - return - - # run the launcher for Hiero menu - studio_menu = launchforhiero.main(title=_menu.title()) - - # apply configuration - studio_menu.build_from_configuration(studio_menu, config) diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py deleted file mode 100644 index d88aeac810..0000000000 --- a/openpype/hosts/hiero/api/pipeline.py +++ /dev/null @@ -1,336 +0,0 @@ -""" -Basic avalon integration -""" -from copy import deepcopy -import os -import contextlib -from collections import OrderedDict - -from pyblish import api as pyblish -from openpype.lib import Logger -from openpype.pipeline import ( - schema, - register_creator_plugin_path, - register_loader_plugin_path, - deregister_creator_plugin_path, - deregister_loader_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.tools.utils import host_tools -from . import lib, menu, events -import hiero - -log = Logger.get_logger(__name__) - -# plugin paths -API_DIR = os.path.dirname(os.path.abspath(__file__)) -HOST_DIR = os.path.dirname(API_DIR) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish").replace("\\", "/") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load").replace("\\", "/") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create").replace("\\", "/") - -AVALON_CONTAINERS = ":AVALON_CONTAINERS" - - -def install(): - """Installing Hiero integration.""" - - # adding all events - events.register_events() - - log.info("Registering Hiero plug-ins..") - pyblish.register_host("hiero") - pyblish.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - # register callback for switching publishable - pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) - - # install menu - menu.menu_install() - menu.add_scripts_menu() - - # register hiero events - events.register_hiero_events() - - -def uninstall(): - """ - Uninstalling Hiero integration for avalon - - """ - log.info("Deregistering Hiero plug-ins..") - pyblish.deregister_host("hiero") - pyblish.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - - # register callback for switching publishable - pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) - - -def containerise(track_item, - name, - namespace, - context, - loader=None, - data=None): - """Bundle Hiero's object into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - track_item (hiero.core.TrackItem): object to imprint as container - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - context (dict): Asset information - loader (str, optional): Name of node used to produce this container. 
- - Returns: - track_item (hiero.core.TrackItem): containerised object - - """ - - data_imprint = OrderedDict({ - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": str(name), - "namespace": str(namespace), - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - }) - - if data: - for k, v in data.items(): - data_imprint.update({k: v}) - - log.debug("_ data_imprint: {}".format(data_imprint)) - lib.set_trackitem_openpype_tag(track_item, data_imprint) - - return track_item - - -def ls(): - """List available containers. - - This function is used by the Container Manager in Nuke. You'll - need to implement a for-loop that then *yields* one Container at - a time. - - See the `container.json` schema for details on how it should look, - and the Maya equivalent, which is in `avalon.maya.pipeline` - """ - - # get all track items from current timeline - all_items = lib.get_track_items() - - # append all video tracks - for track in lib.get_current_sequence(): - if type(track) != hiero.core.VideoTrack: - continue - all_items.append(track) - - for item in all_items: - container_data = parse_container(item) - - if isinstance(container_data, list): - for _c in container_data: - yield _c - elif container_data: - yield container_data - - -def parse_container(item, validate=True): - """Return container data from track_item's pype tag. - - Args: - item (hiero.core.TrackItem or hiero.core.VideoTrack): - A containerised track item. - validate (bool)[optional]: validating with avalon scheme - - Returns: - dict: The container schema data for input containerized track item. - - """ - def data_to_container(item, data): - if ( - not data - or data.get("id") != "pyblish.avalon.container" - ): - return - - if validate and data and data.get("schema"): - schema.validate(data) - - if not isinstance(data, dict): - return - - # If not all required data return the empty container - required = ['schema', 'id', 'name', - 'namespace', 'loader', 'representation'] - - if any(key not in data for key in required): - return - - container = {key: data[key] for key in required} - - container["objectName"] = item.name() - - # Store reference to the node object - container["_item"] = item - - return container - - # convert tag metadata to normal keys names - if type(item) == hiero.core.VideoTrack: - return_list = [] - _data = lib.get_track_openpype_data(item) - - if not _data: - return - # convert the data to list and validate them - for _, obj_data in _data.items(): - container = data_to_container(item, obj_data) - return_list.append(container) - return return_list - else: - _data = lib.get_trackitem_openpype_data(item) - return data_to_container(item, _data) - - -def _update_container_data(container, data): - for key in container: - try: - container[key] = data[key] - except KeyError: - pass - return container - - -def update_container(item, data=None): - """Update container data to input track_item or track's - openpype tag. - - Args: - item (hiero.core.TrackItem or hiero.core.VideoTrack): - A containerised track item. 
- data (dict)[optional]: dictionery with data to be updated - - Returns: - bool: True if container was updated correctly - - """ - - data = data or {} - data = deepcopy(data) - - if type(item) == hiero.core.VideoTrack: - # form object data for test - object_name = data["objectName"] - - # get all available containers - containers = lib.get_track_openpype_data(item) - container = lib.get_track_openpype_data(item, object_name) - - containers = deepcopy(containers) - container = deepcopy(container) - - # update data in container - updated_container = _update_container_data(container, data) - # merge updated container back to containers - containers.update({object_name: updated_container}) - - return bool(lib.set_track_openpype_tag(item, containers)) - else: - container = lib.get_trackitem_openpype_data(item) - updated_container = _update_container_data(container, data) - - log.info("Updating container: `{}`".format(item.name())) - return bool(lib.set_trackitem_openpype_tag(item, updated_container)) - - -def launch_workfiles_app(*args): - ''' Wrapping function for workfiles launcher ''' - from .lib import get_main_window - - main_window = get_main_window() - # show workfile gui - host_tools.show_workfiles(parent=main_window) - - -def publish(parent): - """Shorthand to publish from within host""" - return host_tools.show_publish(parent) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context - - Example: - >>> with maintained_selection(): - ... for track_item in track_items: - ... < do some stuff > - """ - from .lib import ( - set_selected_track_items, - get_selected_track_items - ) - previous_selection = get_selected_track_items() - reset_selection() - try: - # do the operation - yield - finally: - reset_selection() - set_selected_track_items(previous_selection) - - -def reset_selection(): - """Deselect all selected nodes - """ - from .lib import set_selected_track_items - set_selected_track_items([]) - - -def reload_config(): - """Attempt to reload pipeline at run-time. - - CAUTION: This is primarily for development and debugging purposes. 
- - """ - import importlib - - for module in ( - "openpype.hosts.hiero.lib", - "openpype.hosts.hiero.menu", - "openpype.hosts.hiero.tags" - ): - log.info("Reloading module: {}...".format(module)) - try: - module = importlib.import_module(module) - import imp - imp.reload(module) - except Exception as e: - log.warning("Cannot reload module: {}".format(e)) - importlib.reload(module) - - -def on_pyblish_instance_toggled(instance, old_value, new_value): - """Toggle node passthrough states on instance toggles.""" - - log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( - instance, old_value, new_value)) - - from openpype.hosts.hiero.api import ( - get_trackitem_openpype_tag, - set_publish_attribute - ) - - # Whether instances should be passthrough based on new value - track_item = instance.data["item"] - tag = get_trackitem_openpype_tag(track_item) - set_publish_attribute(tag, new_value) diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py deleted file mode 100644 index b0c73e41fb..0000000000 --- a/openpype/hosts/hiero/api/plugin.py +++ /dev/null @@ -1,955 +0,0 @@ -import os -from pprint import pformat -import re -from copy import deepcopy - -import hiero - -from qtpy import QtWidgets, QtCore -import qargparse - -from openpype.settings import get_current_project_settings -from openpype.lib import Logger -from openpype.pipeline import LoaderPlugin, LegacyCreator -from openpype.pipeline.load import get_representation_path_from_context -from . import lib - -log = Logger.get_logger(__name__) - - -def load_stylesheet(): - path = os.path.join(os.path.dirname(__file__), "style.css") - if not os.path.exists(path): - log.warning("Unable to load stylesheet, file not found in resources") - return "" - - with open(path, "r") as file_stream: - stylesheet = file_stream.read() - return stylesheet - - -class CreatorWidget(QtWidgets.QDialog): - - # output items - items = {} - - def __init__(self, name, info, ui_inputs, parent=None): - super(CreatorWidget, self).__init__(parent) - - self.setObjectName(name) - - self.setWindowFlags( - QtCore.Qt.Window - | QtCore.Qt.CustomizeWindowHint - | QtCore.Qt.WindowTitleHint - | QtCore.Qt.WindowCloseButtonHint - | QtCore.Qt.WindowStaysOnTopHint - ) - self.setWindowTitle(name or "Pype Creator Input") - self.resize(500, 700) - - # Where inputs and labels are set - self.content_widget = [QtWidgets.QWidget(self)] - top_layout = QtWidgets.QFormLayout(self.content_widget[0]) - top_layout.setObjectName("ContentLayout") - top_layout.addWidget(Spacer(5, self)) - - # first add widget tag line - top_layout.addWidget(QtWidgets.QLabel(info)) - - # main dynamic layout - self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAsNeeded) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOn) - self.scroll_area.setHorizontalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOff) - self.scroll_area.setWidgetResizable(True) - - self.content_widget.append(self.scroll_area) - - scroll_widget = QtWidgets.QWidget(self) - in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) - self.content_layout = [in_scroll_area] - - # add preset data into input widget layout - self.items = self.populate_widgets(ui_inputs) - self.scroll_area.setWidget(scroll_widget) - - # Confirmation buttons - btns_widget = QtWidgets.QWidget(self) - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - - cancel_btn = QtWidgets.QPushButton("Cancel") - btns_layout.addWidget(cancel_btn) - - 
ok_btn = QtWidgets.QPushButton("Ok") - btns_layout.addWidget(ok_btn) - - # Main layout of the dialog - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(10, 10, 10, 10) - main_layout.setSpacing(0) - - # adding content widget - for w in self.content_widget: - main_layout.addWidget(w) - - main_layout.addWidget(btns_widget) - - ok_btn.clicked.connect(self._on_ok_clicked) - cancel_btn.clicked.connect(self._on_cancel_clicked) - - stylesheet = load_stylesheet() - self.setStyleSheet(stylesheet) - - def _on_ok_clicked(self): - self.result = self.value(self.items) - self.close() - - def _on_cancel_clicked(self): - self.result = None - self.close() - - def value(self, data, new_data=None): - new_data = new_data or dict() - for k, v in data.items(): - new_data[k] = { - "target": None, - "value": None - } - if v["type"] == "dict": - new_data[k]["target"] = v["target"] - new_data[k]["value"] = self.value(v["value"]) - if v["type"] == "section": - new_data.pop(k) - new_data = self.value(v["value"], new_data) - elif getattr(v["value"], "currentText", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].currentText() - elif getattr(v["value"], "isChecked", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].isChecked() - elif getattr(v["value"], "value", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].value() - elif getattr(v["value"], "text", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].text() - - return new_data - - def camel_case_split(self, text): - matches = re.finditer( - '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) - return " ".join([str(m.group(0)).capitalize() for m in matches]) - - def create_row(self, layout, type, text, **kwargs): - value_keys = ["setText", "setCheckState", "setValue", "setChecked"] - - # get type attribute from qwidgets - attr = getattr(QtWidgets, type) - - # convert label text to normal capitalized text with spaces - label_text = self.camel_case_split(text) - - # assign the new text to label widget - label = QtWidgets.QLabel(label_text) - label.setObjectName("LineLabel") - - # create attribute name text strip of spaces - attr_name = text.replace(" ", "") - - # create attribute and assign default values - setattr( - self, - attr_name, - attr(parent=self)) - - # assign the created attribute to variable - item = getattr(self, attr_name) - - # set attributes to item which are not values - for func, val in kwargs.items(): - if func in value_keys: - continue - - if getattr(item, func): - log.debug("Setting {} to {}".format(func, val)) - func_attr = getattr(item, func) - if isinstance(val, tuple): - func_attr(*val) - else: - func_attr(val) - - # set values to item - for value_item in value_keys: - if value_item not in kwargs: - continue - if getattr(item, value_item): - getattr(item, value_item)(kwargs[value_item]) - - # add to layout - layout.addRow(label, item) - - return item - - def populate_widgets(self, data, content_layout=None): - """ - Populate widget from input dict. - - Each plugin has its own set of widget rows defined in dictionary - each row values should have following keys: `type`, `target`, - `label`, `order`, `value` and optionally also `toolTip`. 
- - Args: - data (dict): widget rows or organized groups defined - by types `dict` or `section` - content_layout (QtWidgets.QFormLayout)[optional]: used when nesting - - Returns: - dict: redefined data dict updated with created widgets - - """ - - content_layout = content_layout or self.content_layout[-1] - # fix order of process by defined order value - ordered_keys = list(data.keys()) - for k, v in data.items(): - try: - # try removing a key from index which should - # be filled with new - ordered_keys.pop(v["order"]) - except IndexError: - pass - # add key into correct order - ordered_keys.insert(v["order"], k) - - # process ordered - for k in ordered_keys: - v = data[k] - tool_tip = v.get("toolTip", "") - if v["type"] == "dict": - # adding spacer between sections - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - if v["type"] == "section": - # adding spacer between sections - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - elif v["type"] == "QLineEdit": - data[k]["value"] = self.create_row( - content_layout, "QLineEdit", v["label"], - setText=v["value"], setToolTip=tool_tip) - elif v["type"] == "QComboBox": - data[k]["value"] = self.create_row( - content_layout, "QComboBox", v["label"], - addItems=v["value"], setToolTip=tool_tip) - elif v["type"] == "QCheckBox": - data[k]["value"] = self.create_row( - content_layout, "QCheckBox", v["label"], - setChecked=v["value"], setToolTip=tool_tip) - elif v["type"] == "QSpinBox": - data[k]["value"] = self.create_row( - content_layout, "QSpinBox", v["label"], - setValue=v["value"], - setDisplayIntegerBase=10000, - setRange=(0, 99999), setMinimum=0, - setMaximum=100000, setToolTip=tool_tip) - - return data - - -class Spacer(QtWidgets.QWidget): - def __init__(self, height, *args, **kwargs): - super(self.__class__, self).__init__(*args, **kwargs) - - self.setFixedHeight(height) - - real_spacer = QtWidgets.QWidget(self) - real_spacer.setObjectName("Spacer") - real_spacer.setFixedHeight(height) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(real_spacer) - - self.setLayout(layout) - - 
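-# NOTE: Illustrative sketch only. The dict below is a hypothetical example of
-# the row schema consumed by `CreatorWidget.populate_widgets` (keys `type`,
-# `target`, `label`, `order`, `value`, plus optional `toolTip`); the key
-# names and values shown are assumptions for illustration, not taken from
-# any production settings.
-#
-#     ui_inputs = {
-#         "clipName": {
-#             "type": "QLineEdit",
-#             "target": "ui",
-#             "label": "Clip Name",
-#             "order": 0,
-#             "value": "{asset}_{subset}",
-#             "toolTip": "Template for the resulting clip name",
-#         },
-#         "vSyncOn": {
-#             "type": "QCheckBox",
-#             "target": "ui",
-#             "label": "Enable Vertical Sync",
-#             "order": 1,
-#             "value": True,
-#         },
-#     }
-#     widget = CreatorWidget("Create Shots", "Set attributes", ui_inputs)
-#     widget.exec_()
-#     # after OK is clicked, `widget.result` holds
-#     # {key: {"target": ..., "value": ...}} for each row
-
-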
-class SequenceLoader(LoaderPlugin): - """A basic SequenceLoader for Resolve - - This will implement the basic behavior for a loader to inherit from that - will containerize the reference and will implement the `remove` and - `update` logic. - - """ - - options = [ - qargparse.Boolean( - "handles", - label="Include handles", - default=0, - help="Load with handles or without?" - ), - qargparse.Choice( - "load_to", - label="Where to load clips", - items=[ - "Current timeline", - "New timeline" - ], - default="Current timeline", - help="Where do you want clips to be loaded?" - ), - qargparse.Choice( - "load_how", - label="How to load clips", - items=[ - "Original timing", - "Sequentially in order" - ], - default="Original timing", - help="Would you like to place it at original timing?" - ) - ] - - def load( - self, - context, - name=None, - namespace=None, - options=None - ): - pass - - def update(self, container, representation): - """Update an existing `container` - """ - pass - - def remove(self, container): - """Remove an existing `container` - """ - pass - - -class ClipLoader: - - active_bin = None - data = dict() - - def __init__(self, cls, context, path, **options): - """ Initialize object - - Arguments: - cls (avalon.api.Loader): plugin object - context (dict): loader plugin context - options (dict)[optional]: possible keys: - projectBinPath: "path/to/binItem" - - """ - self.__dict__.update(cls.__dict__) - self.context = context - self.active_project = lib.get_current_project() - self.fname = path - - # try to get value from options or evaluate key value for `handles` - self.with_handles = options.get("handles") or bool( - options.get("handles") is True) - # try to get value from options or evaluate key value for `load_how` - self.sequencial_load = options.get("sequentially") or bool( - "Sequentially in order" in options.get("load_how", "")) - # try to get value from options or evaluate key value for `load_to` - self.new_sequence = options.get("newSequence") or bool( - "New timeline" in options.get("load_to", "")) - self.clip_name_template = options.get( - "clipNameTemplate") or "{asset}_{subset}_{representation}" - assert self._populate_data(), str( - "Cannot Load selected data, look into database " - "or call your supervisor") - - # inject asset data to representation dict - self._get_asset_data() - log.info("__init__ self.data: `{}`".format(pformat(self.data))) - log.info("__init__ options: `{}`".format(pformat(options))) - - # add active components to class - if self.new_sequence: - if options.get("sequence"): - # if multiselection is set then use options sequence - self.active_sequence = options["sequence"] - else: - # create new sequence - self.active_sequence = lib.get_current_sequence(new=True) - self.active_sequence.setFramerate( - hiero.core.TimeBase.fromString( - str(self.data["assetData"]["fps"]))) - else: - self.active_sequence = lib.get_current_sequence() - - if options.get("track"): - # if multiselection is set then use options track - self.active_track = options["track"] - else: - self.active_track = lib.get_current_track( - self.active_sequence, self.data["track_name"]) - - def _populate_data(self): - """ Gets context and convert it to self.data - data structure: - { - "name": "assetName_subsetName_representationName" - "path": "path/to/file/created/by/get_repr..", - "binPath": "projectBinPath", - } - """ - # create name - repr = self.context["representation"] - repr_cntx = repr["context"] - asset = str(repr_cntx["asset"]) - subset = str(repr_cntx["subset"]) - 
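The flag evaluation in `ClipLoader.__init__` above is more convoluted than it needs to be; expressions like `options.get("handles") or bool(options.get("handles") is True)` collapse to a plain truthiness or membership test for the values the qargparse options produce. A standalone sketch with a hypothetical options dict:

```python
# Standalone sketch of how ClipLoader derives its flags from loader
# options; the options dict is hypothetical and mirrors the qargparse
# option values defined on SequenceLoader above.
options = {
    "handles": True,
    "load_how": "Sequentially in order",
    "load_to": "New timeline",
}

with_handles = bool(options.get("handles"))
sequential_load = "Sequentially in order" in options.get("load_how", "")
new_sequence = "New timeline" in options.get("load_to", "")

print(with_handles, sequential_load, new_sequence)
# True True True
```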
representation = str(repr_cntx["representation"]) - self.data["clip_name"] = self.clip_name_template.format(**repr_cntx) - self.data["track_name"] = "_".join([subset, representation]) - self.data["versionData"] = self.context["version"]["data"] - # gets file path - file = get_representation_path_from_context(self.context) - if not file: - repr_id = repr["_id"] - log.warning( - "Representation id `{}` is failing to load".format(repr_id)) - return None - self.data["path"] = file.replace("\\", "/") - - # convert to hashed path - if repr_cntx.get("frame"): - self._fix_path_hashes() - - # solve project bin structure path - hierarchy = str("/".join(( - "Loader", - repr_cntx["hierarchy"].replace("\\", "/"), - asset - ))) - - self.data["binPath"] = hierarchy - - return True - - def _fix_path_hashes(self): - """ Convert file path where it is needed padding with hashes - """ - file = self.data["path"] - if "#" not in file: - frame = self.context["representation"]["context"].get("frame") - padding = len(frame) - file = file.replace(frame, "#" * padding) - self.data["path"] = file - - def _get_asset_data(self): - """ Get all available asset data - - joint `data` key with asset.data dict into the representation - - """ - - asset_doc = self.context["asset"] - self.data["assetData"] = asset_doc["data"] - - def _make_track_item(self, source_bin_item, audio=False): - """ Create track item with """ - - clip = source_bin_item.activeItem() - - # add to track as clip item - if not audio: - track_item = hiero.core.TrackItem( - self.data["clip_name"], hiero.core.TrackItem.kVideo) - else: - track_item = hiero.core.TrackItem( - self.data["clip_name"], hiero.core.TrackItem.kAudio) - - track_item.setSource(clip) - track_item.setSourceIn(self.handle_start) - track_item.setTimelineIn(self.timeline_in) - track_item.setSourceOut((self.media_duration) - self.handle_end) - track_item.setTimelineOut(self.timeline_out) - track_item.setPlaybackSpeed(1) - self.active_track.addTrackItem(track_item) - - return track_item - - def load(self): - # create project bin for the media to be imported into - self.active_bin = lib.create_bin(self.data["binPath"]) - - # create mediaItem in active project bin - # create clip media - self.media = hiero.core.MediaSource(self.data["path"]) - self.media_duration = int(self.media.duration()) - - # get handles - self.handle_start = self.data["versionData"].get("handleStart") - self.handle_end = self.data["versionData"].get("handleEnd") - if self.handle_start is None: - self.handle_start = self.data["assetData"]["handleStart"] - if self.handle_end is None: - self.handle_end = self.data["assetData"]["handleEnd"] - - self.handle_start = int(self.handle_start) - self.handle_end = int(self.handle_end) - - if self.sequencial_load: - last_track_item = lib.get_track_items( - sequence_name=self.active_sequence.name(), - track_name=self.active_track.name() - ) - if len(last_track_item) == 0: - last_timeline_out = 0 - else: - last_track_item = last_track_item[-1] - last_timeline_out = int(last_track_item.timelineOut()) + 1 - self.timeline_in = last_timeline_out - self.timeline_out = last_timeline_out + int( - self.data["assetData"]["clipOut"] - - self.data["assetData"]["clipIn"]) - else: - self.timeline_in = int(self.data["assetData"]["clipIn"]) - self.timeline_out = int(self.data["assetData"]["clipOut"]) - - log.debug("__ self.timeline_in: {}".format(self.timeline_in)) - log.debug("__ self.timeline_out: {}".format(self.timeline_out)) - - # check if slate is included - slate_on = "slate" in 
self.context["version"]["data"]["families"] - log.debug("__ slate_on: {}".format(slate_on)) - - # if slate is on then remove the slate frame from beginning - if slate_on: - self.media_duration -= 1 - self.handle_start += 1 - - # create Clip from Media - clip = hiero.core.Clip(self.media) - clip.setName(self.data["clip_name"]) - - # add Clip to bin if not there yet - if self.data["clip_name"] not in [ - b.name() for b in self.active_bin.items()]: - bin_item = hiero.core.BinItem(clip) - self.active_bin.addItem(bin_item) - - # just make sure the clip is created - # there were some cases were hiero was not creating it - source_bin_item = None - for item in self.active_bin.items(): - if self.data["clip_name"] == item.name(): - source_bin_item = item - if not source_bin_item: - log.warning("Problem with created Source clip: `{}`".format( - self.data["clip_name"])) - - # include handles - if self.with_handles: - self.timeline_in -= self.handle_start - self.timeline_out += self.handle_end - self.handle_start = 0 - self.handle_end = 0 - - # make track item from source in bin as item - track_item = self._make_track_item(source_bin_item) - - log.info("Loading clips: `{}`".format(self.data["clip_name"])) - return track_item - - -class Creator(LegacyCreator): - """Creator class wrapper - """ - clip_color = "Purple" - rename_index = None - - def __init__(self, *args, **kwargs): - super(Creator, self).__init__(*args, **kwargs) - import openpype.hosts.hiero.api as phiero - self.presets = get_current_project_settings()[ - "hiero"]["create"].get(self.__class__.__name__, {}) - - # adding basic current context resolve objects - self.project = phiero.get_current_project() - self.sequence = phiero.get_current_sequence() - - if (self.options or {}).get("useSelection"): - timeline_selection = phiero.get_timeline_selection() - self.selected = phiero.get_track_items( - selection=timeline_selection - ) - else: - self.selected = phiero.get_track_items() - - self.widget = CreatorWidget - - -class PublishClip: - """ - Convert a track item to publishable instance - - Args: - track_item (hiero.core.TrackItem): hiero track item object - kwargs (optional): additional data needed for rename=True (presets) - - Returns: - hiero.core.TrackItem: hiero track item object with pype tag - """ - vertical_clip_match = {} - tag_data = {} - types = { - "shot": "shot", - "folder": "folder", - "episode": "episode", - "sequence": "sequence", - "track": "sequence", - } - - # parents search pattern - parents_search_pattern = r"\{([a-z]*?)\}" - - # default templates for non-ui use - rename_default = False - hierarchy_default = "{_folder_}/{_sequence_}/{_track_}" - clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" - subset_name_default = "" - review_track_default = "< none >" - subset_family_default = "plate" - count_from_default = 10 - count_steps_default = 10 - vertical_sync_default = False - driving_layer_default = "" - - def __init__(self, cls, track_item, **kwargs): - # populate input cls attribute onto self.[attr] - self.__dict__.update(cls.__dict__) - - # get main parent objects - self.track_item = track_item - sequence_name = lib.get_current_sequence().name() - self.sequence_name = str(sequence_name).replace(" ", "_") - - # track item (clip) main attributes - self.ti_name = track_item.name() - self.ti_index = int(track_item.eventNumber()) - - # get track name and index - track_name = track_item.parent().name() - self.track_name = str(track_name).replace(" ", "_") - self.track_index = int(track_item.parent().trackIndex()) 
- - # adding tag.family into tag - if kwargs.get("avalon"): - self.tag_data.update(kwargs["avalon"]) - - # add publish attribute to tag data - self.tag_data.update({"publish": True}) - - # adding ui inputs if any - self.ui_inputs = kwargs.get("ui_inputs", {}) - - # populate default data before we get other attributes - self._populate_track_item_default_data() - - # use all populated default data to create all important attributes - self._populate_attributes() - - # create parents with correct types - self._create_parents() - - def convert(self): - # solve track item data and add them to tag data - tag_hierarchy_data = self._convert_to_tag_data() - - self.tag_data.update(tag_hierarchy_data) - - # if track name is in review track name and also if driving track name - # is not in review track name: skip tag creation - if (self.track_name in self.review_layer) and ( - self.driving_layer not in self.review_layer): - return - - # deal with clip name - new_name = self.tag_data.pop("newClipName") - - if self.rename: - # rename track item - self.track_item.setName(new_name) - self.tag_data["asset_name"] = new_name - else: - self.tag_data["asset_name"] = self.ti_name - self.tag_data["hierarchyData"]["shot"] = self.ti_name - - # AYON unique identifier - folder_path = "/{}/{}".format( - tag_hierarchy_data["hierarchy"], - self.tag_data["asset_name"] - ) - self.tag_data["folderPath"] = folder_path - if self.tag_data["heroTrack"] and self.review_layer: - self.tag_data.update({"reviewTrack": self.review_layer}) - else: - self.tag_data.update({"reviewTrack": None}) - - # TODO: remove debug print - log.debug("___ self.tag_data: {}".format( - pformat(self.tag_data) - )) - - # create pype tag on track_item and add data - lib.imprint(self.track_item, self.tag_data) - - return self.track_item - - def _populate_track_item_default_data(self): - """ Populate default formatting data from track item. """ - - self.track_item_default_data = { - "_folder_": "shots", - "_sequence_": self.sequence_name, - "_track_": self.track_name, - "_clip_": self.ti_name, - "_trackIndex_": self.track_index, - "_clipIndex_": self.ti_index - } - - def _populate_attributes(self): - """ Populate main object attributes. 
""" - # track item frame range and parent track name for vertical sync check - self.clip_in = int(self.track_item.timelineIn()) - self.clip_out = int(self.track_item.timelineOut()) - - # define ui inputs if non gui mode was used - self.shot_num = self.ti_index - log.debug( - "____ self.shot_num: {}".format(self.shot_num)) - - # ui_inputs data or default values if gui was not used - self.rename = self.ui_inputs.get( - "clipRename", {}).get("value") or self.rename_default - self.clip_name = self.ui_inputs.get( - "clipName", {}).get("value") or self.clip_name_default - self.hierarchy = self.ui_inputs.get( - "hierarchy", {}).get("value") or self.hierarchy_default - self.hierarchy_data = self.ui_inputs.get( - "hierarchyData", {}).get("value") or \ - self.track_item_default_data.copy() - self.count_from = self.ui_inputs.get( - "countFrom", {}).get("value") or self.count_from_default - self.count_steps = self.ui_inputs.get( - "countSteps", {}).get("value") or self.count_steps_default - self.subset_name = self.ui_inputs.get( - "subsetName", {}).get("value") or self.subset_name_default - self.subset_family = self.ui_inputs.get( - "subsetFamily", {}).get("value") or self.subset_family_default - self.vertical_sync = self.ui_inputs.get( - "vSyncOn", {}).get("value") or self.vertical_sync_default - self.driving_layer = self.ui_inputs.get( - "vSyncTrack", {}).get("value") or self.driving_layer_default - self.review_track = self.ui_inputs.get( - "reviewTrack", {}).get("value") or self.review_track_default - self.audio = self.ui_inputs.get( - "audio", {}).get("value") or False - - # build subset name from layer name - if self.subset_name == "": - self.subset_name = self.track_name - - # create subset for publishing - self.subset = self.subset_family + self.subset_name.capitalize() - - def _replace_hash_to_expression(self, name, text): - """ Replace hash with number in correct padding. """ - _spl = text.split("#") - _len = (len(_spl) - 1) - _repl = "{{{0}:0>{1}}}".format(name, _len) - return text.replace(("#" * _len), _repl) - - - def _convert_to_tag_data(self): - """ Convert internal data to tag data. 
- - Populating the tag data into internal variable self.tag_data - """ - # define vertical sync attributes - hero_track = True - self.review_layer = "" - if self.vertical_sync: - # check if track name is not in driving layer - if self.track_name not in self.driving_layer: - # if it is not then define vertical sync as None - hero_track = False - - # increasing steps by index of rename iteration - self.count_steps *= self.rename_index - - hierarchy_formatting_data = {} - hierarchy_data = deepcopy(self.hierarchy_data) - _data = self.track_item_default_data.copy() - if self.ui_inputs: - # adding tag metadata from ui - for _k, _v in self.ui_inputs.items(): - if _v["target"] == "tag": - self.tag_data[_k] = _v["value"] - - # driving layer is set as positive match - if hero_track or self.vertical_sync: - # mark review layer - if self.review_track and ( - self.review_track not in self.review_track_default): - # if review layer is defined and not the same as default - self.review_layer = self.review_track - # shot num calculate - if self.rename_index == 0: - self.shot_num = self.count_from - else: - self.shot_num = self.count_from + self.count_steps - - # clip name sequence number - _data.update({"shot": self.shot_num}) - - # solve # in test to pythonic expression - for _k, _v in hierarchy_data.items(): - if "#" not in _v["value"]: - continue - hierarchy_data[ - _k]["value"] = self._replace_hash_to_expression( - _k, _v["value"]) - - # fill up pythonic expresisons in hierarchy data - for k, _v in hierarchy_data.items(): - hierarchy_formatting_data[k] = _v["value"].format(**_data) - else: - # if no gui mode then just pass default data - hierarchy_formatting_data = hierarchy_data - - tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formatting_data - ) - - tag_hierarchy_data.update({"heroTrack": True}) - if hero_track and self.vertical_sync: - self.vertical_clip_match.update({ - (self.clip_in, self.clip_out): tag_hierarchy_data - }) - - if not hero_track and self.vertical_sync: - # driving layer is set as negative match - for (_in, _out), hero_data in self.vertical_clip_match.items(): - hero_data.update({"heroTrack": False}) - if _in == self.clip_in and _out == self.clip_out: - data_subset = hero_data["subset"] - # add track index in case duplicity of names in hero data - if self.subset in data_subset: - hero_data["subset"] = self.subset + str( - self.track_index) - # in case track name and subset name is the same then add - if self.subset_name == self.track_name: - hero_data["subset"] = self.subset - # assign data to return hierarchy data to tag - tag_hierarchy_data = hero_data - - # add data to return data dict - return tag_hierarchy_data - - def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): - """ Solve tag data from hierarchy data and templates. """ - # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) - clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) - - # remove shot from hierarchy data: is not needed anymore - hierarchy_formatting_data.pop("shot") - - return { - "newClipName": clip_name_filled, - "hierarchy": hierarchy_filled, - "parents": self.parents, - "hierarchyData": hierarchy_formatting_data, - "subset": self.subset, - "family": self.subset_family, - "families": [self.data["family"]] - } - - def _convert_to_entity(self, type, template): - """ Converting input key to key with type. 
""" - # convert to entity type - entity_type = self.types.get(type, None) - - assert entity_type, "Missing entity type for `{}`".format( - type - ) - - # first collect formatting data to use for formatting template - formatting_data = {} - for _k, _v in self.hierarchy_data.items(): - value = _v["value"].format( - **self.track_item_default_data) - formatting_data[_k] = value - - return { - "entity_type": entity_type, - "entity_name": template.format( - **formatting_data - ) - } - - def _create_parents(self): - """ Create parents and return it in list. """ - self.parents = [] - - pattern = re.compile(self.parents_search_pattern) - - par_split = [(pattern.findall(t).pop(), t) - for t in self.hierarchy.split("/")] - - for type, template in par_split: - parent = self._convert_to_entity(type, template) - self.parents.append(parent) diff --git a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py b/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py deleted file mode 100644 index 2e638c2088..0000000000 --- a/openpype/hosts/hiero/api/startup/Python/Startup/Startup.py +++ /dev/null @@ -1,19 +0,0 @@ -import traceback - -# activate hiero from pype -from openpype.pipeline import install_host -import openpype.hosts.hiero.api as phiero -install_host(phiero) - -try: - __import__("openpype.hosts.hiero.api") - __import__("pyblish") - -except ImportError as e: - print(traceback.format_exc()) - print("pyblish: Could not load integration: %s " % e) - -else: - # Setup integration - import openpype.hosts.hiero.api as phiero - phiero.lib.setup() diff --git a/openpype/hosts/hiero/api/startup/Python/StartupUI/otioimporter/__init__.py b/openpype/hosts/hiero/api/startup/Python/StartupUI/otioimporter/__init__.py deleted file mode 100644 index 91be4d02aa..0000000000 --- a/openpype/hosts/hiero/api/startup/Python/StartupUI/otioimporter/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -__author__ = "Daniel Flehner Heen" -__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] - -import hiero.ui -import hiero.core - -import PySide2.QtWidgets as qw - -from openpype.hosts.hiero.api.otio.hiero_import import load_otio - - -class OTIOProjectSelect(qw.QDialog): - - def __init__(self, projects, *args, **kwargs): - super(OTIOProjectSelect, self).__init__(*args, **kwargs) - self.setWindowTitle("Please select active project") - self.layout = qw.QVBoxLayout() - - self.label = qw.QLabel( - "Unable to determine which project to import sequence to.\n" - "Please select one." 
-        )
-        self.layout.addWidget(self.label)
-
-        self.projects = qw.QComboBox()
-        self.projects.addItems([p.name() for p in projects])
-        self.layout.addWidget(self.projects)
-
-        QBtn = qw.QDialogButtonBox.Ok | qw.QDialogButtonBox.Cancel
-        self.buttonBox = qw.QDialogButtonBox(QBtn)
-        self.buttonBox.accepted.connect(self.accept)
-        self.buttonBox.rejected.connect(self.reject)
-
-        self.layout.addWidget(self.buttonBox)
-        self.setLayout(self.layout)
-
-
-def get_sequence(view):
-    sequence = None
-    if isinstance(view, hiero.ui.TimelineEditor):
-        sequence = view.sequence()
-
-    elif isinstance(view, hiero.ui.BinView):
-        for item in view.selection():
-            if not hasattr(item, "activeItem"):
-                continue
-
-            if isinstance(item.activeItem(), hiero.core.Sequence):
-                sequence = item.activeItem()
-
-    return sequence
-
-
-def OTIO_menu_action(event):
-    # Menu actions
-    otio_import_action = hiero.ui.createMenuAction(
-        "Import OTIO...",
-        open_otio_file,
-        icon=None
-    )
-
-    otio_add_track_action = hiero.ui.createMenuAction(
-        "New Track(s) from OTIO...",
-        open_otio_file,
-        icon=None
-    )
-    otio_add_track_action.setEnabled(False)
-
-    hiero.ui.registerAction(otio_import_action)
-    hiero.ui.registerAction(otio_add_track_action)
-
-    view = hiero.ui.currentContextMenuView()
-
-    if view:
-        sequence = get_sequence(view)
-        if sequence:
-            otio_add_track_action.setEnabled(True)
-
-    for action in event.menu.actions():
-        if action.text() == "Import":
-            action.menu().addAction(otio_import_action)
-            action.menu().addAction(otio_add_track_action)
-
-        elif action.text() == "New Track":
-            action.menu().addAction(otio_add_track_action)
-
-
-def open_otio_file():
-    files = hiero.ui.openFileBrowser(
-        caption="Please select an OTIO file",
-        pattern="*.otio",
-        requiredExtension=".otio"
-    )
-
-    selection = None
-    sequence = None
-
-    view = hiero.ui.currentContextMenuView()
-    if view:
-        sequence = get_sequence(view)
-        selection = view.selection()
-
-    if sequence:
-        project = sequence.project()
-
-    elif selection:
-        project = selection[0].project()
-
-    elif len(hiero.core.projects()) > 1:
-        dialog = OTIOProjectSelect(hiero.core.projects())
-        if dialog.exec_():
-            project = hiero.core.projects()[dialog.projects.currentIndex()]
-
-        else:
-            bar = hiero.ui.mainWindow().statusBar()
-            bar.showMessage(
-                "OTIO Import aborted by user",
-                timeout=3000
-            )
-            return
-
-    else:
-        project = hiero.core.projects()[-1]
-
-    for otio_file in files:
-        load_otio(otio_file, project, sequence)
-
-
-# HieroPlayer is quite limited and can't create transitions etc.
-if not hiero.core.isHieroPlayer():
-    hiero.core.events.registerInterest(
-        "kShowContextMenu/kBin",
-        OTIO_menu_action
-    )
-    hiero.core.events.registerInterest(
-        "kShowContextMenu/kTimeline",
-        OTIO_menu_action
-    )
diff --git a/openpype/hosts/hiero/api/workio.py b/openpype/hosts/hiero/api/workio.py
deleted file mode 100644
index 040fd1435a..0000000000
--- a/openpype/hosts/hiero/api/workio.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os
-import hiero
-
-from openpype.lib import Logger
-
-log = Logger.get_logger(__name__)
-
-
-def file_extensions():
-    return [".hrox"]
-
-
-def has_unsaved_changes():
-    # There are no methods for querying unsaved changes to a project, so
-    # we always enforce a save,
- # but we could at least check if a current open script has a path - project = hiero.core.projects()[-1] - if project.path(): - return True - else: - return False - - -def save_file(filepath): - file = os.path.basename(filepath) - project = hiero.core.projects()[-1] - - if project: - log.info("Saving project: `{}` as '{}'".format(project.name(), file)) - project.saveAs(filepath) - else: - log.info("Creating new project...") - project = hiero.core.newProject() - project.saveAs(filepath) - - -def open_file(filepath): - """Manually fire the kBeforeProjectLoad event in order to work around a bug in Hiero. - The Foundry has logged this bug as: - Bug 40413 - Python API - kBeforeProjectLoad event type is not triggered - when calling hiero.core.openProject() (only triggered through UI) - It exists in all versions of Hiero through (at least) v1.9v1b12. - - Once this bug is fixed, a version check will need to be added here in order to - prevent accidentally firing this event twice. The following commented-out code - is just an example, and will need to be updated when the bug is fixed to catch the - correct versions.""" - # if (hiero.core.env['VersionMajor'] < 1 or - # hiero.core.env['VersionMajor'] == 1 and hiero.core.env['VersionMinor'] < 10: - hiero.core.events.sendEvent("kBeforeProjectLoad", None) - - project = hiero.core.projects()[-1] - - # open project file - hiero.core.openProject(filepath.replace(os.path.sep, "/")) - - # close previous project - project.close() - - - - return True - - -def current_file(): - current_file = hiero.core.projects()[-1].path() - if not current_file: - return None - return os.path.normpath(current_file) - - -def work_root(session): - return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") diff --git a/openpype/hosts/hiero/plugins/create/create_shot_clip.py b/openpype/hosts/hiero/plugins/create/create_shot_clip.py deleted file mode 100644 index d0c81cffa2..0000000000 --- a/openpype/hosts/hiero/plugins/create/create_shot_clip.py +++ /dev/null @@ -1,262 +0,0 @@ -from copy import deepcopy -import openpype.hosts.hiero.api as phiero -# from openpype.hosts.hiero.api import plugin, lib -# reload(lib) -# reload(plugin) -# reload(phiero) - - -class CreateShotClip(phiero.Creator): - """Publishable clip""" - - label = "Create Publishable Clip" - family = "clip" - icon = "film" - defaults = ["Main"] - - gui_tracks = [track.name() - for track in phiero.get_current_sequence().videoTracks()] - gui_name = "Pype publish attributes creator" - gui_info = "Define sequential rename and fill hierarchy data." 
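Each entry in the `gui_inputs` tree below follows a small widget-definition schema: every leaf carries `value`, `type`, `label`, `target`, `toolTip` and `order`, while `section` and `dict` nodes nest further definitions under `"value"`. A minimal sketch of a consumer of such a definition, assuming a hypothetical single-entry tree:

```python
# Minimal sketch of the widget-definition schema used by gui_inputs
# below; the definition dict here is hypothetical.
definition = {
    "countFrom": {
        "value": 10,
        "type": "QSpinBox",
        "label": "Count sequence from",
        "target": "ui",
        "toolTip": "Set the number the shot sequence counts from",
        "order": 0,
    },
}

# A consumer (like CreatorWidget.populate_widgets above) walks the tree
# and dispatches on "type"; values tagged target="tag" end up in clip
# tags, target="ui" stays in the dialog.
for name, attrs in sorted(definition.items(), key=lambda i: i[1]["order"]):
    print(name, attrs["type"], attrs["value"], attrs["target"])
```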
-    gui_inputs = {
-        "renameHierarchy": {
-            "type": "section",
-            "label": "Shot Hierarchy And Rename Settings",
-            "target": "ui",
-            "order": 0,
-            "value": {
-                "hierarchy": {
-                    "value": "{folder}/{sequence}",
-                    "type": "QLineEdit",
-                    "label": "Shot Parent Hierarchy",
-                    "target": "tag",
-                    "toolTip": "Parent hierarchy for the shot root folder. Template is filled from the `Shot Template Keywords` section",  # noqa
-                    "order": 0},
-                "clipRename": {
-                    "value": False,
-                    "type": "QCheckBox",
-                    "label": "Rename clips",
-                    "target": "ui",
-                    "toolTip": "Rename selected clips on the fly",  # noqa
-                    "order": 1},
-                "clipName": {
-                    "value": "{sequence}{shot}",
-                    "type": "QLineEdit",
-                    "label": "Clip Name Template",
-                    "target": "ui",
-                    "toolTip": "Template for creating shot names, used for renaming (turn `Rename clips` on)",  # noqa
-                    "order": 2},
-                "countFrom": {
-                    "value": 10,
-                    "type": "QSpinBox",
-                    "label": "Count sequence from",
-                    "target": "ui",
-                    "toolTip": "Set the number the shot sequence starts counting from",  # noqa
-                    "order": 3},
-                "countSteps": {
-                    "value": 10,
-                    "type": "QSpinBox",
-                    "label": "Stepping number",
-                    "target": "ui",
-                    "toolTip": "Number added with every new step",  # noqa
-                    "order": 4},
-            }
-        },
-        "hierarchyData": {
-            "type": "dict",
-            "label": "Shot Template Keywords",
-            "target": "tag",
-            "order": 1,
-            "value": {
-                "folder": {
-                    "value": "shots",
-                    "type": "QLineEdit",
-                    "label": "{folder}",
-                    "target": "tag",
-                    "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 0},
-                "episode": {
-                    "value": "ep01",
-                    "type": "QLineEdit",
-                    "label": "{episode}",
-                    "target": "tag",
-                    "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 1},
-                "sequence": {
-                    "value": "sq01",
-                    "type": "QLineEdit",
-                    "label": "{sequence}",
-                    "target": "tag",
-                    "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 2},
-                "track": {
-                    "value": "{_track_}",
-                    "type": "QLineEdit",
-                    "label": "{track}",
-                    "target": "tag",
-                    "toolTip": "Name of track used in shot hierarchy.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
-                    "order": 3},
-                "shot": {
-                    "value": "sh###",
-                    "type": "QLineEdit",
-                    "label": "{shot}",
-                    "target": "tag",
-                    "toolTip": "Name of shot. `#` is converted to a padded number. 
\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 4} - } - }, - "verticalSync": { - "type": "section", - "label": "Vertical Synchronization Of Attributes", - "target": "ui", - "order": 2, - "value": { - "vSyncOn": { - "value": True, - "type": "QCheckBox", - "label": "Enable Vertical Sync", - "target": "ui", - "toolTip": "Switch on if you want clips above each other to share its attributes", # noqa - "order": 0}, - "vSyncTrack": { - "value": gui_tracks, # noqa - "type": "QComboBox", - "label": "Hero track", - "target": "ui", - "toolTip": "Select driving track name which should be hero for all others", # noqa - "order": 1} - } - }, - "publishSettings": { - "type": "section", - "label": "Publish Settings", - "target": "ui", - "order": 3, - "value": { - "subsetName": { - "value": ["", "main", "bg", "fg", "bg", - "animatic"], - "type": "QComboBox", - "label": "Subset Name", - "target": "ui", - "toolTip": "chose subset name pattern, if is selected, name of track layer will be used", # noqa - "order": 0}, - "subsetFamily": { - "value": ["plate", "take"], - "type": "QComboBox", - "label": "Subset Family", - "target": "ui", "toolTip": "What use of this subset is for", # noqa - "order": 1}, - "reviewTrack": { - "value": ["< none >"] + gui_tracks, - "type": "QComboBox", - "label": "Use Review Track", - "target": "ui", - "toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa - "order": 2}, - "audio": { - "value": False, - "type": "QCheckBox", - "label": "Include audio", - "target": "tag", - "toolTip": "Process subsets with corresponding audio", # noqa - "order": 3}, - "sourceResolution": { - "value": False, - "type": "QCheckBox", - "label": "Source resolution", - "target": "tag", - "toolTip": "Is resloution taken from timeline or source?", # noqa - "order": 4}, - } - }, - "frameRangeAttr": { - "type": "section", - "label": "Shot Attributes", - "target": "ui", - "order": 4, - "value": { - "workfileFrameStart": { - "value": 1001, - "type": "QSpinBox", - "label": "Workfiles Start Frame", - "target": "tag", - "toolTip": "Set workfile starting frame number", # noqa - "order": 0 - }, - "handleStart": { - "value": 0, - "type": "QSpinBox", - "label": "Handle Start", - "target": "tag", - "toolTip": "Handle at start of clip", # noqa - "order": 1 - }, - "handleEnd": { - "value": 0, - "type": "QSpinBox", - "label": "Handle End", - "target": "tag", - "toolTip": "Handle at end of clip", # noqa - "order": 2 - } - } - } - } - - presets = None - - def process(self): - # Creator copy of object attributes that are modified during `process` - presets = deepcopy(self.presets) - gui_inputs = deepcopy(self.gui_inputs) - - # get key pares from presets and match it on ui inputs - for k, v in gui_inputs.items(): - if v["type"] in ("dict", "section"): - # nested dictionary (only one level allowed - # for sections and dict) - for _k, _v in v["value"].items(): - if presets.get(_k): - gui_inputs[k][ - "value"][_k]["value"] = presets[_k] - if presets.get(k): - gui_inputs[k]["value"] = presets[k] - - # open widget for plugins inputs - widget = self.widget(self.gui_name, self.gui_info, gui_inputs) - widget.exec_() - - if len(self.selected) < 1: - return - - if not widget.result: - print("Operation aborted") - return - - self.rename_add = 0 - - # get ui output for track name for vertical sync - v_sync_track = widget.result["vSyncTrack"]["value"] - - # 
sort selected trackItems by - sorted_selected_track_items = list() - unsorted_selected_track_items = list() - for _ti in self.selected: - if _ti.parent().name() in v_sync_track: - sorted_selected_track_items.append(_ti) - else: - unsorted_selected_track_items.append(_ti) - - sorted_selected_track_items.extend(unsorted_selected_track_items) - - kwargs = { - "ui_inputs": widget.result, - "avalon": self.data - } - - for i, track_item in enumerate(sorted_selected_track_items): - self.rename_index = i - - # convert track item to timeline media pool item - phiero.PublishClip(self, track_item, **kwargs).convert() diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py deleted file mode 100644 index 05bd12d185..0000000000 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ /dev/null @@ -1,223 +0,0 @@ -from openpype.client import ( - get_version_by_id, - get_last_version_by_subset_id -) -from openpype.pipeline import ( - get_representation_path, - get_current_project_name, -) -from openpype.lib.transcoding import ( - VIDEO_EXTENSIONS, - IMAGE_EXTENSIONS -) -import openpype.hosts.hiero.api as phiero - - -class LoadClip(phiero.SequenceLoader): - """Load a subset to timeline as clip - - Place clip to timeline on its asset origin timings collected - during conforming to project - """ - - families = ["render2d", "source", "plate", "render", "review"] - representations = ["*"] - extensions = set( - ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) - ) - - label = "Load as clip" - order = -10 - icon = "code-fork" - color = "orange" - - # for loader multiselection - sequence = None - track = None - - # presets - clip_color_last = "green" - clip_color = "red" - - clip_name_template = "{asset}_{subset}_{representation}" - - @classmethod - def apply_settings(cls, project_settings, system_settings): - plugin_type_settings = ( - project_settings - .get("hiero", {}) - .get("load", {}) - ) - - if not plugin_type_settings: - return - - plugin_name = cls.__name__ - - plugin_settings = None - # Look for plugin settings in host specific settings - if plugin_name in plugin_type_settings: - plugin_settings = plugin_type_settings[plugin_name] - - if not plugin_settings: - return - - print(">>> We have preset for {}".format(plugin_name)) - for option, value in plugin_settings.items(): - if option == "enabled" and value is False: - print(" - is disabled by preset") - elif option == "representations": - continue - else: - print(" - setting `{}`: `{}`".format(option, value)) - setattr(cls, option, value) - - - def load(self, context, name, namespace, options): - # add clip name template to options - options.update({ - "clipNameTemplate": self.clip_name_template - }) - # in case loader uses multiselection - if self.track and self.sequence: - options.update({ - "sequence": self.sequence, - "track": self.track, - "clipNameTemplate": self.clip_name_template - }) - - # load clip to timeline and get main variables - path = self.filepath_from_context(context) - track_item = phiero.ClipLoader(self, context, path, **options).load() - namespace = namespace or track_item.name() - version = context['version'] - version_data = version.get("data", {}) - version_name = version.get("name", None) - colorspace = version_data.get("colorspace", None) - object_name = self.clip_name_template.format( - **context["representation"]["context"]) - - # set colorspace - if colorspace: - track_item.source().setSourceMediaColourTransform(colorspace) - - # add additional metadata 
from the version to imprint Avalon knob - add_keys = [ - "frameStart", "frameEnd", "source", "author", - "fps", "handleStart", "handleEnd" - ] - - # move all version data keys to tag data - data_imprint = {} - for key in add_keys: - data_imprint.update({ - key: version_data.get(key, str(None)) - }) - - # add variables related to version context - data_imprint.update({ - "version": version_name, - "colorspace": colorspace, - "objectName": object_name - }) - - # update color of clip regarding the version order - self.set_item_color(track_item, version) - - # deal with multiselection - self.multiselection(track_item) - - self.log.info("Loader done: `{}`".format(name)) - - return phiero.containerise( - track_item, - name, namespace, context, - self.__class__.__name__, - data_imprint) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """ Updating previously loaded clips - """ - - # load clip to timeline and get main variables - name = container['name'] - namespace = container['namespace'] - track_item = phiero.get_track_items( - track_item_name=namespace).pop() - - project_name = get_current_project_name() - version_doc = get_version_by_id(project_name, representation["parent"]) - - version_data = version_doc.get("data", {}) - version_name = version_doc.get("name", None) - colorspace = version_data.get("colorspace", None) - object_name = "{}_{}".format(name, namespace) - file = get_representation_path(representation).replace("\\", "/") - clip = track_item.source() - - # reconnect media to new path - clip.reconnectMedia(file) - - # set colorspace - if colorspace: - clip.setSourceMediaColourTransform(colorspace) - - # add additional metadata from the version to imprint Avalon knob - add_keys = [ - "frameStart", "frameEnd", "source", "author", - "fps", "handleStart", "handleEnd" - ] - - # move all version data keys to tag data - data_imprint = {} - for key in add_keys: - data_imprint.update({ - key: version_data.get(key, str(None)) - }) - - # add variables related to version context - data_imprint.update({ - "representation": str(representation["_id"]), - "version": version_name, - "colorspace": colorspace, - "objectName": object_name - }) - - # update color of clip regarding the version order - self.set_item_color(track_item, version_doc) - - return phiero.update_container(track_item, data_imprint) - - def remove(self, container): - """ Removing previously loaded clips - """ - # load clip to timeline and get main variables - namespace = container['namespace'] - track_item = phiero.get_track_items( - track_item_name=namespace).pop() - track = track_item.parent() - - # remove track item from track - track.removeItem(track_item) - - @classmethod - def multiselection(cls, track_item): - if not cls.track: - cls.track = track_item.parent() - cls.sequence = cls.track.parent() - - @classmethod - def set_item_color(cls, track_item, version_doc): - project_name = get_current_project_name() - last_version_doc = get_last_version_by_subset_id( - project_name, version_doc["parent"], fields=["_id"] - ) - clip = track_item.source() - # set clip colour - if version_doc["_id"] == last_version_doc["_id"]: - clip.binItem().setColor(cls.clip_color_last) - else: - clip.binItem().setColor(cls.clip_color) diff --git a/openpype/hosts/hiero/plugins/load/load_effects.py b/openpype/hosts/hiero/plugins/load/load_effects.py deleted file mode 100644 index 31147d013f..0000000000 --- a/openpype/hosts/hiero/plugins/load/load_effects.py +++ 
/dev/null @@ -1,310 +0,0 @@ -import json -from collections import OrderedDict -import six - -from openpype.client import ( - get_version_by_id -) - -from openpype.pipeline import ( - AVALON_CONTAINER_ID, - load, - get_representation_path, - get_current_project_name -) -from openpype.hosts.hiero import api as phiero -from openpype.lib import Logger - - -class LoadEffects(load.LoaderPlugin): - """Loading colorspace soft effect exported from nukestudio""" - - families = ["effect"] - representations = ["*"] - extension = {"json"} - - label = "Load Effects" - order = 0 - icon = "cc" - color = "white" - - log = Logger.get_logger(__name__) - - def load(self, context, name, namespace, data): - """ - Loading function to get the soft effects to particular read node - - Arguments: - context (dict): context of version - name (str): name of the version - namespace (str): asset name - data (dict): compulsory attribute > not used - - Returns: - nuke node: containerised nuke node object - """ - active_sequence = phiero.get_current_sequence() - active_track = phiero.get_current_track( - active_sequence, "Loaded_{}".format(name)) - - # get main variables - namespace = namespace or context["asset"]["name"] - object_name = "{}_{}".format(name, namespace) - clip_in = context["asset"]["data"]["clipIn"] - clip_out = context["asset"]["data"]["clipOut"] - - data_imprint = { - "objectName": object_name, - "children_names": [] - } - - # getting file path - file = self.filepath_from_context(context) - file = file.replace("\\", "/") - - if self._shared_loading( - file, - active_track, - clip_in, - clip_out, - data_imprint - ): - self.containerise( - active_track, - name=name, - namespace=namespace, - object_name=object_name, - context=context, - loader=self.__class__.__name__, - data=data_imprint) - - def _shared_loading( - self, - file, - active_track, - clip_in, - clip_out, - data_imprint, - update=False - ): - # getting data from json file with unicode conversion - with open(file, "r") as f: - json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).items()} - - # get correct order of nodes by positions on track and subtrack - nodes_order = self.reorder_nodes(json_f) - - used_subtracks = { - stitem.name(): stitem - for stitem in phiero.flatten(active_track.subTrackItems()) - } - - loaded = False - for index_order, (ef_name, ef_val) in enumerate(nodes_order.items()): - new_name = "{}_loaded".format(ef_name) - if new_name not in used_subtracks: - effect_track_item = active_track.createEffect( - effectType=ef_val["class"], - timelineIn=clip_in, - timelineOut=clip_out, - subTrackIndex=index_order - - ) - effect_track_item.setName(new_name) - else: - effect_track_item = used_subtracks[new_name] - - node = effect_track_item.node() - for knob_name, knob_value in ef_val["node"].items(): - if ( - not knob_value - or knob_name == "name" - ): - continue - - try: - # assume list means animation - # except 4 values could be RGBA or vector - if isinstance(knob_value, list) and len(knob_value) > 4: - node[knob_name].setAnimated() - for i, value in enumerate(knob_value): - if isinstance(value, list): - # list can have vector animation - for ci, cv in enumerate(value): - node[knob_name].setValueAt( - cv, - (clip_in + i), - ci - ) - else: - # list is single values - node[knob_name].setValueAt( - value, - (clip_in + i) - ) - else: - node[knob_name].setValue(knob_value) - except NameError: - self.log.warning("Knob: {} cannot be set".format( - knob_name)) - - # register all loaded children - 
data_imprint["children_names"].append(new_name) - - # make sure containerisation will happen - loaded = True - - return loaded - - def update(self, container, representation): - """ Updating previously loaded effects - """ - active_track = container["_item"] - file = get_representation_path(representation).replace("\\", "/") - - # get main variables - name = container['name'] - namespace = container['namespace'] - - # get timeline in out data - project_name = get_current_project_name() - version_doc = get_version_by_id(project_name, representation["parent"]) - version_data = version_doc["data"] - clip_in = version_data["clipIn"] - clip_out = version_data["clipOut"] - - object_name = "{}_{}".format(name, namespace) - - # Disable previously created nodes - used_subtracks = { - stitem.name(): stitem - for stitem in phiero.flatten(active_track.subTrackItems()) - } - container = phiero.get_track_openpype_data( - active_track, object_name - ) - - loaded_subtrack_items = container["children_names"] - for loaded_stitem in loaded_subtrack_items: - if loaded_stitem not in used_subtracks: - continue - item_to_remove = used_subtracks.pop(loaded_stitem) - # TODO: find a way to erase nodes - self.log.debug( - "This node needs to be removed: {}".format(item_to_remove)) - - data_imprint = { - "objectName": object_name, - "name": name, - "representation": str(representation["_id"]), - "children_names": [] - } - - if self._shared_loading( - file, - active_track, - clip_in, - clip_out, - data_imprint, - update=True - ): - return phiero.update_container(active_track, data_imprint) - - def reorder_nodes(self, data): - new_order = OrderedDict() - trackNums = [v["trackIndex"] for k, v in data.items() - if isinstance(v, dict)] - subTrackNums = [v["subTrackIndex"] for k, v in data.items() - if isinstance(v, dict)] - - for trackIndex in range( - min(trackNums), max(trackNums) + 1): - for subTrackIndex in range( - min(subTrackNums), max(subTrackNums) + 1): - item = self.get_item(data, trackIndex, subTrackIndex) - if item is not {}: - new_order.update(item) - return new_order - - def get_item(self, data, trackIndex, subTrackIndex): - return {key: val for key, val in data.items() - if isinstance(val, dict) - if subTrackIndex == val["subTrackIndex"] - if trackIndex == val["trackIndex"]} - - def byteify(self, input): - """ - Converts unicode strings to strings - It goes through all dictionary - - Arguments: - input (dict/str): input - - Returns: - dict: with fixed values and keys - - """ - - if isinstance(input, dict): - return {self.byteify(key): self.byteify(value) - for key, value in input.items()} - elif isinstance(input, list): - return [self.byteify(element) for element in input] - elif isinstance(input, six.text_type): - return str(input) - else: - return input - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - pass - - def containerise( - self, - track, - name, - namespace, - object_name, - context, - loader=None, - data=None - ): - """Bundle Hiero's object into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - track (hiero.core.VideoTrack): object to imprint as container - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - object_name (str): name of container - context (dict): Asset information - loader (str, optional): Name of node used to produce this - container. 
- - Returns: - track_item (hiero.core.TrackItem): containerised object - - """ - - data_imprint = { - object_name: { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": str(name), - "namespace": str(namespace), - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - } - } - - if data: - for k, v in data.items(): - data_imprint[object_name].update({k: v}) - - self.log.debug("_ data_imprint: {}".format(data_imprint)) - phiero.set_track_openpype_tag(track, data_imprint) diff --git a/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py b/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py deleted file mode 100644 index 5ca79dc1dc..0000000000 --- a/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import pyblish.api - -from openpype.pipeline import publish - - -class ExtractThumnail(publish.Extractor): - """ - Extractor for track item's tumnails - """ - - label = "Extract Thumnail" - order = pyblish.api.ExtractorOrder - families = ["plate", "take"] - hosts = ["hiero"] - - def process(self, instance): - # create representation data - if "representations" not in instance.data: - instance.data["representations"] = [] - - staging_dir = self.staging_dir(instance) - - self.create_thumbnail(staging_dir, instance) - - def create_thumbnail(self, staging_dir, instance): - track_item = instance.data["item"] - track_item_name = track_item.name() - - # frames - duration = track_item.sourceDuration() - frame_start = track_item.sourceIn() - self.log.debug( - "__ frame_start: `{}`, duration: `{}`".format( - frame_start, duration)) - - # get thumbnail frame from the middle - thumb_frame = int(frame_start + (duration / 2)) - - thumb_file = "{}thumbnail{}{}".format( - track_item_name, thumb_frame, ".png") - thumb_path = os.path.join(staging_dir, thumb_file) - - thumbnail = track_item.thumbnail(thumb_frame, "colour").save( - thumb_path, - format='png' - ) - self.log.debug( - "__ thumb_path: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) - - self.log.info("Thumnail was generated to: {}".format(thumb_path)) - thumb_representation = { - 'files': thumb_file, - 'stagingDir': staging_dir, - 'name': "thumbnail", - 'thumbnail': True, - 'ext': "png" - } - instance.data["representations"].append( - thumb_representation) diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py deleted file mode 100644 index 590d7b7050..0000000000 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ /dev/null @@ -1,428 +0,0 @@ -import pyblish - -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline.editorial import is_overlapping_otio_ranges - -from openpype.hosts.hiero import api as phiero -from openpype.hosts.hiero.api.otio import hiero_export - -import hiero -# # developer reload modules -from pprint import pformat - - -class PrecollectInstances(pyblish.api.ContextPlugin): - """Collect all Track items selection.""" - - order = pyblish.api.CollectorOrder - 0.49 - label = "Precollect Instances" - hosts = ["hiero"] - - audio_track_items = [] - - def process(self, context): - self.otio_timeline = context.data["otioTimeline"] - timeline_selection = phiero.get_timeline_selection() - selected_timeline_items = phiero.get_track_items( - selection=timeline_selection, - check_tagged=True, - check_enabled=True - ) - - # only return enabled track items - if not selected_timeline_items: - selected_timeline_items = 
phiero.get_track_items( - check_enabled=True, check_tagged=True) - - self.log.info( - "Processing enabled track items: {}".format( - selected_timeline_items)) - - # add all tracks subtreck effect items to context - all_tracks = hiero.ui.activeSequence().videoTracks() - tracks_effect_items = self.collect_sub_track_items(all_tracks) - context.data["tracksEffectItems"] = tracks_effect_items - - # process all sellected timeline track items - for track_item in selected_timeline_items: - data = {} - clip_name = track_item.name() - source_clip = track_item.source() - self.log.debug("clip_name: {}".format(clip_name)) - - # get openpype tag data - tag_data = phiero.get_trackitem_openpype_data(track_item) - self.log.debug("__ tag_data: {}".format(pformat(tag_data))) - - if not tag_data: - continue - - if tag_data.get("id") != "pyblish.avalon.instance": - continue - - # get clips subtracks and anotations - annotations = self.clip_annotations(source_clip) - subtracks = self.clip_subtrack(track_item) - self.log.debug("Annotations: {}".format(annotations)) - self.log.debug(">> Subtracks: {}".format(subtracks)) - - # solve handles length - tag_data["handleStart"] = min( - tag_data["handleStart"], int(track_item.handleInLength())) - tag_data["handleEnd"] = min( - tag_data["handleEnd"], int(track_item.handleOutLength())) - - # add audio to families - with_audio = False - if tag_data.pop("audio"): - with_audio = True - - # add tag data to instance data - data.update({ - k: v for k, v in tag_data.items() - if k not in ("id", "applieswhole", "label") - }) - - asset, asset_name = self._get_asset_data(tag_data) - - subset = tag_data["subset"] - - # insert family into families - families = [str(f) for f in tag_data["families"]] - - # form label - label = "{} -".format(asset) - if asset_name != clip_name: - label += " ({})".format(clip_name) - label += " {}".format(subset) - - data.update({ - "name": "{}_{}".format(asset, subset), - "label": label, - "asset": asset, - "asset_name": asset_name, - "item": track_item, - "families": families, - "publish": tag_data["publish"], - "fps": context.data["fps"], - - # clip's effect - "clipEffectItems": subtracks, - "clipAnnotations": annotations, - - # add all additional tags - "tags": phiero.get_track_item_tags(track_item), - "newAssetPublishing": True - }) - - # otio clip data - otio_data = self.get_otio_clip_instance_data(track_item) or {} - self.log.debug("__ otio_data: {}".format(pformat(otio_data))) - data.update(otio_data) - self.log.debug("__ data: {}".format(pformat(data))) - - # add resolution - self.get_resolution_to_data(data, context) - - # create instance - instance = context.create_instance(**data) - - # add colorspace data - instance.data.update({ - "versionData": { - "colorspace": track_item.sourceMediaColourTransform(), - } - }) - - # create shot instance for shot attributes create/update - self.create_shot_instance(context, **data) - - self.log.info("Creating instance: {}".format(instance)) - self.log.info( - "_ instance.data: {}".format(pformat(instance.data))) - - if not with_audio: - continue - - # create audio subset instance - self.create_audio_instance(context, **data) - - # add audioReview attribute to plate instance data - # if reviewTrack is on - if tag_data.get("reviewTrack") is not None: - instance.data["reviewAudio"] = True - - def get_resolution_to_data(self, data, context): - assert data.get("otioClip"), "Missing `otioClip` data" - - # solve source resolution option - if data.get("sourceResolution", None): - otio_clip_metadata = data[ - 
"otioClip"].media_reference.metadata - data.update({ - "resolutionWidth": otio_clip_metadata[ - "openpype.source.width"], - "resolutionHeight": otio_clip_metadata[ - "openpype.source.height"], - "pixelAspect": otio_clip_metadata[ - "openpype.source.pixelAspect"] - }) - else: - otio_tl_metadata = context.data["otioTimeline"].metadata - data.update({ - "resolutionWidth": otio_tl_metadata["openpype.timeline.width"], - "resolutionHeight": otio_tl_metadata[ - "openpype.timeline.height"], - "pixelAspect": otio_tl_metadata[ - "openpype.timeline.pixelAspect"] - }) - - def create_shot_instance(self, context, **data): - subset = "shotMain" - master_layer = data.get("heroTrack") - hierarchy_data = data.get("hierarchyData") - item = data.get("item") - clip_name = item.name() - - if not master_layer: - return - - if not hierarchy_data: - return - - asset = data["asset"] - asset_name = data["asset_name"] - - # insert family into families - family = "shot" - - # form label - label = "{} -".format(asset) - if asset_name != clip_name: - label += " ({}) ".format(clip_name) - label += " {}".format(subset) - - data.update({ - "name": "{}_{}".format(asset, subset), - "label": label, - "subset": subset, - "family": family, - "families": [] - }) - - instance = context.create_instance(**data) - self.log.info("Creating instance: {}".format(instance)) - self.log.debug( - "_ instance.data: {}".format(pformat(instance.data))) - - def _get_asset_data(self, data): - folder_path = data.pop("folderPath", None) - - if data.get("asset_name"): - asset_name = data["asset_name"] - else: - asset_name = data["asset"] - - # backward compatibility for clip tags - # which are missing folderPath key - # TODO remove this in future versions - if not folder_path: - hierarchy_path = data["hierarchy"] - folder_path = "/{}/{}".format( - hierarchy_path, - asset_name - ) - - if AYON_SERVER_ENABLED: - asset = folder_path - else: - asset = asset_name - - return asset, asset_name - - def create_audio_instance(self, context, **data): - subset = "audioMain" - master_layer = data.get("heroTrack") - - if not master_layer: - return - - asset = data.get("asset") - item = data.get("item") - clip_name = item.name() - - # test if any audio clips - if not self.test_any_audio(item): - return - - asset = data["asset"] - asset_name = data["asset_name"] - - # insert family into families - family = "audio" - - # form label - label = "{} -".format(asset) - if asset_name != clip_name: - label += " ({}) ".format(clip_name) - label += " {}".format(subset) - - data.update({ - "name": "{}_{}".format(asset, subset), - "label": label, - "subset": subset, - "family": family, - "families": ["clip"] - }) - # remove review track attr if any - data.pop("reviewTrack") - - # create instance - instance = context.create_instance(**data) - self.log.info("Creating instance: {}".format(instance)) - self.log.debug( - "_ instance.data: {}".format(pformat(instance.data))) - - def test_any_audio(self, track_item): - # collect all audio tracks to class variable - if not self.audio_track_items: - for otio_clip in self.otio_timeline.each_clip(): - if otio_clip.parent().kind != "Audio": - continue - self.audio_track_items.append(otio_clip) - - # get track item timeline range - timeline_range = self.create_otio_time_range_from_timeline_item_data( - track_item) - - # loop through audio track items and search for overlapping clip - for otio_audio in self.audio_track_items: - parent_range = otio_audio.range_in_parent() - - # if any overaling clip found then return True - if 
is_overlapping_otio_ranges( - parent_range, timeline_range, strict=False): - return True - - def get_otio_clip_instance_data(self, track_item): - """ - Return otio clip object from the otio timeline matching track item - - Args: - track_item (hiero.core.TrackItem): the processed track item - - Returns: - dict: otio clip object under "otioClip" key, or None - - """ - ti_track_name = track_item.parent().name() - timeline_range = self.create_otio_time_range_from_timeline_item_data( - track_item) - for otio_clip in self.otio_timeline.each_clip(): - track_name = otio_clip.parent().name - parent_range = otio_clip.range_in_parent() - if ti_track_name != track_name: - continue - if otio_clip.name != track_item.name(): - continue - self.log.debug("__ parent_range: {}".format(parent_range)) - self.log.debug("__ timeline_range: {}".format(timeline_range)) - if is_overlapping_otio_ranges( - parent_range, timeline_range, strict=True): - - # add openpype tag marker data to otio_clip metadata - for marker in otio_clip.markers: - if phiero.OPENPYPE_TAG_NAME in marker.name: - otio_clip.metadata.update(marker.metadata) - return {"otioClip": otio_clip} - - return None - - @staticmethod - def create_otio_time_range_from_timeline_item_data(track_item): - timeline = phiero.get_current_sequence() - frame_start = int(track_item.timelineIn()) - frame_duration = int(track_item.duration()) - fps = timeline.framerate().toFloat() - - return hiero_export.create_otio_time_range( - frame_start, frame_duration, fps) - - def collect_sub_track_items(self, tracks): - """ - Returns dictionary with track index as key and list of subtracks - """ - # collect all subtrack items - sub_track_items = {} - for track in tracks: - items = track.items() - - effect_items = track.subTrackItems() - - # skip if no effect items on track > need tracks with effects only - if not effect_items: - continue - - # skip all disabled tracks - if not track.isEnabled(): - continue - - track_index = track.trackIndex() - _sub_track_items = phiero.flatten(effect_items) - - _sub_track_items = list(_sub_track_items) - # continue only if any subtrack items are collected - if not _sub_track_items: - continue - - enabled_sti = [] - # loop all found subtrack items and check if they are enabled - for _sti in _sub_track_items: - # checking if not enabled - if not _sti.isEnabled(): - continue - if isinstance(_sti, hiero.core.Annotation): - continue - # collect the subtrack item - enabled_sti.append(_sti) - - # continue only if any subtrack items are collected - if not enabled_sti: - continue - - # add collection of subtrack items to dict - sub_track_items[track_index] = enabled_sti - - return sub_track_items - - @staticmethod - def clip_annotations(clip): - """ - Returns list of Clip's hiero.core.Annotation - """ - annotations = [] - subTrackItems = phiero.flatten(clip.subTrackItems()) - annotations += [item for item in subTrackItems if isinstance( - item, hiero.core.Annotation)] - return annotations - - @staticmethod - def clip_subtrack(clip): - """ - Returns list of Clip's hiero.core.SubTrackItem - """ - subtracks = [] - subTrackItems = phiero.flatten(clip.parent().subTrackItems()) - for item in subTrackItems: - if "TimeWarp" in item.name(): - continue - # avoid all annotations - if isinstance(item, hiero.core.Annotation): - continue - # avoid all items that are not enabled - if not item.isEnabled(): - continue - subtracks.append(item) - return subtracks diff --git a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py 
b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py deleted file mode 100644 index 8abb0885c6..0000000000 --- a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -import tempfile -from pprint import pformat - -import pyblish.api -from qtpy.QtGui import QPixmap - -import hiero.ui - -from openpype import AYON_SERVER_ENABLED -from openpype.hosts.hiero.api.otio import hiero_export - - -class PrecollectWorkfile(pyblish.api.ContextPlugin): - """Inject the current working file into context""" - - label = "Precollect Workfile" - order = pyblish.api.CollectorOrder - 0.491 - - def process(self, context): - asset = context.data["asset"] - asset_name = asset - if AYON_SERVER_ENABLED: - asset_name = asset_name.split("/")[-1] - - active_timeline = hiero.ui.activeSequence() - project = active_timeline.project() - fps = active_timeline.framerate().toFloat() - - # adding otio timeline to context - otio_timeline = hiero_export.create_otio_timeline() - - # get workfile thumbnail paths - tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") - thumbnail_name = "workfile_thumbnail.png" - thumbnail_path = os.path.join(tmp_staging, thumbnail_name) - - # search for all windows with name of the active sequence - _windows = [w for w in hiero.ui.windowManager().windows() - if active_timeline.name() in w.windowTitle()] - - # export window to thumb path (QWidget.grab; QPixmap.grabWidget is Qt4-only) - _windows[-1].grab().save(thumbnail_path, 'png') - - # thumbnail - thumb_representation = { - 'files': thumbnail_name, - 'stagingDir': tmp_staging, - 'name': "thumbnail", - 'thumbnail': True, - 'ext': "png" - } - - # get workfile paths - current_file = project.path() - staging_dir, base_name = os.path.split(current_file) - - # creating workfile representation - workfile_representation = { - 'name': 'hrox', - 'ext': 'hrox', - 'files': base_name, - "stagingDir": staging_dir, - } - family = "workfile" - instance_data = { - "label": "{} - {}Main".format( - asset, family), - "name": "{}_{}".format(asset_name, family), - "asset": context.data["asset"], - # TODO use 'get_subset_name' - "subset": "{}{}Main".format(asset_name, family.capitalize()), - "item": project, - "family": family, - "families": [], - "representations": [workfile_representation, thumb_representation] - } - - # create instance with workfile - instance = context.create_instance(**instance_data) - - # update context with main project attributes - context_data = { - "activeProject": project, - "activeTimeline": active_timeline, - "otioTimeline": otio_timeline, - "currentFile": current_file, - "colorspace": self.get_colorspace(project), - "fps": fps - } - self.log.debug("__ context_data: {}".format(pformat(context_data))) - context.data.update(context_data) - - self.log.info("Creating instance: {}".format(instance)) - self.log.debug("__ instance.data: {}".format(pformat(instance.data))) - self.log.debug("__ context_data: {}".format(pformat(context_data))) - - def get_colorspace(self, project): - # get workfile's colorspace properties - return { - "useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(), - "lutSetting16Bit": project.lutSetting16Bit(), - "lutSetting8Bit": project.lutSetting8Bit(), - "lutSettingFloat": project.lutSettingFloat(), - "lutSettingLog": project.lutSettingLog(), - "lutSettingViewer": project.lutSettingViewer(), - "lutSettingWorkingSpace": project.lutSettingWorkingSpace(), - "lutUseOCIOForExport": project.lutUseOCIOForExport(), - "ocioConfigName": project.ocioConfigName(), - "ocioConfigPath": 
project.ocioConfigPath() - } diff --git a/openpype/hosts/houdini/addon.py b/openpype/hosts/houdini/addon.py deleted file mode 100644 index 80856b0624..0000000000 --- a/openpype/hosts/houdini/addon.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -HOUDINI_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class HoudiniAddon(OpenPypeModule, IHostAddon): - name = "houdini" - host_name = "houdini" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - # Add requirements to HOUDINI_PATH and HOUDINI_MENU_PATH - startup_path = os.path.join(HOUDINI_HOST_DIR, "startup") - new_houdini_path = [startup_path] - new_houdini_menu_path = [startup_path] - - old_houdini_path = env.get("HOUDINI_PATH") or "" - old_houdini_menu_path = env.get("HOUDINI_MENU_PATH") or "" - - for path in old_houdini_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_houdini_path: - new_houdini_path.append(norm_path) - - for path in old_houdini_menu_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_houdini_menu_path: - new_houdini_menu_path.append(norm_path) - - # Add ampersand, which Houdini expands to its own default path value - new_houdini_path.append("&") - new_houdini_menu_path.append("&") - - env["HOUDINI_PATH"] = os.pathsep.join(new_houdini_path) - env["HOUDINI_MENU_PATH"] = os.pathsep.join(new_houdini_menu_path) - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(HOUDINI_HOST_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".hip", ".hiplc", ".hipnc"] diff --git a/openpype/hosts/houdini/api/action.py b/openpype/hosts/houdini/api/action.py deleted file mode 100644 index 77966d6d5c..0000000000 --- a/openpype/hosts/houdini/api/action.py +++ /dev/null @@ -1,83 +0,0 @@ -import pyblish.api -import hou - -from openpype.pipeline.publish import get_errored_instances_from_context - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid nodes in Houdini when plug-in failed. - - To retrieve the invalid nodes this assumes a static `get_invalid()` - method is available on the plugin. - - """ - label = "Select invalid" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Font Awesome - - def process(self, context, plugin): - - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes..") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning("Plug-in returned invalid, " - "but has no selectable nodes.") - - hou.clearAllSelected() - if invalid: - self.log.info("Selecting invalid nodes: {}".format( - ", ".join(node.path() for node in invalid) - )) - for node in invalid: - node.setSelected(True) - node.setCurrent(True) - else: - self.log.info("No invalid nodes found.") - - -class SelectROPAction(pyblish.api.Action): - """Select ROP. - - It is used to select the ROPs associated with the errored instances. 
- """ - - label = "Select ROP" - on = "failed" # This action is only available on a failed plug-in - icon = "mdi.cursor-default-click" - - def process(self, context, plugin): - errored_instances = get_errored_instances_from_context(context, plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding ROP nodes..") - rop_nodes = list() - for instance in errored_instances: - node_path = instance.data.get("instance_node") - if not node_path: - continue - - node = hou.node(node_path) - if not node: - continue - - rop_nodes.append(node) - - hou.clearAllSelected() - if rop_nodes: - self.log.info("Selecting ROP nodes: {}".format( - ", ".join(node.path() for node in rop_nodes) - )) - for node in rop_nodes: - node.setSelected(True) - node.setCurrent(True) - else: - self.log.info("No ROP nodes found.") diff --git a/openpype/hosts/houdini/api/colorspace.py b/openpype/hosts/houdini/api/colorspace.py deleted file mode 100644 index cc40b9df1c..0000000000 --- a/openpype/hosts/houdini/api/colorspace.py +++ /dev/null @@ -1,69 +0,0 @@ -import attr -import hou -from openpype.hosts.houdini.api.lib import get_color_management_preferences -from openpype.pipeline.colorspace import get_display_view_colorspace_name - -@attr.s -class LayerMetadata(object): - """Data class for Render Layer metadata.""" - frameStart = attr.ib() - frameEnd = attr.ib() - - -@attr.s -class RenderProduct(object): - """Getting Colorspace as - Specific Render Product Parameter for submitting - publish job. - - """ - colorspace = attr.ib() # colorspace - view = attr.ib() - productName = attr.ib(default=None) - - -class ARenderProduct(object): - - def __init__(self): - """Constructor.""" - # Initialize - self.layer_data = self._get_layer_data() - self.layer_data.products = self.get_colorspace_data() - - def _get_layer_data(self): - return LayerMetadata( - frameStart=int(hou.playbar.frameRange()[0]), - frameEnd=int(hou.playbar.frameRange()[1]), - ) - - def get_colorspace_data(self): - """To be implemented by renderer class. - - This should return a list of RenderProducts. - - Returns: - list: List of RenderProduct - - """ - data = get_color_management_preferences() - colorspace_data = [ - RenderProduct( - colorspace=data["display"], - view=data["view"], - productName="" - ) - ] - return colorspace_data - - -def get_default_display_view_colorspace(): - """Returns the colorspace attribute of the default (display, view) pair. 
- - It's used for 'ociocolorspace' parm in OpenGL Node.""" - - prefs = get_color_management_preferences() - return get_display_view_colorspace_name( - config_path=prefs["config"], - display=prefs["display"], - view=prefs["view"] - ) diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py deleted file mode 100644 index edd50f10c1..0000000000 --- a/openpype/hosts/houdini/api/lib.py +++ /dev/null @@ -1,1056 +0,0 @@ -# -*- coding: utf-8 -*- -import sys -import os -import errno -import re -import uuid -import logging -from contextlib import contextmanager -import json - -import six - -from openpype.lib import StringTemplate -from openpype.client import get_project, get_asset_by_name -from openpype.settings import get_current_project_settings -from openpype.pipeline import ( - Anatomy, - get_current_project_name, - get_current_asset_name, - registered_host, - get_current_context, - get_current_host_name, -) -from openpype.pipeline.create import CreateContext -from openpype.pipeline.template_data import get_template_data -from openpype.pipeline.context_tools import get_current_project_asset -from openpype.widgets import popup -from openpype.tools.utils.host_tools import get_tool_by_name - -import hou - - -self = sys.modules[__name__] -self._parent = None -log = logging.getLogger(__name__) -JSON_PREFIX = "JSON:::" - - -def get_asset_fps(asset_doc=None): - """Return current asset fps.""" - - if asset_doc is None: - asset_doc = get_current_project_asset(fields=["data.fps"]) - return asset_doc["data"]["fps"] - - -def set_id(node, unique_id, overwrite=False): - exists = node.parm("id") - if not exists: - imprint(node, {"id": unique_id}) - - if exists and overwrite: - node.parm("id").set(unique_id) - - -def get_id(node): - """Get the `id` parameter of the given node. - - Args: - node (hou.Node): the node to retrieve the parameter from - - Returns: - hou.Parm: `id` parm of the node, or None when not present. - - """ - - if node is not None: - return node.parm("id") - - -def generate_ids(nodes, asset_id=None): - """Returns new unique ids for the given nodes. - - Note: This does not assign the new ids, it only generates the values. - - To assign new ids using this method: - >>> nodes = ["a", "b", "c"] - >>> for node, id in generate_ids(nodes): - ... set_id(node, id) - - To also override any existing values (and assign regenerated ids): - >>> nodes = ["a", "b", "c"] - >>> for node, id in generate_ids(nodes): - ... set_id(node, id, overwrite=True) - - Args: - nodes (list): List of nodes. - asset_id (str or bson.ObjectId): The database id for the *asset* to - generate for. When None provided the current asset in the - active session is used. - - Returns: - list: A list of (node, id) tuples. 
- - """ - - if asset_id is None: - project_name = get_current_project_name() - asset_name = get_current_asset_name() - # Get the asset ID from the database for the asset of current context - asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) - - assert asset_doc, "No current asset found in Session" - asset_id = asset_doc['_id'] - - node_ids = [] - for node in nodes: - _, uid = str(uuid.uuid4()).rsplit("-", 1) - unique_id = "{}:{}".format(asset_id, uid) - node_ids.append((node, unique_id)) - - return node_ids - - -def get_id_required_nodes(): - - valid_types = ["geometry"] - nodes = {n for n in hou.node("/out").children() if - n.type().name() in valid_types} - - return list(nodes) - - -def get_output_parameter(node): - """Return the render output parameter of the given node - - Example: - root = hou.node("/obj") - my_alembic_node = root.createNode("alembic") - get_output_parameter(my_alembic_node) - >>> "filename" - - Notes: - I'm using node.type().name() to get on par with the creators, - Because the return value of `node.type().name()` is the - same string value used in creators - e.g. instance_data.update({"node_type": "alembic"}) - - Rop nodes in different network categories have - the same output parameter. - So, I took that into consideration as a hint for - future development. - - Args: - node(hou.Node): node instance - - Returns: - hou.Parm - """ - - node_type = node.type().name() - - # Figure out which type of node is being rendered - if node_type in {"alembic", "rop_alembic"}: - return node.parm("filename") - elif node_type == "arnold": - if node_type.evalParm("ar_ass_export_enable"): - return node.parm("ar_ass_file") - return node.parm("ar_picture") - elif node_type in { - "geometry", - "rop_geometry", - "filmboxfbx", - "rop_fbx" - }: - return node.parm("sopoutput") - elif node_type == "comp": - return node.parm("copoutput") - elif node_type in {"karma", "opengl"}: - return node.parm("picture") - elif node_type == "ifd": # Mantra - if node.evalParm("soho_outputmode"): - return node.parm("soho_diskfile") - return node.parm("vm_picture") - elif node_type == "Redshift_Proxy_Output": - return node.parm("RS_archive_file") - elif node_type == "Redshift_ROP": - return node.parm("RS_outputFileNamePrefix") - elif node_type in {"usd", "usd_rop", "usdexport"}: - return node.parm("lopoutput") - elif node_type in {"usdrender", "usdrender_rop"}: - return node.parm("outputimage") - elif node_type == "vray_renderer": - return node.parm("SettingsOutput_img_file_path") - - raise TypeError("Node type '%s' not supported" % node_type) - - -def set_scene_fps(fps): - hou.setFps(fps) - - -# Valid FPS -def validate_fps(): - """Validate current scene FPS and show pop-up when it is incorrect - - Returns: - bool - - """ - - fps = get_asset_fps() - current_fps = hou.fps() # returns float - - if current_fps != fps: - - # Find main window - parent = hou.ui.mainQtWindow() - if parent is None: - pass - else: - dialog = popup.PopupUpdateKeys(parent=parent) - dialog.setModal(True) - dialog.setWindowTitle("Houdini scene does not match project FPS") - dialog.setMessage("Scene %i FPS does not match project %i FPS" % - (current_fps, fps)) - dialog.setButtonText("Fix") - - # on_show is the Fix button clicked callback - dialog.on_clicked_state.connect(lambda: set_scene_fps(fps)) - - dialog.show() - - return False - - return True - - -def create_remote_publish_node(force=True): - """Function to create a remote publish node in /out - - This is a hacked "Shell" node that does *nothing* except for 
triggering - `colorbleed.lib.publish_remote()` as pre-render script. - - All default attributes of the Shell node are hidden from the artist to - avoid confusion. - - Additionally some custom attributes are added that can be collected - by a Collector to set specific settings for the publish, e.g. whether - to separate the jobs per instance or process in one single job. - - """ - - cmd = "import colorbleed.lib; colorbleed.lib.publish_remote()" - - existing = hou.node("/out/REMOTE_PUBLISH") - if existing: - if force: - log.warning("Removing existing '/out/REMOTE_PUBLISH' node..") - existing.destroy() - else: - raise RuntimeError("Node /out/REMOTE_PUBLISH already exists. " - "Please remove manually or set `force` to " - "True.") - - # Create the shell node - out = hou.node("/out") - node = out.createNode("shell", node_name="REMOTE_PUBLISH") - node.moveToGoodPosition() - - # Set color to make it stand out (avalon/pyblish color) - node.setColor(hou.Color(0.439, 0.709, 0.933)) - - # Set the pre-render script - node.setParms({ - "prerender": cmd, - "lprerender": "python" # command language - }) - - # Lock the attributes to ensure artists won't easily mess things up. - node.parm("prerender").lock(True) - node.parm("lprerender").lock(True) - - # Lock up the actual shell command - command_parm = node.parm("command") - command_parm.set("") - command_parm.lock(True) - shellexec_parm = node.parm("shellexec") - shellexec_parm.set(False) - shellexec_parm.lock(True) - - # Get the node's parm template group so we can customize it - template = node.parmTemplateGroup() - - # Hide default tabs - template.hideFolder("Shell", True) - template.hideFolder("Scripts", True) - - # Hide default settings - template.hide("execute", True) - template.hide("renderdialog", True) - template.hide("trange", True) - template.hide("f", True) - template.hide("take", True) - - # Add custom settings to this node. - parm_folder = hou.FolderParmTemplate("folder", "Submission Settings") - - # Separate Jobs per Instance - parm = hou.ToggleParmTemplate(name="separateJobPerInstance", - label="Separate Job per Instance", - default_value=False) - parm_folder.addParmTemplate(parm) - - # Add our custom Submission Settings folder - template.append(parm_folder) - - # Apply template back to the node - node.setParmTemplateGroup(template) - - -def render_rop(ropnode): - """Render ROP node utility for Publishing. - - This renders a ROP node with the settings we want during Publishing. - """ - # Print verbose when in batch mode without UI - verbose = not hou.isUIAvailable() - - # Render - try: - ropnode.render(verbose=verbose, - # Allow Deadline to capture completion percentage - output_progress=verbose) - except hou.Error as exc: - # hou.Error does not inherit from a Python Exception class, - # so we explicitly capture the houdini error, otherwise pyblish - # will remain hanging. - import traceback - traceback.print_exc() - raise RuntimeError("Render failed: {0}".format(exc)) - - -def imprint(node, data, update=False): - """Store attributes with value on a node - - Depending on the type of attribute it creates the correct parameter - template. Houdini uses a template per type, see the docs for more - information. - - http://www.sidefx.com/docs/houdini/hom/hou/ParmTemplate.html - - Because of an update glitch where existing ParmTemplates on a node - cannot be overwritten using: - `setParmTemplates()` and `parmTuplesInFolder()` - the update is done in another pass. 
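- - Example (editor's sketch, values are hypothetical): - >>> imprint(node, {"id": "pyblish.avalon.instance", "active": True}) - would add one StringParmTemplate and one ToggleParmTemplate under - the node's "Extra" spare parameter folder. 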
- - Args: - node(hou.Node): node object from Houdini - data(dict): collection of attributes and their value - update (bool, optional): flag if imprint should update - already existing data or leave them untouched and only - add new. - - Returns: - None - - """ - if not data: - return - if not node: - log.error("Node is not set, calling imprint on invalid data.") - return - - current_parms = {p.name(): p for p in node.spareParms()} - update_parm_templates = [] - new_parm_templates = [] - - for key, value in data.items(): - if value is None: - continue - - parm_template = get_template_from_value(key, value) - - if key in current_parms: - if node.evalParm(key) == value: - continue - if not update: - log.debug(f"{key} already exists on {node}") - else: - log.debug(f"replacing {key}") - update_parm_templates.append(parm_template) - continue - - new_parm_templates.append(parm_template) - - if not new_parm_templates and not update_parm_templates: - return - - parm_group = node.parmTemplateGroup() - - # Add new parm templates - if new_parm_templates: - parm_folder = parm_group.findFolder("Extra") - - # if folder doesn't exist yet, create one and append to it, - # else append to existing one - if not parm_folder: - parm_folder = hou.FolderParmTemplate("folder", "Extra") - parm_folder.setParmTemplates(new_parm_templates) - parm_group.append(parm_folder) - else: - # Add to parm template folder instance then replace with updated - # one in parm template group - for template in new_parm_templates: - parm_folder.addParmTemplate(template) - parm_group.replace(parm_folder.name(), parm_folder) - - # Update existing parm templates - for parm_template in update_parm_templates: - parm_group.replace(parm_template.name(), parm_template) - - # When replacing a parm with a parm of the same name it preserves its - # value if before the replacement the parm was not at the default, - # because it has a value override set. Since we're trying to update the - # parm by using the new value as `default` we enforce the parm is at - # default state - node.parm(parm_template.name()).revertToDefaults() - - node.setParmTemplateGroup(parm_group) - - -def lsattr(attr, value=None, root="/"): - """Return nodes that have `attr` - When `value` is not None it will only return nodes matching that value - for the given attribute. - Args: - attr (str): Name of the attribute (hou.Parm) - value (object, Optional): The value to compare the attribute to. - When the default None is provided the value check is skipped. - root (str): The root path in Houdini to search in. - Returns: - list: Matching nodes that have attribute with value. - """ - if value is None: - # Use allSubChildren() as allNodes() errors on nodes without - # permission to enter without a means to continue querying - # the rest - nodes = hou.node(root).allSubChildren() - return [n for n in nodes if n.parm(attr)] - return lsattrs({attr: value}, root=root) - - -def lsattrs(attrs, root="/"): - """Return nodes matching `key` and `value` - Arguments: - attrs (dict): collection of attribute: value - root (str): The root path in Houdini to search in. - Example: - >>> lsattrs({"id": "myId"}) - ["myNode"] - >>> lsattr("id") - ["myNode", "myOtherNode"] - Returns: - list: Matching nodes that have attribute with value. 
- """ - - matches = set() - # Use allSubChildren() as allNodes() errors on nodes without - # permission to enter without a means to continue of querying - # the rest - nodes = hou.node(root).allSubChildren() - for node in nodes: - for attr in attrs: - if not node.parm(attr): - continue - elif node.evalParm(attr) != attrs[attr]: - continue - else: - matches.add(node) - - return list(matches) - - -def read(node): - """Read the container data in to a dict - - Args: - node(hou.Node): Houdini node - - Returns: - dict - - """ - # `spareParms` returns a tuple of hou.Parm objects - data = {} - if not node: - return data - for parameter in node.spareParms(): - value = parameter.eval() - # test if value is json encoded dict - if isinstance(value, six.string_types) and \ - value.startswith(JSON_PREFIX): - try: - value = json.loads(value[len(JSON_PREFIX):]) - except json.JSONDecodeError: - # not a json - pass - data[parameter.name()] = value - - return data - - -@contextmanager -def maintained_selection(): - """Maintain selection during context - Example: - >>> with maintained_selection(): - ... # Modify selection - ... node.setSelected(on=False, clear_all_selected=True) - >>> # Selection restored - """ - - previous_selection = hou.selectedNodes() - try: - yield - finally: - # Clear the selection - # todo: does hou.clearAllSelected() do the same? - for node in hou.selectedNodes(): - node.setSelected(on=False) - - if previous_selection: - for node in previous_selection: - node.setSelected(on=True) - - -def reset_framerange(): - """Set frame range and FPS to current asset""" - - # Get asset data - project_name = get_current_project_name() - asset_name = get_current_asset_name() - # Get the asset ID from the database for the asset of current context - asset_doc = get_asset_by_name(project_name, asset_name) - asset_data = asset_doc["data"] - - # Get FPS - fps = get_asset_fps(asset_doc) - - # Get Start and End Frames - frame_start = asset_data.get("frameStart") - frame_end = asset_data.get("frameEnd") - - if frame_start is None or frame_end is None: - log.warning("No edit information found for %s" % asset_name) - return - - handle_start = asset_data.get("handleStart", 0) - handle_end = asset_data.get("handleEnd", 0) - - frame_start -= int(handle_start) - frame_end += int(handle_end) - - # Set frame range and FPS - print("Setting scene FPS to {}".format(int(fps))) - set_scene_fps(fps) - hou.playbar.setFrameRange(frame_start, frame_end) - hou.playbar.setPlaybackRange(frame_start, frame_end) - hou.setFrame(frame_start) - - -def get_main_window(): - """Acquire Houdini's main window""" - if self._parent is None: - self._parent = hou.ui.mainQtWindow() - return self._parent - - -def get_template_from_value(key, value): - if isinstance(value, float): - parm = hou.FloatParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, bool): - parm = hou.ToggleParmTemplate(name=key, - label=key, - default_value=value) - elif isinstance(value, int): - parm = hou.IntParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, six.string_types): - parm = hou.StringParmTemplate(name=key, - label=key, - num_components=1, - default_value=(value,)) - elif isinstance(value, (dict, list, tuple)): - parm = hou.StringParmTemplate(name=key, - label=key, - num_components=1, - default_value=( - JSON_PREFIX + json.dumps(value),)) - else: - raise TypeError("Unsupported type: %r" % type(value)) - - return parm - - -def get_frame_data(node, 
log=None): - """Get the frame data: `frameStartHandle`, `frameEndHandle` - and `byFrameStep`. - - This function uses Houdini node's `trange`, `t1, `t2` and `t3` - parameters as the source of truth for the full inclusive frame - range to render, as such these are considered as the frame - range including the handles. - - The non-inclusive frame start and frame end without handles - can be computed by subtracting the handles from the inclusive - frame range. - - Args: - node (hou.Node): ROP node to retrieve frame range from, - the frame range is assumed to be the frame range - *including* the start and end handles. - - Returns: - dict: frame data for `frameStartHandle`, `frameEndHandle` - and `byFrameStep`. - - """ - - if log is None: - log = self.log - - data = {} - - if node.parm("trange") is None: - log.debug( - "Node has no 'trange' parameter: {}".format(node.path()) - ) - return data - - if node.evalParm("trange") == 0: - data["frameStartHandle"] = hou.intFrame() - data["frameEndHandle"] = hou.intFrame() - data["byFrameStep"] = 1.0 - - log.info( - "Node '{}' has 'Render current frame' set.\n" - "Asset Handles are ignored.\n" - "frameStart and frameEnd are set to the " - "current frame.".format(node.path()) - ) - else: - data["frameStartHandle"] = int(node.evalParm("f1")) - data["frameEndHandle"] = int(node.evalParm("f2")) - data["byFrameStep"] = node.evalParm("f3") - - return data - - -def splitext(name, allowed_multidot_extensions): - # type: (str, list) -> tuple - """Split file name to name and extension. - - Args: - name (str): File name to split. - allowed_multidot_extensions (list of str): List of allowed multidot - extensions. - - Returns: - tuple: Name and extension. - """ - - for ext in allowed_multidot_extensions: - if name.endswith(ext): - return name[:-len(ext)], ext - - return os.path.splitext(name) - - -def get_top_referenced_parm(parm): - - processed = set() # disallow infinite loop - while True: - if parm.path() in processed: - raise RuntimeError("Parameter references result in cycle.") - - processed.add(parm.path()) - - ref = parm.getReferencedParm() - if ref.path() == parm.path(): - # It returns itself when it doesn't reference - # another parameter - return ref - else: - parm = ref - - -def evalParmNoFrame(node, parm, pad_character="#"): - - parameter = node.parm(parm) - assert parameter, "Parameter does not exist: %s.%s" % (node, parm) - - # If the parameter has a parameter reference, then get that - # parameter instead as otherwise `unexpandedString()` fails. - parameter = get_top_referenced_parm(parameter) - - # Substitute out the frame numbering with padded characters - try: - raw = parameter.unexpandedString() - except hou.Error as exc: - print("Failed: %s" % parameter) - raise RuntimeError(exc) - - def replace(match): - padding = 1 - n = match.group(2) - if n and int(n): - padding = int(n) - return pad_character * padding - - expression = re.sub(r"(\$F([0-9]*))", replace, raw) - - with hou.ScriptEvalContext(parameter): - return hou.expandStringAtFrame(expression, 0) - - -def get_color_management_preferences(): - """Get default OCIO preferences""" - return { - "config": hou.Color.ocio_configPath(), - "display": hou.Color.ocio_defaultDisplay(), - "view": hou.Color.ocio_defaultView() - } - - -def get_obj_node_output(obj_node): - """Find output node. - - If the node has any output node return the - output node with the minimum `outputidx`. - When no output is present return the node - with the display flag set. If no output node is - detected then None is returned. 
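- - Example (editor's illustration, hypothetical network): for a SOP - subnet whose output nodes evaluate 'outputidx' to 0 and 1, the - node with 'outputidx' 0 is returned. 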
- - Arguments: - node (hou.Node): The node to retrieve a single - the output node for. - - Returns: - Optional[hou.Node]: The child output node. - - """ - - outputs = obj_node.subnetOutputs() - if not outputs: - return - - elif len(outputs) == 1: - return outputs[0] - - else: - return min(outputs, - key=lambda node: node.evalParm('outputidx')) - - -def get_output_children(output_node, include_sops=True): - """Recursively return a list of all output nodes - contained in this node including this node. - - It works in a similar manner to output_node.allNodes(). - """ - out_list = [output_node] - - if output_node.childTypeCategory() == hou.objNodeTypeCategory(): - for child in output_node.children(): - out_list += get_output_children(child, include_sops=include_sops) - - elif include_sops and \ - output_node.childTypeCategory() == hou.sopNodeTypeCategory(): - out = get_obj_node_output(output_node) - if out: - out_list += [out] - - return out_list - - -def get_resolution_from_doc(doc): - """Get resolution from the given asset document. """ - - if not doc or "data" not in doc: - print("Entered document is not valid. \"{}\"".format(str(doc))) - return None - - resolution_width = doc["data"].get("resolutionWidth") - resolution_height = doc["data"].get("resolutionHeight") - - # Make sure both width and height are set - if resolution_width is None or resolution_height is None: - print("No resolution information found for \"{}\"".format(doc["name"])) - return None - - return int(resolution_width), int(resolution_height) - - -def set_camera_resolution(camera, asset_doc=None): - """Apply resolution to camera from asset document of the publish""" - - if not asset_doc: - asset_doc = get_current_project_asset() - - resolution = get_resolution_from_doc(asset_doc) - - if resolution: - print("Setting camera resolution: {} -> {}x{}".format( - camera.name(), resolution[0], resolution[1] - )) - camera.parm("resx").set(resolution[0]) - camera.parm("resy").set(resolution[1]) - - -def get_camera_from_container(container): - """Get camera from container node. """ - - cameras = container.recursiveGlob( - "*", - filter=hou.nodeTypeFilter.ObjCamera, - include_subnets=False - ) - - assert len(cameras) == 1, "Camera instance must have only one camera" - return cameras[0] - - -def get_current_context_template_data_with_asset_data(): - """ - TODOs: - Support both 'assetData' and 'folderData' in future. 
- """ - - context = get_current_context() - project_name = context["project_name"] - asset_name = context["asset_name"] - task_name = context["task_name"] - host_name = get_current_host_name() - - anatomy = Anatomy(project_name) - project_doc = get_project(project_name) - asset_doc = get_asset_by_name(project_name, asset_name) - - # get context specific vars - asset_data = asset_doc["data"] - - # compute `frameStartHandle` and `frameEndHandle` - frame_start = asset_data.get("frameStart") - frame_end = asset_data.get("frameEnd") - handle_start = asset_data.get("handleStart") - handle_end = asset_data.get("handleEnd") - if frame_start is not None and handle_start is not None: - asset_data["frameStartHandle"] = frame_start - handle_start - - if frame_end is not None and handle_end is not None: - asset_data["frameEndHandle"] = frame_end + handle_end - - template_data = get_template_data( - project_doc, asset_doc, task_name, host_name - ) - template_data["root"] = anatomy.roots - template_data["assetData"] = asset_data - - return template_data - - -def get_context_var_changes(): - """get context var changes.""" - - houdini_vars_to_update = {} - - project_settings = get_current_project_settings() - houdini_vars_settings = \ - project_settings["houdini"]["general"]["update_houdini_var_context"] - - if not houdini_vars_settings["enabled"]: - return houdini_vars_to_update - - houdini_vars = houdini_vars_settings["houdini_vars"] - - # No vars specified - nothing to do - if not houdini_vars: - return houdini_vars_to_update - - # Get Template data - template_data = get_current_context_template_data_with_asset_data() - - # Set Houdini Vars - for item in houdini_vars: - # For consistency reasons we always force all vars to be uppercase - # Also remove any leading, and trailing whitespaces. - var = item["var"].strip().upper() - - # get and resolve template in value - item_value = StringTemplate.format_template( - item["value"], - template_data - ) - - if var == "JOB" and item_value == "": - # sync $JOB to $HIP if $JOB is empty - item_value = os.environ["HIP"] - - if item["is_directory"]: - item_value = item_value.replace("\\", "/") - - current_value = hou.hscript("echo -n `${}`".format(var))[0] - - if current_value != item_value: - houdini_vars_to_update[var] = ( - current_value, item_value, item["is_directory"] - ) - - return houdini_vars_to_update - - -def update_houdini_vars_context(): - """Update asset context variables""" - - for var, (_old, new, is_directory) in get_context_var_changes().items(): - if is_directory: - try: - os.makedirs(new) - except OSError as e: - if e.errno != errno.EEXIST: - print( - "Failed to create ${} dir. Maybe due to " - "insufficient permissions.".format(var) - ) - - hou.hscript("set {}={}".format(var, new)) - os.environ[var] = new - print("Updated ${} to {}".format(var, new)) - - -def update_houdini_vars_context_dialog(): - """Show pop-up to update asset context variables""" - update_vars = get_context_var_changes() - if not update_vars: - # Nothing to change - print("Nothing to change, Houdini vars are already up to date.") - return - - message = "\n".join( - "${}: {} -> {}".format(var, old or "None", new or "None") - for var, (old, new, _is_directory) in update_vars.items() - ) - - # TODO: Use better UI! 
parent = hou.ui.mainQtWindow() - dialog = popup.Popup(parent=parent) - dialog.setModal(True) - dialog.setWindowTitle("Houdini scene has outdated asset variables") - dialog.setMessage(message) - dialog.setButtonText("Fix") - - # on_clicked is the Fix button clicked callback - dialog.on_clicked.connect(update_houdini_vars_context) - - dialog.show() - - -def publisher_show_and_publish(comment=None): - """Open publisher window and trigger publishing action. - - Args: - comment (Optional[str]): Comment to set in publisher window. - """ - - main_window = get_main_window() - publisher_window = get_tool_by_name( - tool_name="publisher", - parent=main_window, - ) - publisher_window.show_and_publish(comment) - - -def find_rop_input_dependencies(input_tuple): - """Find input dependencies of a ROP node. - - Arguments: - input_tuple (tuple): Possibly nested tuples from - hou.RopNode.inputDependencies(), consisting of ROPs and the - frames that need to be rendered prior to rendering the ROP. - - Returns: - list: The RopNode.path() values that can be found inside - the input tuple. - """ - - out_list = [] - if isinstance(input_tuple[0], hou.RopNode): - return [input_tuple[0].path()] - - if isinstance(input_tuple[0], tuple): - for item in input_tuple: - out_list.extend(find_rop_input_dependencies(item)) - - return out_list - - -def self_publish(): - """Self publish from ROP nodes. - - Firstly, it gets the node and its dependencies. - Then, it deactivates all other ROPs. - And finally, it triggers the publishing action. - """ - - result, comment = hou.ui.readInput( - "Add Publish Comment", - buttons=("Publish", "Cancel"), - title="Publish comment", - close_choice=1 - ) - - if result: - return - - current_node = hou.node(".") - inputs_paths = find_rop_input_dependencies( - current_node.inputDependencies() - ) - inputs_paths.append(current_node.path()) - - host = registered_host() - context = CreateContext(host, reset=True) - - for instance in context.instances: - node_path = instance.data.get("instance_node") - instance["active"] = node_path and node_path in inputs_paths - - context.save_changes() - - publisher_show_and_publish(comment) - - -def add_self_publish_button(node): - """Adds a self publish button to the rop node.""" - - label = os.environ.get("AVALON_LABEL") or "AYON" - - button_parm = hou.ButtonParmTemplate( - "ayon_self_publish", - "{} Publish".format(label), - script_callback="from openpype.hosts.houdini.api.lib import " - "self_publish; self_publish()", - script_callback_language=hou.scriptLanguage.Python, - join_with_next=True - ) - - template = node.parmTemplateGroup() - template.insertBefore((0,), button_parm) - node.setParmTemplateGroup(template) diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py deleted file mode 100644 index d0f45c36b5..0000000000 --- a/openpype/hosts/houdini/api/pipeline.py +++ /dev/null @@ -1,399 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pipeline tools for OpenPype Houdini integration.""" -import os -import sys -import logging - -import hou # noqa - -from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost - -import pyblish.api - -from openpype.pipeline import ( - register_creator_plugin_path, - register_loader_plugin_path, - register_inventory_action_path, - AVALON_CONTAINER_ID, -) -from openpype.pipeline.load import any_outdated_containers -from openpype.hosts.houdini import HOUDINI_HOST_DIR -from openpype.hosts.houdini.api import lib, shelves, creator_node_shelves - -from openpype.lib import 
( - register_event_callback, - emit_event, -) - - -log = logging.getLogger("openpype.hosts.houdini") - -AVALON_CONTAINERS = "/obj/AVALON_CONTAINERS" -CONTEXT_CONTAINER = "/obj/OpenPypeContext" -IS_HEADLESS = not hasattr(hou, "ui") - -PLUGINS_DIR = os.path.join(HOUDINI_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "houdini" - - def __init__(self): - super(HoudiniHost, self).__init__() - self._op_events = {} - self._has_been_setup = False - - def install(self): - pyblish.api.register_host("houdini") - pyblish.api.register_host("hython") - pyblish.api.register_host("hpython") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - - log.info("Installing callbacks ... ") - # register_event_callback("init", on_init) - self._register_callbacks() - register_event_callback("before.save", before_save) - register_event_callback("save", on_save) - register_event_callback("open", on_open) - register_event_callback("new", on_new) - - self._has_been_setup = True - - # Set asset settings for the empty scene directly after launch of - # Houdini so it initializes into the correct scene FPS, - # Frame Range, etc. - # TODO: make sure this doesn't trigger when - # opening with last workfile. - _set_context_settings() - - if not IS_HEADLESS: - import hdefereval # noqa, hdefereval is only available in ui mode - # Defer generation of shelves due to issue on Windows where shelf - # initialization during start up delays Houdini UI by minutes - # making it extremely slow to launch. - hdefereval.executeDeferred(shelves.generate_shelves) - - if not IS_HEADLESS: - import hdefereval # noqa, hdefereval is only available in ui mode - hdefereval.executeDeferred(creator_node_shelves.install) - - def workfile_has_unsaved_changes(self): - return hou.hipFile.hasUnsavedChanges() - - def get_workfile_extensions(self): - return [".hip", ".hiplc", ".hipnc"] - - def save_workfile(self, dst_path=None): - # Force forwards slashes to avoid segfault - if dst_path: - dst_path = dst_path.replace("\\", "/") - hou.hipFile.save(file_name=dst_path, - save_to_recent_files=True) - return dst_path - - def open_workfile(self, filepath): - # Force forwards slashes to avoid segfault - filepath = filepath.replace("\\", "/") - - hou.hipFile.load(filepath, - suppress_save_prompt=True, - ignore_load_warnings=False) - - return filepath - - def get_current_workfile(self): - current_filepath = hou.hipFile.path() - if (os.path.basename(current_filepath) == "untitled.hip" and - not os.path.exists(current_filepath)): - # By default a new scene in houdini is saved in the current - # working directory as "untitled.hip" so we need to capture - # that and consider it 'not saved' when it's in that state. 
- return None - - return current_filepath - - def get_containers(self): - return ls() - - def _register_callbacks(self): - for event in self._op_events.copy().values(): - if event is None: - continue - - try: - hou.hipFile.removeEventCallback(event) - except RuntimeError as e: - log.info(e) - - self._op_events[on_file_event_callback] = hou.hipFile.addEventCallback( - on_file_event_callback - ) - - @staticmethod - def create_context_node(): - """Helper for creating context holding node. - - Returns: - hou.Node: context node - - """ - obj_network = hou.node("/obj") - op_ctx = obj_network.createNode("subnet", - node_name="OpenPypeContext", - run_init_scripts=False, - load_contents=False) - - op_ctx.moveToGoodPosition() - op_ctx.setBuiltExplicitly(False) - op_ctx.setCreatorState("OpenPype") - op_ctx.setComment("OpenPype node to hold context metadata") - op_ctx.setColor(hou.Color((0.081, 0.798, 0.810))) - op_ctx.setDisplayFlag(False) - op_ctx.hide(True) - return op_ctx - - def update_context_data(self, data, changes): - op_ctx = hou.node(CONTEXT_CONTAINER) - if not op_ctx: - op_ctx = self.create_context_node() - - lib.imprint(op_ctx, data) - - def get_context_data(self): - op_ctx = hou.node(CONTEXT_CONTAINER) - if not op_ctx: - op_ctx = self.create_context_node() - return lib.read(op_ctx) - - def save_file(self, dst_path=None): - # Force forwards slashes to avoid segfault - dst_path = dst_path.replace("\\", "/") - - hou.hipFile.save(file_name=dst_path, - save_to_recent_files=True) - - -def on_file_event_callback(event): - if event == hou.hipFileEventType.AfterLoad: - emit_event("open") - elif event == hou.hipFileEventType.AfterSave: - emit_event("save") - elif event == hou.hipFileEventType.BeforeSave: - emit_event("before.save") - elif event == hou.hipFileEventType.AfterClear: - emit_event("new") - - -def containerise(name, - namespace, - nodes, - context, - loader=None, - suffix=""): - """Bundle `nodes` into a subnet and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - nodes (list): Long names of nodes to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. - - Returns: - container (str): Name of container assembly - - """ - - # Ensure AVALON_CONTAINERS subnet exists - subnet = hou.node(AVALON_CONTAINERS) - if subnet is None: - obj_network = hou.node("/obj") - subnet = obj_network.createNode("subnet", - node_name="AVALON_CONTAINERS") - - # Create proper container name - container_name = "{}_{}".format(name, suffix or "CON") - container = hou.node("/obj/{}".format(name)) - container.setName(container_name, unique_name=True) - - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - } - - lib.imprint(container, data) - - # "Parent" the container under the container network - hou.moveNodesTo([container], subnet) - - subnet.node(container_name).moveToGoodPosition() - - return container - - -def parse_container(container): - """Return the container node's full container data. - - Args: - container (hou.Node): A container node name. - - Returns: - dict: The container schema data for this container node. 
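- Example keys (editor's illustration, derived from containerise() - above and the transient data appended below): "schema", "id", - "name", "namespace", "loader", "representation", "objectName", - "node". 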
- - """ - data = lib.read(container) - - # Backwards compatibility pre-schemas for containers - data["schema"] = data.get("schema", "openpype:container-1.0") - - # Append transient data - data["objectName"] = container.path() - data["node"] = container - - return data - - -def ls(): - containers = [] - for identifier in (AVALON_CONTAINER_ID, - "pyblish.mindbender.container"): - containers += lib.lsattr("id", identifier) - - for container in sorted(containers, - # Hou 19+ Python 3 hou.ObjNode are not - # sortable due to not supporting greater - # than comparisons - key=lambda node: node.path()): - yield parse_container(container) - - -def before_save(): - return lib.validate_fps() - - -def on_save(): - - log.info("Running callback on save..") - - # update houdini vars - lib.update_houdini_vars_context_dialog() - - nodes = lib.get_id_required_nodes() - for node, new_id in lib.generate_ids(nodes): - lib.set_id(node, new_id, overwrite=False) - - -def _show_outdated_content_popup(): - # Get main window - parent = lib.get_main_window() - if parent is None: - log.info("Skipping outdated content pop-up " - "because Houdini window can't be found.") - else: - from openpype.widgets import popup - - # Show outdated pop-up - def _on_show_inventory(): - from openpype.tools.utils import host_tools - host_tools.show_scene_inventory(parent=parent) - - dialog = popup.Popup(parent=parent) - dialog.setWindowTitle("Houdini scene has outdated content") - dialog.setMessage("There are outdated containers in " - "your Houdini scene.") - dialog.on_clicked.connect(_on_show_inventory) - dialog.show() - - -def on_open(): - - if not hou.isUIAvailable(): - log.debug("Batch mode detected, ignoring `on_open` callbacks..") - return - - log.info("Running callback on open..") - - # update houdini vars - lib.update_houdini_vars_context_dialog() - - # Validate FPS after update_task_from_path to - # ensure it is using correct FPS for the asset - lib.validate_fps() - - if any_outdated_containers(): - parent = lib.get_main_window() - if parent is None: - # When opening Houdini with last workfile on launch the UI hasn't - # initialized yet completely when the `on_open` callback triggers. - # We defer the dialog popup to wait for the UI to become available. - # We assume it will open because `hou.isUIAvailable()` returns True - import hdefereval - hdefereval.executeDeferred(_show_outdated_content_popup) - else: - _show_outdated_content_popup() - - log.warning("Scene has outdated content.") - - -def on_new(): - """Set project resolution and fps when create a new file""" - - if hou.hipFile.isLoadingHipFile(): - # This event also triggers when Houdini opens a file due to the - # new event being registered to 'afterClear'. As such we can skip - # 'new' logic if the user is opening a file anyway - log.debug("Skipping on new callback due to scene being opened.") - return - - log.info("Running callback on new..") - _set_context_settings() - - # It seems that the current frame always gets reset to frame 1 on - # new scene. 
So we enforce current frame to be at the start of the playbar - # with execute deferred - def _enforce_start_frame(): - start = hou.playbar.playbackRange()[0] - hou.setFrame(start) - - if hou.isUIAvailable(): - import hdefereval - hdefereval.executeDeferred(_enforce_start_frame) - else: - # Run without execute deferred when no UI is available because - # without UI `hdefereval` is not available to import - _enforce_start_frame() - - -def _set_context_settings(): - """Apply the project settings from the project definition - - Settings can be overwritten by an asset if the asset.data contains - any information regarding those settings. - - Examples of settings: - fps - resolution - renderer - - Returns: - None - """ - - lib.reset_framerange() - lib.update_houdini_vars_context() diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py deleted file mode 100644 index e162d0e461..0000000000 --- a/openpype/hosts/houdini/api/plugin.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- -"""Houdini specific Avalon/Pyblish plugin definitions.""" -import sys -from abc import ( - ABCMeta -) -import six -import hou - -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import ( - CreatorError, - LegacyCreator, - Creator as NewCreator, - CreatedInstance -) -from openpype.lib import BoolDef -from .lib import imprint, read, lsattr, add_self_publish_button - - -class OpenPypeCreatorError(CreatorError): - pass - - -class Creator(LegacyCreator): - """Creator plugin to create instances in Houdini - - To support the wide range of node types for render output (Alembic, VDB, - Mantra) the Creator needs a node type to create the correct instance - - By default, if none is given, `geometry` is used. Examples of accepted - node types: geometry, alembic, ifd (mantra) - - Please check the Houdini documentation for more node types. - - Tip: to find the exact node type to create, press `i` to the left of the - node while hovering over it. The information is visible under the name - of the node. - - Deprecated: - This creator is deprecated and will be removed in a future version. - - """ - defaults = ['Main'] - - def __init__(self, *args, **kwargs): - super(Creator, self).__init__(*args, **kwargs) - self.nodes = [] - - def process(self): - """This is the base functionality to create instances in Houdini - - The selected nodes are stored in self to be used in an override method. - This is currently necessary in order to support the multiple output - types in Houdini which can only be rendered through their own node. 
- - Default node type if none is given is `geometry` - - It also makes it easier to apply custom settings per instance type - - Example of override method for Alembic: - - def process(self): - instance = super(CreateEpicNode, self).process() - # Set parameters for Alembic node - instance.setParms( - {"sop_path": "$HIP/%s.abc" % self.nodes[0]} - ) - - Returns: - hou.Node - - """ - try: - if (self.options or {}).get("useSelection"): - self.nodes = hou.selectedNodes() - - # Get the node type and remove it from the data, not needed - node_type = self.data.pop("node_type", None) - if node_type is None: - node_type = "geometry" - - # Get out node - out = hou.node("/out") - instance = out.createNode(node_type, node_name=self.name) - instance.moveToGoodPosition() - - imprint(instance, self.data) - - self._process(instance) - - except hou.Error as er: - six.reraise( - OpenPypeCreatorError, - OpenPypeCreatorError("Creator error: {}".format(er)), - sys.exc_info()[2]) - - -class HoudiniCreatorBase(object): - @staticmethod - def cache_subsets(shared_data): - """Cache instances for Creators to shared data. - - Create `houdini_cached_subsets` key when needed in shared data and - fill it with all collected instances from the scene under its - respective creator identifiers. - - Create `houdini_cached_legacy_subsets` key for any legacy instances - detected in the scene as instances per family. - - Args: - shared_data (Dict[str, Any]): Shared data. - - Returns: - Dict[str, Any]: Shared data dictionary. - - """ - if shared_data.get("houdini_cached_subsets") is None: - cache = dict() - cache_legacy = dict() - - for node in lsattr("id", "pyblish.avalon.instance"): - - creator_identifier_parm = node.parm("creator_identifier") - if creator_identifier_parm: - # creator instance - creator_id = creator_identifier_parm.eval() - cache.setdefault(creator_id, []).append(node) - - else: - # legacy instance - family_parm = node.parm("family") - if not family_parm: - # must be a broken instance - continue - - family = family_parm.eval() - cache_legacy.setdefault(family, []).append(node) - - shared_data["houdini_cached_subsets"] = cache - shared_data["houdini_cached_legacy_subsets"] = cache_legacy - - return shared_data - - @staticmethod - def create_instance_node( - asset_name, node_name, parent, node_type="geometry" - ): - # type: (str, str, str, str) -> hou.Node - """Create node representing instance. - - Arguments: - asset_name (str): Asset name. - node_name (str): Name of the new node. - parent (str): Name of the parent node. - node_type (str, optional): Type of the node. - - Returns: - hou.Node: Newly created instance node. 
-
-        """
-        parent_node = hou.node(parent)
-        instance_node = parent_node.createNode(
-            node_type, node_name=node_name)
-        instance_node.moveToGoodPosition()
-        return instance_node
-
-
-@six.add_metaclass(ABCMeta)
-class HoudiniCreator(NewCreator, HoudiniCreatorBase):
-    """Base class for most of the Houdini creator plugins."""
-    selected_nodes = []
-    settings_name = None
-    add_publish_button = False
-
-    def create(self, subset_name, instance_data, pre_create_data):
-        try:
-            self.selected_nodes = []
-
-            if pre_create_data.get("use_selection"):
-                self.selected_nodes = hou.selectedNodes()
-
-            # Get the node type and remove it from the data, not needed
-            node_type = instance_data.pop("node_type", None)
-            if node_type is None:
-                node_type = "geometry"
-
-            if AYON_SERVER_ENABLED:
-                asset_name = instance_data["folderPath"]
-            else:
-                asset_name = instance_data["asset"]
-
-            instance_node = self.create_instance_node(
-                asset_name, subset_name, "/out", node_type)
-
-            self.customize_node_look(instance_node)
-
-            instance_data["instance_node"] = instance_node.path()
-            instance_data["instance_id"] = instance_node.path()
-            instance = CreatedInstance(
-                self.family,
-                subset_name,
-                instance_data,
-                self)
-            self._add_instance_to_context(instance)
-            self.imprint(instance_node, instance.data_to_store())
-
-            if self.add_publish_button:
-                add_self_publish_button(instance_node)
-
-            return instance
-
-        except hou.Error as er:
-            six.reraise(
-                OpenPypeCreatorError,
-                OpenPypeCreatorError("Creator error: {}".format(er)),
-                sys.exc_info()[2])
-
-    def lock_parameters(self, node, parameters):
-        """Lock list of specified parameters on the node.
-
-        Args:
-            node (hou.Node): Houdini node to lock parameters on.
-            parameters (list of str): List of parameter names.
-
-        """
-        for name in parameters:
-            try:
-                parm = node.parm(name)
-                parm.lock(True)
-            except AttributeError:
-                self.log.debug(
-                    "Cannot lock missing parameter: {}".format(name))
-
-    def collect_instances(self):
-        # cache instances if missing
-        self.cache_subsets(self.collection_shared_data)
-        for instance in self.collection_shared_data[
-                "houdini_cached_subsets"].get(self.identifier, []):
-
-            node_data = read(instance)
-
-            # Node paths are always the full node path since that is unique.
-            # Because it's the node's path it's not written into attributes
-            # but explicitly collected
-            node_path = instance.path()
-            node_data["instance_id"] = node_path
-            node_data["instance_node"] = node_path
-
-            created_instance = CreatedInstance.from_existing(
-                node_data, self
-            )
-            self._add_instance_to_context(created_instance)
-
-    def update_instances(self, update_list):
-        for created_inst, changes in update_list:
-            instance_node = hou.node(created_inst.get("instance_node"))
-            new_values = {
-                key: changes[key].new_value
-                for key in changes.changed_keys
-            }
-            # Update parm templates and values
-            self.imprint(
-                instance_node,
-                new_values,
-                update=True
-            )
-
-    def imprint(self, node, values, update=False):
-        # Never store instance node and instance id since that data comes
-        # from the node's path
-        values.pop("instance_node", None)
-        values.pop("instance_id", None)
-        imprint(node, values, update=update)
-
-    def remove_instances(self, instances):
-        """Remove specified instances from the scene.
-
-        This destroys the instance node itself, including any data the
-        artist stored on it.
-
-        """
-        for instance in instances:
-            instance_node = hou.node(instance.data.get("instance_node"))
-            if instance_node:
-                instance_node.destroy()
-
-            self._remove_instance_from_context(instance)
-
-    def get_pre_create_attr_defs(self):
-        return [
-            BoolDef("use_selection", label="Use selection")
-        ]
-
-    @staticmethod
-    def customize_node_look(
-            node, color=None,
-            shape="chevron_down"):
-        """Set custom look for instance nodes.
-
-        Args:
-            node (hou.Node): Node to set the look on.
-            color (hou.Color, Optional): Color of the node.
-            shape (str, Optional): Shape name of the node.
-
-        Returns:
-            None
-
-        """
-        if not color:
-            color = hou.Color((0.616, 0.871, 0.769))
-        node.setUserData('nodeshape', shape)
-        node.setColor(color)
-
-    def get_network_categories(self):
-        """Return the network view types in which this creator should show.
-
-        The node type categories returned here will be used to define where
-        the creator will show up in the TAB search for nodes in Houdini's
-        Network View.
-
-        This can be overridden in inherited classes to define where that
-        particular Creator should be visible in the TAB search.
-
-        Returns:
-            list: List of Houdini node type categories
-
-        """
-        return [hou.ropNodeTypeCategory()]
-
-    def apply_settings(self, project_settings):
-        """Method called on initialization of plugin to apply settings."""
-
-        # Apply General Settings
-        houdini_general_settings = project_settings["houdini"]["general"]
-        self.add_publish_button = houdini_general_settings.get(
-            "add_self_publish_button", False)
-
-        # Apply Creator Settings
-        settings_name = self.settings_name
-        if settings_name is None:
-            settings_name = self.__class__.__name__
-
-        settings = project_settings["houdini"]["create"]
-        settings = settings.get(settings_name)
-        if settings is None:
-            self.log.debug(
-                "No settings found for {}".format(self.__class__.__name__)
-            )
-            return
-
-        for key, value in settings.items():
-            setattr(self, key, value)
diff --git a/openpype/hosts/houdini/hooks/set_paths.py b/openpype/hosts/houdini/hooks/set_paths.py
deleted file mode 100644
index b23659e23b..0000000000
--- a/openpype/hosts/houdini/hooks/set_paths.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from openpype.lib.applications import PreLaunchHook, LaunchTypes
-
-
-class SetPath(PreLaunchHook):
-    """Set current dir to workdir.
-
-    Hook `GlobalHostDataHook` must be executed before this hook.
-    """
-    app_groups = {"houdini"}
-    launch_types = {LaunchTypes.local}
-
-    def execute(self):
-        workdir = self.launch_context.env.get("AVALON_WORKDIR", "")
-        if not workdir:
-            self.log.warning("BUG: Workdir is not filled.")
-            return
-
-        self.launch_context.kwargs["cwd"] = workdir
diff --git a/openpype/hosts/houdini/plugins/create/convert_legacy.py b/openpype/hosts/houdini/plugins/create/convert_legacy.py
deleted file mode 100644
index 86103e3369..0000000000
--- a/openpype/hosts/houdini/plugins/create/convert_legacy.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Converter for legacy Houdini subsets."""
-from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
-from openpype.hosts.houdini.api.lib import imprint
-
-
-class HoudiniLegacyConvertor(SubsetConvertorPlugin):
-    """Find and convert any legacy subsets in the scene.
-
-    This Converter will find all legacy subsets in the scene and will
-    transform them to the current system. Since the old subsets don't
-    retain any information about their original creators, the only mapping
-    we can do is based on their families.
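
The `imprint` and `read` helpers from `api.lib` are used throughout these plugins, but their implementation is not part of this diff. A hedged sketch of the round-trip contract the code above relies on, assuming values are stored as spare parameters on the node and that it runs inside a Houdini session:

```python
import hou
from openpype.hosts.houdini.api.lib import imprint, read

# Assumed contract only: write a dict as spare parms, read it back.
node = hou.node("/out").createNode("geometry", node_name="demo_instance")
imprint(node, {"id": "pyblish.avalon.instance", "family": "pointcache"})

data = read(node)
assert data["family"] == "pointcache"
```
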
-
-    Its limitation is that you can have multiple creators creating subsets
-    of the same family and there is no way to handle it. This code should
-    nevertheless cover all creators that came with OpenPype.
-
-    """
-    identifier = "io.openpype.creators.houdini.legacy"
-    family_to_id = {
-        "camera": "io.openpype.creators.houdini.camera",
-        "ass": "io.openpype.creators.houdini.ass",
-        "imagesequence": "io.openpype.creators.houdini.imagesequence",
-        "hda": "io.openpype.creators.houdini.hda",
-        "pointcache": "io.openpype.creators.houdini.pointcache",
-        "redshiftproxy": "io.openpype.creators.houdini.redshiftproxy",
-        "redshift_rop": "io.openpype.creators.houdini.redshift_rop",
-        "usd": "io.openpype.creators.houdini.usd",
-        "usdrender": "io.openpype.creators.houdini.usdrender",
-        "vdbcache": "io.openpype.creators.houdini.vdbcache"
-    }
-
-    def __init__(self, *args, **kwargs):
-        super(HoudiniLegacyConvertor, self).__init__(*args, **kwargs)
-        self.legacy_subsets = {}
-
-    def find_instances(self):
-        """Find legacy subsets in the scene.
-
-        Legacy subsets are the ones that don't have a `creator_identifier`
-        parameter on them.
-
-        This is using cached entries done in
-        :py:meth:`~HoudiniCreatorBase.cache_subsets()`
-
-        """
-        self.legacy_subsets = self.collection_shared_data.get(
-            "houdini_cached_legacy_subsets")
-        if not self.legacy_subsets:
-            return
-        # Count the actual subsets, not just the families they group into
-        count = sum(len(nodes) for nodes in self.legacy_subsets.values())
-        self.add_convertor_item("Found {} incompatible subset{}.".format(
-            count, "s" if count > 1 else "")
-        )
-
-    def convert(self):
-        """Convert all legacy subsets to current.
-
-        It is enough to add `creator_identifier` and `instance_node`.
-
-        """
-        if not self.legacy_subsets:
-            return
-
-        for family, subsets in self.legacy_subsets.items():
-            if family in self.family_to_id:
-                for subset in subsets:
-                    data = {
-                        "creator_identifier": self.family_to_id[family],
-                        "instance_node": subset.path()
-                    }
-                    if family == "pointcache":
-                        data["families"] = ["abc"]
-                    self.log.info("Converting {} to {}".format(
-                        subset.path(), self.family_to_id[family]))
-                    imprint(subset, data)
diff --git a/openpype/hosts/houdini/plugins/create/create_pointcache.py b/openpype/hosts/houdini/plugins/create/create_pointcache.py
deleted file mode 100644
index 2d2f89cc48..0000000000
--- a/openpype/hosts/houdini/plugins/create/create_pointcache.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Creator plugin for creating pointcache alembics."""
-from openpype.hosts.houdini.api import plugin
-from openpype.lib import BoolDef
-
-import hou
-
-
-class CreatePointCache(plugin.HoudiniCreator):
-    """Alembic ROP to pointcache"""
-    identifier = "io.openpype.creators.houdini.pointcache"
-    label = "PointCache (Abc)"
-    family = "pointcache"
-    icon = "gears"
-
-    def create(self, subset_name, instance_data, pre_create_data):
-        instance_data.pop("active", None)
-        instance_data.update({"node_type": "alembic"})
-        creator_attributes = instance_data.setdefault(
-            "creator_attributes", dict())
-        creator_attributes["farm"] = pre_create_data["farm"]
-
-        instance = super(CreatePointCache, self).create(
-            subset_name,
-            instance_data,
-            pre_create_data)
-
-        instance_node = hou.node(instance.get("instance_node"))
-        parms = {
-            "use_sop_path": True,
-            "build_from_path": True,
-            "path_attrib": "path",
-            "prim_to_detail_pattern": "cbId",
-            "format": 2,
-            "facesets": 0,
-            "filename": hou.text.expandString(
-                "$HIP/pyblish/{}.abc".format(subset_name))
-        }
-
-        if self.selected_nodes:
-            selected_node = self.selected_nodes[0]
-
-            # Although Houdini allows an ObjNode path 
on `sop_path` for the
-            # ROP node, we prefer it set to the SopNode path explicitly
-
-            # Allow sop level paths (e.g. /obj/geo1/box1)
-            if isinstance(selected_node, hou.SopNode):
-                parms["sop_path"] = selected_node.path()
-                self.log.debug(
-                    "Valid SopNode selection, 'SOP Path' in ROP will be set to '%s'."
                    % selected_node.path()
-                )
-
-            # Allow object level paths to Geometry nodes (e.g. /obj/geo1)
-            # but do not allow other object level node types like cameras, etc.
-            elif isinstance(selected_node, hou.ObjNode) and \
-                    selected_node.type().name() in ["geo"]:
-
-                # get the output node with the minimum
-                # 'outputidx' or the node with display flag
-                sop_path = self.get_obj_output(selected_node)
-
-                if sop_path:
-                    parms["sop_path"] = sop_path.path()
-                    self.log.debug(
-                        "Valid ObjNode selection, 'SOP Path' in ROP will be set to "
-                        "the child path '%s'."
-                        % sop_path.path()
-                    )
-
-            if not parms.get("sop_path", None):
-                self.log.debug(
-                    "Selection isn't valid. 'SOP Path' in ROP will be empty."
-                )
-        else:
-            self.log.debug(
-                "No selection. 'SOP Path' in ROP will be empty."
-            )
-
-        instance_node.setParms(parms)
-        instance_node.parm("trange").set(1)
-
-        # Lock any parameters in this list
-        to_lock = ["prim_to_detail_pattern"]
-        self.lock_parameters(instance_node, to_lock)
-
-    def get_network_categories(self):
-        return [
-            hou.ropNodeTypeCategory(),
-            hou.sopNodeTypeCategory()
-        ]
-
-    def get_obj_output(self, obj_node):
-        """Find the output node with the smallest 'outputidx'."""
-
-        outputs = obj_node.subnetOutputs()
-
-        # if obj_node is empty
-        if not outputs:
-            return
-
-        # if obj_node has one output child, whether it's a SOP output node
-        # or a node with the render flag
-        elif len(outputs) == 1:
-            return outputs[0]
-
-        # if there is more than one, the node has multiple output nodes;
-        # return the one with the minimum 'outputidx'
-        else:
-            return min(outputs,
                       key=lambda node: node.evalParm('outputidx'))
-
-    def get_instance_attr_defs(self):
-        return [
-            BoolDef("farm",
                    label="Submitting to Farm",
                    default=False)
-        ]
-
-    def get_pre_create_attr_defs(self):
-        attrs = super().get_pre_create_attr_defs()
-        # Use same attributes as for instance attributes
-        return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py b/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py
deleted file mode 100644
index e1577c92e9..0000000000
--- a/openpype/hosts/houdini/plugins/create/create_redshift_proxy.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Creator plugin for creating Redshift proxies."""
-from openpype.hosts.houdini.api import plugin
-import hou
-from openpype.lib import BoolDef
-
-
-class CreateRedshiftProxy(plugin.HoudiniCreator):
-    """Redshift Proxy"""
-    identifier = "io.openpype.creators.houdini.redshiftproxy"
-    label = "Redshift Proxy"
-    family = "redshiftproxy"
-    icon = "magic"
-
-    def create(self, subset_name, instance_data, pre_create_data):
-
-        # Remove `active`; we are checking the bypass flag of the nodes instead
-        instance_data.pop("active", None)
-
-        # Redshift provides a `Redshift_Proxy_Output` node type which shows
-        # a limited set of parameters by default and is set to extract a
-        # Redshift Proxy. However when "imprinting" extra parameters needed
-        # for OpenPype it starts showing all its parameters again. It's unclear
-        # why this happens.
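
A standalone restatement of the `get_obj_output()` rule above (the output SOP with the smallest `outputidx` wins), runnable in a Houdini session; `/obj/geo1` is a hypothetical node assumed to exist in your scene.

```python
import hou

geo = hou.node("/obj/geo1")  # hypothetical geometry node in your scene
outputs = geo.subnetOutputs()
sop = (min(outputs, key=lambda n: n.evalParm("outputidx"))
       if outputs else None)
print(sop.path() if sop else "no output SOP found")
```
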
-        # TODO: Somehow enforce so that it only shows the original limited
-        # attributes of the Redshift_Proxy_Output node type
-        instance_data.update({"node_type": "Redshift_Proxy_Output"})
-        creator_attributes = instance_data.setdefault(
-            "creator_attributes", dict())
-        creator_attributes["farm"] = pre_create_data["farm"]
-
-        instance = super(CreateRedshiftProxy, self).create(
-            subset_name,
-            instance_data,
-            pre_create_data)
-
-        instance_node = hou.node(instance.get("instance_node"))
-
-        parms = {
-            "RS_archive_file": '$HIP/pyblish/{}.$F4.rs'.format(subset_name),
-        }
-
-        if self.selected_nodes:
-            parms["RS_archive_sopPath"] = self.selected_nodes[0].path()
-
-        instance_node.setParms(parms)
-
-        # Lock some Avalon attributes
-        to_lock = ["family", "id", "prim_to_detail_pattern"]
-        self.lock_parameters(instance_node, to_lock)
-
-    def get_network_categories(self):
-        return [
-            hou.ropNodeTypeCategory(),
-            hou.sopNodeTypeCategory()
-        ]
-
-    def get_instance_attr_defs(self):
-        return [
-            BoolDef("farm",
                    label="Submitting to Farm",
                    default=False)
-        ]
-
-    def get_pre_create_attr_defs(self):
-        attrs = super().get_pre_create_attr_defs()
-        # Use same attributes as for instance attributes
-        return attrs + self.get_instance_attr_defs()
diff --git a/openpype/hosts/houdini/plugins/create/create_review.py b/openpype/hosts/houdini/plugins/create/create_review.py
deleted file mode 100644
index 60c34a358b..0000000000
--- a/openpype/hosts/houdini/plugins/create/create_review.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Creator plugin for creating openGL reviews."""
-from openpype.hosts.houdini.api import plugin
-from openpype.lib import EnumDef, BoolDef, NumberDef
-
-import os
-import hou
-
-
-class CreateReview(plugin.HoudiniCreator):
-    """Review with OpenGL ROP"""
-
-    identifier = "io.openpype.creators.houdini.review"
-    label = "Review"
-    family = "review"
-    icon = "video-camera"
-
-    def create(self, subset_name, instance_data, pre_create_data):
-
-        instance_data.pop("active", None)
-        instance_data.update({"node_type": "opengl"})
-        instance_data["imageFormat"] = pre_create_data.get("imageFormat")
-        instance_data["keepImages"] = pre_create_data.get("keepImages")
-
-        instance = super(CreateReview, self).create(
-            subset_name,
-            instance_data,
-            pre_create_data)
-
-        instance_node = hou.node(instance.get("instance_node"))
-
-        frame_range = hou.playbar.frameRange()
-
-        filepath = "{root}/{subset}/{subset}.$F4.{ext}".format(
-            root=hou.text.expandString("$HIP/pyblish"),
-            subset="`chs(\"subset\")`",  # keep dynamic link to subset
-            ext=pre_create_data.get("imageFormat") or "png"
-        )
-
-        parms = {
-            "picture": filepath,
-
-            "trange": 1,
-
-            # Unlike many other ROP nodes the opengl node does not default
-            # to expression of $FSTART and $FEND so we preserve that behavior
-            # but do set the range to the frame range of the playbar
-            "f1": frame_range[0],
-            "f2": frame_range[1],
-        }
-
-        override_resolution = pre_create_data.get("override_resolution")
-        if override_resolution:
-            parms.update({
-                "tres": override_resolution,
-                "res1": pre_create_data.get("resx"),
-                "res2": pre_create_data.get("resy"),
-                "aspect": pre_create_data.get("aspect"),
-            })
-
-        if self.selected_nodes:
-            # Use the first camera found in the selection as the camera;
-            # other node types are set as force objects
-            camera = None
-            force_objects = []
-            for node in self.selected_nodes:
-                path = node.path()
-                if node.type().name() == "cam":
-                    if camera:
-                        continue
-                    camera = path
-                else:
-                    force_objects.append(path)
-
-            if not camera:
- self.log.warning("No camera found in selection.") - - parms.update({ - "camera": camera or "", - "scenepath": "/obj", - "forceobjects": " ".join(force_objects), - "vobjects": "" # clear candidate objects from '*' value - }) - - instance_node.setParms(parms) - - # Set OCIO Colorspace to the default output colorspace - # if there's OCIO - if os.getenv("OCIO"): - self.set_colorcorrect_to_default_view_space(instance_node) - - to_lock = ["id", "family"] - - self.lock_parameters(instance_node, to_lock) - - def get_pre_create_attr_defs(self): - attrs = super(CreateReview, self).get_pre_create_attr_defs() - - image_format_enum = [ - "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", - "rad", "rat", "rta", "sgi", "tga", "tif", - ] - - return attrs + [ - BoolDef("keepImages", - label="Keep Image Sequences", - default=False), - EnumDef("imageFormat", - image_format_enum, - default="png", - label="Image Format Options"), - BoolDef("override_resolution", - label="Override resolution", - tooltip="When disabled the resolution set on the camera " - "is used instead.", - default=True), - NumberDef("resx", - label="Resolution Width", - default=1280, - minimum=2, - decimals=0), - NumberDef("resy", - label="Resolution Height", - default=720, - minimum=2, - decimals=0), - NumberDef("aspect", - label="Aspect Ratio", - default=1.0, - minimum=0.0001, - decimals=3) - ] - - def set_colorcorrect_to_default_view_space(self, - instance_node): - """Set ociocolorspace to the default output space.""" - from openpype.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa - - # set Color Correction parameter to OpenColorIO - instance_node.setParms({"colorcorrect": 2}) - - # Get default view space for ociocolorspace parm. - default_view_space = get_default_display_view_colorspace() - instance_node.setParms( - {"ociocolorspace": default_view_space} - ) - - self.log.debug( - "'OCIO Colorspace' parm on '{}' has been set to " - "the default view color space '{}'" - .format(instance_node, default_view_space) - ) diff --git a/openpype/hosts/houdini/plugins/create/create_workfile.py b/openpype/hosts/houdini/plugins/create/create_workfile.py deleted file mode 100644 index 850f5c994e..0000000000 --- a/openpype/hosts/houdini/plugins/create/create_workfile.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating workfiles.""" -from openpype import AYON_SERVER_ENABLED -from openpype.hosts.houdini.api import plugin -from openpype.hosts.houdini.api.lib import read, imprint -from openpype.hosts.houdini.api.pipeline import CONTEXT_CONTAINER -from openpype.pipeline import CreatedInstance, AutoCreator -from openpype.client import get_asset_by_name -import hou - - -class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator): - """Workfile auto-creator.""" - identifier = "io.openpype.creators.houdini.workfile" - label = "Workfile" - family = "workfile" - icon = "fa5.file" - - default_variant = "Main" - - def create(self): - variant = self.default_variant - current_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), None) - - project_name = self.project_name - asset_name = self.create_context.get_current_asset_name() - task_name = self.create_context.get_current_task_name() - host_name = self.host_name - - if current_instance is None: - current_instance_asset = None - elif AYON_SERVER_ENABLED: - current_instance_asset = current_instance["folderPath"] - else: - current_instance_asset = 
current_instance["asset"] - - if current_instance is None: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - data = { - "task": task_name, - "variant": variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - - data.update( - self.get_dynamic_data( - variant, task_name, asset_doc, - project_name, host_name, current_instance) - ) - self.log.info("Auto-creating workfile instance...") - current_instance = CreatedInstance( - self.family, subset_name, data, self - ) - self._add_instance_to_context(current_instance) - elif ( - current_instance_asset != asset_name - or current_instance["task"] != task_name - ): - # Update instance context if is not the same - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - if AYON_SERVER_ENABLED: - current_instance["folderPath"] = asset_name - else: - current_instance["asset"] = asset_name - current_instance["task"] = task_name - current_instance["subset"] = subset_name - - # write workfile information to context container. - op_ctx = hou.node(CONTEXT_CONTAINER) - if not op_ctx: - op_ctx = self.create_context_node() - - workfile_data = {"workfile": current_instance.data_to_store()} - imprint(op_ctx, workfile_data) - - def collect_instances(self): - op_ctx = hou.node(CONTEXT_CONTAINER) - instance = read(op_ctx) - if not instance: - return - workfile = instance.get("workfile") - if not workfile: - return - created_instance = CreatedInstance.from_existing( - workfile, self - ) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - op_ctx = hou.node(CONTEXT_CONTAINER) - for created_inst, _changes in update_list: - if created_inst["creator_identifier"] == self.identifier: - workfile_data = {"workfile": created_inst.data_to_store()} - imprint(op_ctx, workfile_data, update=True) diff --git a/openpype/hosts/houdini/plugins/load/actions.py b/openpype/hosts/houdini/plugins/load/actions.py deleted file mode 100644 index 637be1513d..0000000000 --- a/openpype/hosts/houdini/plugins/load/actions.py +++ /dev/null @@ -1,85 +0,0 @@ -"""A module containing generic loader actions that will display in the Loader. - -""" - -from openpype.pipeline import load - - -class SetFrameRangeLoader(load.LoaderPlugin): - """Set frame range excluding pre- and post-handles""" - - families = [ - "animation", - "camera", - "pointcache", - "vdbcache", - "usd", - ] - representations = ["abc", "vdb", "usd"] - - label = "Set frame range" - order = 11 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import hou - - version = context["version"] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - if start is None or end is None: - print( - "Skipping setting frame range because start or " - "end frame data is missing.." 
- ) - return - - hou.playbar.setFrameRange(start, end) - hou.playbar.setPlaybackRange(start, end) - - -class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Set frame range including pre- and post-handles""" - - families = [ - "animation", - "camera", - "pointcache", - "vdbcache", - "usd", - ] - representations = ["abc", "vdb", "usd"] - - label = "Set frame range (with handles)" - order = 12 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import hou - - version = context["version"] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - if start is None or end is None: - print( - "Skipping setting frame range because start or " - "end frame data is missing.." - ) - return - - # Include handles - start -= version_data.get("handleStart", 0) - end += version_data.get("handleEnd", 0) - - hou.playbar.setFrameRange(start, end) - hou.playbar.setPlaybackRange(start, end) diff --git a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py deleted file mode 100644 index 48bd730ebe..0000000000 --- a/openpype/hosts/houdini/plugins/load/load_alembic.py +++ /dev/null @@ -1,110 +0,0 @@ -import os -from openpype.pipeline import ( - load, - get_representation_path, -) -from openpype.hosts.houdini.api import pipeline - - -class AbcLoader(load.LoaderPlugin): - """Load Alembic""" - - families = ["model", "animation", "pointcache", "gpuCache"] - label = "Load Alembic" - representations = ["abc"] - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - import hou - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["asset"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - container = obj.createNode("geo", node_name=node_name) - - # Remove the file node, it only loads static meshes - # Houdini 17 has removed the file node from the geo node - file_node = container.node("file1") - if file_node: - file_node.destroy() - - # Create an alembic node (supports animation) - alembic = container.createNode("alembic", node_name=node_name) - alembic.setParms({"fileName": file_path}) - - # Add unpack node - unpack_name = "unpack_{}".format(name) - unpack = container.createNode("unpack", node_name=unpack_name) - unpack.setInput(0, alembic) - unpack.setParms({"transfer_attributes": "path"}) - - # Add normal to points - # Order of menu ['point', 'vertex', 'prim', 'detail'] - normal_name = "normal_{}".format(name) - normal_node = container.createNode("normal", node_name=normal_name) - normal_node.setParms({"type": 0}) - - normal_node.setInput(0, unpack) - - null = container.createNode("null", node_name="OUT".format(name)) - null.setInput(0, normal_node) - - # Ensure display flag is on the Alembic input node and not on the OUT - # node to optimize "debug" displaying in the viewport. 
- alembic.setDisplayFlag(True) - - # Set new position for unpack node else it gets cluttered - nodes = [container, alembic, unpack, normal_node, null] - for nr, node in enumerate(nodes): - node.setPosition([0, (0 - nr)]) - - self[:] = nodes - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - def update(self, container, representation): - - node = container["node"] - try: - alembic_node = next( - n for n in node.children() if n.type().name() == "alembic" - ) - except StopIteration: - self.log.error("Could not find node of type `alembic`") - return - - # Update the file path - file_path = get_representation_path(representation) - file_path = file_path.replace("\\", "/") - - alembic_node.setParms({"fileName": file_path}) - - # Update attribute - node.setParms({"representation": str(representation["_id"])}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, representation): - self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py deleted file mode 100644 index e16146a267..0000000000 --- a/openpype/hosts/houdini/plugins/load/load_camera.py +++ /dev/null @@ -1,211 +0,0 @@ -from openpype.pipeline import ( - load, - get_representation_path, -) -from openpype.hosts.houdini.api import pipeline - -from openpype.hosts.houdini.api.lib import ( - set_camera_resolution, - get_camera_from_container -) - -import hou - - -ARCHIVE_EXPRESSION = ('__import__("_alembic_hom_extensions")' - '.alembicGetCameraDict') - - -def transfer_non_default_values(src, dest, ignore=None): - """Copy parm from src to dest. - - Because the Alembic Archive rebuilds the entire node - hierarchy on triggering "Build Hierarchy" we want to - preserve any local tweaks made by the user on the camera - for ease of use. That could be a background image, a - resolution change or even Redshift camera parameters. - - We try to do so by finding all Parms that exist on both - source and destination node, include only those that both - are not at their default value, they must be visible, - we exclude those that have the special "alembic archive" - channel expression and ignore certain Parm types. - - """ - - ignore_types = { - hou.parmTemplateType.Toggle, - hou.parmTemplateType.Menu, - hou.parmTemplateType.Button, - hou.parmTemplateType.FolderSet, - hou.parmTemplateType.Separator, - hou.parmTemplateType.Label, - } - - src.updateParmStates() - - for parm in src.allParms(): - - if ignore and parm.name() in ignore: - continue - - # If destination parm does not exist, ignore.. - dest_parm = dest.parm(parm.name()) - if not dest_parm: - continue - - # Ignore values that are currently at default - if parm.isAtDefault() and dest_parm.isAtDefault(): - continue - - if not parm.isVisible(): - # Ignore hidden parameters, assume they - # are implementation details - continue - - expression = None - try: - expression = parm.expression() - except hou.OperationFailed: - # No expression present - pass - - if expression is not None and ARCHIVE_EXPRESSION in expression: - # Assume it's part of the automated connections that the - # Alembic Archive makes on loading of the camera and thus we do - # not want to transfer the expression - continue - - # Ignore folders, separators, etc. 
- if parm.parmTemplate().type() in ignore_types: - continue - - print("Preserving attribute: %s" % parm.name()) - dest_parm.setFromParm(parm) - - -class CameraLoader(load.LoaderPlugin): - """Load camera from an Alembic file""" - - families = ["camera"] - label = "Load Camera (abc)" - representations = ["abc"] - order = -10 - - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context).replace("\\", "/") - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["asset"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a archive node - node = self.create_and_connect(obj, "alembicarchive", node_name) - - # TODO: add FPS of project / asset - node.setParms({"fileName": file_path, "channelRef": True}) - - # Apply some magic - node.parm("buildHierarchy").pressButton() - node.moveToGoodPosition() - - # Create an alembic xform node - nodes = [node] - - camera = get_camera_from_container(node) - self._match_maya_render_mask(camera) - set_camera_resolution(camera, asset_doc=context["asset"]) - self[:] = nodes - - return pipeline.containerise(node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="") - - def update(self, container, representation): - - node = container["node"] - - # Update the file path - file_path = get_representation_path(representation) - file_path = file_path.replace("\\", "/") - - # Update attributes - node.setParms({"fileName": file_path, - "representation": str(representation["_id"])}) - - # Store the cam temporarily next to the Alembic Archive - # so that we can preserve parm values the user set on it - # after build hierarchy was triggered. 
- old_camera = get_camera_from_container(node) - temp_camera = old_camera.copyTo(node.parent()) - - # Rebuild - node.parm("buildHierarchy").pressButton() - - # Apply values to the new camera - new_camera = get_camera_from_container(node) - transfer_non_default_values(temp_camera, - new_camera, - # The hidden uniform scale attribute - # gets a default connection to - # "icon_scale" just skip that completely - ignore={"scale"}) - - self._match_maya_render_mask(new_camera) - set_camera_resolution(new_camera) - - temp_camera.destroy() - - def remove(self, container): - - node = container["node"] - node.destroy() - - def create_and_connect(self, node, node_type, name=None): - """Create a node within a node which and connect it to the input - - Args: - node(hou.Node): parent of the new node - node_type(str) name of the type of node, eg: 'alembic' - name(str, Optional): name of the node - - Returns: - hou.Node - - """ - if name: - new_node = node.createNode(node_type, node_name=name) - else: - new_node = node.createNode(node_type) - - new_node.moveToGoodPosition() - return new_node - - def _match_maya_render_mask(self, camera): - """Workaround to match Maya render mask in Houdini""" - - # print("Setting match maya render mask ") - parm = camera.parm("aperture") - expression = parm.expression() - expression = expression.replace("return ", "aperture = ") - expression += """ -# Match maya render mask (logic from Houdini's own FBX importer) -node = hou.pwd() -resx = node.evalParm('resx') -resy = node.evalParm('resy') -aspect = node.evalParm('aspect') -aperture *= min(1, (resx / resy * aspect) / 1.5) -return aperture -""" - parm.setExpression(expression, language=hou.exprLanguage.Python) diff --git a/openpype/hosts/houdini/plugins/load/load_fbx.py b/openpype/hosts/houdini/plugins/load/load_fbx.py deleted file mode 100644 index 894ac62b3e..0000000000 --- a/openpype/hosts/houdini/plugins/load/load_fbx.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -"""Fbx Loader for houdini. """ -from openpype.pipeline import ( - load, - get_representation_path, -) -from openpype.hosts.houdini.api import pipeline - - -class FbxLoader(load.LoaderPlugin): - """Load fbx files. 
""" - - label = "Load FBX" - icon = "code-fork" - color = "orange" - - order = -10 - - families = ["*"] - representations = ["*"] - extensions = {"fbx"} - - def load(self, context, name=None, namespace=None, data=None): - - # get file path from context - file_path = self.filepath_from_context(context) - file_path = file_path.replace("\\", "/") - - # get necessary data - namespace, node_name = self.get_node_name(context, name, namespace) - - # create load tree - nodes = self.create_load_node_tree(file_path, node_name, name) - - self[:] = nodes - - # Call containerise function which does some automations for you - # like moving created nodes to the AVALON_CONTAINERS subnetwork - containerised_nodes = pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - return containerised_nodes - - def update(self, container, representation): - - node = container["node"] - try: - file_node = next( - n for n in node.children() if n.type().name() == "file" - ) - except StopIteration: - self.log.error("Could not find node of type `file`") - return - - # Update the file path from representation - file_path = get_representation_path(representation) - file_path = file_path.replace("\\", "/") - - file_node.setParms({"file": file_path}) - - # Update attribute - node.setParms({"representation": str(representation["_id"])}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - def switch(self, container, representation): - self.update(container, representation) - - def get_node_name(self, context, name=None, namespace=None): - """Define node name.""" - - if not namespace: - namespace = context["asset"]["name"] - - if namespace: - node_name = "{}_{}".format(namespace, name) - else: - node_name = name - - return namespace, node_name - - def create_load_node_tree(self, file_path, node_name, subset_name): - """Create Load network. - - you can start building your tree at any obj level. - it'll be much easier to build it in the root obj level. - - Afterwards, your tree will be automatically moved to - '/obj/AVALON_CONTAINERS' subnetwork. - """ - import hou - - # Get the root obj level - obj = hou.node("/obj") - - # Create a new obj geo node - parent_node = obj.createNode("geo", node_name=node_name) - - # In older houdini, - # when reating a new obj geo node, a default file node will be - # automatically created. - # so, we will delete it if exists. - file_node = parent_node.node("file1") - if file_node: - file_node.destroy() - - # Create a new file node - file_node = parent_node.createNode("file", node_name=node_name) - file_node.setParms({"file": file_path}) - - # Create attribute delete - attribdelete_name = "attribdelete_{}".format(subset_name) - attribdelete = parent_node.createNode("attribdelete", - node_name=attribdelete_name) - attribdelete.setParms({"ptdel": "fbx_*"}) - attribdelete.setInput(0, file_node) - - # Create a Null node - null_name = "OUT_{}".format(subset_name) - null = parent_node.createNode("null", node_name=null_name) - null.setInput(0, attribdelete) - - # Ensure display flag is on the file_node input node and not on the OUT - # node to optimize "debug" displaying in the viewport. 
- file_node.setDisplayFlag(True) - - # Set new position for children nodes - parent_node.layoutChildren() - - # Return all the nodes - return [parent_node, file_node, attribdelete, null] diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py deleted file mode 100644 index cff2b74e52..0000000000 --- a/openpype/hosts/houdini/plugins/load/load_image.py +++ /dev/null @@ -1,132 +0,0 @@ -import os - -from openpype.pipeline import ( - load, - get_representation_path, - AVALON_CONTAINER_ID, -) -from openpype.hosts.houdini.api import lib, pipeline - -import hou - - -def get_image_avalon_container(): - """The COP2 files must be in a COP2 network. - - So we maintain a single entry point within AVALON_CONTAINERS, - just for ease of use. - - """ - - path = pipeline.AVALON_CONTAINERS - avalon_container = hou.node(path) - if not avalon_container: - # Let's create avalon container secretly - # but make sure the pipeline still is built the - # way we anticipate it was built, asserting it. - assert path == "/obj/AVALON_CONTAINERS" - - parent = hou.node("/obj") - avalon_container = parent.createNode( - "subnet", node_name="AVALON_CONTAINERS" - ) - - image_container = hou.node(path + "/IMAGES") - if not image_container: - image_container = avalon_container.createNode( - "cop2net", node_name="IMAGES" - ) - image_container.moveToGoodPosition() - - return image_container - - -class ImageLoader(load.LoaderPlugin): - """Load images into COP2""" - - families = ["imagesequence"] - label = "Load Image (COP2)" - representations = ["*"] - order = -10 - - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - # Format file name, Houdini only wants forward slashes - file_path = self.filepath_from_context(context) - file_path = os.path.normpath(file_path) - file_path = file_path.replace("\\", "/") - file_path = self._get_file_sequence(file_path) - - # Get the root node - parent = get_image_avalon_container() - - # Define node name - namespace = namespace if namespace else context["asset"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - node = parent.createNode("file", node_name=node_name) - node.moveToGoodPosition() - - node.setParms({"filename1": file_path}) - - # Imprint it manually - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": node_name, - "namespace": namespace, - "loader": str(self.__class__.__name__), - "representation": str(context["representation"]["_id"]), - } - - # todo: add folder="Avalon" - lib.imprint(node, data) - - return node - - def update(self, container, representation): - - node = container["node"] - - # Update the file path - file_path = get_representation_path(representation) - file_path = file_path.replace("\\", "/") - file_path = self._get_file_sequence(file_path) - - # Update attributes - node.setParms( - { - "filename1": file_path, - "representation": str(representation["_id"]), - } - ) - - def remove(self, container): - - node = container["node"] - - # Let's clean up the IMAGES COP2 network - # if it ends up being empty and we deleted - # the last file node. Store the parent - # before we delete the node. 
- parent = node.parent() - - node.destroy() - - if not parent.children(): - parent.destroy() - - def _get_file_sequence(self, file_path): - root = os.path.dirname(file_path) - files = sorted(os.listdir(root)) - - first_fname = files[0] - prefix, padding, suffix = first_fname.rsplit(".", 2) - fname = ".".join([prefix, "$F{}".format(len(padding)), suffix]) - return os.path.join(root, fname).replace("\\", "/") - - def switch(self, container, representation): - self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_redshift_proxy.py b/openpype/hosts/houdini/plugins/load/load_redshift_proxy.py deleted file mode 100644 index efd7c6d0ca..0000000000 --- a/openpype/hosts/houdini/plugins/load/load_redshift_proxy.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import re -from openpype.pipeline import ( - load, - get_representation_path, -) -from openpype.hosts.houdini.api import pipeline -from openpype.pipeline.load import LoadError - -import hou - - -class RedshiftProxyLoader(load.LoaderPlugin): - """Load Redshift Proxy""" - - families = ["redshiftproxy"] - label = "Load Redshift Proxy" - representations = ["rs"] - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - - # Get the root node - obj = hou.node("/obj") - - # Define node name - namespace = namespace if namespace else context["asset"]["name"] - node_name = "{}_{}".format(namespace, name) if namespace else name - - # Create a new geo node - container = obj.createNode("geo", node_name=node_name) - - # Check whether the Redshift parameters exist - if not, then likely - # redshift is not set up or initialized correctly - if not container.parm("RS_objprop_proxy_enable"): - container.destroy() - raise LoadError("Unable to initialize geo node with Redshift " - "attributes. Make sure you have the Redshift " - "plug-in set up correctly for Houdini.") - - # Enable by default - container.setParms({ - "RS_objprop_proxy_enable": True, - "RS_objprop_proxy_file": self.format_path( - self.filepath_from_context(context), - context["representation"]) - }) - - # Remove the file node, it only loads static meshes - # Houdini 17 has removed the file node from the geo node - file_node = container.node("file1") - if file_node: - file_node.destroy() - - # Add this stub node inside so it previews ok - proxy_sop = container.createNode("redshift_proxySOP", - node_name=node_name) - proxy_sop.setDisplayFlag(True) - - nodes = [container, proxy_sop] - - self[:] = nodes - - return pipeline.containerise( - node_name, - namespace, - nodes, - context, - self.__class__.__name__, - suffix="", - ) - - def update(self, container, representation): - - # Update the file path - file_path = get_representation_path(representation) - - node = container["node"] - node.setParms({ - "RS_objprop_proxy_file": self.format_path( - file_path, representation) - }) - - # Update attribute - node.setParms({"representation": str(representation["_id"])}) - - def remove(self, container): - - node = container["node"] - node.destroy() - - @staticmethod - def format_path(path, representation): - """Format file path correctly for single redshift proxy - or redshift proxy sequence.""" - if not os.path.exists(path): - raise RuntimeError("Path does not exist: %s" % path) - - is_sequence = bool(representation["context"].get("frame")) - # The path is either a single file or sequence in a folder. 
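
A worked example of the sequence-path rewrite performed just below; the path is hypothetical, and Houdini later expands `$F4` to the zero-padded current frame.

```python
import re

path = "/proj/cache/boxes.0001.rs"  # hypothetical proxy sequence member
print(re.sub(r"(.*)\.(\d+)\.(rs.*)", "\\1.$F4.\\3", path))
# -> /proj/cache/boxes.$F4.rs
```
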
- if is_sequence: - filename = re.sub(r"(.*)\.(\d+)\.(rs.*)", "\\1.$F4.\\3", path) - filename = os.path.join(path, filename) - else: - filename = path - - filename = os.path.normpath(filename) - filename = filename.replace("\\", "/") - - return filename diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py deleted file mode 100644 index e92a42f2e8..0000000000 --- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py +++ /dev/null @@ -1,120 +0,0 @@ -import pyblish.api - -from openpype.pipeline import registered_host - - -def collect_input_containers(nodes): - """Collect containers that contain any of the node in `nodes`. - - This will return any loaded Avalon container that contains at least one of - the nodes. As such, the Avalon container is an input for it. Or in short, - there are member nodes of that container. - - Returns: - list: Input avalon containers - - """ - - # Lookup by node ids - lookup = frozenset(nodes) - - containers = [] - host = registered_host() - for container in host.ls(): - - node = container["node"] - - # Usually the loaded containers don't have any complex references - # and the contained children should be all we need. So we disregard - # checking for .references() on the nodes. - members = set(node.allSubChildren()) - members.add(node) # include the node itself - - # If there's an intersection - if not lookup.isdisjoint(members): - containers.append(container) - - return containers - - -def iter_upstream(node): - """Yields all upstream inputs for the current node. - - This includes all `node.inputAncestors()` but also traverses through all - `node.references()` for the node itself and for any of the upstream nodes. - This method has no max-depth and will collect all upstream inputs. - - Yields: - hou.Node: The upstream nodes, including references. - - """ - - upstream = node.inputAncestors( - include_ref_inputs=True, follow_subnets=True - ) - - # Initialize process queue with the node's ancestors itself - queue = list(upstream) - collected = set(upstream) - - # Traverse upstream references for all nodes and yield them as we - # process the queue. - while queue: - upstream_node = queue.pop() - yield upstream_node - - # Find its references that are not collected yet. - references = upstream_node.references() - references = [n for n in references if n not in collected] - - queue.extend(references) - collected.update(references) - - # Include the references' ancestors that have not been collected yet. - for reference in references: - ancestors = reference.inputAncestors( - include_ref_inputs=True, follow_subnets=True - ) - ancestors = [n for n in ancestors if n not in collected] - - queue.extend(ancestors) - collected.update(ancestors) - - -class CollectUpstreamInputs(pyblish.api.InstancePlugin): - """Collect source input containers used for this publish. - - This will include `inputs` data of which loaded publishes were used in the - generation of this publish. This leaves an upstream trace to what was used - as input. - - """ - - label = "Collect Inputs" - order = pyblish.api.CollectorOrder + 0.4 - hosts = ["houdini"] - - def process(self, instance): - # We can't get the "inputAncestors" directly from the ROP - # node, so we find the related output node (set in SOP/COP path) - # and include that together with its ancestors - output = instance.data.get("output_node") - - if output is None: - # If no valid output node is set then ignore it as validation - # will be checking those cases. 
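
A usage sketch of `iter_upstream()` and `collect_input_containers()` above, mirroring what `process()` does; the node path is hypothetical, and a Houdini session with loaded containers is assumed.

```python
import hou

output = hou.node("/obj/geo1/OUT")  # hypothetical output node of an instance
nodes = list(iter_upstream(output))
nodes.append(output)

containers = collect_input_containers(nodes)
print([c["representation"] for c in containers])
```
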
- self.log.debug( - "No output node found, skipping collecting of inputs.." - ) - return - - # Collect all upstream parents - nodes = list(iter_upstream(output)) - nodes.append(output) - - # Collect containers for the given set of nodes - containers = collect_input_containers(nodes) - - inputs = [c["representation"] for c in containers] - instance.data["inputRepresentations"] = inputs - self.log.debug("Collected inputs: %s" % inputs) diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py deleted file mode 100644 index 52966fb3c2..0000000000 --- a/openpype/hosts/houdini/plugins/publish/collect_instances.py +++ /dev/null @@ -1,93 +0,0 @@ -import hou - -import pyblish.api - -from openpype.hosts.houdini.api import lib - - -class CollectInstances(pyblish.api.ContextPlugin): - """Gather instances by all node in out graph and pre-defined attributes - - This collector takes into account assets that are associated with - an specific node and marked with a unique identifier; - - Identifier: - id (str): "pyblish.avalon.instance - - Specific node: - The specific node is important because it dictates in which way the - subset is being exported. - - alembic: will export Alembic file which supports cascading attributes - like 'cbId' and 'path' - geometry: Can export a wide range of file types, default out - - """ - - order = pyblish.api.CollectorOrder - 0.01 - label = "Collect Instances" - hosts = ["houdini"] - - def process(self, context): - - nodes = hou.node("/out").children() - nodes += hou.node("/obj").children() - - # Include instances in USD stage only when it exists so it - # remains backwards compatible with version before houdini 18 - stage = hou.node("/stage") - if stage: - nodes += stage.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop) - - for node in nodes: - - if not node.parm("id"): - continue - - if node.evalParm("id") != "pyblish.avalon.instance": - continue - - # instance was created by new creator code, skip it as - # it is already collected. - if node.parm("creator_identifier"): - continue - - has_family = node.evalParm("family") - assert has_family, "'%s' is missing 'family'" % node.name() - - self.log.info( - "Processing legacy instance node {}".format(node.path()) - ) - - data = lib.read(node) - # Check bypass state and reverse - if hasattr(node, "isBypassed"): - data.update({"active": not node.isBypassed()}) - - # temporarily translation of `active` to `publish` till issue has - # been resolved. - # https://github.com/pyblish/pyblish-base/issues/307 - if "active" in data: - data["publish"] = data["active"] - - # Create nice name if the instance has a frame range. 
- label = data.get("name", node.name()) - label += " (%s)" % data["asset"] # include asset in name - - instance = context.create_instance(label) - - # Include `families` using `family` data - instance.data["families"] = [instance.data["family"]] - - instance[:] = [node] - instance.data["instance_node"] = node.path() - instance.data.update(data) - - def sort_by_family(instance): - """Sort by family""" - return instance.data.get("families", instance.data.get("family")) - - # Sort/grouped by family (preserving local index) - context[:] = sorted(context, key=sort_by_family) - - return context diff --git a/openpype/hosts/houdini/plugins/publish/extract_alembic.py b/openpype/hosts/houdini/plugins/publish/extract_alembic.py deleted file mode 100644 index df2fdda241..0000000000 --- a/openpype/hosts/houdini/plugins/publish/extract_alembic.py +++ /dev/null @@ -1,48 +0,0 @@ -import os - -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop - -import hou - - -class ExtractAlembic(publish.Extractor): - - order = pyblish.api.ExtractorOrder - label = "Extract Alembic" - hosts = ["houdini"] - families = ["abc", "camera"] - targets = ["local", "remote"] - - def process(self, instance): - if instance.data.get("farm"): - self.log.debug("Should be processed on farm, skipping.") - return - - ropnode = hou.node(instance.data["instance_node"]) - - # Get the filename from the filename parameter - output = ropnode.evalParm("filename") - staging_dir = os.path.dirname(output) - instance.data["stagingDir"] = staging_dir - - file_name = os.path.basename(output) - - # We run the render - self.log.info("Writing alembic '%s' to '%s'" % (file_name, - staging_dir)) - - render_rop(ropnode) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': file_name, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_fbx.py b/openpype/hosts/houdini/plugins/publish/extract_fbx.py deleted file mode 100644 index 7dc193c6a9..0000000000 --- a/openpype/hosts/houdini/plugins/publish/extract_fbx.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -"""Fbx Extractor for houdini. """ - -import os -import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop - -import hou - - -class ExtractFBX(publish.Extractor): - - label = "Extract FBX" - families = ["fbx"] - hosts = ["houdini"] - - order = pyblish.api.ExtractorOrder + 0.1 - - def process(self, instance): - - # get rop node - ropnode = hou.node(instance.data.get("instance_node")) - output_file = ropnode.evalParm("sopoutput") - - # get staging_dir and file_name - staging_dir = os.path.normpath(os.path.dirname(output_file)) - file_name = os.path.basename(output_file) - - # render rop - self.log.debug("Writing FBX '%s' to '%s'", file_name, staging_dir) - render_rop(ropnode) - - # prepare representation - representation = { - "name": "fbx", - "ext": "fbx", - "files": file_name, - "stagingDir": staging_dir - } - - # A single frame may also be rendered without start/end frame. 
- if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa - representation["frameStart"] = instance.data["frameStartHandle"] - representation["frameEnd"] = instance.data["frameEndHandle"] - - # set value type for 'representations' key to list - if "representations" not in instance.data: - instance.data["representations"] = [] - - # update instance data - instance.data["stagingDir"] = staging_dir - instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py deleted file mode 100644 index 218f3b9256..0000000000 --- a/openpype/hosts/houdini/plugins/publish/extract_redshift_proxy.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.houdini.api.lib import render_rop - -import hou - - -class ExtractRedshiftProxy(publish.Extractor): - - order = pyblish.api.ExtractorOrder + 0.1 - label = "Extract Redshift Proxy" - families = ["redshiftproxy"] - hosts = ["houdini"] - targets = ["local", "remote"] - - def process(self, instance): - if instance.data.get("farm"): - self.log.debug("Should be processed on farm, skipping.") - return - ropnode = hou.node(instance.data.get("instance_node")) - - # Get the filename from the filename parameter - # `.evalParm(parameter)` will make sure all tokens are resolved - output = ropnode.evalParm("RS_archive_file") - staging_dir = os.path.normpath(os.path.dirname(output)) - instance.data["stagingDir"] = staging_dir - file_name = os.path.basename(output) - - self.log.info("Writing Redshift Proxy '%s' to '%s'" % (file_name, - staging_dir)) - - render_rop(ropnode) - - output = instance.data["frames"] - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "rs", - "ext": "rs", - "files": output, - "stagingDir": staging_dir, - } - - # A single frame may also be rendered without start/end frame. - if "frameStartHandle" in instance.data and "frameEndHandle" in instance.data: # noqa - representation["frameStart"] = instance.data["frameStartHandle"] - representation["frameEnd"] = instance.data["frameEndHandle"] - - instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py deleted file mode 100644 index 4788cca3cf..0000000000 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ /dev/null @@ -1,50 +0,0 @@ -import pyblish.api - -from openpype.lib import version_up -from openpype.pipeline import registered_host -from openpype.pipeline.publish import get_errored_plugins_from_context -from openpype.hosts.houdini.api import HoudiniHost -from openpype.pipeline.publish import KnownPublishError - - -class IncrementCurrentFile(pyblish.api.ContextPlugin): - """Increment the current file. - - Saves the current scene with an increased version number. 
- - """ - - label = "Increment current file" - order = pyblish.api.IntegratorOrder + 9.0 - hosts = ["houdini"] - families = ["workfile", - "redshift_rop", - "arnold_rop", - "mantra_rop", - "karma_rop", - "usdrender", - "publish.hou"] - optional = True - - def process(self, context): - - errored_plugins = get_errored_plugins_from_context(context) - if any( - plugin.__name__ == "HoudiniSubmitPublishDeadline" - for plugin in errored_plugins - ): - raise KnownPublishError( - "Skipping incrementing current file because " - "submission to deadline failed." - ) - - # Filename must not have changed since collecting - host = registered_host() # type: HoudiniHost - current_file = host.current_file() - if context.data["currentFile"] != current_file: - raise KnownPublishError( - "Collected filename mismatches from current scene name." - ) - - new_filepath = version_up(current_file) - host.save_workfile(new_filepath) diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py deleted file mode 100644 index 3ae3fa3220..0000000000 --- a/openpype/hosts/houdini/plugins/publish/save_scene.py +++ /dev/null @@ -1,26 +0,0 @@ -import pyblish.api - -from openpype.pipeline import registered_host - - -class SaveCurrentScene(pyblish.api.ContextPlugin): - """Save current scene""" - - label = "Save current file" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["houdini"] - - def process(self, context): - - # Filename must not have changed since collecting - host = registered_host() - current_file = host.get_current_workfile() - assert context.data['currentFile'] == current_file, ( - "Collected filename from current scene name." - ) - - if host.workfile_has_unsaved_changes(): - self.log.info("Saving current file: {}".format(current_file)) - host.save_workfile(current_file) - else: - self.log.debug("No unsaved changes, skipping file save..") diff --git a/openpype/hosts/houdini/plugins/publish/validate_frame_range.py b/openpype/hosts/houdini/plugins/publish/validate_frame_range.py deleted file mode 100644 index 1b12fa7096..0000000000 --- a/openpype/hosts/houdini/plugins/publish/validate_frame_range.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from openpype.pipeline import PublishValidationError -from openpype.pipeline.publish import RepairAction -from openpype.hosts.houdini.api.action import SelectInvalidAction - -import hou - - -class DisableUseAssetHandlesAction(RepairAction): - label = "Disable use asset handles" - icon = "mdi.toggle-switch-off" - - -class ValidateFrameRange(pyblish.api.InstancePlugin): - """Validate Frame Range. 
- - Because asset handles are applied to the instance, the ROP frame - range must span at least (start handle + end handle) frames; - otherwise the resulting frameStart ends up greater than frameEnd. - """ - - order = pyblish.api.ValidatorOrder - 0.1 - hosts = ["houdini"] - label = "Validate Frame Range" - actions = [DisableUseAssetHandlesAction, SelectInvalidAction] - - def process(self, instance): - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - title="Invalid Frame Range", - message=( - "Invalid frame range because the instance " - "start frame ({0[frameStart]}) is higher than " - "the end frame ({0[frameEnd]})" - .format(instance.data) - ), - description=( - "## Invalid Frame Range\n" - "The frame range for the instance is invalid because " - "the start frame is higher than the end frame.\n\nThis " - "is likely due to asset handles being applied to your " - "instance or the ROP node's start frame " - "is set higher than the end frame.\n\nIf your ROP frame " - "range is correct and you do not want to apply asset " - "handles make sure to disable Use asset handles on the " - "publish instance." - ) - ) - - @classmethod - def get_invalid(cls, instance): - - if not instance.data.get("instance_node"): - return - - rop_node = hou.node(instance.data["instance_node"]) - frame_start = instance.data.get("frameStart") - frame_end = instance.data.get("frameEnd") - - if frame_start is None or frame_end is None: - cls.log.debug( - "Skipping frame range validation for " - "instance without frame data: {}".format(rop_node.path()) - ) - return - - if frame_start > frame_end: - cls.log.info( - "The ROP node render range is set to " - "{0[frameStartHandle]} - {0[frameEndHandle]}. " - "The asset handles applied to the instance are start handle " - "{0[handleStart]} and end handle {0[handleEnd]}" - .format(instance.data) - ) - return [rop_node] - - @classmethod - def repair(cls, instance): - - if not cls.get_invalid(instance): - # Already fixed - return - - # Disable use asset handles - context = instance.context - create_context = context.data["create_context"] - instance_id = instance.data.get("instance_id") - if not instance_id: - cls.log.debug("Instance '{}' has no instance id" - .format(instance)) - return - - created_instance = create_context.get_instance_by_id(instance_id) - if not created_instance: - cls.log.debug("Unable to find instance '{}' by id" - .format(instance)) - return - - created_instance.publish_attributes["CollectAssetHandles"]["use_handles"] = False # noqa - - create_context.save_changes() - cls.log.debug("Use asset handles was turned off for '{}'" - .format(instance)) diff --git a/openpype/hosts/houdini/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/houdini/plugins/publish/validate_unreal_staticmesh_naming.py deleted file mode 100644 index ae3c7e5602..0000000000 --- a/openpype/hosts/houdini/plugins/publish/validate_unreal_staticmesh_naming.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for correct naming of Static Meshes.""" -import pyblish.api -from openpype.pipeline import ( - PublishValidationError, - OptionalPyblishPluginMixin -) -from openpype.pipeline.publish import ValidateContentsOrder - -from openpype.hosts.houdini.api.action import SelectInvalidAction -from openpype.hosts.houdini.api.lib import get_output_children - -import hou - - -class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin):  - """Validate name of Unreal Static Mesh.
- - This validator checks that output node names do not start with one - of the collision prefixes: - - UBX - - UCP - - USP - - UCX - - This validator also checks that the subset name is correct: - - {static mesh prefix}_{Asset-Name}{Variant}. - - """ - - families = ["staticMesh"] - hosts = ["houdini"] - label = "Unreal Static Mesh Name (FBX)" - order = ValidateContentsOrder + 0.1 - actions = [SelectInvalidAction] - - optional = True - collision_prefixes = [] - static_mesh_prefix = "" - - @classmethod - def apply_settings(cls, project_settings, system_settings): - - settings = ( - project_settings["houdini"]["create"]["CreateStaticMesh"] - ) - cls.collision_prefixes = settings["collision_prefixes"] - cls.static_mesh_prefix = settings["static_mesh_prefix"] - - def process(self, instance): - - if not self.is_active(instance.data): - return - - invalid = self.get_invalid(instance) - if invalid: - nodes = [n.path() for n in invalid] - raise PublishValidationError( - "See log for details. " - "Invalid nodes: {0}".format(nodes) - ) - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - rop_node = hou.node(instance.data["instance_node"]) - output_node = instance.data.get("output_node") - if output_node is None: - cls.log.debug( - "No Output Node, skipping check.." - ) - return - - if rop_node.evalParm("buildfrompath"): - # This validator doesn't support naming check if - # 'build hierarchy from path' is used - cls.log.info( - "Using 'Build Hierarchy from Path Attribute', skipping check.." - ) - return - - # Check node names - all_outputs = get_output_children(output_node, include_sops=False) - for output in all_outputs: - for prefix in cls.collision_prefixes: - if output.name().startswith(prefix): - invalid.append(output) - cls.log.error( - "Invalid node name: Node '%s' " - "includes a collision prefix '%s'", - output.path(), prefix - ) - break - - return invalid diff --git a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py deleted file mode 100644 index 0b92fc2706..0000000000 --- a/openpype/hosts/houdini/startup/python2.7libs/pythonrc.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from openpype.pipeline import install_host -from openpype.hosts.houdini.api import HoudiniHost -from openpype import AYON_SERVER_ENABLED - - -def main(): - print("Installing {} ...".format( - "AYON" if AYON_SERVER_ENABLED else "OpenPype")) - install_host(HoudiniHost()) - - -main() diff --git a/openpype/hosts/houdini/startup/python3.10libs/pythonrc.py b/openpype/hosts/houdini/startup/python3.10libs/pythonrc.py deleted file mode 100644 index 0b92fc2706..0000000000 --- a/openpype/hosts/houdini/startup/python3.10libs/pythonrc.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from openpype.pipeline import install_host -from openpype.hosts.houdini.api import HoudiniHost -from openpype import AYON_SERVER_ENABLED - - -def main(): - print("Installing {} ...".format( - "AYON" if AYON_SERVER_ENABLED else "OpenPype")) - install_host(HoudiniHost()) - - -main() diff --git a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py b/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py deleted file mode 100644 index 0b92fc2706..0000000000 --- a/openpype/hosts/houdini/startup/python3.7libs/pythonrc.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from openpype.pipeline import install_host -from openpype.hosts.houdini.api import HoudiniHost -from openpype
import AYON_SERVER_ENABLED - - -def main(): - print("Installing {} ...".format( - "AYON" if AYON_SERVER_ENABLED else "OpenPype")) - install_host(HoudiniHost()) - - -main() diff --git a/openpype/hosts/houdini/startup/python3.9libs/pythonrc.py b/openpype/hosts/houdini/startup/python3.9libs/pythonrc.py deleted file mode 100644 index 0b92fc2706..0000000000 --- a/openpype/hosts/houdini/startup/python3.9libs/pythonrc.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype startup script.""" -from openpype.pipeline import install_host -from openpype.hosts.houdini.api import HoudiniHost -from openpype import AYON_SERVER_ENABLED - - -def main(): - print("Installing {} ...".format( - "AYON" if AYON_SERVER_ENABLED else "OpenPype")) - install_host(HoudiniHost()) - - -main() diff --git a/openpype/hosts/max/addon.py b/openpype/hosts/max/addon.py deleted file mode 100644 index 9d6ab5a8b3..0000000000 --- a/openpype/hosts/max/addon.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- -import os -from openpype.modules import OpenPypeModule, IHostAddon - -MAX_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class MaxAddon(OpenPypeModule, IHostAddon): - name = "max" - host_name = "max" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - # Remove auto screen scale factor for Qt - # - let 3dsmax decide its value - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - - def get_workfile_extensions(self): - return [".max"] - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(MAX_HOST_DIR, "hooks") - ] diff --git a/openpype/hosts/max/api/lib.py b/openpype/hosts/max/api/lib.py deleted file mode 100644 index e2d8d9c55f..0000000000 --- a/openpype/hosts/max/api/lib.py +++ /dev/null @@ -1,562 +0,0 @@ -# -*- coding: utf-8 -*- -"""Library of functions useful for 3dsmax pipeline.""" -import contextlib -import logging -import json -from typing import Any, Dict, Union - -import six -from openpype.pipeline import get_current_project_name, colorspace -from openpype.settings import get_project_settings -from openpype.pipeline.context_tools import ( - get_current_project, get_current_project_asset) -from openpype.style import load_stylesheet -from pymxs import runtime as rt - - -JSON_PREFIX = "JSON::" -log = logging.getLogger("openpype.hosts.max") - - -def get_main_window(): - """Acquire Max's main window""" - from qtpy import QtWidgets - top_widgets = QtWidgets.QApplication.topLevelWidgets() - name = "QmaxApplicationWindow" - for widget in top_widgets: - if ( - widget.inherits("QMainWindow") - and widget.metaObject().className() == name - ): - return widget - raise RuntimeError('Could not find 3dsMax main window.') - - -def imprint(node_name: str, data: dict) -> bool: - node = rt.GetNodeByName(node_name) - if not node: - return False - - for k, v in data.items(): - if isinstance(v, (dict, list)): - rt.SetUserProp(node, k, f"{JSON_PREFIX}{json.dumps(v)}") - else: - rt.SetUserProp(node, k, v) - - return True - - -def lsattr( - attr: str, - value: Union[str, None] = None, - root: Union[str, None] = None) -> list: - """List nodes having attribute with specified value. - - Args: - attr (str): Attribute name to match. - value (str, Optional): Value to match. If omitted, all nodes - with the specified attribute are returned regardless of value. - root (str, Optional): Root node name. If omitted, scene root is used. - - Returns: - list of nodes.
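- - Example (illustrative): - >>> lsattr("id", "pyblish.avalon.instance") - # -> all nodes imprinted with that "id" user property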
- """ - root = rt.RootNode if root is None else rt.GetNodeByName(root) - - def output_node(node, nodes): - nodes.append(node) - for child in node.Children: - output_node(child, nodes) - - nodes = [] - output_node(root, nodes) - return [ - n for n in nodes - if rt.GetUserProp(n, attr) == value - ] if value else [ - n for n in nodes - if rt.GetUserProp(n, attr) - ] - - -def read(container) -> dict: - data = {} - props = rt.GetUserPropBuffer(container) - # this shouldn't happen but let's guard against it anyway - if not props: - return data - - for line in props.split("\r\n"): - try: - key, value = line.split("=") - except ValueError: - # if the line cannot be split we can't really parse it - continue - - value = value.strip() - if isinstance(value.strip(), six.string_types) and \ - value.startswith(JSON_PREFIX): - with contextlib.suppress(json.JSONDecodeError): - value = json.loads(value[len(JSON_PREFIX):]) - - # default value behavior - # convert maxscript boolean values - if value == "true": - value = True - elif value == "false": - value = False - - data[key.strip()] = value - - data["instance_node"] = container.Name - - return data - - -@contextlib.contextmanager -def maintained_selection(): - previous_selection = rt.GetCurrentSelection() - try: - yield - finally: - if previous_selection: - rt.Select(previous_selection) - else: - rt.Select() - - -def get_all_children(parent, node_type=None): - """Handy function to get all the children of a given node - - Args: - parent (3dsmax Node1): Node to get all children of. - node_type (None, runtime.class): give class to check for - e.g. rt.FFDBox/rt.GeometryClass etc. - - Returns: - list: list of all children of the parent node - """ - def list_children(node): - children = [] - for c in node.Children: - children.append(c) - children = children + list_children(c) - return children - child_list = list_children(parent) - - return ([x for x in child_list if rt.SuperClassOf(x) == node_type] - if node_type else child_list) - - -def get_current_renderer(): - """ - Notes: - Get current renderer for Max - - Returns: - "{Current Renderer}:{Current Renderer}" - e.g. "Redshift_Renderer:Redshift_Renderer" - """ - return rt.renderers.production - - -def get_default_render_folder(project_setting=None): - return (project_setting["max"] - ["RenderSettings"] - ["default_render_image_folder"]) - - -def set_render_frame_range(start_frame, end_frame): - """ - Note: - Frame range can be specified in different types. Possible values are: - * `1` - Single frame. - * `2` - Active time segment ( animationRange ). - * `3` - User specified Range. - * `4` - User specified Frame pickup string (for example `1,3,5-12`). - - Todo: - Current type is hard-coded, there should be a custom setting for this. - """ - rt.rendTimeType = 3 - if start_frame is not None and end_frame is not None: - rt.rendStart = int(start_frame) - rt.rendEnd = int(end_frame) - - -def get_multipass_setting(project_setting=None): - return (project_setting["max"] - ["RenderSettings"] - ["multipass"]) - - -def set_scene_resolution(width: int, height: int): - """Set the render resolution - - Args: - width(int): value of the width - height(int): value of the height - - Returns: - None - - """ - # make sure the render dialog is closed - # for the update of resolution - # Changing the Render Setup dialog settings should be done - # with the actual Render Setup dialog in a closed state. 
- if rt.renderSceneDialog.isOpen(): - rt.renderSceneDialog.close() - - rt.renderWidth = width - rt.renderHeight = height - - -def reset_scene_resolution(): - """Apply the scene resolution from the project definition. - - The scene resolution can be overridden per asset if the asset.data - contains any scene resolution information. - - Returns: - None - """ - data = ["data.resolutionWidth", "data.resolutionHeight"] - project_resolution = get_current_project(fields=data) - project_resolution_data = project_resolution["data"] - asset_resolution = get_current_project_asset(fields=data) - asset_resolution_data = asset_resolution["data"] - # Set project resolution - project_width = int(project_resolution_data.get("resolutionWidth", 1920)) - project_height = int(project_resolution_data.get("resolutionHeight", 1080)) - width = int(asset_resolution_data.get("resolutionWidth", project_width)) - height = int(asset_resolution_data.get("resolutionHeight", project_height)) - - set_scene_resolution(width, height) - - -def get_frame_range(asset_doc=None) -> Union[Dict[str, Any], None]: - """Get the current asset's frame range and handles. - - Args: - asset_doc (dict): Asset Entity Data - - Returns: - dict: with frame start, frame end, handle start, handle end. - """ - # Set frame start/end - if asset_doc is None: - asset_doc = get_current_project_asset() - - data = asset_doc["data"] - frame_start = data.get("frameStart") - frame_end = data.get("frameEnd") - - if frame_start is None or frame_end is None: - return {} - - frame_start = int(frame_start) - frame_end = int(frame_end) - handle_start = int(data.get("handleStart", 0)) - handle_end = int(data.get("handleEnd", 0)) - frame_start_handle = frame_start - handle_start - frame_end_handle = frame_end + handle_end - - return { - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStartHandle": frame_start_handle, - "frameEndHandle": frame_end_handle, - } - - -def reset_frame_range(fps: bool = True): - """Set frame range to current asset. - - The following is from the 3dsmax documentation: - - animationRange: A System Global variable which lets you get and - set an Interval value that defines the start and end frames - of the Active Time Segment. - frameRate: A System Global variable which lets you get - and set an Integer value that defines the current - scene frame rate in frames-per-second.
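- - Example (illustrative): with fps=True and a project set to 24 fps, - rt.frameRate becomes 24 and both the timeline and the render range - are set to the asset's handle-inclusive frame range.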
- """ - if fps: - data_fps = get_current_project(fields=["data.fps"]) - fps_number = float(data_fps["data"]["fps"]) - rt.frameRate = fps_number - frame_range = get_frame_range() - - set_timeline( - frame_range["frameStartHandle"], frame_range["frameEndHandle"]) - set_render_frame_range( - frame_range["frameStartHandle"], frame_range["frameEndHandle"]) - - -def reset_unit_scale(): - """Apply the unit scale setting to 3dsMax - """ - project_name = get_current_project_name() - settings = get_project_settings(project_name).get("max") - scene_scale = settings.get("unit_scale_settings", - {}).get("scene_unit_scale") - if scene_scale: - rt.units.DisplayType = rt.Name("Metric") - rt.units.MetricType = rt.Name(scene_scale) - else: - rt.units.DisplayType = rt.Name("Generic") - - -def convert_unit_scale(): - """Convert system unit scale in 3dsMax - for fbx export - - Returns: - str: unit scale - """ - unit_scale_dict = { - "millimeters": "mm", - "centimeters": "cm", - "meters": "m", - "kilometers": "km" - } - current_unit_scale = rt.Execute("units.MetricType as string") - return unit_scale_dict[current_unit_scale] - - -def set_context_setting(): - """Apply the project settings from the project definition - - Settings can be overwritten by an asset if the asset.data contains - any information regarding those settings. - - Examples of settings: - frame range - resolution - - Returns: - None - """ - reset_scene_resolution() - reset_frame_range() - reset_colorspace() - reset_unit_scale() - - -def get_max_version(): - """ - Args: - get max version date for deadline - - Returns: - #(25000, 62, 0, 25, 0, 0, 997, 2023, "") - max_info[7] = max version date - """ - max_info = rt.MaxVersion() - return max_info[7] - - -def is_headless(): - """Check if 3dsMax runs in batch mode. 
- If it returns True, it runs in 3dsbatch.exe - If it returns False, it runs in 3dsmax.exe - """ - return rt.maxops.isInNonInteractiveMode() - - -def set_timeline(frameStart, frameEnd): - """Set frame range for timeline editor in Max - """ - rt.animationRange = rt.interval(frameStart, frameEnd) - return rt.animationRange - - -def reset_colorspace(): - """OCIO Configuration. - - Supported in 3dsMax 2024+ - - """ - if int(get_max_version()) < 2024: - return - project_name = get_current_project_name() - colorspace_mgr = rt.ColorPipelineMgr - project_settings = get_project_settings(project_name) - - max_config_data = colorspace.get_imageio_config( - project_name, "max", project_settings) - if max_config_data: - ocio_config_path = max_config_data["path"] - colorspace_mgr = rt.ColorPipelineMgr - colorspace_mgr.Mode = rt.Name("OCIO_Custom") - colorspace_mgr.OCIOConfigPath = ocio_config_path - - -def check_colorspace(): - parent = get_main_window() - if parent is None: - log.info("Skipping outdated pop-up " - "because Max main window can't be found.") - return - if int(get_max_version()) >= 2024: - color_mgr = rt.ColorPipelineMgr - project_name = get_current_project_name() - project_settings = get_project_settings(project_name) - max_config_data = colorspace.get_imageio_config( - project_name, "max", project_settings) - if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"): - if not is_headless(): - from openpype.widgets import popup - dialog = popup.Popup(parent=parent) - dialog.setWindowTitle("Warning: Wrong OCIO Mode") - dialog.setMessage("This scene has wrong OCIO " - "Mode setting.") - dialog.setButtonText("Fix") - dialog.setStyleSheet(load_stylesheet()) - dialog.on_clicked.connect(reset_colorspace) - dialog.show() - - -def unique_namespace(namespace, format="%02d", - prefix="", suffix="", con_suffix="CON"): - """Return unique namespace - - Arguments: - namespace (str): Name of namespace to consider - format (str, optional): Formatting of the given iteration number - suffix (str, optional): Only consider namespaces with this suffix. - con_suffix: max only, for finding the name of the master container - - >>> unique_namespace("bar") - # bar01 - >>> unique_namespace(":hello") - # :hello01 - >>> unique_namespace("bar:", suffix="_NS") - # bar01_NS: - - """ - - def current_namespace(): - current = namespace - # When inside a namespace Max adds no trailing : - if not current.endswith(":"): - current += ":" - return current - - # Always check against the absolute namespace root - # There's no clash with :x if we're defining namespace :a:x - ROOT = ":" if namespace.startswith(":") else current_namespace() - - # Strip trailing `:` tokens since we might want to add a suffix - start = ":" if namespace.startswith(":") else "" - end = ":" if namespace.endswith(":") else "" - namespace = namespace.strip(":") - if ":" in namespace: - # Split off any nesting that we don't uniqify anyway.
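- # e.g. (illustrative) "char:body" -> parents "char", namespace "body"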
- parents, namespace = namespace.rsplit(":", 1) - start += parents + ":" - ROOT += start - - iteration = 1 - while True: - nr_namespace = namespace + format % iteration - unique = prefix + nr_namespace + suffix - container_name = f"{unique}:{namespace}{con_suffix}" - if not rt.getNodeByName(container_name): - return start + unique + end - iteration += 1 - - -def get_namespace(container_name): - """Get the namespace and name of the sub-container - - Args: - container_name (str): the name of master container - - Raises: - RuntimeError: when there is no master container found - - Returns: - namespace (str): namespace of the sub-container - name (str): name of the sub-container - """ - node = rt.getNodeByName(container_name) - if not node: - raise RuntimeError("Master container not found.") - name = rt.getUserProp(node, "name") - namespace = rt.getUserProp(node, "namespace") - return namespace, name - - -def object_transform_set(container_children): - """Store the transform data of previously loaded object(s). - - Args: - container_children(list): A list of nodes - - Returns: - transform_set (dict): A dict with all transform data of - the previously loaded object(s) - """ - transform_set = {} - for node in container_children: - name = f"{node.name}.transform" - transform_set[name] = node.pos - name = f"{node.name}.scale" - transform_set[name] = node.scale - return transform_set - - -def get_plugins() -> list: - """Get all loaded plugins in 3dsMax - - Returns: - list: names of the loaded plugin DLLs - """ - manager = rt.PluginManager - count = manager.pluginDllCount - plugin_info_list = [] - for p in range(1, count + 1): - plugin_info = manager.pluginDllName(p) - plugin_info_list.append(plugin_info) - - return plugin_info_list - - -@contextlib.contextmanager -def render_resolution(width, height): - """Set render resolution option during context - - Args: - width (int): render width - height (int): render height - """ - current_renderWidth = rt.renderWidth - current_renderHeight = rt.renderHeight - try: - rt.renderWidth = width - rt.renderHeight = height - yield - finally: - rt.renderWidth = current_renderWidth - rt.renderHeight = current_renderHeight - - -@contextlib.contextmanager -def suspended_refresh(): - """Suspended refresh for scene and modify panel redraw.
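- - Example (illustrative): - >>> with suspended_refresh(): - ...     pass  # heavy scene edits run without viewport redraws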
- """ - if is_headless(): - yield - return - rt.disableSceneRedraw() - rt.suspendEditing() - try: - yield - - finally: - rt.enableSceneRedraw() - rt.resumeEditing() diff --git a/openpype/hosts/max/api/lib_renderproducts.py b/openpype/hosts/max/api/lib_renderproducts.py deleted file mode 100644 index eaf5015ba8..0000000000 --- a/openpype/hosts/max/api/lib_renderproducts.py +++ /dev/null @@ -1,275 +0,0 @@ -# Render Element Example : For scanline render, VRay -# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC -# arnold -# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html -import os - -from pymxs import runtime as rt - -from openpype.hosts.max.api.lib import get_current_renderer -from openpype.pipeline import get_current_project_name -from openpype.settings import get_project_settings - - -class RenderProducts(object): - - def __init__(self, project_settings=None): - self._project_settings = project_settings - if not self._project_settings: - self._project_settings = get_project_settings( - get_current_project_name() - ) - - def get_beauty(self, container): - render_dir = os.path.dirname(rt.rendOutputFilename) - - output_file = os.path.join(render_dir, container) - - setting = self._project_settings - img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa - - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - - return { - "beauty": self.get_expected_beauty( - output_file, start_frame, end_frame, img_fmt - ) - } - - def get_multiple_beauty(self, outputs, cameras): - beauty_output_frames = dict() - for output, camera in zip(outputs, cameras): - filename, ext = os.path.splitext(output) - filename = filename.replace(".", "") - ext = ext.replace(".", "") - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - new_beauty = self.get_expected_beauty( - filename, start_frame, end_frame, ext - ) - beauty_output = ({ - f"{camera}_beauty": new_beauty - }) - beauty_output_frames.update(beauty_output) - return beauty_output_frames - - def get_multiple_aovs(self, outputs, cameras): - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - aovs_frames = {} - for output, camera in zip(outputs, cameras): - filename, ext = os.path.splitext(output) - filename = filename.replace(".", "") - ext = ext.replace(".", "") - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - - if renderer in [ - "ART_Renderer", - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3", - "Default_Scanline_Renderer", - "Quicksilver_Hardware_Renderer", - ]: - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - elif renderer == "Redshift_Renderer": - render_name = self.get_render_elements_name() - if render_name: - rs_aov_files = rt.Execute("renderers.current.separateAovFiles") # noqa - # this doesn't work, always returns False - # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles - if ext == "exr" and not rs_aov_files: - for name in render_name: - if name == "RsCryptomatte": - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - else: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - elif renderer == "Arnold": - 
render_name = self.get_arnold_product_name() - if render_name: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_arnold_product( # noqa - filename, name, start_frame, - end_frame, ext) - }) - elif renderer in [ - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3" - ]: - if ext != "exr": - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - aovs_frames.update({ - f"{camera}_{name}": self.get_expected_aovs( - filename, name, start_frame, - end_frame, ext) - }) - - return aovs_frames - - def get_aovs(self, container): - render_dir = os.path.dirname(rt.rendOutputFilename) - - output_file = os.path.join(render_dir, - container) - - setting = self._project_settings - img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa - - start_frame = int(rt.rendStart) - end_frame = int(rt.rendEnd) + 1 - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - render_dict = {} - - if renderer in [ - "ART_Renderer", - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3", - "Default_Scanline_Renderer", - "Quicksilver_Hardware_Renderer", - ]: - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) - }) - elif renderer == "Redshift_Renderer": - render_name = self.get_render_elements_name() - if render_name: - rs_aov_files = rt.Execute("renderers.current.separateAovFiles") - # this doesn't work, always returns False - # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles - if img_fmt == "exr" and not rs_aov_files: - for name in render_name: - if name == "RsCryptomatte": - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) - }) - else: - for name in render_name: - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) - }) - - elif renderer == "Arnold": - render_name = self.get_arnold_product_name() - if render_name: - for name in render_name: - render_dict.update({ - name: self.get_expected_arnold_product( - output_file, name, start_frame, - end_frame, img_fmt) - }) - elif renderer in [ - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3" - ]: - if img_fmt != "exr": - render_name = self.get_render_elements_name() - if render_name: - for name in render_name: - render_dict.update({ - name: self.get_expected_aovs( - output_file, name, start_frame, - end_frame, img_fmt) # noqa - }) - - return render_dict - - def get_expected_beauty(self, folder, start_frame, end_frame, fmt): - beauty_frame_range = [] - for f in range(start_frame, end_frame): - frame = "%04d" % f - beauty_output = f"{folder}.{frame}.{fmt}" - beauty_output = beauty_output.replace("\\", "/") - beauty_frame_range.append(beauty_output) - - return beauty_frame_range - - def get_arnold_product_name(self): - """Get all the Arnold AOVs name""" - aov_name = [] - - amw = rt.MaxToAOps.AOVsManagerWindow() - aov_mgr = rt.renderers.current.AOVManager - # Check if there is any aov group set in AOV manager - aov_group_num = len(aov_mgr.drivers) - if aov_group_num < 1: - return - for i in range(aov_group_num): - # get the specific AOV group - aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list) - # close the AOVs manager window - amw.close() - - return aov_name - - def get_expected_arnold_product(self, folder, name, - start_frame, end_frame, fmt): - """Get all the expected Arnold AOVs""" - 
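- # Illustrative expectation (not in the original file): with - # folder="render/shot", name="diffuse", start_frame=1001, - # end_frame=1004 (end is exclusive) and fmt="exr" this returns - # ["render/shot_diffuse.1001.exr", "render/shot_diffuse.1002.exr", - #  "render/shot_diffuse.1003.exr"].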
aov_list = [] - for f in range(start_frame, end_frame): - frame = "%04d" % f - render_element = f"{folder}_{name}.{frame}.{fmt}" - render_element = render_element.replace("\\", "/") - aov_list.append(render_element) - - return aov_list - - def get_render_elements_name(self): - """Get all the render element names for general renderers.""" - render_name = [] - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 1: - return - # get render elements from the renders - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - if renderlayer_name.enabled: - target, renderpass = str(renderlayer_name).split(":") - render_name.append(renderpass) - - return render_name - - def get_expected_aovs(self, folder, name, - start_frame, end_frame, fmt): - """Get all the expected render element output files. """ - render_elements = [] - for f in range(start_frame, end_frame): - frame = "%04d" % f - render_element = f"{folder}_{name}.{frame}.{fmt}" - render_element = render_element.replace("\\", "/") - render_elements.append(render_element) - - return render_elements - - def image_format(self): - return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa diff --git a/openpype/hosts/max/api/lib_rendersettings.py b/openpype/hosts/max/api/lib_rendersettings.py deleted file mode 100644 index be50e296eb..0000000000 --- a/openpype/hosts/max/api/lib_rendersettings.py +++ /dev/null @@ -1,227 +0,0 @@ -import os -from pymxs import runtime as rt -from openpype.lib import Logger -from openpype.settings import get_project_settings -from openpype.pipeline import get_current_project_name -from openpype.pipeline.context_tools import get_current_project_asset - -from openpype.hosts.max.api.lib import ( - set_render_frame_range, - get_current_renderer, - get_default_render_folder -) - - -class RenderSettings(object): - - log = Logger.get_logger("RenderSettings") - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - - def __init__(self, project_settings=None): - """ - Set up the naming convention for render - elements for the Deadline submission. - """ - - self._project_settings = project_settings - if not self._project_settings: - self._project_settings = get_project_settings( - get_current_project_name() - ) - - def set_render_camera(self, selection): - for sel in selection: - # to avoid Attribute Error from pymxs wrapper - if rt.classOf(sel) in rt.Camera.classes: - rt.viewport.setCamera(sel) - return - raise RuntimeError("Active Camera not found") - - def render_output(self, container): - folder = rt.maxFilePath - # hard-coded, should be customized in the setting - file = rt.maxFileName - folder = folder.replace("\\", "/") - # hard-coded, set the renderoutput path - setting = self._project_settings - render_folder = get_default_render_folder(setting) - filename, ext = os.path.splitext(file) - output_dir = os.path.join(folder, - render_folder, - filename) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - # hard-coded, should be customized in the setting - context = get_current_project_asset() - - # get project resolution - width = context["data"].get("resolutionWidth") - height = context["data"].get("resolutionHeight") - # Set Frame Range - frame_start = context["data"].get("frameStart") - frame_end = context["data"].get("frameEnd") - set_render_frame_range(frame_start, frame_end) - # get the production renderer - renderer_class = get_current_renderer() - renderer =
str(renderer_class).split(":")[0] - - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - output = os.path.join(output_dir, container) - try: - aov_separator = self._aov_chars[( - self._project_settings["max"] - ["RenderSettings"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "." - output_filename = f"{output}..{img_fmt}" - output_filename = output_filename.replace("{aov_separator}", - aov_separator) - rt.rendOutputFilename = output_filename - if renderer == "VUE_File_Renderer": - return - # TODO: Finish the arnold render setup - if renderer == "Arnold": - self.arnold_setup() - - if renderer in [ - "ART_Renderer", - "Redshift_Renderer", - "V_Ray_6_Hotfix_3", - "V_Ray_GPU_6_Hotfix_3", - "Default_Scanline_Renderer", - "Quicksilver_Hardware_Renderer", - ]: - self.render_element_layer(output, width, height, img_fmt) - - rt.rendSaveFile = True - - if rt.renderSceneDialog.isOpen(): - rt.renderSceneDialog.close() - - def arnold_setup(self): - # get Arnold RenderView run in the background - # for setting up renderable camera - arv = rt.MAXToAOps.ArnoldRenderView() - render_camera = rt.viewport.GetCamera() - if render_camera: - arv.setOption("Camera", str(render_camera)) - - # TODO: add AOVs and extension - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - setup_cmd = ( - f""" - amw = MaxtoAOps.AOVsManagerWindow() - amw.close() - aovmgr = renderers.current.AOVManager - aovmgr.drivers = #() - img_fmt = "{img_fmt}" - if img_fmt == "png" then driver = ArnoldPNGDriver() - if img_fmt == "jpg" then driver = ArnoldJPEGDriver() - if img_fmt == "exr" then driver = ArnoldEXRDriver() - if img_fmt == "tif" then driver = ArnoldTIFFDriver() - if img_fmt == "tiff" then driver = ArnoldTIFFDriver() - append aovmgr.drivers driver - aovmgr.drivers[1].aov_list = #() - """) - - rt.execute(setup_cmd) - arv.close() - - def render_element_layer(self, dir, width, height, ext): - """For Renderers with render elements""" - rt.renderWidth = width - rt.renderHeight = height - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - target, renderpass = str(renderlayer_name).split(":") - aov_name = f"{dir}_{renderpass}..{ext}" - render_elem.SetRenderElementFileName(i, aov_name) - - def get_render_output(self, container, output_dir): - output = os.path.join(output_dir, container) - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - output_filename = f"{output}..{img_fmt}" - return output_filename - - def get_render_element(self): - orig_render_elem = [] - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - - for i in range(render_elem_num): - render_element = render_elem.GetRenderElementFilename(i) - orig_render_elem.append(render_element) - - return orig_render_elem - - def get_batch_render_elements(self, container, - output_dir, camera): - render_element_list = list() - output = os.path.join(output_dir, container) - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - target, renderpass = 
str(renderlayer_name).split(":") - aov_name = f"{output}_{camera}_{renderpass}..{img_fmt}" - render_element_list.append(aov_name) - return render_element_list - - def get_batch_render_output(self, camera): - target_layer_no = rt.batchRenderMgr.FindView(camera) - target_layer = rt.batchRenderMgr.GetView(target_layer_no) - return target_layer.outputFilename - - def batch_render_elements(self, camera): - target_layer_no = rt.batchRenderMgr.FindView(camera) - target_layer = rt.batchRenderMgr.GetView(target_layer_no) - outputfilename = target_layer.outputFilename - directory = os.path.dirname(outputfilename) - render_elem = rt.maxOps.GetCurRenderElementMgr() - render_elem_num = render_elem.NumRenderElements() - if render_elem_num < 0: - return - ext = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - - for i in range(render_elem_num): - renderlayer_name = render_elem.GetRenderElement(i) - target, renderpass = str(renderlayer_name).split(":") - aov_name = f"{directory}_{camera}_{renderpass}..{ext}" - render_elem.SetRenderElementFileName(i, aov_name) - - def batch_render_layer(self, container, - output_dir, cameras): - outputs = list() - output = os.path.join(output_dir, container) - img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa - for cam in cameras: - camera = rt.getNodeByName(cam) - layer_no = rt.batchRenderMgr.FindView(cam) - renderlayer = None - if layer_no == 0: - renderlayer = rt.batchRenderMgr.CreateView(camera) - else: - renderlayer = rt.batchRenderMgr.GetView(layer_no) - # use camera name as renderlayer name - renderlayer.name = cam - renderlayer.outputFilename = f"{output}_{cam}..{img_fmt}" - outputs.append(renderlayer.outputFilename) - return outputs diff --git a/openpype/hosts/max/api/menu.py b/openpype/hosts/max/api/menu.py deleted file mode 100644 index 9bdb6bd7ce..0000000000 --- a/openpype/hosts/max/api/menu.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -"""3dsmax menu definition of AYON.""" -import os -from qtpy import QtWidgets, QtCore -from pymxs import runtime as rt - -from openpype.tools.utils import host_tools -from openpype.hosts.max.api import lib - - -class OpenPypeMenu(object): - """Object representing OpenPype/AYON menu. - - This is using "hack" to inject itself before "Help" menu of 3dsmax. - For some reason `postLoadingMenus` event doesn't fire, and main menu - if probably re-initialized by menu templates, se we wait for at least - 1 event Qt event loop before trying to insert. - - """ - - def __init__(self): - super().__init__() - self.main_widget = self.get_main_widget() - self.menu = None - - timer = QtCore.QTimer() - # set number of event loops to wait. - timer.setInterval(1) - timer.timeout.connect(self._on_timer) - timer.start() - - self._timer = timer - self._counter = 0 - - def _on_timer(self): - if self._counter < 1: - self._counter += 1 - return - - self._counter = 0 - self._timer.stop() - self.build_openpype_menu() - - @staticmethod - def get_main_widget(): - """Get 3dsmax main window.""" - return QtWidgets.QWidget.find(rt.windows.getMAXHWND()) - - def get_main_menubar(self) -> QtWidgets.QMenuBar: - """Get main Menubar by 3dsmax main window.""" - return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0] - - def get_or_create_openpype_menu( - self, name: str = "&Openpype", - before: str = "&Help") -> QtWidgets.QAction: - """Create AYON menu. - - Args: - name (str, Optional): AYON menu name. - before (str, Optional): Name of the 3dsmax main menu item to - add AYON menu before. 
- - Returns: - QtWidgets.QAction: AYON menu action. - - """ - if self.menu is not None: - return self.menu - - menu_bar = self.get_main_menubar() - menu_items = menu_bar.findChildren( - QtWidgets.QMenu, options=QtCore.Qt.FindDirectChildrenOnly) - help_action = None - for item in menu_items: - if name in item.title(): - # we already have OpenPype menu - return item - - if before in item.title(): - help_action = item.menuAction() - tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON" - op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label)) - menu_bar.insertMenu(help_action, op_menu) - - self.menu = op_menu - return op_menu - - def build_openpype_menu(self) -> QtWidgets.QAction: - """Build items in AYON menu.""" - openpype_menu = self.get_or_create_openpype_menu() - load_action = QtWidgets.QAction("Load...", openpype_menu) - load_action.triggered.connect(self.load_callback) - openpype_menu.addAction(load_action) - - publish_action = QtWidgets.QAction("Publish...", openpype_menu) - publish_action.triggered.connect(self.publish_callback) - openpype_menu.addAction(publish_action) - - manage_action = QtWidgets.QAction("Manage...", openpype_menu) - manage_action.triggered.connect(self.manage_callback) - openpype_menu.addAction(manage_action) - - library_action = QtWidgets.QAction("Library...", openpype_menu) - library_action.triggered.connect(self.library_callback) - openpype_menu.addAction(library_action) - - openpype_menu.addSeparator() - - workfiles_action = QtWidgets.QAction("Work Files...", openpype_menu) - workfiles_action.triggered.connect(self.workfiles_callback) - openpype_menu.addAction(workfiles_action) - - openpype_menu.addSeparator() - - res_action = QtWidgets.QAction("Set Resolution", openpype_menu) - res_action.triggered.connect(self.resolution_callback) - openpype_menu.addAction(res_action) - - frame_action = QtWidgets.QAction("Set Frame Range", openpype_menu) - frame_action.triggered.connect(self.frame_range_callback) - openpype_menu.addAction(frame_action) - - colorspace_action = QtWidgets.QAction("Set Colorspace", openpype_menu) - colorspace_action.triggered.connect(self.colorspace_callback) - openpype_menu.addAction(colorspace_action) - - unit_scale_action = QtWidgets.QAction("Set Unit Scale", openpype_menu) - unit_scale_action.triggered.connect(self.unit_scale_callback) - openpype_menu.addAction(unit_scale_action) - - return openpype_menu - - def load_callback(self): - """Callback to show Loader tool.""" - host_tools.show_loader(parent=self.main_widget) - - def publish_callback(self): - """Callback to show Publisher tool.""" - host_tools.show_publisher(parent=self.main_widget) - - def manage_callback(self): - """Callback to show Scene Manager/Inventory tool.""" - host_tools.show_scene_inventory(parent=self.main_widget) - - def library_callback(self): - """Callback to show Library Loader tool.""" - host_tools.show_library_loader(parent=self.main_widget) - - def workfiles_callback(self): - """Callback to show Workfiles tool.""" - host_tools.show_workfiles(parent=self.main_widget) - - def resolution_callback(self): - """Callback to reset scene resolution""" - return lib.reset_scene_resolution() - - def frame_range_callback(self): - """Callback to reset frame range""" - return lib.reset_frame_range() - - def colorspace_callback(self): - """Callback to reset colorspace""" - return lib.reset_colorspace() - - def unit_scale_callback(self): - """Callback to reset unit scale""" - return lib.reset_unit_scale() diff --git a/openpype/hosts/max/api/pipeline.py 
b/openpype/hosts/max/api/pipeline.py deleted file mode 100644 index d0ae854dc8..0000000000 --- a/openpype/hosts/max/api/pipeline.py +++ /dev/null @@ -1,242 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pipeline tools for OpenPype 3dsmax integration.""" -import os -import logging -from operator import attrgetter - -import json - -from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -import pyblish.api -from openpype.pipeline import ( - register_creator_plugin_path, - register_loader_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.hosts.max.api.menu import OpenPypeMenu -from openpype.hosts.max.api import lib -from openpype.hosts.max.api.plugin import MS_CUSTOM_ATTRIB -from openpype.hosts.max import MAX_HOST_DIR - -from pymxs import runtime as rt # noqa - -log = logging.getLogger("openpype.hosts.max") - -PLUGINS_DIR = os.path.join(MAX_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - - name = "max" - menu = None - - def __init__(self): - super(MaxHost, self).__init__() - self._op_events = {} - self._has_been_setup = False - - def install(self): - pyblish.api.register_host("max") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - # self._register_callbacks() - self.menu = OpenPypeMenu() - - self._has_been_setup = True - - def context_setting(): - return lib.set_context_setting() - - rt.callbacks.addScript(rt.Name('systemPostNew'), - context_setting) - - rt.callbacks.addScript(rt.Name('filePostOpen'), - lib.check_colorspace) - - def has_unsaved_changes(self): - # TODO: how to get it from 3dsmax?
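- # One possible implementation (untested assumption, left as a note): - # rt.getSaveRequired() reports whether the scene has been modified - # since the last save.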
- return True - - def get_workfile_extensions(self): - return [".max"] - - def save_workfile(self, dst_path=None): - rt.saveMaxFile(dst_path) - return dst_path - - def open_workfile(self, filepath): - rt.checkForSave() - rt.loadMaxFile(filepath) - return filepath - - def get_current_workfile(self): - return os.path.join(rt.maxFilePath, rt.maxFileName) - - def get_containers(self): - return ls() - - def _register_callbacks(self): - rt.callbacks.removeScripts(id=rt.name("OpenPypeCallbacks")) - - rt.callbacks.addScript( - rt.Name("postLoadingMenus"), - self._deferred_menu_creation, id=rt.Name('OpenPypeCallbacks')) - - def _deferred_menu_creation(self): - self.log.info("Building menu ...") - self.menu = OpenPypeMenu() - - @staticmethod - def create_context_node(): - """Helper for creating context holding node.""" - - root_scene = rt.rootScene - - create_attr_script = (""" -attributes "OpenPypeContext" -( - parameters main rollout:params - ( - context type: #string - ) - - rollout params "OpenPype Parameters" - ( - editText editTextContext "Context" type: #string - ) -) - """) - - attr = rt.execute(create_attr_script) - rt.custAttributes.add(root_scene, attr) - - return root_scene.OpenPypeContext.context - - def update_context_data(self, data, changes): - try: - _ = rt.rootScene.OpenPypeContext.context - except AttributeError: - # context node doesn't exist - self.create_context_node() - - rt.rootScene.OpenPypeContext.context = json.dumps(data) - - def get_context_data(self): - try: - context = rt.rootScene.OpenPypeContext.context - except AttributeError: - # context node doesn't exist - context = self.create_context_node() - if not context: - context = "{}" - return json.loads(context) - - def save_file(self, dst_path=None): - # Force forwards slashes to avoid segfault - dst_path = dst_path.replace("\\", "/") - rt.saveMaxFile(dst_path) - - -def ls() -> list: - """Get all OpenPype containers.""" - objs = rt.objects - containers = [ - obj for obj in objs - if rt.getUserProp(obj, "id") == AVALON_CONTAINER_ID - ] - - for container in sorted(containers, key=attrgetter("name")): - yield lib.read(container) - - -def containerise(name: str, nodes: list, context, - namespace=None, loader=None, suffix="_CON"): - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace or "", - "loader": loader, - "representation": context["representation"]["_id"], - } - container_name = f"{namespace}:{name}{suffix}" - container = rt.container(name=container_name) - import_custom_attribute_data(container, nodes) - if not lib.imprint(container_name, data): - print(f"Imprinting of {container_name} failed.") - return container - - -def load_custom_attribute_data(): - """Re-load the AYON custom attribute definition built by the creator. - - Returns: - attribute: the custom OP attribute definition declared in MaxScript. - """ - return rt.Execute(MS_CUSTOM_ATTRIB) - - -def import_custom_attribute_data(container: str, selections: list): - """Import the OpenPype/AYON custom attribute data built by the creator. - - Args: - container (str): target container which adds custom attributes - selections (list): nodes to be added into - group in custom attributes - """ - attrs = load_custom_attribute_data() - modifier = rt.EmptyModifier() - rt.addModifier(container, modifier) - container.modifiers[0].name = "OP Data" - rt.custAttributes.add(container.modifiers[0], attrs) - node_list = [] - sel_list = [] - for i in selections: - node_ref = rt.NodeTransformMonitor(node=i) -
node_list.append(node_ref) - sel_list.append(str(i)) - - # Setting the property - rt.setProperty( - container.modifiers[0].openPypeData, - "all_handles", node_list) - rt.setProperty( - container.modifiers[0].openPypeData, - "sel_list", sel_list) - - -def update_custom_attribute_data(container: str, selections: list): - """Updating the AYON custom parameter built by the creator - - Args: - container (str): target container which adds custom attributes - selections (list): nodes to be added into - group in custom attributes - """ - if container.modifiers[0].name == "OP Data": - rt.deleteModifier(container, container.modifiers[0]) - import_custom_attribute_data(container, selections) - - -def get_previous_loaded_object(container: str): - """Get previous loaded_object through the OP data - - Args: - container (str): the container which stores the OP data - - Returns: - node_list(list): list of nodes which are previously loaded - """ - node_list = [] - sel_list = rt.getProperty(container.modifiers[0].openPypeData, "sel_list") - for obj in rt.Objects: - if str(obj) in sel_list: - node_list.append(obj) - return node_list diff --git a/openpype/hosts/max/api/plugin.py b/openpype/hosts/max/api/plugin.py deleted file mode 100644 index 2cf0d69146..0000000000 --- a/openpype/hosts/max/api/plugin.py +++ /dev/null @@ -1,292 +0,0 @@ -# -*- coding: utf-8 -*- -"""3dsmax specific Avalon/Pyblish plugin definitions.""" -from abc import ABCMeta - -import six -from pymxs import runtime as rt - -from openpype.lib import BoolDef -from openpype.pipeline import CreatedInstance, Creator, CreatorError - -from .lib import imprint, lsattr, read - -MS_CUSTOM_ATTRIB = """attributes "openPypeData" -( - parameters main rollout:OPparams - ( - all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on - sel_list type:#stringTab tabSize:0 tabSizeVariable:on - ) - - rollout OPparams "OP Parameters" - ( - listbox list_node "Node References" items:#() - button button_add "Add to Container" - button button_del "Delete from Container" - - fn node_to_name the_node = - ( - handle = the_node.handle - obj_name = the_node.name - handle_name = obj_name + "<" + handle as string + ">" - return handle_name - ) - fn nodes_to_add node = - ( - sceneObjs = #() - if classOf node == Container do return false - n = node as string - for obj in Objects do - ( - tmp_obj = obj as string - append sceneObjs tmp_obj - ) - if sel_list != undefined do - ( - for obj in sel_list do - ( - idx = findItem sceneObjs obj - if idx do - ( - deleteItem sceneObjs idx - ) - ) - ) - idx = findItem sceneObjs n - if idx then return true else false - ) - - fn nodes_to_rmv node = - ( - n = node as string - idx = findItem sel_list n - if idx then return true else false - ) - - on button_add pressed do - ( - current_sel = selectByName title:"Select Objects to add to - the Container" buttontext:"Add" filter:nodes_to_add - if current_sel == undefined then return False - temp_arr = #() - i_node_arr = #() - for c in current_sel do - ( - handle_name = node_to_name c - node_ref = NodeTransformMonitor node:c - idx = finditem list_node.items handle_name - if idx do ( - continue - ) - name = c as string - append temp_arr handle_name - append i_node_arr node_ref - append sel_list name - ) - all_handles = join i_node_arr all_handles - list_node.items = join temp_arr list_node.items - ) - - on button_del pressed do - ( - current_sel = selectByName title:"Select Objects to remove - from the Container" buttontext:"Remove" filter: nodes_to_rmv - if current_sel == undefined or 
current_sel.count == 0 then - ( - return False - ) - temp_arr = #() - i_node_arr = #() - new_i_node_arr = #() - new_temp_arr = #() - - for c in current_sel do - ( - node_ref = NodeTransformMonitor node:c as string - handle_name = node_to_name c - n = c as string - tmp_all_handles = #() - for i in all_handles do - ( - tmp = i as string - append tmp_all_handles tmp - ) - idx = finditem tmp_all_handles node_ref - if idx do - ( - new_i_node_arr = DeleteItem all_handles idx - - ) - idx = finditem list_node.items handle_name - if idx do - ( - new_temp_arr = DeleteItem list_node.items idx - ) - idx = finditem sel_list n - if idx do - ( - sel_list = DeleteItem sel_list idx - ) - ) - all_handles = join i_node_arr new_i_node_arr - list_node.items = join temp_arr new_temp_arr - ) - - on OPparams open do - ( - if all_handles.count != 0 then - ( - temp_arr = #() - for x in all_handles do - ( - if x.node == undefined do continue - handle_name = node_to_name x.node - append temp_arr handle_name - ) - list_node.items = temp_arr - ) - ) - ) -)""" - - -class OpenPypeCreatorError(CreatorError): - pass - - -class MaxCreatorBase(object): - - @staticmethod - def cache_subsets(shared_data): - if shared_data.get("max_cached_subsets") is not None: - return shared_data - - shared_data["max_cached_subsets"] = {} - cached_instances = lsattr("id", "pyblish.avalon.instance") - for i in cached_instances: - creator_id = rt.GetUserProp(i, "creator_identifier") - if creator_id not in shared_data["max_cached_subsets"]: - shared_data["max_cached_subsets"][creator_id] = [i.name] - else: - shared_data[ - "max_cached_subsets"][creator_id].append(i.name) - return shared_data - - @staticmethod - def create_instance_node(node): - """Create instance node. - - If the supplied node is an existing node, it will be used to hold - the instance; otherwise a new Container node will be created. - - Args: - node (rt.MXSWrapperBase, str): Node or node name to use.
- - Returns: - instance - """ - if isinstance(node, str): - node = rt.Container(name=node) - - attrs = rt.Execute(MS_CUSTOM_ATTRIB) - modifier = rt.EmptyModifier() - rt.addModifier(node, modifier) - node.modifiers[0].name = "OP Data" - rt.custAttributes.add(node.modifiers[0], attrs) - - return node - - -@six.add_metaclass(ABCMeta) -class MaxCreator(Creator, MaxCreatorBase): - selected_nodes = [] - - def create(self, subset_name, instance_data, pre_create_data): - if pre_create_data.get("use_selection"): - self.selected_nodes = rt.GetCurrentSelection() - if rt.getNodeByName(subset_name): - raise CreatorError(f"'{subset_name}' is already created..") - - instance_node = self.create_instance_node(subset_name) - instance_data["instance_node"] = instance_node.name - instance = CreatedInstance( - self.family, - subset_name, - instance_data, - self - ) - if pre_create_data.get("use_selection"): - - node_list = [] - sel_list = [] - for i in self.selected_nodes: - node_ref = rt.NodeTransformMonitor(node=i) - node_list.append(node_ref) - sel_list.append(str(i)) - - # Setting the property - rt.setProperty( - instance_node.modifiers[0].openPypeData, - "all_handles", node_list) - rt.setProperty( - instance_node.modifiers[0].openPypeData, - "sel_list", sel_list) - - self._add_instance_to_context(instance) - imprint(instance_node.name, instance.data_to_store()) - - return instance - - def collect_instances(self): - self.cache_subsets(self.collection_shared_data) - for instance in self.collection_shared_data["max_cached_subsets"].get(self.identifier, []): # noqa - created_instance = CreatedInstance.from_existing( - read(rt.GetNodeByName(instance)), self - ) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, changes in update_list: - instance_node = created_inst.get("instance_node") - new_values = { - key: changes[key].new_value - for key in changes.changed_keys - } - subset = new_values.get("subset", "") - if subset and instance_node != subset: - node = rt.getNodeByName(instance_node) - new_subset_name = new_values["subset"] - if rt.getNodeByName(new_subset_name): - raise CreatorError( - "The subset '{}' already exists.".format( - new_subset_name)) - instance_node = new_subset_name - created_inst["instance_node"] = instance_node - node.name = instance_node - - imprint( - instance_node, - created_inst.data_to_store(), - ) - - def remove_instances(self, instances): - """Remove specified instance from the scene. - - This is only removing `id` parameter so instance is no longer - instance, because it might contain valuable data for artist. - - """ - for instance in instances: - instance_node = rt.GetNodeByName( - instance.data.get("instance_node")) - if instance_node: - count = rt.custAttributes.count(instance_node.modifiers[0]) - rt.custAttributes.delete(instance_node.modifiers[0], count) - rt.Delete(instance_node) - - self._remove_instance_from_context(instance) - - def get_pre_create_attr_defs(self): - return [ - BoolDef("use_selection", label="Use selection") - ] diff --git a/openpype/hosts/max/hooks/set_paths.py b/openpype/hosts/max/hooks/set_paths.py deleted file mode 100644 index 4b961fa91e..0000000000 --- a/openpype/hosts/max/hooks/set_paths.py +++ /dev/null @@ -1,18 +0,0 @@ -from openpype.lib.applications import PreLaunchHook, LaunchTypes - - -class SetPath(PreLaunchHook): - """Set current dir to workdir. - - Hook `GlobalHostDataHook` must be executed before this hook. 
- """ - app_groups = {"max"} - launch_types = {LaunchTypes.local} - - def execute(self): - workdir = self.launch_context.env.get("AVALON_WORKDIR", "") - if not workdir: - self.log.warning("BUG: Workdir is not filled.") - return - - self.launch_context.kwargs["cwd"] = workdir diff --git a/openpype/hosts/max/plugins/create/create_camera.py b/openpype/hosts/max/plugins/create/create_camera.py deleted file mode 100644 index 804d629ec7..0000000000 --- a/openpype/hosts/max/plugins/create/create_camera.py +++ /dev/null @@ -1,11 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating camera.""" -from openpype.hosts.max.api import plugin - - -class CreateCamera(plugin.MaxCreator): - """Creator plugin for Camera.""" - identifier = "io.openpype.creators.max.camera" - label = "Camera" - family = "camera" - icon = "gear" diff --git a/openpype/hosts/max/plugins/create/create_model.py b/openpype/hosts/max/plugins/create/create_model.py deleted file mode 100644 index fc09d475ef..0000000000 --- a/openpype/hosts/max/plugins/create/create_model.py +++ /dev/null @@ -1,11 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for model.""" -from openpype.hosts.max.api import plugin - - -class CreateModel(plugin.MaxCreator): - """Creator plugin for Model.""" - identifier = "io.openpype.creators.max.model" - label = "Model" - family = "model" - icon = "gear" diff --git a/openpype/hosts/max/plugins/create/create_pointcache.py b/openpype/hosts/max/plugins/create/create_pointcache.py deleted file mode 100644 index c2d11f4c32..0000000000 --- a/openpype/hosts/max/plugins/create/create_pointcache.py +++ /dev/null @@ -1,11 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating pointcache alembics.""" -from openpype.hosts.max.api import plugin - - -class CreatePointCache(plugin.MaxCreator): - """Creator plugin for Point caches.""" - identifier = "io.openpype.creators.max.pointcache" - label = "Point Cache" - family = "pointcache" - icon = "gear" diff --git a/openpype/hosts/max/plugins/create/create_redshift_proxy.py b/openpype/hosts/max/plugins/create/create_redshift_proxy.py deleted file mode 100644 index 6eb59f0a73..0000000000 --- a/openpype/hosts/max/plugins/create/create_redshift_proxy.py +++ /dev/null @@ -1,11 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating camera.""" -from openpype.hosts.max.api import plugin -from openpype.pipeline import CreatedInstance - - -class CreateRedshiftProxy(plugin.MaxCreator): - identifier = "io.openpype.creators.max.redshiftproxy" - label = "Redshift Proxy" - family = "redshiftproxy" - icon = "gear" diff --git a/openpype/hosts/max/plugins/create/create_render.py b/openpype/hosts/max/plugins/create/create_render.py deleted file mode 100644 index 617334753a..0000000000 --- a/openpype/hosts/max/plugins/create/create_render.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating camera.""" -import os -from openpype.hosts.max.api import plugin -from openpype.lib import BoolDef -from openpype.hosts.max.api.lib_rendersettings import RenderSettings - - -class CreateRender(plugin.MaxCreator): - """Creator plugin for Renders.""" - identifier = "io.openpype.creators.max.render" - label = "Render" - family = "maxrender" - icon = "gear" - - def create(self, subset_name, instance_data, pre_create_data): - from pymxs import runtime as rt - file = rt.maxFileName - filename, _ = os.path.splitext(file) - instance_data["AssetName"] = filename - instance_data["multiCamera"] = pre_create_data.get("multi_cam") - num_of_renderlayer = 
rt.batchRenderMgr.numViews - if num_of_renderlayer > 0: - rt.batchRenderMgr.DeleteView(num_of_renderlayer) - - instance = super(CreateRender, self).create( - subset_name, - instance_data, - pre_create_data) - - container_name = instance.data.get("instance_node") - # set output paths for rendering(mandatory for deadline) - RenderSettings().render_output(container_name) - # TODO: create multiple camera options - if self.selected_nodes: - selected_nodes_name = [] - for sel in self.selected_nodes: - name = sel.name - selected_nodes_name.append(name) - RenderSettings().batch_render_layer( - container_name, filename, - selected_nodes_name) - - def get_pre_create_attr_defs(self): - attrs = super(CreateRender, self).get_pre_create_attr_defs() - return attrs + [ - BoolDef("multi_cam", - label="Multiple Cameras Submission", - default=False), - ] diff --git a/openpype/hosts/max/plugins/create/create_review.py b/openpype/hosts/max/plugins/create/create_review.py deleted file mode 100644 index 78d27a722b..0000000000 --- a/openpype/hosts/max/plugins/create/create_review.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating review in Max.""" -from openpype.hosts.max.api import plugin -from openpype.lib import BoolDef, EnumDef, NumberDef - - -class CreateReview(plugin.MaxCreator): - """Review in 3dsMax""" - - identifier = "io.openpype.creators.max.review" - label = "Review" - family = "review" - icon = "video-camera" - - review_width = 1920 - review_height = 1080 - percentSize = 100 - keep_images = False - image_format = "png" - visual_style = "Realistic" - viewport_preset = "Quality" - vp_texture = True - anti_aliasing = "None" - - def apply_settings(self, project_settings): - settings = project_settings["max"]["CreateReview"] # noqa - - # Take some defaults from settings - self.review_width = settings.get("review_width", self.review_width) - self.review_height = settings.get("review_height", self.review_height) - self.percentSize = settings.get("percentSize", self.percentSize) - self.keep_images = settings.get("keep_images", self.keep_images) - self.image_format = settings.get("image_format", self.image_format) - self.visual_style = settings.get("visual_style", self.visual_style) - self.viewport_preset = settings.get( - "viewport_preset", self.viewport_preset) - self.anti_aliasing = settings.get( - "anti_aliasing", self.anti_aliasing) - self.vp_texture = settings.get("vp_texture", self.vp_texture) - - def create(self, subset_name, instance_data, pre_create_data): - # Transfer settings from pre create to instance - creator_attributes = instance_data.setdefault( - "creator_attributes", dict()) - for key in ["imageFormat", - "keepImages", - "review_width", - "review_height", - "percentSize", - "visualStyleMode", - "viewportPreset", - "antialiasingQuality", - "vpTexture"]: - if key in pre_create_data: - creator_attributes[key] = pre_create_data[key] - - super(CreateReview, self).create( - subset_name, - instance_data, - pre_create_data) - - def get_instance_attr_defs(self): - image_format_enum = ["exr", "jpg", "png", "tga"] - - visual_style_preset_enum = [ - "Realistic", "Shaded", "Facets", - "ConsistentColors", "HiddenLine", - "Wireframe", "BoundingBox", "Ink", - "ColorInk", "Acrylic", "Tech", "Graphite", - "ColorPencil", "Pastel", "Clay", "ModelAssist" - ] - preview_preset_enum = [ - "Quality", "Standard", "Performance", - "DXMode", "Customize"] - anti_aliasing_enum = ["None", "2X", "4X", "8X"] - - return [ - NumberDef("review_width", - label="Review width", - 
decimals=0, - minimum=0, - default=self.review_width), - NumberDef("review_height", - label="Review height", - decimals=0, - minimum=0, - default=self.review_height), - NumberDef("percentSize", - label="Percent of Output", - default=self.percentSize, - minimum=1, - decimals=0), - BoolDef("keepImages", - label="Keep Image Sequences", - default=self.keep_images), - EnumDef("imageFormat", - image_format_enum, - default=self.image_format, - label="Image Format Options"), - EnumDef("visualStyleMode", - visual_style_preset_enum, - default=self.visual_style, - label="Preference"), - EnumDef("viewportPreset", - preview_preset_enum, - default=self.viewport_preset, - label="Preview Preset"), - EnumDef("antialiasingQuality", - anti_aliasing_enum, - default=self.anti_aliasing, - label="Anti-aliasing Quality"), - BoolDef("vpTexture", - label="Viewport Texture", - default=self.vp_texture) - ] - - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attributes - attrs = super().get_pre_create_attr_defs() - return attrs + self.get_instance_attr_defs() diff --git a/openpype/hosts/max/plugins/load/load_camera_fbx.py b/openpype/hosts/max/plugins/load/load_camera_fbx.py deleted file mode 100644 index ce1427a980..0000000000 --- a/openpype/hosts/max/plugins/load/load_camera_fbx.py +++ /dev/null @@ -1,99 +0,0 @@ -import os - -from openpype.hosts.max.api import lib, maintained_selection -from openpype.hosts.max.api.lib import ( - unique_namespace, - get_namespace, - object_transform_set -) -from openpype.hosts.max.api.pipeline import ( - containerise, - get_previous_loaded_object, - update_custom_attribute_data -) -from openpype.pipeline import get_representation_path, load - - -class FbxLoader(load.LoaderPlugin): - """Fbx Loader.""" - - families = ["camera"] - representations = ["fbx"] - order = -9 - icon = "code-fork" - color = "white" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - filepath = self.filepath_from_context(context) - filepath = os.path.normpath(filepath) - rt.FBXImporterSetParam("Animation", True) - rt.FBXImporterSetParam("Camera", True) - rt.FBXImporterSetParam("AxisConversionMethod", True) - rt.FBXImporterSetParam("Mode", rt.Name("create")) - rt.FBXImporterSetParam("Preserveinstances", True) - rt.ImportFile( - filepath, - rt.name("noPrompt"), - using=rt.FBXIMP) - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - selections = rt.GetCurrentSelection() - - for selection in selections: - selection.name = f"{namespace}:{selection.name}" - - return containerise( - name, selections, context, - namespace, loader=self.__class__.__name__) - - def update(self, container, representation): - from pymxs import runtime as rt - - path = get_representation_path(representation) - node_name = container["instance_node"] - node = rt.getNodeByName(node_name) - namespace, _ = get_namespace(node_name) - - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - prev_fbx_objects = rt.GetCurrentSelection() - transform_data = object_transform_set(prev_fbx_objects) - for prev_fbx_obj in prev_fbx_objects: - if rt.isValidNode(prev_fbx_obj): - rt.Delete(prev_fbx_obj) - - rt.FBXImporterSetParam("Animation", True) - rt.FBXImporterSetParam("Camera", True) - rt.FBXImporterSetParam("Mode", rt.Name("merge")) - rt.FBXImporterSetParam("AxisConversionMethod", True) - rt.FBXImporterSetParam("Preserveinstances", True) - rt.ImportFile( - path, rt.name("noPrompt"), using=rt.FBXIMP) - current_fbx_objects = rt.GetCurrentSelection() - 
fbx_objects = [] - for fbx_object in current_fbx_objects: - fbx_object.name = f"{namespace}:{fbx_object.name}" - fbx_objects.append(fbx_object) - fbx_transform = f"{fbx_object.name}.transform" - if fbx_transform in transform_data.keys(): - fbx_object.pos = transform_data[fbx_transform] or 0 - fbx_object.scale = transform_data[ - f"{fbx_object.name}.scale"] or 0 - - update_custom_attribute_data(node, fbx_objects) - lib.imprint(container["instance_node"], { - "representation": str(representation["_id"]) - }) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - from pymxs import runtime as rt - - node = rt.GetNodeByName(container["instance_node"]) - rt.Delete(node) diff --git a/openpype/hosts/max/plugins/load/load_model.py b/openpype/hosts/max/plugins/load/load_model.py deleted file mode 100644 index c41608c860..0000000000 --- a/openpype/hosts/max/plugins/load/load_model.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -from openpype.pipeline import load, get_representation_path -from openpype.hosts.max.api.pipeline import ( - containerise, - get_previous_loaded_object -) -from openpype.hosts.max.api import lib -from openpype.hosts.max.api.lib import ( - maintained_selection, unique_namespace -) - - -class ModelAbcLoader(load.LoaderPlugin): - """Loading model with the Alembic loader.""" - - families = ["model"] - label = "Load Model with Alembic" - representations = ["abc"] - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - - file_path = os.path.normpath(self.filepath_from_context(context)) - - abc_before = { - c - for c in rt.rootNode.Children - if rt.classOf(c) == rt.AlembicContainer - } - - rt.AlembicImport.ImportToRoot = False - rt.AlembicImport.CustomAttributes = True - rt.AlembicImport.UVs = True - rt.AlembicImport.VertexColors = True - rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport) - - abc_after = { - c - for c in rt.rootNode.Children - if rt.classOf(c) == rt.AlembicContainer - } - - # This should yield new AlembicContainer node - abc_containers = abc_after.difference(abc_before) - - if len(abc_containers) != 1: - self.log.error("Something failed when loading.") - - abc_container = abc_containers.pop() - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - abc_objects = [] - for abc_object in abc_container.Children: - abc_object.name = f"{namespace}:{abc_object.name}" - abc_objects.append(abc_object) - # rename the abc container with namespace - abc_container_name = f"{namespace}:{name}" - abc_container.name = abc_container_name - abc_objects.append(abc_container) - - return containerise( - name, abc_objects, context, - namespace, loader=self.__class__.__name__ - ) - - def update(self, container, representation): - from pymxs import runtime as rt - - path = get_representation_path(representation) - node = rt.GetNodeByName(container["instance_node"]) - node_list = [n for n in get_previous_loaded_object(node) - if rt.ClassOf(n) == rt.AlembicContainer] - with maintained_selection(): - rt.Select(node_list) - - for alembic in rt.Selection: - abc = rt.GetNodeByName(alembic.name) - rt.Select(abc.Children) - for abc_con in abc.Children: - abc_con.source = path - rt.Select(abc_con.Children) - for abc_obj in abc_con.Children: - abc_obj.source = path - lib.imprint( - container["instance_node"], - {"representation": str(representation["_id"])}, - ) - - def switch(self, container, 
representation): - self.update(container, representation) - - def remove(self, container): - from pymxs import runtime as rt - - node = rt.GetNodeByName(container["instance_node"]) - rt.Delete(node) - - @staticmethod - def get_container_children(parent, type_name): - from pymxs import runtime as rt - - def list_children(node): - children = [] - for c in node.Children: - children.append(c) - children += list_children(c) - return children - - filtered = [] - for child in list_children(parent): - class_type = str(rt.ClassOf(child.baseObject)) - if class_type == type_name: - filtered.append(child) - - return filtered diff --git a/openpype/hosts/max/plugins/load/load_redshift_proxy.py b/openpype/hosts/max/plugins/load/load_redshift_proxy.py deleted file mode 100644 index daf6d3e169..0000000000 --- a/openpype/hosts/max/plugins/load/load_redshift_proxy.py +++ /dev/null @@ -1,77 +0,0 @@ -import os -import clique - -from openpype.pipeline import ( - load, - get_representation_path -) -from openpype.pipeline.load import LoadError -from openpype.hosts.max.api.pipeline import ( - containerise, - update_custom_attribute_data, - get_previous_loaded_object -) -from openpype.hosts.max.api import lib -from openpype.hosts.max.api.lib import ( - unique_namespace, - get_plugins -) - - -class RedshiftProxyLoader(load.LoaderPlugin): - """Load rs files with Redshift Proxy""" - - label = "Load Redshift Proxy" - families = ["redshiftproxy"] - representations = ["rs"] - order = -9 - icon = "code-fork" - color = "white" - - def load(self, context, name=None, namespace=None, data=None): - from pymxs import runtime as rt - plugin_info = get_plugins() - if "redshift4max.dlr" not in plugin_info: - raise LoadError("Redshift not loaded/installed in Max..") - filepath = self.filepath_from_context(context) - rs_proxy = rt.RedshiftProxy() - rs_proxy.file = filepath - files_in_folder = os.listdir(os.path.dirname(filepath)) - collections, remainder = clique.assemble(files_in_folder) - if collections: - rs_proxy.is_sequence = True - - namespace = unique_namespace( - name + "_", - suffix="_", - ) - rs_proxy.name = f"{namespace}:{rs_proxy.name}" - - return containerise( - name, [rs_proxy], context, - namespace, loader=self.__class__.__name__) - - def update(self, container, representation): - from pymxs import runtime as rt - - path = get_representation_path(representation) - node = rt.getNodeByName(container["instance_node"]) - node_list = get_previous_loaded_object(node) - rt.Select(node_list) - update_custom_attribute_data( - node, rt.Selection) - for proxy in rt.Selection: - proxy.file = path - - lib.imprint(container["instance_node"], { - "representation": str(representation["_id"]) - }) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - from pymxs import runtime as rt - - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) diff --git a/openpype/hosts/max/plugins/publish/collect_render.py b/openpype/hosts/max/plugins/publish/collect_render.py deleted file mode 100644 index 8abffa5ab6..0000000000 --- a/openpype/hosts/max/plugins/publish/collect_render.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect Render""" -import os -import pyblish.api - -from pymxs import runtime as rt -from openpype.pipeline.publish import KnownPublishError -from openpype.hosts.max.api import colorspace -from openpype.hosts.max.api.lib import get_max_version, get_current_renderer -from openpype.hosts.max.api.lib_rendersettings import 
RenderSettings -from openpype.hosts.max.api.lib_renderproducts import RenderProducts - - -class CollectRender(pyblish.api.InstancePlugin): - """Collect Render for Deadline""" - - order = pyblish.api.CollectorOrder + 0.02 - label = "Collect 3dsmax Render Layers" - hosts = ['max'] - families = ["maxrender"] - - def process(self, instance): - context = instance.context - folder = rt.maxFilePath - file = rt.maxFileName - current_file = os.path.join(folder, file) - filepath = current_file.replace("\\", "/") - context.data['currentFile'] = current_file - - files_by_aov = RenderProducts().get_beauty(instance.name) - aovs = RenderProducts().get_aovs(instance.name) - files_by_aov.update(aovs) - - camera = rt.viewport.GetCamera() - if instance.data.get("members"): - camera_list = [member for member in instance.data["members"] - if rt.ClassOf(member) == rt.Camera.Classes] - if camera_list: - camera = camera_list[-1] - - instance.data["cameras"] = [camera.name] if camera else None # noqa - - if instance.data.get("multiCamera"): - cameras = instance.data.get("members") - if not cameras: - raise KnownPublishError("There should be at least" - " one renderable camera in container") - sel_cam = [ - c.name for c in cameras - if rt.classOf(c) in rt.Camera.classes] - container_name = instance.data.get("instance_node") - render_dir = os.path.dirname(rt.rendOutputFilename) - outputs = RenderSettings().batch_render_layer( - container_name, render_dir, sel_cam - ) - - instance.data["cameras"] = sel_cam - - files_by_aov = RenderProducts().get_multiple_beauty( - outputs, sel_cam) - aovs = RenderProducts().get_multiple_aovs( - outputs, sel_cam) - files_by_aov.update(aovs) - - if "expectedFiles" not in instance.data: - instance.data["expectedFiles"] = list() - instance.data["files"] = list() - instance.data["expectedFiles"].append(files_by_aov) - instance.data["files"].append(files_by_aov) - - img_format = RenderProducts().image_format() - # OCIO config not support in - # most of the 3dsmax renderers - # so this is currently hard coded - # TODO: add options for redshift/vray ocio config - instance.data["colorspaceConfig"] = "" - instance.data["colorspaceDisplay"] = "sRGB" - instance.data["colorspaceView"] = "ACES 1.0 SDR-video" - - if int(get_max_version()) >= 2024: - colorspace_mgr = rt.ColorPipelineMgr # noqa - display = next( - (display for display in colorspace_mgr.GetDisplayList())) - view_transform = next( - (view for view in colorspace_mgr.GetViewList(display))) - instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath - instance.data["colorspaceDisplay"] = display - instance.data["colorspaceView"] = view_transform - - instance.data["renderProducts"] = colorspace.ARenderProduct() - instance.data["publishJobState"] = "Suspended" - instance.data["attachTo"] = [] - renderer_class = get_current_renderer() - renderer = str(renderer_class).split(":")[0] - # also need to get the render dir for conversion - data = { - "asset": instance.data["asset"], - "subset": str(instance.name), - "publish": True, - "maxversion": str(get_max_version()), - "imageFormat": img_format, - "family": 'maxrender', - "families": ['maxrender'], - "renderer": renderer, - "source": filepath, - "plugin": "3dsmax", - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"], - "farm": True - } - instance.data.update(data) - - # TODO: this should be unified with maya and its "multipart" flag - # on instance. 
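For context on the `expectedFiles` structure the collector builds above, here is a purely illustrative sketch of the files-by-AOV mapping; the AOV names and paths are placeholders, not actual `RenderProducts` output:

```python
# Illustrative sketch only: the assumed shape of the mapping appended to
# instance.data["expectedFiles"] above. Names and paths are placeholders.
files_by_aov = {
    "beauty": [
        "renders/3dsmax/shot010/shot010.1001.exr",
        "renders/3dsmax/shot010/shot010.1002.exr",
    ],
    "RsDiffuseLighting": [
        "renders/3dsmax/shot010/shot010_RsDiffuseLighting.1001.exr",
        "renders/3dsmax/shot010/shot010_RsDiffuseLighting.1002.exr",
    ],
}

# The collector wraps one such dict per render instance in a list, which
# the farm submission plugins later consume frame by frame.
expected_files = [files_by_aov]
print(expected_files)
```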
- if renderer == "Redshift_Renderer": - instance.data.update( - {"separateAovFiles": rt.Execute( - "renderers.current.separateAovFiles")}) - - self.log.info("data: {0}".format(data)) diff --git a/openpype/hosts/max/plugins/publish/collect_review.py b/openpype/hosts/max/plugins/publish/collect_review.py deleted file mode 100644 index e7e957e6f1..0000000000 --- a/openpype/hosts/max/plugins/publish/collect_review.py +++ /dev/null @@ -1,153 +0,0 @@ -# dont forget getting the focal length for burnin -"""Collect Review""" -import pyblish.api - -from pymxs import runtime as rt -from openpype.lib import BoolDef -from openpype.hosts.max.api.lib import get_max_version -from openpype.pipeline.publish import ( - OpenPypePyblishPluginMixin, - KnownPublishError -) - - -class CollectReview(pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin): - """Collect Review Data for Preview Animation""" - - order = pyblish.api.CollectorOrder + 0.02 - label = "Collect Review Data" - hosts = ['max'] - families = ["review"] - - def process(self, instance): - nodes = instance.data["members"] - - def is_camera(node): - is_camera_class = rt.classOf(node) in rt.Camera.classes - return is_camera_class and rt.isProperty(node, "fov") - - # Use first camera in instance - cameras = [node for node in nodes if is_camera(node)] - if cameras: - if len(cameras) > 1: - self.log.warning( - "Found more than one camera in instance, using first " - f"one found: {cameras[0]}" - ) - camera = cameras[0] - camera_name = camera.name - focal_length = camera.fov - else: - raise KnownPublishError( - "Unable to find a valid camera in 'Review' container." - " Only native max Camera supported. " - f"Found objects: {nodes}" - ) - creator_attrs = instance.data["creator_attributes"] - attr_values = self.get_attr_values_from_data(instance.data) - - general_preview_data = { - "review_camera": camera_name, - "frameStart": instance.data["frameStartHandle"], - "frameEnd": instance.data["frameEndHandle"], - "percentSize": creator_attrs["percentSize"], - "imageFormat": creator_attrs["imageFormat"], - "keepImages": creator_attrs["keepImages"], - "fps": instance.context.data["fps"], - "review_width": creator_attrs["review_width"], - "review_height": creator_attrs["review_height"], - } - - if int(get_max_version()) >= 2024: - colorspace_mgr = rt.ColorPipelineMgr # noqa - display = next( - (display for display in colorspace_mgr.GetDisplayList())) - view_transform = next( - (view for view in colorspace_mgr.GetViewList(display))) - instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath - instance.data["colorspaceDisplay"] = display - instance.data["colorspaceView"] = view_transform - - preview_data = { - "vpStyle": creator_attrs["visualStyleMode"], - "vpPreset": creator_attrs["viewportPreset"], - "vpTextures": creator_attrs["vpTexture"], - "dspGeometry": attr_values.get("dspGeometry"), - "dspShapes": attr_values.get("dspShapes"), - "dspLights": attr_values.get("dspLights"), - "dspCameras": attr_values.get("dspCameras"), - "dspHelpers": attr_values.get("dspHelpers"), - "dspParticles": attr_values.get("dspParticles"), - "dspBones": attr_values.get("dspBones"), - "dspBkg": attr_values.get("dspBkg"), - "dspGrid": attr_values.get("dspGrid"), - "dspSafeFrame": attr_values.get("dspSafeFrame"), - "dspFrameNums": attr_values.get("dspFrameNums") - } - else: - general_viewport = { - "dspBkg": attr_values.get("dspBkg"), - "dspGrid": attr_values.get("dspGrid") - } - nitrous_manager = { - "AntialiasingQuality": creator_attrs["antialiasingQuality"], - } - 
nitrous_viewport = {
-                "VisualStyleMode": creator_attrs["visualStyleMode"],
-                "ViewportPreset": creator_attrs["viewportPreset"],
-                "UseTextureEnabled": creator_attrs["vpTexture"]
-            }
-            preview_data = {
-                "general_viewport": general_viewport,
-                "nitrous_manager": nitrous_manager,
-                "nitrous_viewport": nitrous_viewport,
-                "vp_btn_mgr": {"EnableButtons": False}
-            }
-
-        # Enable ftrack functionality
-        instance.data.setdefault("families", []).append('ftrack')
-
-        burnin_members = instance.data.setdefault("burninDataMembers", {})
-        burnin_members["focalLength"] = focal_length
-
-        instance.data.update(general_preview_data)
-        instance.data["viewport_options"] = preview_data
-
-    @classmethod
-    def get_attribute_defs(cls):
-        return [
-            BoolDef("dspGeometry",
-                    label="Geometry",
-                    default=True),
-            BoolDef("dspShapes",
-                    label="Shapes",
-                    default=False),
-            BoolDef("dspLights",
-                    label="Lights",
-                    default=False),
-            BoolDef("dspCameras",
-                    label="Cameras",
-                    default=False),
-            BoolDef("dspHelpers",
-                    label="Helpers",
-                    default=False),
-            BoolDef("dspParticles",
-                    label="Particle Systems",
-                    default=True),
-            BoolDef("dspBones",
-                    label="Bone Objects",
-                    default=False),
-            BoolDef("dspBkg",
-                    label="Background",
-                    default=True),
-            BoolDef("dspGrid",
-                    label="Active Grid",
-                    default=False),
-            BoolDef("dspSafeFrame",
-                    label="Safe Frames",
-                    default=False),
-            BoolDef("dspFrameNums",
-                    label="Frame Numbers",
-                    default=False)
-        ]
diff --git a/openpype/hosts/max/plugins/publish/extract_alembic.py b/openpype/hosts/max/plugins/publish/extract_alembic.py
deleted file mode 100644
index 24e0121a61..0000000000
--- a/openpype/hosts/max/plugins/publish/extract_alembic.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Export alembic file.
-
-Note:
-    Parameters on AlembicExport (AlembicExport.Parameter):
-
-    ParticleAsMesh (bool): Sets whether particle shapes are exported
-        as meshes.
-    AnimTimeRange (enum): How animation is saved:
-        #CurrentFrame: saves current frame
-        #TimeSlider: saves the active time segments on time slider (default)
-        #StartEnd: saves the range specified by StartFrame and EndFrame
-    StartFrame (int)
-    EndFrame (int)
-    ShapeSuffix (bool): When set to true, appends the string "Shape" to the
-        name of each exported mesh. This property is set to false by default.
-    SamplesPerFrame (int): Sets the number of animation samples per frame.
-    Hidden (bool): When true, export hidden geometry.
-    UVs (bool): When true, export the mesh UV map channel.
-    Normals (bool): When true, export the mesh normals.
-    VertexColors (bool): When true, export the mesh vertex color map 0 and the
-        current vertex color display data when it differs.
-    ExtraChannels (bool): When true, export the mesh extra map channels
-        (map channels greater than channel 1).
-    Velocity (bool): When true, export the mesh vertex and particle velocity
-        data.
-    MaterialIDs (bool): When true, export the mesh material ID as
-        Alembic face sets.
-    Visibility (bool): When true, export the node visibility data.
-    LayerName (bool): When true, export the node layer name as an Alembic
-        object property.
-    MaterialName (bool): When true, export the geometry node material name as
-        an Alembic object property.
-    ObjectID (bool): When true, export the geometry node g-buffer object ID as
-        an Alembic object property.
-    CustomAttributes (bool): When true, export the node and its modifiers
-        custom attributes into an Alembic object compound property.
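The parameters documented above are plain properties on `rt.AlembicExport` in pymxs. As a minimal sketch of setting a few of them by hand before an export (the output path and frame range are placeholders, and this only runs inside a 3ds Max session):

```python
from pymxs import runtime as rt  # available only inside 3ds Max

# Sketch: mirror what the extractor below does programmatically by
# configuring a subset of the AlembicExport parameters, then exporting
# the current selection. Path and frame values are placeholders.
rt.AlembicExport.ArchiveType = rt.Name("ogawa")
rt.AlembicExport.AnimTimeRange = rt.Name("StartEnd")
rt.AlembicExport.StartFrame = 1001
rt.AlembicExport.EndFrame = 1100
rt.AlembicExport.UVs = True
rt.AlembicExport.Normals = True
rt.AlembicExport.CustomAttributes = True

rt.exportFile(
    "C:/temp/pointcache.abc",
    rt.Name("noPrompt"),
    selectedOnly=True,
    using=rt.AlembicExport,
)
```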
-""" -import os -import pyblish.api -from openpype.pipeline import publish, OptionalPyblishPluginMixin -from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection -from openpype.hosts.max.api.lib import suspended_refresh -from openpype.lib import BoolDef - - -class ExtractAlembic(publish.Extractor, - OptionalPyblishPluginMixin): - order = pyblish.api.ExtractorOrder - label = "Extract Pointcache" - hosts = ["max"] - families = ["pointcache"] - optional = True - - def process(self, instance): - if not self.is_active(instance.data): - return - - parent_dir = self.staging_dir(instance) - file_name = "{name}.abc".format(**instance.data) - path = os.path.join(parent_dir, file_name) - - with suspended_refresh(): - self._set_abc_attributes(instance) - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - rt.exportFile( - path, - rt.name("noPrompt"), - selectedOnly=True, - using=rt.AlembicExport, - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "abc", - "ext": "abc", - "files": file_name, - "stagingDir": parent_dir, - } - instance.data["representations"].append(representation) - - def _set_abc_attributes(self, instance): - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - attr_values = self.get_attr_values_from_data(instance.data) - custom_attrs = attr_values.get("custom_attrs", False) - if not custom_attrs: - self.log.debug( - "No Custom Attributes included in this abc export...") - rt.AlembicExport.ArchiveType = rt.Name("ogawa") - rt.AlembicExport.CoordinateSystem = rt.Name("maya") - rt.AlembicExport.StartFrame = start - rt.AlembicExport.EndFrame = end - rt.AlembicExport.CustomAttributes = custom_attrs - - @classmethod - def get_attribute_defs(cls): - return [ - BoolDef("custom_attrs", - label="Custom Attributes", - default=False), - ] - - -class ExtractCameraAlembic(ExtractAlembic): - """Extract Camera with AlembicExport.""" - - label = "Extract Alembic Camera" - families = ["camera"] - - -class ExtractModel(ExtractAlembic): - """Extract Geometry in Alembic Format""" - label = "Extract Geometry (Alembic)" - families = ["model"] - - def _set_abc_attributes(self, instance): - attr_values = self.get_attr_values_from_data(instance.data) - custom_attrs = attr_values.get("custom_attrs", False) - if not custom_attrs: - self.log.debug( - "No Custom Attributes included in this abc export...") - rt.AlembicExport.ArchiveType = rt.name("ogawa") - rt.AlembicExport.CoordinateSystem = rt.name("maya") - rt.AlembicExport.CustomAttributes = custom_attrs - rt.AlembicExport.UVs = True - rt.AlembicExport.VertexColors = True - rt.AlembicExport.PreserveInstances = True diff --git a/openpype/hosts/max/plugins/publish/extract_fbx.py b/openpype/hosts/max/plugins/publish/extract_fbx.py deleted file mode 100644 index 7454cd08d1..0000000000 --- a/openpype/hosts/max/plugins/publish/extract_fbx.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline import publish, OptionalPyblishPluginMixin -from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection -from openpype.hosts.max.api.lib import convert_unit_scale - - -class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin): - """ - Extract Geometry in FBX Format - """ - - order = pyblish.api.ExtractorOrder - 0.05 - label = "Extract FBX" - hosts = ["max"] - families = ["model"] - optional = True - - def 
process(self, instance): - if not self.is_active(instance.data): - return - - stagingdir = self.staging_dir(instance) - filename = "{name}.fbx".format(**instance.data) - filepath = os.path.join(stagingdir, filename) - self._set_fbx_attributes() - - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - rt.exportFile( - filepath, - rt.name("noPrompt"), - selectedOnly=True, - using=rt.FBXEXP, - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "fbx", - "ext": "fbx", - "files": filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - self.log.info( - "Extracted instance '%s' to: %s" % (instance.name, filepath) - ) - - def _set_fbx_attributes(self): - unit_scale = convert_unit_scale() - rt.FBXExporterSetParam("Animation", False) - rt.FBXExporterSetParam("Cameras", False) - rt.FBXExporterSetParam("Lights", False) - rt.FBXExporterSetParam("PointCache", False) - rt.FBXExporterSetParam("AxisConversionMethod", "Animation") - rt.FBXExporterSetParam("UpAxis", "Y") - rt.FBXExporterSetParam("Preserveinstances", True) - if unit_scale: - rt.FBXExporterSetParam("ConvertUnit", unit_scale) - - -class ExtractCameraFbx(ExtractModelFbx): - """Extract Camera with FbxExporter.""" - - order = pyblish.api.ExtractorOrder - 0.2 - label = "Extract Fbx Camera" - families = ["camera"] - optional = True - - def _set_fbx_attributes(self): - unit_scale = convert_unit_scale() - rt.FBXExporterSetParam("Animation", True) - rt.FBXExporterSetParam("Cameras", True) - rt.FBXExporterSetParam("AxisConversionMethod", "Animation") - rt.FBXExporterSetParam("UpAxis", "Y") - rt.FBXExporterSetParam("Preserveinstances", True) - if unit_scale: - rt.FBXExporterSetParam("ConvertUnit", unit_scale) diff --git a/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py deleted file mode 100644 index 47ed85977b..0000000000 --- a/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline import publish -from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection - - -class ExtractRedshiftProxy(publish.Extractor): - """ - Extract Redshift Proxy with rsProxy - """ - - order = pyblish.api.ExtractorOrder - 0.1 - label = "Extract RedShift Proxy" - hosts = ["max"] - families = ["redshiftproxy"] - - def process(self, instance): - start = instance.data["frameStartHandle"] - end = instance.data["frameEndHandle"] - - self.log.debug("Extracting Redshift Proxy...") - stagingdir = self.staging_dir(instance) - rs_filename = "{name}.rs".format(**instance.data) - rs_filepath = os.path.join(stagingdir, rs_filename) - rs_filepath = rs_filepath.replace("\\", "/") - - rs_filenames = self.get_rsfiles(instance, start, end) - - with maintained_selection(): - # select and export - node_list = instance.data["members"] - rt.Select(node_list) - # Redshift rsProxy command - # rsProxy fp selected compress connectivity startFrame endFrame - # camera warnExisting transformPivotToOrigin - rt.rsProxy(rs_filepath, 1, 0, 0, start, end, 0, 1, 1) - - self.log.info("Performing Extraction ...") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'rs', - 'ext': 'rs', - 'files': rs_filenames if len(rs_filenames) > 1 else rs_filenames[0], # noqa - "stagingDir": 
stagingdir, - } - instance.data["representations"].append(representation) - self.log.info("Extracted instance '%s' to: %s" % (instance.name, - stagingdir)) - - def get_rsfiles(self, instance, startFrame, endFrame): - rs_filenames = [] - rs_name = instance.data["name"] - for frame in range(startFrame, endFrame + 1): - rs_filename = "%s.%04d.rs" % (rs_name, frame) - rs_filenames.append(rs_filename) - - return rs_filenames diff --git a/openpype/hosts/max/plugins/publish/extract_thumbnail.py b/openpype/hosts/max/plugins/publish/extract_thumbnail.py deleted file mode 100644 index 02fa75e032..0000000000 --- a/openpype/hosts/max/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.max.api.preview_animation import render_preview_animation - - -class ExtractThumbnail(publish.Extractor): - """Extract Thumbnail for Review - """ - - order = pyblish.api.ExtractorOrder - label = "Extract Thumbnail" - hosts = ["max"] - families = ["review"] - - def process(self, instance): - ext = instance.data.get("imageFormat") - frame = int(instance.data["frameStart"]) - staging_dir = self.staging_dir(instance) - filepath = os.path.join( - staging_dir, f"{instance.name}_thumbnail") - self.log.debug("Writing Thumbnail to '{}'".format(filepath)) - - review_camera = instance.data["review_camera"] - viewport_options = instance.data.get("viewport_options", {}) - files = render_preview_animation( - filepath, - ext, - review_camera, - start_frame=frame, - end_frame=frame, - percentSize=instance.data["percentSize"], - width=instance.data["review_width"], - height=instance.data["review_height"], - viewport_options=viewport_options) - - thumbnail = next(os.path.basename(path) for path in files) - - representation = { - "name": "thumbnail", - "ext": ext, - "files": thumbnail, - "stagingDir": staging_dir, - "thumbnail": True - } - - self.log.debug(f"{representation}") - - if "representations" not in instance.data: - instance.data["representations"] = [] - instance.data["representations"].append(representation) diff --git a/openpype/hosts/max/plugins/publish/increment_workfile_version.py b/openpype/hosts/max/plugins/publish/increment_workfile_version.py deleted file mode 100644 index 3dec214f77..0000000000 --- a/openpype/hosts/max/plugins/publish/increment_workfile_version.py +++ /dev/null @@ -1,19 +0,0 @@ -import pyblish.api -from openpype.lib import version_up -from pymxs import runtime as rt - - -class IncrementWorkfileVersion(pyblish.api.ContextPlugin): - """Increment current workfile version.""" - - order = pyblish.api.IntegratorOrder + 0.9 - label = "Increment Workfile Version" - hosts = ["max"] - families = ["workfile"] - - def process(self, context): - path = context.data["currentFile"] - filepath = version_up(path) - - rt.saveMaxFile(filepath) - self.log.info("Incrementing file version") diff --git a/openpype/hosts/max/plugins/publish/validate_attributes.py b/openpype/hosts/max/plugins/publish/validate_attributes.py deleted file mode 100644 index 0632ee38f0..0000000000 --- a/openpype/hosts/max/plugins/publish/validate_attributes.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for Attributes.""" -from pyblish.api import ContextPlugin, ValidatorOrder -from pymxs import runtime as rt - -from openpype.pipeline.publish import ( - OptionalPyblishPluginMixin, - PublishValidationError, - RepairContextAction -) - - -def has_property(object_name, property_name): - """Return whether an object has a 
property with given name."""
-    return rt.Execute(f'isProperty {object_name} "{property_name}"')
-
-
-def is_matching_value(object_name, property_name, value):
-    """Return whether an existing property matches the value `value`."""
-    property_value = rt.Execute(f"{object_name}.{property_name}")
-
-    # Wrap the property value if the value is a string attribute
-    # starting with a `#`
-    if (
-        isinstance(value, str) and
-        value.startswith("#") and
-        not value.endswith(")")
-    ):
-        # prefix value with `#`
-        # not applicable for #() array value type
-        # and only applicable for enum i.e. #bob, #sally
-        property_value = f"#{property_value}"
-
-    return property_value == value
-
-
-class ValidateAttributes(OptionalPyblishPluginMixin,
-                         ContextPlugin):
-    """Validates that attributes in the project settings are consistent
-    with the nodes from the MaxWrapper class in 3ds Max,
-    e.g. "renderers.current.separateAovFiles",
-    "renderers.production.PrimaryGIEngine".
-    Admins need to provide a dict like the one below and enable this
-    validator for the check:
-    {
-        "renderers.current":{
-            "separateAovFiles" : True
-        },
-        "renderers.production":{
-            "PrimaryGIEngine": "#RS_GIENGINE_BRUTE_FORCE"
-        }
-        ....
-    }
-
-    """
-
-    order = ValidatorOrder
-    hosts = ["max"]
-    label = "Attributes"
-    actions = [RepairContextAction]
-    optional = True
-
-    @classmethod
-    def get_invalid(cls, context):
-        attributes = (
-            context.data["project_settings"]["max"]["publish"]
-            ["ValidateAttributes"]["attributes"]
-        )
-        if not attributes:
-            return
-        invalid = []
-        for object_name, required_properties in attributes.items():
-            if not rt.Execute(f"isValidValue {object_name}"):
-                # Skip checking if the node does not
-                # exist in MaxWrapper Class
-                cls.log.debug(f"Unable to find '{object_name}'."
-                              " Skipping validation of attributes.")
-                continue
-
-            for property_name, value in required_properties.items():
-                if not has_property(object_name, property_name):
-                    cls.log.error(
-                        "Non-existing property: "
-                        f"{object_name}.{property_name}")
-                    invalid.append((object_name, property_name))
-                    # the value check below would fail on a missing property
-                    continue
-
-                if not is_matching_value(object_name, property_name, value):
-                    cls.log.error(
-                        f"Invalid value for: {object_name}.{property_name}"
-                        f" should be: {value}")
-                    invalid.append((object_name, property_name))
-
-        return invalid
-
-    def process(self, context):
-        if not self.is_active(context.data):
-            self.log.debug("Skipping Validate Attributes...")
-            return
-        invalid_attributes = self.get_invalid(context)
-        if invalid_attributes:
-            bullet_point_invalid_statement = "\n".join(
-                "- {}".format(invalid) for invalid
-                in invalid_attributes
-            )
-            report = (
-                "Required Attribute(s) have invalid value(s).\n\n"
-                f"{bullet_point_invalid_statement}\n\n"
-                "You can use the repair action to fix them if they are not\n"
-                "unknown property value(s)."
-            )
-            raise PublishValidationError(
-                report, title="Invalid Value(s) for Required Attribute(s)")
-
-    @classmethod
-    def repair(cls, context):
-        attributes = (
-            context.data["project_settings"]["max"]["publish"]
-            ["ValidateAttributes"]["attributes"]
-        )
-        invalid_attributes = cls.get_invalid(context)
-        for attrs in invalid_attributes:
-            prop, attr = attrs
-            value = attributes[prop][attr]
-            if isinstance(value, str) and not value.startswith("#"):
-                attribute_fix = '{}.{}="{}"'.format(
-                    prop, attr, value
-                )
-            else:
-                attribute_fix = "{}.{}={}".format(
-                    prop, attr, value
-                )
-            rt.Execute(attribute_fix)
diff --git a/openpype/hosts/max/plugins/publish/validate_camera_contents.py b/openpype/hosts/max/plugins/publish/validate_camera_contents.py
deleted file mode 100644
index 0c61e6431d..0000000000
--- a/openpype/hosts/max/plugins/publish/validate_camera_contents.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-import pyblish.api
-from openpype.pipeline import PublishValidationError
-from pymxs import runtime as rt
-
-
-class ValidateCameraContent(pyblish.api.InstancePlugin):
-    """Validates Camera instance contents.
-
-    A Camera instance may only hold a SINGLE camera's transform.
-    """
-
-    order = pyblish.api.ValidatorOrder
-    families = ["camera", "review"]
-    hosts = ["max"]
-    label = "Camera Contents"
-    camera_type = ["$Free_Camera", "$Target_Camera",
-                   "$Physical_Camera", "$Target"]
-
-    def process(self, instance):
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise PublishValidationError(("Camera instance must only include "
-                                          "a camera (and its camera target). "
                                          f"Invalid content: {invalid}"))
-
-    def get_invalid(self, instance):
-        """Get invalid nodes if the instance contains non-camera content."""
-        invalid = []
-        container = instance.data["instance_node"]
-        self.log.info(f"Validating camera content for {container}")
-
-        selection_list = instance.data["members"]
-        for sel in selection_list:
-            # to avoid Attribute Error from pymxs wrapper
-            sel_tmp = str(sel)
-            found = any(sel_tmp.startswith(cam) for cam in self.camera_type)
-            if not found:
-                self.log.error("Camera not found")
-                invalid.append(sel)
-        return invalid
diff --git a/openpype/hosts/max/plugins/publish/validate_deadline_publish.py b/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
deleted file mode 100644
index b2f0e863f4..0000000000
--- a/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import os
-import pyblish.api
-from pymxs import runtime as rt
-from openpype.pipeline.publish import (
-    RepairAction,
-    ValidateContentsOrder,
-    PublishValidationError,
-    OptionalPyblishPluginMixin
-)
-from openpype.hosts.max.api.lib_rendersettings import RenderSettings
-
-
-class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
-                              OptionalPyblishPluginMixin):
-    """Validates that the render output directory is
-    not the same in every submission.
-    """
-
-    order = ValidateContentsOrder
-    families = ["maxrender"]
-    hosts = ["max"]
-    label = "Render Output for Deadline"
-    optional = True
-    actions = [RepairAction]
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-        file = rt.maxFileName
-        filename, ext = os.path.splitext(file)
-        if filename not in rt.rendOutputFilename:
-            raise PublishValidationError(
-                "Render output folder "
-                "doesn't match the max scene name! "
-                "Use Repair action to "
-                "fix the folder file path."
-            )
-
-    @classmethod
-    def repair(cls, instance):
-        container = instance.data.get("instance_node")
-        RenderSettings().render_output(container)
-        cls.log.debug("Reset the render output folder...")
diff --git a/openpype/hosts/max/plugins/publish/validate_frame_range.py b/openpype/hosts/max/plugins/publish/validate_frame_range.py
deleted file mode 100644
index 0e8316e844..0000000000
--- a/openpype/hosts/max/plugins/publish/validate_frame_range.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import pyblish.api
-
-from pymxs import runtime as rt
-from openpype.pipeline import (
-    OptionalPyblishPluginMixin
-)
-from openpype.pipeline.publish import (
-    RepairAction,
-    ValidateContentsOrder,
-    PublishValidationError,
-    KnownPublishError
-)
-from openpype.hosts.max.api.lib import get_frame_range, set_timeline
-
-
-class ValidateFrameRange(pyblish.api.InstancePlugin,
-                         OptionalPyblishPluginMixin):
-    """Validates the frame ranges.
-
-    This is an optional validator checking if the frame range on the instance
-    matches the frame range specified for the asset.
-
-    It also validates render frame ranges of render layers.
-
-    Repair action will change everything to match the asset frame range.
-
-    This can be turned off by the artist to allow custom ranges.
-    """
-
-    label = "Validate Frame Range"
-    order = ValidateContentsOrder
-    families = ["camera", "maxrender",
-                "pointcache", "pointcloud",
-                "review", "redshiftproxy"]
-    hosts = ["max"]
-    optional = True
-    actions = [RepairAction]
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            self.log.debug("Skipping Validate Frame Range...")
-            return
-
-        frame_range = get_frame_range(
-            asset_doc=instance.data["assetEntity"])
-
-        inst_frame_start = instance.data.get("frameStartHandle")
-        inst_frame_end = instance.data.get("frameEndHandle")
-        if inst_frame_start is None or inst_frame_end is None:
-            raise KnownPublishError(
-                "Missing frame start and frame end on "
-                "instance to validate."
-            )
-        frame_start_handle = frame_range["frameStartHandle"]
-        frame_end_handle = frame_range["frameEndHandle"]
-        errors = []
-        if frame_start_handle != inst_frame_start:
-            errors.append(
-                f"Start frame ({inst_frame_start}) on instance does not match "  # noqa
-                f"with the start frame ({frame_start_handle}) set on the asset data. ")  # noqa
-        if frame_end_handle != inst_frame_end:
-            errors.append(
-                f"End frame ({inst_frame_end}) on instance does not match "
-                f"with the end frame ({frame_end_handle}) "
-                "from the asset data. ")
-
-        if errors:
-            bullet_point_errors = "\n".join(
-                "- {}".format(error) for error in errors
-            )
-            report = (
-                "Frame range settings are incorrect.\n\n"
-                f"{bullet_point_errors}\n\n"
-                "You can use repair action to fix it."
- ) - raise PublishValidationError(report, title="Frame Range incorrect") - - @classmethod - def repair(cls, instance): - frame_range = get_frame_range() - frame_start_handle = frame_range["frameStartHandle"] - frame_end_handle = frame_range["frameEndHandle"] - - if instance.data["family"] == "maxrender": - rt.rendStart = frame_start_handle - rt.rendEnd = frame_end_handle - else: - set_timeline(frame_start_handle, frame_end_handle) diff --git a/openpype/hosts/max/plugins/publish/validate_instance_has_members.py b/openpype/hosts/max/plugins/publish/validate_instance_has_members.py deleted file mode 100644 index 3c0039d5e0..0000000000 --- a/openpype/hosts/max/plugins/publish/validate_instance_has_members.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from openpype.pipeline import PublishValidationError - - -class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): - """Validates Instance has members. - - Check if MaxScene containers includes any contents underneath. - """ - - order = pyblish.api.ValidatorOrder - families = ["camera", - "model", - "maxScene", - "review", - "pointcache", - "pointcloud", - "redshiftproxy"] - hosts = ["max"] - label = "Container Contents" - - def process(self, instance): - if not instance.data["members"]: - raise PublishValidationError("No content found in the container") diff --git a/openpype/hosts/max/plugins/publish/validate_loaded_plugin.py b/openpype/hosts/max/plugins/publish/validate_loaded_plugin.py deleted file mode 100644 index efa06795b0..0000000000 --- a/openpype/hosts/max/plugins/publish/validate_loaded_plugin.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for Loaded Plugin.""" -import os -import pyblish.api -from pymxs import runtime as rt - -from openpype.pipeline.publish import ( - RepairAction, - OptionalPyblishPluginMixin, - PublishValidationError -) -from openpype.hosts.max.api.lib import get_plugins - - -class ValidateLoadedPlugin(OptionalPyblishPluginMixin, - pyblish.api.InstancePlugin): - """Validates if the specific plugin is loaded in 3ds max. - Studio Admin(s) can add the plugins they want to check in validation - via studio defined project settings - """ - - order = pyblish.api.ValidatorOrder - hosts = ["max"] - label = "Validate Loaded Plugins" - optional = True - actions = [RepairAction] - - family_plugins_mapping = {} - - @classmethod - def get_invalid(cls, instance): - """Plugin entry point.""" - family_plugins_mapping = cls.family_plugins_mapping - if not family_plugins_mapping: - return - - invalid = [] - # Find all plug-in requirements for current instance - instance_families = {instance.data["family"]} - instance_families.update(instance.data.get("families", [])) - cls.log.debug("Checking plug-in validation " - f"for instance families: {instance_families}") - all_required_plugins = set() - - for mapping in family_plugins_mapping: - # Check for matching families - if not mapping: - return - - match_families = {fam.strip() for fam in mapping["families"]} - has_match = "*" in match_families or match_families.intersection( - instance_families) - - if not has_match: - continue - - cls.log.debug( - f"Found plug-in family requirements: {match_families}") - required_plugins = [ - # match lowercase and format with os.environ to allow - # plugin names defined by max version, e.g. 
{3DSMAX_VERSION} - plugin.format(**os.environ).lower() - for plugin in mapping["plugins"] - # ignore empty fields in settings - if plugin.strip() - ] - - all_required_plugins.update(required_plugins) - - if not all_required_plugins: - # Instance has no plug-in requirements - return - - # get all DLL loaded plugins in Max and their plugin index - available_plugins = { - plugin_name.lower(): index for index, plugin_name in enumerate( - get_plugins()) - } - # validate the required plug-ins - for plugin in sorted(all_required_plugins): - plugin_index = available_plugins.get(plugin) - if plugin_index is None: - debug_msg = ( - f"Plugin {plugin} does not exist" - " in 3dsMax Plugin List." - ) - invalid.append((plugin, debug_msg)) - continue - if not rt.pluginManager.isPluginDllLoaded(plugin_index): - debug_msg = f"Plugin {plugin} not loaded." - invalid.append((plugin, debug_msg)) - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - self.log.debug("Skipping Validate Loaded Plugin...") - return - invalid = self.get_invalid(instance) - if invalid: - bullet_point_invalid_statement = "\n".join( - "- {}".format(message) for _, message in invalid - ) - report = ( - "Required plugins are not loaded.\n\n" - f"{bullet_point_invalid_statement}\n\n" - "You can use repair action to load the plugin." - ) - raise PublishValidationError( - report, title="Missing Required Plugins") - - @classmethod - def repair(cls, instance): - # get all DLL loaded plugins in Max and their plugin index - invalid = cls.get_invalid(instance) - if not invalid: - return - - # get all DLL loaded plugins in Max and their plugin index - available_plugins = { - plugin_name.lower(): index for index, plugin_name in enumerate( - get_plugins()) - } - - for invalid_plugin, _ in invalid: - plugin_index = available_plugins.get(invalid_plugin) - - if plugin_index is None: - cls.log.warning( - f"Can't enable missing plugin: {invalid_plugin}") - continue - - if not rt.pluginManager.isPluginDllLoaded(plugin_index): - rt.pluginManager.loadPluginDll(plugin_index) diff --git a/openpype/hosts/max/startup/startup.py b/openpype/hosts/max/startup/startup.py deleted file mode 100644 index 0d3135a16f..0000000000 --- a/openpype/hosts/max/startup/startup.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import sys - -# this might happen in some 3dsmax version where PYTHONPATH isn't added -# to sys.path automatically -for path in os.environ["PYTHONPATH"].split(os.pathsep): - if path and path not in sys.path: - sys.path.append(path) - -from openpype.hosts.max.api import MaxHost -from openpype.pipeline import install_host - -host = MaxHost() -install_host(host) diff --git a/openpype/hosts/maya/addon.py b/openpype/hosts/maya/addon.py deleted file mode 100644 index b9ecb8279f..0000000000 --- a/openpype/hosts/maya/addon.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -MAYA_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class MayaAddon(OpenPypeModule, IHostAddon): - name = "maya" - host_name = "maya" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - # Add requirements to PYTHONPATH - new_python_paths = [ - os.path.join(MAYA_ROOT_DIR, "startup") - ] - old_python_path = env.get("PYTHONPATH") or "" - for path in old_python_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_python_paths: - 
new_python_paths.append(norm_path)
-
-        env["PYTHONPATH"] = os.pathsep.join(new_python_paths)
-
-        # Set default environments
-        envs = {
-            "OPENPYPE_LOG_NO_COLORS": "Yes",
-            # For python module 'qtpy'
-            "QT_API": "PySide2",
-            # For python module 'Qt'
-            "QT_PREFERRED_BINDING": "PySide2"
-        }
-        for key, value in envs.items():
-            env[key] = value
-
-    def get_launch_hook_paths(self, app):
-        if app.host_name != self.host_name:
-            return []
-        return [
-            os.path.join(MAYA_ROOT_DIR, "hooks")
-        ]
-
-    def get_workfile_extensions(self):
-        return [".ma", ".mb"]
diff --git a/openpype/hosts/maya/api/action.py b/openpype/hosts/maya/api/action.py
deleted file mode 100644
index 277f4cc238..0000000000
--- a/openpype/hosts/maya/api/action.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# absolute_import is needed to counter the `module has no attribute cmds`
-# error in Maya
-from __future__ import absolute_import
-
-import pyblish.api
-
-from openpype.client import get_asset_by_name
-from openpype.pipeline.publish import get_errored_instances_from_context
-
-
-class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
-    """Generate UUIDs on the invalid nodes in the instance.
-
-    Invalid nodes are those returned by the plugin's `get_invalid` method.
-    As such it is the plug-in's responsibility to ensure the nodes that
-    receive new UUIDs are actually invalid.
-
-    Requires:
-        - instance.data["asset"]
-
-    """
-
-    label = "Regenerate UUIDs"
-    on = "failed"  # This action is only available on a failed plug-in
-    icon = "wrench"  # Font Awesome icon name
-
-    def process(self, context, plugin):
-
-        from maya import cmds
-
-        self.log.info("Finding bad nodes..")
-
-        errored_instances = get_errored_instances_from_context(context)
-
-        # Apply pyblish logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
-
-        # Get the nodes from all instances that ran through this plug-in
-        all_invalid = []
-        for instance in instances:
-            invalid = plugin.get_invalid(instance)
-
-            # Don't allow referenced nodes to get their ids regenerated to
-            # avoid loaded content getting messed up with reference edits
-            if invalid:
-                referenced = {node for node in invalid if
-                              cmds.referenceQuery(node, isNodeReferenced=True)}
-                if referenced:
-                    self.log.warning("Skipping UUID generation on referenced "
-                                     "nodes: {}".format(list(referenced)))
-                    invalid = [node for node in invalid
-                               if node not in referenced]
-
-            if invalid:
-
-                self.log.info("Fixing instance {}".format(instance.name))
-                self._update_id_attribute(instance, invalid)
-
-                all_invalid.extend(invalid)
-
-        if not all_invalid:
-            self.log.info("No invalid nodes found.")
-            return
-
-        all_invalid = list(set(all_invalid))
-        self.log.info("Generated ids on nodes: {0}".format(all_invalid))
-
-    def _update_id_attribute(self, instance, nodes):
-        """Regenerate the id attribute on the given nodes
-
-        Args:
-            instance: The instance we're fixing for
-            nodes (list): all nodes to regenerate ids on
-        """
-
-        from . import lib
-
-        # This is expected to be called from validators, in which case
-        # 'assetEntity' should always be available; querying by name is
-        # kept as a fallback.
-        asset_doc = instance.data.get("assetEntity")
-        if not asset_doc:
-            asset_name = instance.data["asset"]
-            project_name = instance.context.data["projectName"]
-            self.log.info((
-                "Asset is not stored on instance."
- " Querying by name \"{}\" from project \"{}\"" - ).format(asset_name, project_name)) - asset_doc = get_asset_by_name( - project_name, asset_name, fields=["_id"] - ) - - for node, _id in lib.generate_ids(nodes, asset_id=asset_doc["_id"]): - lib.set_id(node, _id, overwrite=True) - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid nodes in Maya when plug-in failed. - - To retrieve the invalid nodes this assumes a static `get_invalid()` - method is available on the plugin. - - """ - label = "Select invalid" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Awesome Icon - - def process(self, context, plugin): - - try: - from maya import cmds - except ImportError: - raise ImportError("Current host is not Maya") - - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes..") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") - - # Ensure unique (process each node only once) - invalid = list(set(invalid)) - - if invalid: - self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid)) - cmds.select(invalid, replace=True, noExpand=True) - else: - self.log.info("No invalid nodes found.") - cmds.select(deselect=True) diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py deleted file mode 100644 index 46494413b7..0000000000 --- a/openpype/hosts/maya/api/commands.py +++ /dev/null @@ -1,128 +0,0 @@ -# -*- coding: utf-8 -*- -"""OpenPype script commands to be used directly in Maya.""" -from maya import cmds - -from openpype.client import get_asset_by_name, get_project -from openpype.pipeline import get_current_project_name, get_current_asset_name - - -class ToolWindows: - - _windows = {} - - @classmethod - def get_window(cls, tool): - """Get widget for specific tool. - - Args: - tool (str): Name of the tool. - - Returns: - Stored widget. - - """ - try: - return cls._windows[tool] - except KeyError: - return None - - @classmethod - def set_window(cls, tool, window): - """Set widget for the tool. - - Args: - tool (str): Name of the tool. - window (QtWidgets.QWidget): Widget - - """ - cls._windows[tool] = window - - -def edit_shader_definitions(): - from qtpy import QtWidgets - from openpype.hosts.maya.api.shader_definition_editor import ( - ShaderDefinitionsEditor - ) - from openpype.tools.utils import qt_app_context - - top_level_widgets = QtWidgets.QApplication.topLevelWidgets() - main_window = next(widget for widget in top_level_widgets - if widget.objectName() == "MayaWindow") - - with qt_app_context(): - window = ToolWindows.get_window("shader_definition_editor") - if not window: - window = ShaderDefinitionsEditor(parent=main_window) - ToolWindows.set_window("shader_definition_editor", window) - window.show() - - -def _resolution_from_document(doc): - if not doc or "data" not in doc: - print("Entered document is not valid. 
\"{}\"".format(str(doc))) - return None - - resolution_width = doc["data"].get("resolutionWidth") - resolution_height = doc["data"].get("resolutionHeight") - # Backwards compatibility - if resolution_width is None or resolution_height is None: - resolution_width = doc["data"].get("resolution_width") - resolution_height = doc["data"].get("resolution_height") - - # Make sure both width and height are set - if resolution_width is None or resolution_height is None: - cmds.warning( - "No resolution information found for \"{}\"".format(doc["name"]) - ) - return None - - return int(resolution_width), int(resolution_height) - - -def reset_resolution(): - # Default values - resolution_width = 1920 - resolution_height = 1080 - - # Get resolution from asset - project_name = get_current_project_name() - asset_name = get_current_asset_name() - asset_doc = get_asset_by_name(project_name, asset_name) - resolution = _resolution_from_document(asset_doc) - # Try get resolution from project - if resolution is None: - # TODO go through visualParents - print(( - "Asset \"{}\" does not have set resolution." - " Trying to get resolution from project" - ).format(asset_name)) - project_doc = get_project(project_name) - resolution = _resolution_from_document(project_doc) - - if resolution is None: - msg = "Using default resolution {}x{}" - else: - resolution_width, resolution_height = resolution - msg = "Setting resolution to {}x{}" - - print(msg.format(resolution_width, resolution_height)) - - # set for different renderers - # arnold, vray, redshift, renderman - - renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer").lower() - # handle various renderman names - if renderer.startswith("renderman"): - renderer = "renderman" - - # default attributes are usable for Arnold, Renderman and Redshift - width_attr_name = "defaultResolution.width" - height_attr_name = "defaultResolution.height" - - # Vray has its own way - if renderer == "vray": - width_attr_name = "vraySettings.width" - height_attr_name = "vraySettings.height" - - cmds.setAttr(width_attr_name, resolution_width) - cmds.setAttr(height_attr_name, resolution_height) diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py deleted file mode 100644 index da34896c3f..0000000000 --- a/openpype/hosts/maya/api/lib.py +++ /dev/null @@ -1,4244 +0,0 @@ -"""Standalone helper functions""" - -import os -import copy -from pprint import pformat -import sys -import uuid -import re - -import json -import logging -import contextlib -import capture -from .exitstack import ExitStack -from collections import OrderedDict, defaultdict -from math import ceil -from six import string_types - -from maya import cmds, mel -from maya.api import OpenMaya - -from openpype.client import ( - get_project, - get_asset_by_name, - get_subsets, - get_last_versions, - get_representation_by_name -) -from openpype.settings import get_project_settings -from openpype.pipeline import ( - get_current_project_name, - get_current_asset_name, - get_current_task_name, - discover_loader_plugins, - loaders_from_representation, - get_representation_path, - load_container, - registered_host -) -from openpype.lib import NumberDef -from openpype.pipeline.context_tools import get_current_project_asset -from openpype.pipeline.create import CreateContext -from openpype.lib.profiles_filtering import filter_profiles - - -self = sys.modules[__name__] -self._parent = None - -log = logging.getLogger(__name__) - -IS_HEADLESS = not hasattr(cmds, "about") or cmds.about(batch=True) -ATTRIBUTE_DICT 
= {"int": {"attributeType": "long"}, - "str": {"dataType": "string"}, - "unicode": {"dataType": "string"}, - "float": {"attributeType": "double"}, - "bool": {"attributeType": "bool"}} - -SHAPE_ATTRS = {"castsShadows", - "receiveShadows", - "motionBlur", - "primaryVisibility", - "smoothShading", - "visibleInReflections", - "visibleInRefractions", - "doubleSided", - "opposite"} - - -DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0] - -# The maya alembic export types -_alembic_options = { - "startFrame": float, - "endFrame": float, - "frameRange": str, # "start end"; overrides startFrame & endFrame - "eulerFilter": bool, - "frameRelativeSample": float, - "noNormals": bool, - "renderableOnly": bool, - "step": float, - "stripNamespaces": bool, - "uvWrite": bool, - "wholeFrameGeo": bool, - "worldSpace": bool, - "writeVisibility": bool, - "writeColorSets": bool, - "writeFaceSets": bool, - "writeCreases": bool, # Maya 2015 Ext1+ - "writeUVSets": bool, # Maya 2017+ - "dataFormat": str, - "root": (list, tuple), - "attr": (list, tuple), - "attrPrefix": (list, tuple), - "userAttr": (list, tuple), - "melPerFrameCallback": str, - "melPostJobCallback": str, - "pythonPerFrameCallback": str, - "pythonPostJobCallback": str, - "selection": bool -} - -INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000} -FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94} - - -DISPLAY_LIGHTS_ENUM = [ - {"label": "Use Project Settings", "value": "project_settings"}, - {"label": "Default Lighting", "value": "default"}, - {"label": "All Lights", "value": "all"}, - {"label": "Selected Lights", "value": "selected"}, - {"label": "Flat Lighting", "value": "flat"}, - {"label": "No Lights", "value": "none"} -] - - -def get_main_window(): - """Acquire Maya's main window""" - from qtpy import QtWidgets - - if self._parent is None: - self._parent = { - widget.objectName(): widget - for widget in QtWidgets.QApplication.topLevelWidgets() - }["MayaWindow"] - return self._parent - - -@contextlib.contextmanager -def suspended_refresh(suspend=True): - """Suspend viewport refreshes - - cmds.ogs(pause=True) is a toggle so we cant pass False. - """ - if IS_HEADLESS: - yield - return - - original_state = cmds.ogs(query=True, pause=True) - try: - if suspend and not original_state: - cmds.ogs(pause=True) - yield - finally: - if suspend and not original_state: - cmds.ogs(pause=True) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context - - Example: - >>> scene = cmds.file(new=True, force=True) - >>> node = cmds.createNode("transform", name="Test") - >>> cmds.select("persp") - >>> with maintained_selection(): - ... cmds.select("Test", replace=True) - >>> "Test" in cmds.ls(selection=True) - False - - """ - - previous_selection = cmds.ls(selection=True) - try: - yield - finally: - if previous_selection: - cmds.select(previous_selection, - replace=True, - noExpand=True) - else: - cmds.select(clear=True) - - -def reload_all_udim_tile_previews(): - """Regenerate all UDIM tile preview in texture file""" - for texture_file in cmds.ls(type="file"): - if cmds.getAttr("{}.uvTilingMode".format(texture_file)) > 0: - cmds.ogs(regenerateUVTilePreview=texture_file) - - -@contextlib.contextmanager -def panel_camera(panel, camera): - """Set modelPanel's camera during the context. - - Arguments: - panel (str): modelPanel name. - camera (str): camera name. 
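-
-    Example:
-        >>> # Hypothetical panel and camera names, for illustration only
-        >>> with panel_camera("modelPanel4", "persp"):
-        ...     cmds.refresh()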
-
-    """
-    original_camera = cmds.modelPanel(panel, query=True, camera=True)
-    try:
-        cmds.modelPanel(panel, edit=True, camera=camera)
-        yield
-    finally:
-        cmds.modelPanel(panel, edit=True, camera=original_camera)
-
-
-def render_capture_preset(preset):
-    """Capture playblast with a preset.
-
-    To generate the preset use `generate_capture_preset`.
-
-    Args:
-        preset (dict): preset options
-
-    Returns:
-        str: Output path of `capture.capture`
-    """
-
-    # Force a refresh at the start of the timeline
-    # TODO (Question): Why do we need to do this? What bug does it solve?
-    #   Is this for simulations?
-    cmds.refresh(force=True)
-    refresh_frame_int = int(cmds.playbackOptions(query=True, minTime=True))
-    cmds.currentTime(refresh_frame_int - 1, edit=True)
-    cmds.currentTime(refresh_frame_int, edit=True)
-    log.debug(
-        "Using preset: {}".format(
-            json.dumps(preset, indent=4, sort_keys=True)
-        )
-    )
-    preset = copy.deepcopy(preset)
-    # not supported by `capture` so we pop it off of the preset
-    reload_textures = preset["viewport_options"].pop("loadTextures", False)
-    panel = preset.pop("panel")
-    with ExitStack() as stack:
-        stack.enter_context(maintained_time())
-        stack.enter_context(panel_camera(panel, preset["camera"]))
-        stack.enter_context(viewport_default_options(panel, preset))
-        if reload_textures:
-            # Force immediate texture loading to ensure all textures
-            # have loaded before the playblast starts
-            stack.enter_context(material_loading_mode(mode="immediate"))
-            # Regenerate all UDIM tile previews
-            reload_all_udim_tile_previews()
-        path = capture.capture(log=self.log, **preset)
-
-    return path
-
-
-def generate_capture_preset(instance, camera, path,
-                            start=None, end=None, capture_preset=None):
-    """Gather all preset options for playblast capturing.
-
-    Args:
-        instance (pyblish.api.Instance): instance
-        camera (str): review camera
-        path (str): filepath
-        start (int): frameStart
-        end (int): frameEnd
-        capture_preset (dict): capture preset
-
-    Returns:
-        dict: Resulting preset
-    """
-    preset = load_capture_preset(data=capture_preset)
-
-    preset["camera"] = camera
-    preset["start_frame"] = start
-    preset["end_frame"] = end
-    preset["filename"] = path
-    preset["overwrite"] = True
-    preset["panel"] = instance.data["panel"]
-
-    # Disable viewer since we use the rendering logic for publishing
-    # We don't want to open the generated playblast in a viewer directly.
-    preset["viewer"] = False
-
-    # "isolate_view" will already have been applied at creation, so we'll
-    # ignore it here.
- preset.pop("isolate_view") - - # Set resolution variables from capture presets - width_preset = capture_preset["Resolution"]["width"] - height_preset = capture_preset["Resolution"]["height"] - - # Set resolution variables from asset values - asset_data = instance.data["assetEntity"]["data"] - asset_width = asset_data.get("resolutionWidth") - asset_height = asset_data.get("resolutionHeight") - review_instance_width = instance.data.get("review_width") - review_instance_height = instance.data.get("review_height") - - # Use resolution from instance if review width/height is set - # Otherwise use the resolution from preset if it has non-zero values - # Otherwise fall back to asset width x height - # Else define no width, then `capture.capture` will use render resolution - if review_instance_width and review_instance_height: - preset["width"] = review_instance_width - preset["height"] = review_instance_height - elif width_preset and height_preset: - preset["width"] = width_preset - preset["height"] = height_preset - elif asset_width and asset_height: - preset["width"] = asset_width - preset["height"] = asset_height - - # Isolate view is requested by having objects in the set besides a - # camera. If there is only 1 member it'll be the camera because we - # validate to have 1 camera only. - if instance.data["isolate"] and len(instance.data["setMembers"]) > 1: - preset["isolate"] = instance.data["setMembers"] - - # Override camera options - # Enforce persisting camera depth of field - camera_options = preset.setdefault("camera_options", {}) - camera_options["depthOfField"] = cmds.getAttr( - "{0}.depthOfField".format(camera) - ) - - # Use Pan/Zoom from instance data instead of from preset - preset.pop("pan_zoom", None) - camera_options["panZoomEnabled"] = instance.data["panZoom"] - - # Override viewport options by instance data - viewport_options = preset.setdefault("viewport_options", {}) - viewport_options["displayLights"] = instance.data["displayLights"] - viewport_options["imagePlane"] = instance.data.get("imagePlane", True) - - # Override transparency if requested. - transparency = instance.data.get("transparency", 0) - if transparency != 0: - preset["viewport2_options"]["transparencyAlgorithm"] = transparency - - # Update preset with current panel setting - # if override_viewport_options is turned off - if not capture_preset["Viewport Options"]["override_viewport_options"]: - panel_preset = capture.parse_view(preset["panel"]) - panel_preset.pop("camera") - preset.update(panel_preset) - - return preset - - -@contextlib.contextmanager -def viewport_default_options(panel, preset): - """Context manager used by `render_capture_preset`. - - We need to explicitly enable some viewport changes so the viewport is - refreshed ahead of playblasting. - - """ - # TODO: Clarify in the docstring WHY we need to set it ahead of - # playblasting. What issues does it solve? - viewport_defaults = {} - try: - keys = [ - "useDefaultMaterial", - "wireframeOnShaded", - "xray", - "jointXray", - "backfaceCulling", - "textures" - ] - for key in keys: - viewport_defaults[key] = cmds.modelEditor( - panel, query=True, **{key: True} - ) - if preset["viewport_options"].get(key): - cmds.modelEditor( - panel, edit=True, **{key: True} - ) - yield - finally: - # Restoring viewport options. 
- if viewport_defaults: - cmds.modelEditor( - panel, edit=True, **viewport_defaults - ) - - -@contextlib.contextmanager -def material_loading_mode(mode="immediate"): - """Set material loading mode during context""" - original = cmds.displayPref(query=True, materialLoadingMode=True) - cmds.displayPref(materialLoadingMode=mode) - try: - yield - finally: - cmds.displayPref(materialLoadingMode=original) - - -def get_namespace(node): - """Return namespace of given node""" - node_name = node.rsplit("|", 1)[-1] - if ":" in node_name: - return node_name.rsplit(":", 1)[0] - else: - return "" - - -def strip_namespace(node, namespace): - """Strip given namespace from node path. - - The namespace will only be stripped from names - if it starts with that namespace. If the namespace - occurs within another namespace it's not removed. - - Examples: - >>> strip_namespace("namespace:node", namespace="namespace:") - "node" - >>> strip_namespace("hello:world:node", namespace="hello:world") - "node" - >>> strip_namespace("hello:world:node", namespace="hello") - "world:node" - >>> strip_namespace("hello:world:node", namespace="world") - "hello:world:node" - >>> strip_namespace("ns:group|ns:node", namespace="ns") - "group|node" - - Returns: - str: Node name without given starting namespace. - - """ - - # Ensure namespace ends with `:` - if not namespace.endswith(":"): - namespace = "{}:".format(namespace) - - # The long path for a node can also have the namespace - # in its parents so we need to remove it from each - return "|".join( - name[len(namespace):] if name.startswith(namespace) else name - for name in node.split("|") - ) - - -def get_custom_namespace(custom_namespace): - """Return unique namespace. - - The input namespace can contain a single group - of '#' number tokens to indicate where the namespace's - unique index should go. The amount of tokens defines - the zero padding of the number, e.g ### turns into 001. - - Warning: Note that a namespace will always be - prefixed with a _ if it starts with a digit - - Example: - >>> get_custom_namespace("myspace_##_") - # myspace_01_ - >>> get_custom_namespace("##_myspace") - # _01_myspace - >>> get_custom_namespace("myspace##") - # myspace01 - - """ - split = re.split("([#]+)", custom_namespace, 1) - - if len(split) == 3: - base, padding, suffix = split - padding = "%0{}d".format(len(padding)) - else: - base = split[0] - padding = "%02d" # default padding - suffix = "" - - return unique_namespace( - base, - format=padding, - prefix="_" if not base or base[0].isdigit() else "", - suffix=suffix - ) - - -def unique_namespace(namespace, format="%02d", prefix="", suffix=""): - """Return unique namespace - - Arguments: - namespace (str): Name of namespace to consider - format (str, optional): Formatting of the given iteration number - suffix (str, optional): Only consider namespaces with this suffix. 
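-        prefix (str, optional): Prefix to add in front of the namespace
-            base name.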
-
-    >>> unique_namespace("bar")
-    # bar01
-    >>> unique_namespace(":hello")
-    # :hello01
-    >>> unique_namespace("bar:", suffix="_NS")
-    # bar01_NS:
-
-    """
-
-    def current_namespace():
-        current = cmds.namespaceInfo(currentNamespace=True,
-                                     absoluteName=True)
-        # When inside a namespace Maya adds no trailing :
-        if not current.endswith(":"):
-            current += ":"
-        return current
-
-    # Always check against the absolute namespace root
-    # There's no clash with :x if we're defining namespace :a:x
-    ROOT = ":" if namespace.startswith(":") else current_namespace()
-
-    # Strip trailing `:` tokens since we might want to add a suffix
-    start = ":" if namespace.startswith(":") else ""
-    end = ":" if namespace.endswith(":") else ""
-    namespace = namespace.strip(":")
-    if ":" in namespace:
-        # Split off any nesting that we don't uniqify anyway.
-        parents, namespace = namespace.rsplit(":", 1)
-        start += parents + ":"
-        ROOT += start
-
-    def exists(n):
-        # Check for clash with nodes and namespaces
-        fullpath = ROOT + n
-        return cmds.objExists(fullpath) or cmds.namespace(exists=fullpath)
-
-    iteration = 1
-    while True:
-        nr_namespace = namespace + format % iteration
-        unique = prefix + nr_namespace + suffix
-
-        if not exists(unique):
-            return start + unique + end
-
-        iteration += 1
-
-
-def read(node):
-    """Return user-defined attributes from `node`"""
-
-    data = dict()
-
-    for attr in cmds.listAttr(node, userDefined=True) or list():
-        try:
-            value = cmds.getAttr(node + "." + attr, asString=True)
-
-        except RuntimeError:
-            # For Message type attribute or others that have connections,
-            # take source node name as value.
-            source = cmds.listConnections(node + "." + attr,
-                                          source=True,
-                                          destination=False)
-            source = cmds.ls(source, long=True) or [None]
-            value = source[0]
-
-        except ValueError:
-            # Some attributes cannot be read directly,
-            # such as mesh and color attributes. These
-            # are considered non-essential to this
-            # particular publishing pipeline.
-            value = None
-
-        data[attr] = value
-
-    return data
-
-
-def matrix_equals(a, b, tolerance=1e-10):
-    """Compares two matrices within an imperfection tolerance.
-
-    Args:
-        a (list, tuple): the matrix to check
-        b (list, tuple): the matrix to check against
-        tolerance (float): the precision of the differences
-
-    Returns:
-        bool: Whether the matrices are equal within the tolerance.
-
-    """
-    return all(abs(x - y) < tolerance for x, y in zip(a, b))
-
-
-def float_round(num, places=0, direction=ceil):
-    """Round `num` at `places` decimals using `direction` (e.g. `ceil`)."""
-    return direction(num * (10**places)) / float(10**places)
-
-
-def pairwise(iterable):
-    """s -> (s0,s1), (s2,s3), (s4, s5), ..."""
-    from six.moves import zip
-
-    a = iter(iterable)
-    return zip(a, a)
-
-
-def collect_animation_defs(fps=False):
-    """Get the basic animation attribute definitions for the publisher.
-
-    Returns:
-        list: List of NumberDef attribute definitions.
-
-    """
-
-    # get scene values as defaults
-    frame_start = cmds.playbackOptions(query=True, minTime=True)
-    frame_end = cmds.playbackOptions(query=True, maxTime=True)
-    frame_start_handle = cmds.playbackOptions(
-        query=True, animationStartTime=True
-    )
-    frame_end_handle = cmds.playbackOptions(query=True, animationEndTime=True)
-
-    handle_start = frame_start - frame_start_handle
-    handle_end = frame_end_handle - frame_end
-
-    # build attributes
-    defs = [
-        NumberDef("frameStart",
-                  label="Frame Start",
-                  default=frame_start,
-                  decimals=0),
-        NumberDef("frameEnd",
-                  label="Frame End",
-                  default=frame_end,
-                  decimals=0),
-        NumberDef("handleStart",
-                  label="Handle Start",
-                  default=handle_start,
-                  decimals=0),
-        NumberDef("handleEnd",
-                  label="Handle End",
-                  default=handle_end,
-                  decimals=0),
-        NumberDef("step",
-                  label="Step size",
-                  tooltip="A smaller step size means more samples and larger "
-                          "output files.\n"
-                          "A 1.0 step size is a single sample every frame.\n"
-                          "A 0.5 step size is two samples per frame.\n"
-                          "A 0.2 step size is five samples per frame.",
-                  default=1.0,
-                  decimals=3),
-    ]
-
-    if fps:
-        current_fps = mel.eval('currentTimeUnitToFPS()')
-        fps_def = NumberDef(
-            "fps", label="FPS", default=current_fps, decimals=5
-        )
-        defs.append(fps_def)
-
-    return defs
-
-
-def imprint(node, data):
-    """Write `data` to `node` as userDefined attributes
-
-    Arguments:
-        node (str): Long name of node
-        data (dict): Dictionary of key/value pairs
-
-    Example:
-        >>> from maya import cmds
-        >>> def compute():
-        ...   return 6
-        ...
-        >>> cube, generator = cmds.polyCube()
-        >>> imprint(cube, {
-        ...   "regularString": "myFamily",
-        ...   "computedValue": lambda: compute()
-        ... })
-        ...
-        >>> cmds.getAttr(cube + ".computedValue")
-        6
-
-    """
-
-    for key, value in data.items():
-
-        if callable(value):
-            # Support values evaluated at imprint
-            value = value()
-
-        if isinstance(value, bool):
-            add_type = {"attributeType": "bool"}
-            set_type = {"keyable": False, "channelBox": True}
-        elif isinstance(value, string_types):
-            add_type = {"dataType": "string"}
-            set_type = {"type": "string"}
-        elif isinstance(value, int):
-            add_type = {"attributeType": "long"}
-            set_type = {"keyable": False, "channelBox": True}
-        elif isinstance(value, float):
-            add_type = {"attributeType": "double"}
-            set_type = {"keyable": False, "channelBox": True}
-        elif isinstance(value, (list, tuple)):
-            add_type = {"attributeType": "enum", "enumName": ":".join(value)}
-            set_type = {"keyable": False, "channelBox": True}
-            value = 0  # enum default
-        else:
-            raise TypeError("Unsupported type: %r" % type(value))
-
-        cmds.addAttr(node, longName=key, **add_type)
-        cmds.setAttr(node + "." + key, value, **set_type)
-
-
-def lsattr(attr, value=None):
-    """Return nodes matching `attr` and `value`
-
-    Arguments:
-        attr (str): Name of Maya attribute
-        value (object, optional): Value of attribute. If none
-            is provided, return all nodes with this attribute.
-
-    Example:
-        >> lsattr("id", "myId")
-        ["myNode"]
-        >> lsattr("id")
-        ["myNode", "myOtherNode"]
-
-    """
-
-    if value is None:
-        return cmds.ls("*.%s" % attr,
-                       recursive=True,
-                       objectsOnly=True,
-                       long=True)
-    return lsattrs({attr: value})
-
-
-def lsattrs(attrs):
-    """Return nodes with the given attribute(s).
-
-    Arguments:
-        attrs (dict): Name and value pairs of expected matches
-
-    Example:
-        >>> # Return nodes with an `age` of five.
-        >>> lsattrs({"age": "five"})
-        >>> # Return nodes with both `age` and `color` of five and blue.
-        >>> lsattrs({"age": "five", "color": "blue"})
-
-    Returns:
-        list: matching nodes.
-
-    """
-
-    dep_fn = OpenMaya.MFnDependencyNode()
-    dag_fn = OpenMaya.MFnDagNode()
-    selection_list = OpenMaya.MSelectionList()
-
-    first_attr = next(iter(attrs))
-
-    try:
-        selection_list.add("*.{0}".format(first_attr),
-                           searchChildNamespaces=True)
-    except RuntimeError as exc:
-        if str(exc).endswith("Object does not exist"):
-            return []
-        # Unexpected error, so re-raise it
-        raise
-
-    matches = set()
-    for i in range(selection_list.length()):
-        node = selection_list.getDependNode(i)
-        if node.hasFn(OpenMaya.MFn.kDagNode):
-            fn_node = dag_fn.setObject(node)
-            full_path_names = [path.fullPathName()
-                               for path in fn_node.getAllPaths()]
-        else:
-            fn_node = dep_fn.setObject(node)
-            full_path_names = [fn_node.name()]
-
-        for attr in attrs:
-            try:
-                plug = fn_node.findPlug(attr, True)
-                if plug.asString() != attrs[attr]:
-                    break
-            except RuntimeError:
-                break
-        else:
-            matches.update(full_path_names)
-
-    return list(matches)
-
-
-@contextlib.contextmanager
-def attribute_values(attr_values):
-    """Remaps node attributes to values during context.
-
-    Arguments:
-        attr_values (dict): Dictionary with (attr, value)
-
-    """
-
-    original = [(attr, cmds.getAttr(attr)) for attr in attr_values]
-    try:
-        for attr, value in attr_values.items():
-            if isinstance(value, string_types):
-                cmds.setAttr(attr, value, type="string")
-            else:
-                cmds.setAttr(attr, value)
-        yield
-    finally:
-        for attr, value in original:
-            if isinstance(value, string_types):
-                cmds.setAttr(attr, value, type="string")
-            elif value is None and cmds.getAttr(attr, type=True) == "string":
-                # In some cases the maya.cmds.getAttr command returns None
-                # for string attributes but this value cannot be assigned.
-                # Note: After setting it once to "" it will then return ""
-                # instead of None. So this would only happen once.
-                cmds.setAttr(attr, "", type="string")
-            else:
-                cmds.setAttr(attr, value)
-
-
-@contextlib.contextmanager
-def keytangent_default(in_tangent_type='auto',
-                       out_tangent_type='auto'):
-    """Set the default keyTangent for new keys during this context"""
-
-    original_itt = cmds.keyTangent(query=True, g=True, itt=True)[0]
-    original_ott = cmds.keyTangent(query=True, g=True, ott=True)[0]
-    cmds.keyTangent(g=True, itt=in_tangent_type)
-    cmds.keyTangent(g=True, ott=out_tangent_type)
-    try:
-        yield
-    finally:
-        cmds.keyTangent(g=True, itt=original_itt)
-        cmds.keyTangent(g=True, ott=original_ott)
-
-
-@contextlib.contextmanager
-def undo_chunk():
-    """Open an undo chunk during context."""
-
-    try:
-        cmds.undoInfo(openChunk=True)
-        yield
-    finally:
-        cmds.undoInfo(closeChunk=True)
-
-
-@contextlib.contextmanager
-def evaluation(mode="off"):
-    """Set the evaluation manager during context.
-
-    Arguments:
-        mode (str): The mode to apply during context.
- "off": The standard DG evaluation (stable) - "serial": A serial DG evaluation - "parallel": The Maya 2016+ parallel evaluation - - """ - - original = cmds.evaluationManager(query=True, mode=1)[0] - try: - cmds.evaluationManager(mode=mode) - yield - finally: - cmds.evaluationManager(mode=original) - - -@contextlib.contextmanager -def empty_sets(sets, force=False): - """Remove all members of the sets during the context""" - - assert isinstance(sets, (list, tuple)) - - original = dict() - original_connections = [] - - # Store original state - for obj_set in sets: - members = cmds.sets(obj_set, query=True) - original[obj_set] = members - - try: - for obj_set in sets: - cmds.sets(clear=obj_set) - if force: - # Break all connections if force is enabled, this way we - # prevent Maya from exporting any reference nodes which are - # connected with placeHolder[x] attributes - plug = "%s.dagSetMembers" % obj_set - connections = cmds.listConnections(plug, - source=True, - destination=False, - plugs=True, - connections=True) or [] - original_connections.extend(connections) - for dest, src in pairwise(connections): - cmds.disconnectAttr(src, dest) - yield - finally: - - for dest, src in pairwise(original_connections): - cmds.connectAttr(src, dest) - - # Restore original members - _iteritems = getattr(original, "iteritems", original.items) - for origin_set, members in _iteritems(): - cmds.sets(members, forceElement=origin_set) - - -@contextlib.contextmanager -def renderlayer(layer): - """Set the renderlayer during the context - - Arguments: - layer (str): Name of layer to switch to. - - """ - - original = cmds.editRenderLayerGlobals(query=True, - currentRenderLayer=True) - - try: - cmds.editRenderLayerGlobals(currentRenderLayer=layer) - yield - finally: - cmds.editRenderLayerGlobals(currentRenderLayer=original) - - -class delete_after(object): - """Context Manager that will delete collected nodes after exit. - - This allows to ensure the nodes added to the context are deleted - afterwards. This is useful if you want to ensure nodes are deleted - even if an error is raised. - - Examples: - with delete_after() as delete_bin: - cube = maya.cmds.polyCube() - delete_bin.extend(cube) - # cube exists - # cube deleted - - """ - - def __init__(self, nodes=None): - - self._nodes = list() - - if nodes: - self.extend(nodes) - - def append(self, node): - self._nodes.append(node) - - def extend(self, nodes): - self._nodes.extend(nodes) - - def __iter__(self): - return iter(self._nodes) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if self._nodes: - cmds.delete(self._nodes) - - -def get_current_renderlayer(): - return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) - - -def get_renderer(layer): - with renderlayer(layer): - return cmds.getAttr("defaultRenderGlobals.currentRenderer") - - -@contextlib.contextmanager -def no_undo(flush=False): - """Disable the undo queue during the context - - Arguments: - flush (bool): When True the undo queue will be emptied when returning - from the context losing all undo history. Defaults to False. - - """ - original = cmds.undoInfo(query=True, state=True) - keyword = 'state' if flush else 'stateWithoutFlush' - - try: - cmds.undoInfo(**{keyword: False}) - yield - finally: - cmds.undoInfo(**{keyword: original}) - - -def get_shader_assignments_from_shapes(shapes, components=True): - """Return the shape assignment per related shading engines. 
- - Returns a dictionary where the keys are shadingGroups and the values are - lists of assigned shapes or shape-components. - - Since `maya.cmds.sets` returns shader members on the shapes as components - on the transform we correct that in this method too. - - For the 'shapes' this will return a dictionary like: - { - "shadingEngineX": ["nodeX", "nodeY"], - "shadingEngineY": ["nodeA", "nodeB"] - } - - Args: - shapes (list): The shapes to collect the assignments for. - components (bool): Whether to include the component assignments. - - Returns: - dict: The {shadingEngine: shapes} relationships - - """ - - shapes = cmds.ls(shapes, - long=True, - shapes=True, - objectsOnly=True) - if not shapes: - return {} - - # Collect shading engines and their shapes - assignments = defaultdict(list) - for shape in shapes: - - # Get unique shading groups for the shape - shading_groups = cmds.listConnections(shape, - source=False, - destination=True, - plugs=False, - connections=False, - type="shadingEngine") or [] - shading_groups = list(set(shading_groups)) - for shading_group in shading_groups: - assignments[shading_group].append(shape) - - if components: - # Note: Components returned from maya.cmds.sets are "listed" as if - # being assigned to the transform like: pCube1.f[0] as opposed - # to pCubeShape1.f[0] so we correct that here too. - - # Build a mapping from parent to shapes to include in lookup. - transforms = {shape.rsplit("|", 1)[0]: shape for shape in shapes} - lookup = set(shapes) | set(transforms.keys()) - - component_assignments = defaultdict(list) - for shading_group in assignments.keys(): - members = cmds.ls(cmds.sets(shading_group, query=True), long=True) - for member in members: - - node = member.split(".", 1)[0] - if node not in lookup: - continue - - # Component - if "." 
in member: - - # Fix transform to shape as shaders are assigned to shapes - if node in transforms: - shape = transforms[node] - component = member.split(".", 1)[1] - member = "{0}.{1}".format(shape, component) - - component_assignments[shading_group].append(member) - assignments = component_assignments - - return dict(assignments) - - -@contextlib.contextmanager -def shader(nodes, shadingEngine="initialShadingGroup"): - """Assign a shader to nodes during the context""" - - shapes = cmds.ls(nodes, dag=1, objectsOnly=1, shapes=1, long=1) - original = get_shader_assignments_from_shapes(shapes) - - try: - # Assign override shader - if shapes: - cmds.sets(shapes, edit=True, forceElement=shadingEngine) - yield - finally: - - # Assign original shaders - for sg, members in original.items(): - if members: - cmds.sets(members, edit=True, forceElement=sg) - - -@contextlib.contextmanager -def displaySmoothness(nodes, - divisionsU=0, - divisionsV=0, - pointsWire=4, - pointsShaded=1, - polygonObject=1): - """Set the displaySmoothness during the context""" - - # Ensure only non-intermediate shapes - nodes = cmds.ls(nodes, - dag=1, - shapes=1, - long=1, - noIntermediate=True) - - def parse(node): - """Parse the current state of a node""" - state = {} - for key in ["divisionsU", - "divisionsV", - "pointsWire", - "pointsShaded", - "polygonObject"]: - value = cmds.displaySmoothness(node, query=1, **{key: True}) - if value is not None: - state[key] = value[0] - return state - - originals = dict((node, parse(node)) for node in nodes) - - try: - # Apply current state - cmds.displaySmoothness(nodes, - divisionsU=divisionsU, - divisionsV=divisionsV, - pointsWire=pointsWire, - pointsShaded=pointsShaded, - polygonObject=polygonObject) - yield - finally: - # Revert state - _iteritems = getattr(originals, "iteritems", originals.items) - for node, state in _iteritems(): - if state: - cmds.displaySmoothness(node, **state) - - -@contextlib.contextmanager -def no_display_layers(nodes): - """Ensure nodes are not in a displayLayer during context. - - Arguments: - nodes (list): The nodes to remove from any display layer. - - """ - - # Ensure long names - nodes = cmds.ls(nodes, long=True) - - # Get the original state - lookup = set(nodes) - original = {} - for layer in cmds.ls(type='displayLayer'): - - # Skip default layer - if layer == "defaultLayer": - continue - - members = cmds.editDisplayLayerMembers(layer, - query=True, - fullNames=True) - if not members: - continue - members = set(members) - - included = lookup.intersection(members) - if included: - original[layer] = list(included) - - try: - # Add all nodes to default layer - cmds.editDisplayLayerMembers("defaultLayer", nodes, noRecurse=True) - yield - finally: - # Restore original members - _iteritems = getattr(original, "iteritems", original.items) - for layer, members in _iteritems(): - cmds.editDisplayLayerMembers(layer, members, noRecurse=True) - - -@contextlib.contextmanager -def namespaced(namespace, new=True, relative_names=None): - """Work inside namespace during context - - Args: - new (bool): When enabled this will rename the namespace to a unique - namespace if the input namespace already exists. 
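-        relative_names (bool, optional): When set, enable or disable
-            relative namespace naming during the context.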
- - Yields: - str: The namespace that is used during the context - - """ - original = cmds.namespaceInfo(cur=True, absoluteName=True) - original_relative_names = cmds.namespace(query=True, relativeNames=True) - if new: - namespace = unique_namespace(namespace) - cmds.namespace(add=namespace) - if relative_names is not None: - cmds.namespace(relativeNames=relative_names) - try: - cmds.namespace(set=namespace) - yield namespace - finally: - cmds.namespace(set=original) - if relative_names is not None: - cmds.namespace(relativeNames=original_relative_names) - - -@contextlib.contextmanager -def maintained_selection_api(): - """Maintain selection using the Maya Python API. - - Warning: This is *not* added to the undo stack. - - """ - original = OpenMaya.MGlobal.getActiveSelectionList() - try: - yield - finally: - OpenMaya.MGlobal.setActiveSelectionList(original) - - -@contextlib.contextmanager -def tool(context): - """Set a tool context during the context manager. - - """ - original = cmds.currentCtx() - try: - cmds.setToolTo(context) - yield - finally: - cmds.setToolTo(original) - - -def polyConstraint(components, *args, **kwargs): - """Return the list of *components* with the constraints applied. - - A wrapper around Maya's `polySelectConstraint` to retrieve its results as - a list without altering selections. For a list of possible constraints - see `maya.cmds.polySelectConstraint` documentation. - - Arguments: - components (list): List of components of polygon meshes - - Returns: - list: The list of components filtered by the given constraints. - - """ - - kwargs.pop('mode', None) - - with no_undo(flush=False): - # Reverting selection to the original selection using - # `maya.cmds.select` can be slow in rare cases where previously - # `maya.cmds.polySelectConstraint` had set constrain to "All and Next" - # and the "Random" setting was activated. To work around this we - # revert to the original selection using the Maya API. This is safe - # since we're not generating any undo change anyway. - with tool("selectSuperContext"): - # Selection can be very slow when in a manipulator mode. - # So we force the selection context which is fast. - with maintained_selection_api(): - # Apply constraint using mode=2 (current and next) so - # it applies to the selection made before it; because just - # a `maya.cmds.select()` call will not trigger the constraint. - with reset_polySelectConstraint(): - cmds.select(components, r=1, noExpand=True) - cmds.polySelectConstraint(*args, mode=2, **kwargs) - result = cmds.ls(selection=True) - cmds.select(clear=True) - return result - - -@contextlib.contextmanager -def reset_polySelectConstraint(reset=True): - """Context during which the given polyConstraint settings are disabled. - - The original settings are restored after the context. - - """ - - original = cmds.polySelectConstraint(query=True, stateString=True) - - try: - if reset: - # Ensure command is available in mel - # This can happen when running standalone - if not mel.eval("exists resetPolySelectConstraint"): - mel.eval("source polygonConstraint") - - # Reset all parameters - mel.eval("resetPolySelectConstraint;") - cmds.polySelectConstraint(disable=True) - yield - finally: - mel.eval(original) - - -def is_visible(node, - displayLayer=True, - intermediateObject=True, - parentHidden=True, - visibility=True): - """Is `node` visible? 
-
-    Returns whether the node is visible. A node is considered not visible
-    when any of the following applies:
-    - The node does not exist (always checked)
-    - The node is not a dagNode (always checked)
-    - The node's visibility is off.
-    - The node is set as intermediate object.
-    - The node is in a disabled displayLayer.
-    - Any of its parent nodes is hidden.
-
-    Roughly based on: http://ewertb.soundlinker.com/mel/mel.098.php
-
-    Returns:
-        bool: Whether the node is visible in the scene
-
-    """
-
-    # Only existing objects can be visible
-    if not cmds.objExists(node):
-        return False
-
-    # Only dagNodes can be visible
-    if not cmds.objectType(node, isAType='dagNode'):
-        return False
-
-    if visibility:
-        if not cmds.getAttr('{0}.visibility'.format(node)):
-            return False
-
-    if intermediateObject and cmds.objectType(node, isAType='shape'):
-        if cmds.getAttr('{0}.intermediateObject'.format(node)):
-            return False
-
-    if displayLayer:
-        # Display layers set overrideEnabled and overrideVisibility on members
-        if cmds.attributeQuery('overrideEnabled', node=node, exists=True):
-            override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node))
-            override_visibility = cmds.getAttr('{}.overrideVisibility'.format(
-                node))
-            # A member is hidden by the layer when the override is enabled
-            # and its override visibility is turned off
-            if override_enabled and not override_visibility:
-                return False
-
-    if parentHidden:
-        parents = cmds.listRelatives(node, parent=True, fullPath=True)
-        if parents:
-            parent = parents[0]
-            if not is_visible(parent,
-                              displayLayer=displayLayer,
-                              intermediateObject=False,
-                              parentHidden=parentHidden,
-                              visibility=visibility):
-                return False
-
-    return True
-
-
-def extract_alembic(file,
-                    startFrame=None,
-                    endFrame=None,
-                    selection=True,
-                    uvWrite=True,
-                    eulerFilter=True,
-                    dataFormat="ogawa",
-                    verbose=False,
-                    **kwargs):
-    """Extract a single Alembic Cache.
-
-    This extracts an Alembic cache using the `-selection` flag to minimize
-    the extracted content to solely what was collected into the instance.
-
-    Arguments:
-
-        startFrame (float): Start frame of output. Ignored if `frameRange`
-            provided.
-
-        endFrame (float): End frame of output. Ignored if `frameRange`
-            provided.
-
-        frameRange (tuple or str): Two-tuple with start and end frame or a
-            string formatted as: "startFrame endFrame". This argument
-            overrides `startFrame` and `endFrame` arguments.
-
-        dataFormat (str): The data format to use for the cache,
-            defaults to "ogawa"
-
-        verbose (bool): When on, outputs frame number information to the
-            Script Editor or output window during extraction.
-
-        noNormals (bool): When on, normal data from the original polygon
-            objects is not included in the exported Alembic cache file.
-
-        renderableOnly (bool): When on, any non-renderable nodes or hierarchy,
-            such as hidden objects, are not included in the Alembic file.
-            Defaults to False.
-
-        stripNamespaces (bool): When on, any namespaces associated with the
-            exported objects are removed from the Alembic file. For example, an
-            object with the namespace taco:foo:bar appears as bar in the
-            Alembic file.
-
-        uvWrite (bool): When on, UV data from polygon meshes and subdivision
-            objects are written to the Alembic file. Only the current UV map is
-            included.
-
-        worldSpace (bool): When on, the top node in the node hierarchy is
-            stored as world space. By default, these nodes are stored as local
-            space. Defaults to False.
-
-        eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with
-            an Euler filter. Euler filtering helps resolve irregularities in
-            rotations especially if X, Y, and Z rotations exceed 360 degrees.
-            Defaults to True.
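-
-    Example (illustrative values; assumes a `|pCube1` transform exists):
-        >>> extract_alembic("C:/temp/out.abc",
-        ...                 startFrame=1.0,
-        ...                 endFrame=24.0,
-        ...                 root=["|pCube1"],
-        ...                 worldSpace=True)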
- - """ - - # Ensure alembic exporter is loaded - cmds.loadPlugin('AbcExport', quiet=True) - - # Alembic Exporter requires forward slashes - file = file.replace('\\', '/') - - # Pass the start and end frame on as `frameRange` so that it - # never conflicts with that argument - if "frameRange" not in kwargs: - # Fallback to maya timeline if no start or end frame provided. - if startFrame is None: - startFrame = cmds.playbackOptions(query=True, - animationStartTime=True) - if endFrame is None: - endFrame = cmds.playbackOptions(query=True, - animationEndTime=True) - - # Ensure valid types are converted to frame range - assert isinstance(startFrame, _alembic_options["startFrame"]) - assert isinstance(endFrame, _alembic_options["endFrame"]) - kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame) - else: - # Allow conversion from tuple for `frameRange` - frame_range = kwargs["frameRange"] - if isinstance(frame_range, (list, tuple)): - assert len(frame_range) == 2 - kwargs["frameRange"] = "{0} {1}".format(frame_range[0], - frame_range[1]) - - # Assemble options - options = { - "selection": selection, - "uvWrite": uvWrite, - "eulerFilter": eulerFilter, - "dataFormat": dataFormat - } - options.update(kwargs) - - # Validate options - for key, value in options.copy().items(): - - # Discard unknown options - if key not in _alembic_options: - log.warning("extract_alembic() does not support option '%s'. " - "Flag will be ignored..", key) - options.pop(key) - continue - - # Validate value type - valid_types = _alembic_options[key] - if not isinstance(value, valid_types): - raise TypeError("Alembic option unsupported type: " - "{0} (expected {1})".format(value, valid_types)) - - # Ignore empty values, like an empty string, since they mess up how - # job arguments are built - if isinstance(value, (list, tuple)): - value = [x for x in value if x.strip()] - - # Ignore option completely if no values remaining - if not value: - options.pop(key) - continue - - options[key] = value - - # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ - maya_version = int(cmds.about(version=True)) - if maya_version >= 2018: - options['autoSubd'] = options.pop('writeCreases', False) - - # Format the job string from options - job_args = list() - for key, value in options.items(): - if isinstance(value, (list, tuple)): - for entry in value: - job_args.append("-{} {}".format(key, entry)) - elif isinstance(value, bool): - # Add only when state is set to True - if value: - job_args.append("-{0}".format(key)) - else: - job_args.append("-{0} {1}".format(key, value)) - - job_str = " ".join(job_args) - job_str += ' -file "%s"' % file - - # Ensure output directory exists - parent_dir = os.path.dirname(file) - if not os.path.exists(parent_dir): - os.makedirs(parent_dir) - - if verbose: - log.debug("Preparing Alembic export with options: %s", - json.dumps(options, indent=4)) - log.debug("Extracting Alembic with job arguments: %s", job_str) - - # Perform extraction - print("Alembic Job Arguments : {}".format(job_str)) - - # Disable the parallel evaluation temporarily to ensure no buggy - # exports are made. 
(PLN-31)
-    # TODO: Make sure this actually fixes the issues
-    with evaluation("off"):
-        cmds.AbcExport(j=job_str, verbose=verbose)
-
-    if verbose:
-        log.debug("Extracted Alembic to: %s", file)
-
-    return file
-
-
-# region ID
-def get_id_required_nodes(referenced_nodes=False, nodes=None):
-    """Filter out any nodes which are locked (referenced) or read-only.
-
-    Args:
-        referenced_nodes (bool): set True to filter out reference nodes
-        nodes (list, Optional): nodes to consider
-    Returns:
-        set: The filtered nodes.
-    """
-
-    lookup = None
-    if nodes is None:
-        # Consider all nodes
-        nodes = cmds.ls()
-    else:
-        # Build a lookup for the only allowed nodes in output based
-        # on `nodes` input of the function (+ ensure long names)
-        lookup = set(cmds.ls(nodes, long=True))
-
-    def _node_type_exists(node_type):
-        try:
-            cmds.nodeType(node_type, isTypeName=True)
-            return True
-        except RuntimeError:
-            return False
-
-    # `readOnly` flag is obsolete as of Maya 2016 therefore we explicitly
-    # remove default nodes and reference nodes
-    camera_shapes = ["frontShape", "sideShape", "topShape", "perspShape"]
-
-    ignore = set()
-    if not referenced_nodes:
-        ignore |= set(cmds.ls(long=True, referencedNodes=True))
-
-    # list all defaultNodes to filter out from the rest
-    ignore |= set(cmds.ls(long=True, defaultNodes=True))
-    ignore |= set(cmds.ls(camera_shapes, long=True))
-
-    # Remove Turtle from the result of `cmds.ls` if Turtle is loaded
-    # TODO: This should be a less specific check for a single plug-in.
-    if _node_type_exists("ilrBakeLayer"):
-        ignore |= set(cmds.ls(type="ilrBakeLayer", long=True))
-
-    # Establish set of node types to include
-    types = ["objectSet", "file", "mesh", "nurbsCurve", "nurbsSurface"]
-
-    # Check if plugin nodes are available for Maya by checking if the plugin
-    # is loaded
-    if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
-        types.append("pgYetiMaya")
-
-    # We *always* ignore intermediate shapes, so we filter them out directly
-    nodes = cmds.ls(nodes, type=types, long=True, noIntermediate=True)
-
-    # The collected nodes also need their parent transforms to receive
-    # an id, so add those transforms to the nodes
-    dag = cmds.ls(nodes, type="dagNode", long=True)  # query only dag nodes
-    transforms = cmds.listRelatives(dag,
-                                    parent=True,
-                                    fullPath=True) or []
-
-    nodes = set(nodes)
-    nodes |= set(transforms)
-
-    nodes -= ignore  # Remove the ignored nodes
-    if not nodes:
-        return nodes
-
-    # Ensure only nodes from the input `nodes` are returned when a
-    # filter was applied on function call because we also iterated
-    # to parents and alike
-    if lookup is not None:
-        nodes &= lookup
-
-    # Avoid locked nodes
-    nodes_list = list(nodes)
-    locked = cmds.lockNode(nodes_list, query=True, lock=True)
-    for node, lock in zip(nodes_list, locked):
-        if lock:
-            log.warning("Skipping locked node: %s" % node)
-            nodes.remove(node)
-
-    return nodes
-
-
-def get_id(node):
-    """Get the `cbId` attribute of the given node.
-
-    Args:
-        node (str): the name of the node to retrieve the attribute from
-    Returns:
-        str: The `cbId` value, or None when the node has none.
-
-    """
-    if node is None:
-        return
-
-    sel = OpenMaya.MSelectionList()
-    sel.add(node)
-
-    api_node = sel.getDependNode(0)
-    fn = OpenMaya.MFnDependencyNode(api_node)
-
-    if not fn.hasAttribute("cbId"):
-        return
-
-    try:
-        return fn.findPlug("cbId", False).asString()
-    except RuntimeError:
-        log.warning("Failed to retrieve cbId on %s", node)
-        return
-
-
-def generate_ids(nodes, asset_id=None):
-    """Returns new unique ids for the given nodes.
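-
-    The ids are formatted as "{asset_id}:{uuid4_tail}", combining the
-    asset database id with a short unique suffix per node.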
- - Note: This does not assign the new ids, it only generates the values. - - To assign new ids using this method: - >>> nodes = ["a", "b", "c"] - >>> for node, id in generate_ids(nodes): - >>> set_id(node, id) - - To also override any existing values (and assign regenerated ids): - >>> nodes = ["a", "b", "c"] - >>> for node, id in generate_ids(nodes): - >>> set_id(node, id, overwrite=True) - - Args: - nodes (list): List of nodes. - asset_id (str or bson.ObjectId): The database id for the *asset* to - generate for. When None provided the current asset in the - active session is used. - - Returns: - list: A list of (node, id) tuples. - - """ - - if asset_id is None: - # Get the asset ID from the database for the asset of current context - project_name = get_current_project_name() - asset_name = get_current_asset_name() - asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) - assert asset_doc, "No current asset found in Session" - asset_id = asset_doc['_id'] - - node_ids = [] - for node in nodes: - _, uid = str(uuid.uuid4()).rsplit("-", 1) - unique_id = "{}:{}".format(asset_id, uid) - node_ids.append((node, unique_id)) - - return node_ids - - -def set_id(node, unique_id, overwrite=False): - """Add cbId to `node` unless one already exists. - - Args: - node (str): the node to add the "cbId" on - unique_id (str): The unique node id to assign. - This should be generated by `generate_ids`. - overwrite (bool, optional): When True overrides the current value even - if `node` already has an id. Defaults to False. - - Returns: - None - - """ - - exists = cmds.attributeQuery("cbId", node=node, exists=True) - - # Add the attribute if it does not exist yet - if not exists: - cmds.addAttr(node, longName="cbId", dataType="string") - - # Set the value - if not exists or overwrite: - attr = "{0}.cbId".format(node) - cmds.setAttr(attr, unique_id, type="string") - - -def get_attribute(plug, - asString=False, - expandEnvironmentVariables=False, - **kwargs): - """Maya getAttr with some fixes based on `pymel.core.general.getAttr()`. - - Like Pymel getAttr this applies some changes to `maya.cmds.getAttr` - - maya pointlessly returned vector results as a tuple wrapped in a list - (ex. '[(1,2,3)]'). This command unpacks the vector for you. - - when getting a multi-attr, maya would raise an error, but this will - return a list of values for the multi-attr - - added support for getting message attributes by returning the - connections instead - - Note that the asString + expandEnvironmentVariables argument naming - convention matches the `maya.cmds.getAttr` arguments so that it can - act as a direct replacement for it. - - Args: - plug (str): Node's attribute plug as `node.attribute` - asString (bool): Return string value for enum attributes instead - of the index. Note that the return value can be dependent on the - UI language Maya is running in. - expandEnvironmentVariables (bool): Expand any environment variable and - (tilde characters on UNIX) found in string attributes which are - returned. - - Kwargs: - Supports the keyword arguments of `maya.cmds.getAttr` - - Returns: - object: The value of the maya attribute. 
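-
-    Example (illustrative; assumes a `pCube1` exists):
-        >>> get_attribute("pCube1.translate")
-        (0.0, 0.0, 0.0)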
- - """ - attr_type = cmds.getAttr(plug, type=True) - if asString: - kwargs["asString"] = True - if expandEnvironmentVariables: - kwargs["expandEnvironmentVariables"] = True - try: - res = cmds.getAttr(plug, **kwargs) - except RuntimeError: - if attr_type == "message": - return cmds.listConnections(plug) - - node, attr = plug.split(".", 1) - children = cmds.attributeQuery(attr, node=node, listChildren=True) - if children: - return [ - get_attribute("{}.{}".format(node, child)) - for child in children - ] - - raise - - # Convert vector result wrapped in tuple - if isinstance(res, list) and len(res): - if isinstance(res[0], tuple) and len(res): - if attr_type in {'pointArray', 'vectorArray'}: - return res - return res[0] - - return res - - -def set_attribute(attribute, value, node): - """Adjust attributes based on the value from the attribute data - - If an attribute does not exists on the target it will be added with - the dataType being controlled by the value type. - - Args: - attribute (str): name of the attribute to change - value: the value to change to attribute to - node (str): name of the node - - Returns: - None - """ - - value_type = type(value).__name__ - kwargs = ATTRIBUTE_DICT[value_type] - if not cmds.attributeQuery(attribute, node=node, exists=True): - log.debug("Creating attribute '{}' on " - "'{}'".format(attribute, node)) - cmds.addAttr(node, longName=attribute, **kwargs) - - node_attr = "{}.{}".format(node, attribute) - enum_type = cmds.attributeQuery(attribute, node=node, enum=True) - if enum_type and value_type == "str": - enum_string_values = cmds.attributeQuery( - attribute, node=node, listEnum=True - )[0].split(":") - cmds.setAttr( - "{}.{}".format(node, attribute), enum_string_values.index(value) - ) - elif "dataType" in kwargs: - attr_type = kwargs["dataType"] - cmds.setAttr(node_attr, value, type=attr_type) - else: - cmds.setAttr(node_attr, value) - - -def apply_attributes(attributes, nodes_by_id): - """Alter the attributes to match the state when publishing - - Apply attribute settings from the publish to the node in the scene based - on the UUID which is stored in the cbId attribute. - - Args: - attributes (list): list of dictionaries - nodes_by_id (dict): collection of nodes based on UUID - {uuid: [node, node]} - - """ - - for attr_data in attributes: - nodes = nodes_by_id[attr_data["uuid"]] - attr_value = attr_data["attributes"] - for node in nodes: - for attr, value in attr_value.items(): - set_attribute(attr, value, node) - - -def get_container_members(container): - """Returns the members of a container. - This includes the nodes from any loaded references in the container. - """ - if isinstance(container, dict): - # Assume it's a container dictionary - container = container["objectName"] - - members = cmds.sets(container, query=True) or [] - members = cmds.ls(members, long=True, objectsOnly=True) or [] - all_members = set(members) - - # Include any referenced nodes from any reference in the container - # This is required since we've removed adding ALL nodes of a reference - # into the container set and only add the reference node now. 
- for ref in cmds.ls(members, exactType="reference", objectsOnly=True): - - # Ignore any `:sharedReferenceNode` - if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"): - continue - - # Ignore _UNKNOWN_REF_NODE_ (PLN-160) - if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"): - continue - - reference_members = cmds.referenceQuery(ref, nodes=True, dagPath=True) - reference_members = cmds.ls(reference_members, - long=True, - objectsOnly=True) - all_members.update(reference_members) - - return list(all_members) - - -# region LOOKDEV -def list_looks(project_name, asset_id): - """Return all look subsets for the given asset - - This assumes all look subsets start with "look*" in their names. - """ - # # get all subsets with look leading in - # the name associated with the asset - # TODO this should probably look for family 'look' instead of checking - # subset name that can not start with family - subset_docs = get_subsets(project_name, asset_ids=[asset_id]) - return [ - subset_doc - for subset_doc in subset_docs - if subset_doc["name"].startswith("look") - ] - - -def assign_look_by_version(nodes, version_id): - """Assign nodes a specific published look version by id. - - This assumes the nodes correspond with the asset. - - Args: - nodes(list): nodes to assign look to - version_id (bson.ObjectId): database id of the version - - Returns: - None - """ - - project_name = get_current_project_name() - - # Get representations of shader file and relationships - look_representation = get_representation_by_name( - project_name, "ma", version_id - ) - json_representation = get_representation_by_name( - project_name, "json", version_id - ) - - # See if representation is already loaded, if so reuse it. - host = registered_host() - representation_id = str(look_representation['_id']) - for container in host.ls(): - if (container['loader'] == "LookLoader" and - container['representation'] == representation_id): - log.info("Reusing loaded look ..") - container_node = container['objectName'] - break - else: - log.info("Using look for the first time ..") - - # Load file - _loaders = discover_loader_plugins() - loaders = loaders_from_representation(_loaders, representation_id) - Loader = next((i for i in loaders if i.__name__ == "LookLoader"), None) - if Loader is None: - raise RuntimeError("Could not find LookLoader, this is a bug") - - # Reference the look file - with maintained_selection(): - container_node = load_container(Loader, look_representation) - - # Get container members - shader_nodes = get_container_members(container_node) - - # Load relationships - shader_relation = get_representation_path(json_representation) - with open(shader_relation, "r") as f: - relationships = json.load(f) - - # Assign relationships - apply_shaders(relationships, shader_nodes, nodes) - - -def assign_look(nodes, subset="lookDefault"): - """Assigns a look to a node. - - Optimizes the nodes by grouping by asset id and finding - related subset by name. 
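-
-    Example:
-        Hypothetical usage; applies the default look to the current
-        selection:
-
-        >>> assign_look(cmds.ls(selection=True, long=True))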
-
-    Args:
-        nodes (list): all nodes to assign the look to
-        subset (str): name of the subset to find
-    """
-
-    # Group all nodes per asset id
-    grouped = defaultdict(list)
-    for node in nodes:
-        pype_id = get_id(node)
-        if not pype_id:
-            continue
-
-        parts = pype_id.split(":", 1)
-        grouped[parts[0]].append(node)
-
-    project_name = get_current_project_name()
-    subset_docs = get_subsets(
-        project_name, subset_names=[subset], asset_ids=grouped.keys()
-    )
-    subset_docs_by_asset_id = {
-        str(subset_doc["parent"]): subset_doc
-        for subset_doc in subset_docs
-    }
-    subset_ids = {
-        subset_doc["_id"]
-        for subset_doc in subset_docs_by_asset_id.values()
-    }
-    last_version_docs = get_last_versions(
-        project_name,
-        subset_ids=subset_ids,
-        fields=["_id", "name", "data.families"]
-    )
-    last_version_docs_by_subset_id = {
-        last_version_doc["parent"]: last_version_doc
-        for last_version_doc in last_version_docs
-    }
-
-    for asset_id, asset_nodes in grouped.items():
-        # Resolve the subset and its last version for this asset id
-        subset_doc = subset_docs_by_asset_id.get(asset_id)
-        if not subset_doc:
-            log.warning("No subset '{}' found for {}".format(subset, asset_id))
-            continue
-
-        last_version = last_version_docs_by_subset_id.get(subset_doc["_id"])
-        if not last_version:
-            log.warning((
-                "No last version found for subset '{}' on asset with id {}"
-            ).format(subset, asset_id))
-            continue
-
-        families = last_version.get("data", {}).get("families") or []
-        if "look" not in families:
-            log.warning((
-                "Last version for subset '{}' on asset with id {}"
-                " does not have look family"
-            ).format(subset, asset_id))
-            continue
-
-        log.debug("Assigning look '{}' (version {})".format(
-            subset, last_version["name"]))
-
-        assign_look_by_version(asset_nodes, last_version["_id"])
-
-
-def apply_shaders(relationships, shadernodes, nodes):
-    """Link shadingEngine to the right nodes based on relationship data
-
-    Relationship data is constructed of a collection of `sets` and
-    `attributes`. `sets` corresponds with the shadingEngines found in the
-    lookdev. Each set has the keys `name`, `members` and `uuid`; the
-    `members` hold a collection of node information (`name` and `uuid`).
-
-    Args:
-        relationships (dict): relationship data
-        shadernodes (list): list of nodes of the shading objectSets (includes
-            VRayObjectProperties and shadingEngines)
-        nodes (list): list of nodes to apply shader to
-
-    Returns:
-        None
-    """
-
-    attributes = relationships.get("attributes", [])
-    shader_data = relationships.get("relationships", {})
-
-    shading_engines = cmds.ls(shadernodes, type="objectSet", long=True)
-    assert shading_engines, "Error in retrieving objectSets from reference"
-
-    # region compute lookup
-    nodes_by_id = defaultdict(list)
-    for node in nodes:
-        nodes_by_id[get_id(node)].append(node)
-
-    shading_engines_by_id = defaultdict(list)
-    for shad in shading_engines:
-        shading_engines_by_id[get_id(shad)].append(shad)
-    # endregion
-
-    # region assign shading engines and other sets
-    for data in shader_data.values():
-        # collect all unique IDs of the set members
-        shader_uuid = data["uuid"]
-        member_uuids = [member["uuid"] for member in data["members"]]
-
-        filtered_nodes = list()
-        for m_uuid in member_uuids:
-            filtered_nodes.extend(nodes_by_id[m_uuid])
-
-        id_shading_engines = shading_engines_by_id[shader_uuid]
-        if not id_shading_engines:
-            log.error("No shader found with cbId "
-                      "'{}'".format(shader_uuid))
-            continue
-        elif len(id_shading_engines) > 1:
-            log.error("Skipping shader assignment. "
-                      "More than one shader found with cbId "
-                      "'{}'. 
(found: {})".format(shader_uuid, - id_shading_engines)) - continue - - if not filtered_nodes: - log.warning("No nodes found for shading engine " - "'{0}'".format(id_shading_engines[0])) - continue - try: - cmds.sets(filtered_nodes, forceElement=id_shading_engines[0]) - except RuntimeError as rte: - log.error("Error during shader assignment: {}".format(rte)) - - # endregion - - apply_attributes(attributes, nodes_by_id) - - -# endregion LOOKDEV -def get_isolate_view_sets(): - """Return isolate view sets of all modelPanels. - - Returns: - list: all sets related to isolate view - - """ - - view_sets = set() - for panel in cmds.getPanel(type="modelPanel") or []: - view_set = cmds.modelEditor(panel, query=True, viewObjects=True) - if view_set: - view_sets.add(view_set) - - return view_sets - - -def get_related_sets(node): - """Return objectSets that are relationships for a look for `node`. - - Filters out based on: - - id attribute is NOT `pyblish.avalon.container` - - shapes and deformer shapes (alembic creates meshShapeDeformed) - - set name ends with any from a predefined list - - set in not in viewport set (isolate selected for example) - - Args: - node (str): name of the current node to check - - Returns: - list: The related sets - - """ - - # Ignore specific suffices - ignore_suffices = ["out_SET", "controls_SET", "_INST", "_CON"] - - # Default nodes to ignore - defaults = {"defaultLightSet", "defaultObjectSet"} - - # Ids to ignore - ignored = {"pyblish.avalon.instance", "pyblish.avalon.container"} - - view_sets = get_isolate_view_sets() - - sets = cmds.listSets(object=node, extendToShape=False) - if not sets: - return [] - - # Fix 'no object matches name' errors on nodes returned by listSets. - # In rare cases it can happen that a node is added to an internal maya - # set inaccessible by maya commands, for example check some nodes - # returned by `cmds.listSets(allSets=True)` - sets = cmds.ls(sets) - - # Ignore `avalon.container` - sets = [s for s in sets if - not cmds.attributeQuery("id", node=s, exists=True) or - not cmds.getAttr("%s.id" % s) in ignored] - - # Exclude deformer sets (`type=2` for `maya.cmds.listSets`) - deformer_sets = cmds.listSets(object=node, - extendToShape=False, - type=2) or [] - deformer_sets = set(deformer_sets) # optimize lookup - sets = [s for s in sets if s not in deformer_sets] - - # Ignore when the set has a specific suffix - sets = [s for s in sets if not any(s.endswith(x) for x in ignore_suffices)] - - # Ignore viewport filter view sets (from isolate select and - # viewports) - sets = [s for s in sets if s not in view_sets] - sets = [s for s in sets if s not in defaults] - - return sets - - -def get_container_transforms(container, members=None, root=False): - """Retrieve the root node of the container content - - When a container is created through a Loader the content - of the file will be grouped under a transform. The name of the root - transform is stored in the container information - - Args: - container (dict): the container - members (list): optional and convenience argument - root (bool): return highest node in hierarchy if True - - Returns: - root (list / str): - """ - - if not members: - members = get_container_members(container) - - results = cmds.ls(members, type="transform", long=True) - if root: - root = get_highest_in_hierarchy(results) - if root: - results = root[0] - - return results - - -def get_highest_in_hierarchy(nodes): - """Return highest nodes in the hierarchy that are in the `nodes` list. 
- - The "highest in hierarchy" are the nodes closest to world: top-most level. - - Args: - nodes (list): The nodes in which find the highest in hierarchies. - - Returns: - list: The highest nodes from the input nodes. - - """ - - # Ensure we use long names - nodes = cmds.ls(nodes, long=True) - lookup = set(nodes) - - highest = [] - for node in nodes: - # If no parents are within the nodes input list - # then this is a highest node - if not any(n in lookup for n in iter_parents(node)): - highest.append(node) - - return highest - - -def iter_parents(node): - """Iter parents of node from its long name. - - Note: The `node` *must* be the long node name. - - Args: - node (str): Node long name. - - Yields: - str: All parent node names (long names) - - """ - while True: - split = node.rsplit("|", 1) - if len(split) == 1 or not split[0]: - return - - node = split[0] - yield node - - -def remove_other_uv_sets(mesh): - """Remove all other UV sets than the current UV set. - - Keep only current UV set and ensure it's the renamed to default 'map1'. - - """ - - uvSets = cmds.polyUVSet(mesh, query=True, allUVSets=True) - current = cmds.polyUVSet(mesh, query=True, currentUVSet=True)[0] - - # Copy over to map1 - if current != 'map1': - cmds.polyUVSet(mesh, uvSet=current, newUVSet='map1', copy=True) - cmds.polyUVSet(mesh, currentUVSet=True, uvSet='map1') - current = 'map1' - - # Delete all non-current UV sets - deleteUVSets = [uvSet for uvSet in uvSets if uvSet != current] - uvSet = None - - # Maya Bug (tested in 2015/2016): - # In some cases the API's MFnMesh will report less UV sets than - # maya.cmds.polyUVSet. This seems to happen when the deletion of UV sets - # has not triggered a cleanup of the UVSet array attribute on the mesh - # node. It will still have extra entries in the attribute, though it will - # not show up in API or UI. Nevertheless it does show up in - # maya.cmds.polyUVSet. To ensure we clean up the array we'll force delete - # the extra remaining 'indices' that we don't want. - - # TODO: Implement a better fix - # The best way to fix would be to get the UVSet indices from api with - # MFnMesh (to ensure we keep correct ones) and then only force delete the - # other entries in the array attribute on the node. But for now we're - # deleting all entries except first one. Note that the first entry could - # never be removed (the default 'map1' always exists and is supposed to - # be undeletable.) - try: - for uvSet in deleteUVSets: - cmds.polyUVSet(mesh, delete=True, uvSet=uvSet) - except RuntimeError as exc: - log.warning('Error uvSet: %s - %s', uvSet, exc) - indices = cmds.getAttr('{0}.uvSet'.format(mesh), - multiIndices=True) - if not indices: - log.warning("No uv set found indices for: %s", mesh) - return - - # Delete from end to avoid shifting indices - # and remove the indices in the attribute - indices = reversed(indices[1:]) - for i in indices: - attr = '{0}.uvSet[{1}]'.format(mesh, i) - cmds.removeMultiInstance(attr, b=True) - - -def get_node_parent(node): - """Return full path name for parent of node""" - parents = cmds.listRelatives(node, parent=True, fullPath=True) - return parents[0] if parents else None - - -def get_id_from_sibling(node, history_only=True): - """Return first node id in the history chain that matches this node. - - The nodes in history must be of the exact same node type and must be - parented under the same parent. - - Optionally, if no matching node is found from the history, all the - siblings of the node that are of the same type are checked. 
- Additionally to having the same parent, the sibling must be marked as - 'intermediate object'. - - Args: - node (str): node to retrieve the history from - history_only (bool): if True and if nothing found in history, - look for an 'intermediate object' in all the node's siblings - of same type - - Returns: - str or None: The id from the sibling node or None when no id found - on any valid nodes in the history or siblings. - - """ - - node = cmds.ls(node, long=True)[0] - - # Find all similar nodes in history - history = cmds.listHistory(node) - node_type = cmds.nodeType(node) - similar_nodes = cmds.ls(history, exactType=node_type, long=True) - - # Exclude itself - similar_nodes = [x for x in similar_nodes if x != node] - - # The node *must be* under the same parent - parent = get_node_parent(node) - similar_nodes = [i for i in similar_nodes if get_node_parent(i) == parent] - - # Check all of the remaining similar nodes and take the first one - # with an id and assume it's the original. - for similar_node in similar_nodes: - _id = get_id(similar_node) - if _id: - return _id - - if not history_only: - # Get siblings of same type - similar_nodes = cmds.listRelatives(parent, - type=node_type, - fullPath=True) - similar_nodes = cmds.ls(similar_nodes, exactType=node_type, long=True) - - # Exclude itself - similar_nodes = [x for x in similar_nodes if x != node] - - # Get all unique ids from siblings in order since - # we consistently take the first one found - sibling_ids = OrderedDict() - for similar_node in similar_nodes: - # Check if "intermediate object" - if not cmds.getAttr(similar_node + ".intermediateObject"): - continue - - _id = get_id(similar_node) - if not _id: - continue - - if _id in sibling_ids: - sibling_ids[_id].append(similar_node) - else: - sibling_ids[_id] = [similar_node] - - if sibling_ids: - first_id, found_nodes = next(iter(sibling_ids.items())) - - # Log a warning if we've found multiple unique ids - if len(sibling_ids) > 1: - log.warning(("Found more than 1 intermediate shape with" - " unique id for '{}'. 
Using id of first"
-                         " found: '{}'".format(node, found_nodes[0])))
-
-        return first_id
-
-
-def set_scene_fps(fps, update=True):
-    """Set FPS from project configuration
-
-    Args:
-        fps (int, float): desired FPS
-        update(bool): toggle update animation, default is True
-
-    Returns:
-        None
-
-    """
-
-    fps_mapping = {
-        '15': 'game',
-        '24': 'film',
-        '25': 'pal',
-        '30': 'ntsc',
-        '48': 'show',
-        '50': 'palf',
-        '60': 'ntscf',
-        '23.976023976023978': '23.976fps',
-        '29.97002997002997': '29.97fps',
-        '47.952047952047955': '47.952fps',
-        '59.94005994005994': '59.94fps',
-        '44100': '44100fps',
-        '48000': '48000fps'
-    }
-
-    unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None)
-    if unit is None:
-        raise ValueError("Unsupported FPS value: `%s`" % fps)
-
-    # Get time slider current state
-    start_frame = cmds.playbackOptions(query=True, minTime=True)
-    end_frame = cmds.playbackOptions(query=True, maxTime=True)
-
-    # Get animation data
-    animation_start = cmds.playbackOptions(query=True, animationStartTime=True)
-    animation_end = cmds.playbackOptions(query=True, animationEndTime=True)
-
-    current_frame = cmds.currentTime(query=True)
-
-    log.info("Setting scene FPS to: '{}'".format(unit))
-    cmds.currentUnit(time=unit, updateAnimation=update)
-
-    # Set time slider data back to previous state
-    cmds.playbackOptions(edit=True, minTime=start_frame)
-    cmds.playbackOptions(edit=True, maxTime=end_frame)
-
-    # Set animation data
-    cmds.playbackOptions(edit=True, animationStartTime=animation_start)
-    cmds.playbackOptions(edit=True, animationEndTime=animation_end)
-
-    cmds.currentTime(current_frame, edit=True, update=True)
-
-    # Force file state to 'modified'
-    cmds.file(modified=True)
-
-
-def set_scene_resolution(width, height, pixelAspect):
-    """Set the render resolution
-
-    Args:
-        width(int): value of the width
-        height(int): value of the height
-        pixelAspect(int, float): value of the pixel aspect ratio
-
-    Returns:
-        None
-
-    """
-
-    control_node = "defaultResolution"
-    current_renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer")
-    aspect_ratio_attr = "deviceAspectRatio"
-
-    # Give VRay a helping hand as it is slightly different from the rest
-    if current_renderer == "vray":
-        aspect_ratio_attr = "aspectRatio"
-        vray_node = "vraySettings"
-        if cmds.objExists(vray_node):
-            control_node = vray_node
-        else:
-            log.error("Can't set VRay resolution because there is no node "
-                      "named: `%s`" % vray_node)
-
-    log.info("Setting scene resolution to: %s x %s" % (width, height))
-    cmds.setAttr("%s.width" % control_node, width)
-    cmds.setAttr("%s.height" % control_node, height)
-
-    deviceAspectRatio = ((float(width) / float(height)) * float(pixelAspect))
-    cmds.setAttr(
-        "{}.{}".format(control_node, aspect_ratio_attr), deviceAspectRatio)
-    cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect)
-
-
-def get_fps_for_current_context():
-    """Get fps that should be set for current context.
-
-    Todos:
-        - Skip project value.
-        - Merge logic with 'get_frame_range' and 'reset_scene_resolution' ->
-            all the values in the functions can be collected at one place as
-            they have the same requirements.
-
-    Returns:
-        Union[int, float]: FPS value.
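-
-    Example:
-        >>> # Illustrative: with the current asset set to 25 fps
-        >>> get_fps_for_current_context()
-        25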
- """ - - project_name = get_current_project_name() - asset_name = get_current_asset_name() - asset_doc = get_asset_by_name( - project_name, asset_name, fields=["data.fps"] - ) or {} - fps = asset_doc.get("data", {}).get("fps") - if not fps: - project_doc = get_project(project_name, fields=["data.fps"]) or {} - fps = project_doc.get("data", {}).get("fps") - - if not fps: - fps = 25 - - return convert_to_maya_fps(fps) - - -def get_frame_range(include_animation_range=False): - """Get the current assets frame range and handles. - - Args: - include_animation_range (bool, optional): Whether to include - `animationStart` and `animationEnd` keys to define the outer - range of the timeline. It is excluded by default. - - Returns: - dict: Asset's expected frame range values. - - """ - - # Set frame start/end - project_name = get_current_project_name() - asset_name = get_current_asset_name() - asset = get_asset_by_name(project_name, asset_name) - - frame_start = asset["data"].get("frameStart") - frame_end = asset["data"].get("frameEnd") - - if frame_start is None or frame_end is None: - cmds.warning("No edit information found for %s" % asset_name) - return - - handle_start = asset["data"].get("handleStart") or 0 - handle_end = asset["data"].get("handleEnd") or 0 - - frame_range = { - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": handle_start, - "handleEnd": handle_end - } - if include_animation_range: - # The animation range values are only included to define whether - # the Maya time slider should include the handles or not. - # Some usages of this function use the full dictionary to define - # instance attributes for which we want to exclude the animation - # keys. That is why these are excluded by default. - task_name = get_current_task_name() - settings = get_project_settings(project_name) - include_handles_settings = settings["maya"]["include_handles"] - current_task = asset.get("data").get("tasks").get(task_name) - - animation_start = frame_start - animation_end = frame_end - - include_handles = include_handles_settings["include_handles_default"] - for item in include_handles_settings["per_task_type"]: - if current_task["type"] in item["task_type"]: - include_handles = item["include_handles"] - break - if include_handles: - animation_start -= int(handle_start) - animation_end += int(handle_end) - - frame_range["animationStart"] = animation_start - frame_range["animationEnd"] = animation_end - - return frame_range - - -def reset_frame_range(playback=True, render=True, fps=True): - """Set frame range to current asset - - Args: - playback (bool, Optional): Whether to set the maya timeline playback - frame range. Defaults to True. - render (bool, Optional): Whether to set the maya render frame range. - Defaults to True. - fps (bool, Optional): Whether to set scene FPS. Defaults to True. 
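-
-    Example:
-        >>> # Sync timeline and render range to the asset, keep current FPS
-        >>> reset_frame_range(fps=False)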
- """ - if fps: - set_scene_fps(get_fps_for_current_context()) - - frame_range = get_frame_range(include_animation_range=True) - if not frame_range: - # No frame range data found for asset - return - - frame_start = frame_range["frameStart"] - frame_end = frame_range["frameEnd"] - animation_start = frame_range["animationStart"] - animation_end = frame_range["animationEnd"] - - if playback: - cmds.playbackOptions( - minTime=frame_start, - maxTime=frame_end, - animationStartTime=animation_start, - animationEndTime=animation_end - ) - cmds.currentTime(frame_start) - - if render: - cmds.setAttr("defaultRenderGlobals.startFrame", animation_start) - cmds.setAttr("defaultRenderGlobals.endFrame", animation_end) - - -def reset_scene_resolution(): - """Apply the scene resolution from the project definition - - scene resolution can be overwritten by an asset if the asset.data contains - any information regarding scene resolution . - - Returns: - None - """ - - project_name = get_current_project_name() - project_doc = get_project(project_name) - project_data = project_doc["data"] - asset_data = get_current_project_asset()["data"] - - # Set project resolution - width_key = "resolutionWidth" - height_key = "resolutionHeight" - pixelAspect_key = "pixelAspect" - - width = asset_data.get(width_key, project_data.get(width_key, 1920)) - height = asset_data.get(height_key, project_data.get(height_key, 1080)) - pixelAspect = asset_data.get(pixelAspect_key, - project_data.get(pixelAspect_key, 1)) - - set_scene_resolution(width, height, pixelAspect) - - -def set_context_settings(): - """Apply the project settings from the project definition - - Settings can be overwritten by an asset if the asset.data contains - any information regarding those settings. - - Examples of settings: - fps - resolution - renderer - - Returns: - None - """ - - - # Set project fps - set_scene_fps(get_fps_for_current_context()) - - reset_scene_resolution() - - # Set frame range. - reset_frame_range() - - # Set colorspace - set_colorspace() - - -# Valid FPS -def validate_fps(): - """Validate current scene FPS and show pop-up when it is incorrect - - Returns: - bool - - """ - - expected_fps = get_fps_for_current_context() - current_fps = mel.eval('currentTimeUnitToFPS()') - - fps_match = current_fps == expected_fps - if not fps_match and not IS_HEADLESS: - from openpype.widgets import popup - - parent = get_main_window() - - dialog = popup.PopupUpdateKeys(parent=parent) - dialog.setModal(True) - dialog.setWindowTitle("Maya scene does not match project FPS") - dialog.setMessage( - "Scene {} FPS does not match project {} FPS".format( - current_fps, expected_fps - ) - ) - dialog.setButtonText("Fix") - - # Set new text for button (add optional argument for the popup?) - toggle = dialog.widgets["toggle"] - update = toggle.isChecked() - dialog.on_clicked_state.connect( - lambda: set_scene_fps(expected_fps, update) - ) - - dialog.show() - - return False - - return fps_match - - -def bake(nodes, - frame_range=None, - step=1.0, - simulation=True, - preserve_outside_keys=False, - disable_implicit_control=True, - shape=True): - """Bake the given nodes over the time range. - - This will bake all attributes of the node, including custom attributes. - - Args: - nodes (list): Names of transform nodes, eg. camera, light. - frame_range (list): frame range with start and end frame. - or if None then takes timeSliderRange - simulation (bool): Whether to perform a full simulation of the - attributes over time. 
- preserve_outside_keys (bool): Keep keys that are outside of the baked - range. - disable_implicit_control (bool): When True will disable any - constraints to the object. - shape (bool): When True also bake attributes on the children shapes. - step (float): The step size to sample by. - - Returns: - None - - """ - - # Parse inputs - if not nodes: - return - - assert isinstance(nodes, (list, tuple)), "Nodes must be a list or tuple" - - # If frame range is None fall back to time slider playback time range - if frame_range is None: - frame_range = [cmds.playbackOptions(query=True, minTime=True), - cmds.playbackOptions(query=True, maxTime=True)] - - # If frame range is single frame bake one frame more, - # otherwise maya.cmds.bakeResults gets confused - if frame_range[1] == frame_range[0]: - frame_range[1] += 1 - - # Bake it - with keytangent_default(in_tangent_type='auto', - out_tangent_type='auto'): - cmds.bakeResults(nodes, - simulation=simulation, - preserveOutsideKeys=preserve_outside_keys, - disableImplicitControl=disable_implicit_control, - shape=shape, - sampleBy=step, - time=(frame_range[0], frame_range[1])) - - -def bake_to_world_space(nodes, - frame_range=None, - simulation=True, - preserve_outside_keys=False, - disable_implicit_control=True, - shape=True, - step=1.0): - """Bake the nodes to world space transformation (incl. other attributes) - - Bakes the transforms to world space (while maintaining all its animated - attributes and settings) by duplicating the node. Then parents it to world - and constrains to the original. - - Other attributes are also baked by connecting all attributes directly. - Baking is then done using Maya's bakeResults command. - - See `bake` for the argument documentation. - - Returns: - list: The newly created and baked node names. - - """ - @contextlib.contextmanager - def _unlock_attr(attr): - """Unlock attribute during context if it is locked""" - if not cmds.getAttr(attr, lock=True): - # If not locked, do nothing - yield - return - try: - cmds.setAttr(attr, lock=False) - yield - finally: - cmds.setAttr(attr, lock=True) - - def _get_attrs(node): - """Workaround for buggy shape attribute listing with listAttr - - This will only return keyable settable attributes that have an - incoming connections (those that have a reason to be baked). - - Technically this *may* fail to return attributes driven by complex - expressions for which maya makes no connections, e.g. doing actual - `setAttr` calls in expressions. - - Arguments: - node (str): The node to list attributes for. - - Returns: - list: Keyable attributes with incoming connections. - The attribute may be locked. - - """ - attrs = cmds.listAttr(node, - write=True, - scalar=True, - settable=True, - connectable=True, - keyable=True, - shortNames=True) or [] - valid_attrs = [] - for attr in attrs: - node_attr = '{0}.{1}'.format(node, attr) - - # Sometimes Maya returns 'non-existent' attributes for shapes - # so we filter those out - if not cmds.attributeQuery(attr, node=node, exists=True): - continue - - # We only need those that have a connection, just to be safe - # that it's actually keyable/connectable anyway. 
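-            # For example, a camera shape's focalLength plug driven by an
-            # animCurve would be included; a static plug with no inputs is
-            # skipped.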
- if cmds.connectionInfo(node_attr, - isDestination=True): - valid_attrs.append(attr) - - return valid_attrs - - transform_attrs = {"t", "r", "s", - "tx", "ty", "tz", - "rx", "ry", "rz", - "sx", "sy", "sz"} - - world_space_nodes = [] - with ExitStack() as stack: - delete_bin = stack.enter_context(delete_after()) - # Create the duplicate nodes that are in world-space connected to - # the originals - for node in nodes: - - # Duplicate the node - short_name = node.rsplit("|", 1)[-1] - new_name = "{0}_baked".format(short_name) - new_node = cmds.duplicate(node, - name=new_name, - renameChildren=True)[0] # noqa - - # Parent new node to world - if cmds.listRelatives(new_node, parent=True): - new_node = cmds.parent(new_node, world=True)[0] - - # Temporarily unlock and passthrough connect all attributes - # so we can bake them over time - # Skip transform attributes because we will constrain them later - attrs = set(_get_attrs(node)) - transform_attrs - for attr in attrs: - orig_node_attr = "{}.{}".format(node, attr) - new_node_attr = "{}.{}".format(new_node, attr) - - # unlock during context to avoid connection errors - stack.enter_context(_unlock_attr(new_node_attr)) - cmds.connectAttr(orig_node_attr, - new_node_attr, - force=True) - - # If shapes are also baked then also temporarily unlock and - # passthrough connect all shape attributes for baking - if shape: - children_shapes = cmds.listRelatives(new_node, - children=True, - fullPath=True, - shapes=True) - if children_shapes: - orig_children_shapes = cmds.listRelatives(node, - children=True, - fullPath=True, - shapes=True) - for orig_shape, new_shape in zip(orig_children_shapes, - children_shapes): - attrs = _get_attrs(orig_shape) - for attr in attrs: - orig_node_attr = "{}.{}".format(orig_shape, attr) - new_node_attr = "{}.{}".format(new_shape, attr) - - # unlock during context to avoid connection errors - stack.enter_context(_unlock_attr(new_node_attr)) - cmds.connectAttr(orig_node_attr, - new_node_attr, - force=True) - - # Constraint transforms - for attr in transform_attrs: - transform_attr = "{}.{}".format(new_node, attr) - stack.enter_context(_unlock_attr(transform_attr)) - delete_bin.extend(cmds.parentConstraint(node, new_node, mo=False)) - delete_bin.extend(cmds.scaleConstraint(node, new_node, mo=False)) - - world_space_nodes.append(new_node) - - bake(world_space_nodes, - frame_range=frame_range, - step=step, - simulation=simulation, - preserve_outside_keys=preserve_outside_keys, - disable_implicit_control=disable_implicit_control, - shape=shape) - - return world_space_nodes - - -def load_capture_preset(data): - """Convert OpenPype Extract Playblast settings to `capture` arguments - - Input data is the settings from: - `project_settings/maya/publish/ExtractPlayblast/capture_preset` - - Args: - data (dict): Capture preset settings from OpenPype settings - - Returns: - dict: `capture.capture` compatible keyword arguments - - """ - - options = dict() - viewport_options = dict() - viewport2_options = dict() - camera_options = dict() - - # Straight key-value match from settings to capture arguments - options.update(data["Codec"]) - options.update(data["Generic"]) - options.update(data["Resolution"]) - - camera_options.update(data['Camera Options']) - viewport_options.update(data["Renderer"]) - - # DISPLAY OPTIONS - disp_options = {} - for key, value in data['Display Options'].items(): - if key.startswith('background'): - # Convert background, backgroundTop, backgroundBottom colors - if len(value) == 4: - # Ignore alpha + convert RGB to 
float - value = [ - float(value[0]) / 255, - float(value[1]) / 255, - float(value[2]) / 255 - ] - disp_options[key] = value - elif key == "displayGradient": - disp_options[key] = value - - options['display_options'] = disp_options - - # Viewport Options has a mixture of Viewport2 Options and Viewport Options - # to pass along to capture. So we'll need to differentiate between the two - VIEWPORT2_OPTIONS = { - "textureMaxResolution", - "renderDepthOfField", - "ssaoEnable", - "ssaoSamples", - "ssaoAmount", - "ssaoRadius", - "ssaoFilterRadius", - "hwFogStart", - "hwFogEnd", - "hwFogAlpha", - "hwFogFalloff", - "hwFogColorR", - "hwFogColorG", - "hwFogColorB", - "hwFogDensity", - "motionBlurEnable", - "motionBlurSampleCount", - "motionBlurShutterOpenFraction", - "lineAAEnable" - } - for key, value in data['Viewport Options'].items(): - - # There are some keys we want to ignore - if key in {"override_viewport_options", "high_quality"}: - continue - - # First handle special cases where we do value conversion to - # separate option values - if key == 'textureMaxResolution': - viewport2_options['textureMaxResolution'] = value - if value > 0: - viewport2_options['enableTextureMaxRes'] = True - viewport2_options['textureMaxResMode'] = 1 - else: - viewport2_options['enableTextureMaxRes'] = False - viewport2_options['textureMaxResMode'] = 0 - - elif key == 'multiSample': - viewport2_options['multiSampleEnable'] = value > 0 - viewport2_options['multiSampleCount'] = value - - elif key == 'alphaCut': - viewport2_options['transparencyAlgorithm'] = 5 - viewport2_options['transparencyQuality'] = 1 - - elif key == 'hwFogFalloff': - # Settings enum value string to integer - viewport2_options['hwFogFalloff'] = int(value) - - # Then handle Viewport 2.0 Options - elif key in VIEWPORT2_OPTIONS: - viewport2_options[key] = value - - # Then assume remainder is Viewport Options - else: - viewport_options[key] = value - - options['viewport_options'] = viewport_options - options['viewport2_options'] = viewport2_options - options['camera_options'] = camera_options - - # use active sound track - scene = capture.parse_active_scene() - options['sound'] = scene['sound'] - - return options - - -def get_attr_in_layer(attr, layer): - """Return attribute value in specified renderlayer. - - Same as cmds.getAttr but this gets the attribute's value in a - given render layer without having to switch to it. - - Warning for parent attribute overrides: - Attributes that have render layer overrides to their parent attribute - are not captured correctly since they do not have a direct connection. - For example, an override to sphere.rotate when querying sphere.rotateX - will not return correctly! - - Note: This is much faster for Maya's renderLayer system, yet the code - does no optimized query for render setup. - - Args: - attr (str): attribute name, ex. "node.attribute" - layer (str): layer name - - Returns: - The return value from `maya.cmds.getAttr` - - """ - - try: - if cmds.mayaHasRenderSetup(): - from . 
import lib_rendersetup - return lib_rendersetup.get_attr_in_layer(attr, layer) - except AttributeError: - pass - - # Ignore complex query if we're in the layer anyway - current_layer = cmds.editRenderLayerGlobals(query=True, - currentRenderLayer=True) - if layer == current_layer: - return cmds.getAttr(attr) - - connections = cmds.listConnections(attr, - plugs=True, - source=False, - destination=True, - type="renderLayer") or [] - connections = filter(lambda x: x.endswith(".plug"), connections) - if not connections: - return cmds.getAttr(attr) - - # Some value types perform a conversion when assigning - # TODO: See if there's a maya method to allow this conversion - # instead of computing it ourselves. - attr_type = cmds.getAttr(attr, type=True) - conversion = None - if attr_type == "time": - conversion = mel.eval('currentTimeUnitToFPS()') # returns float - elif attr_type == "doubleAngle": - # Radians to Degrees: 180 / pi - # TODO: This will likely only be correct when Maya units are set - # to degrees - conversion = 57.2957795131 - elif attr_type == "doubleLinear": - raise NotImplementedError("doubleLinear conversion not implemented.") - - for connection in connections: - if connection.startswith(layer + "."): - attr_split = connection.split(".") - if attr_split[0] == layer: - attr = ".".join(attr_split[0:-1]) - value = cmds.getAttr("%s.value" % attr) - if conversion: - value *= conversion - return value - - else: - # When connections are present, but none - # to the specific renderlayer than the layer - # should have the "defaultRenderLayer"'s value - layer = "defaultRenderLayer" - for connection in connections: - if connection.startswith(layer): - attr_split = connection.split(".") - if attr_split[0] == "defaultRenderLayer": - attr = ".".join(attr_split[0:-1]) - value = cmds.getAttr("%s.value" % attr) - if conversion: - value *= conversion - return value - - return cmds.getAttr(attr) - - -def fix_incompatible_containers(): - """Backwards compatibility: old containers to use new ReferenceLoader""" - old_loaders = { - "MayaAsciiLoader", - "AbcLoader", - "ModelLoader", - "CameraLoader", - "RigLoader", - "FBXLoader" - } - host = registered_host() - for container in host.ls(): - loader = container['loader'] - if loader in old_loaders: - log.info( - "Converting legacy container loader {} to " - "ReferenceLoader: {}".format(loader, container["objectName"]) - ) - cmds.setAttr(container["objectName"] + ".loader", - "ReferenceLoader", type="string") - - -def _null(*args): - pass - - -class shelf(): - '''A simple class to build shelves in maya. Since the build method is empty, - it should be extended by the derived class to build the necessary shelf - elements. 
By default it creates an empty shelf called "customShelf".''' - - ########################################################################### - '''This is an example shelf.''' - # class customShelf(_shelf): - # def build(self): - # self.addButon(label="button1") - # self.addButon("button2") - # self.addButon("popup") - # p = cmds.popupMenu(b=1) - # self.addMenuItem(p, "popupMenuItem1") - # self.addMenuItem(p, "popupMenuItem2") - # sub = self.addSubMenu(p, "subMenuLevel1") - # self.addMenuItem(sub, "subMenuLevel1Item1") - # sub2 = self.addSubMenu(sub, "subMenuLevel2") - # self.addMenuItem(sub2, "subMenuLevel2Item1") - # self.addMenuItem(sub2, "subMenuLevel2Item2") - # self.addMenuItem(sub, "subMenuLevel1Item2") - # self.addMenuItem(p, "popupMenuItem3") - # self.addButon("button3") - # customShelf() - ########################################################################### - - def __init__(self, name="customShelf", iconPath="", preset={}): - self.name = name - - self.iconPath = iconPath - - self.labelBackground = (0, 0, 0, 0) - self.labelColour = (.9, .9, .9) - - self.preset = preset - - self._cleanOldShelf() - cmds.setParent(self.name) - self.build() - - def build(self): - '''This method should be overwritten in derived classes to actually - build the shelf elements. Otherwise, nothing is added to the shelf.''' - for item in self.preset['items']: - if not item.get('command'): - item['command'] = self._null - if item['type'] == 'button': - self.addButon(item['name'], - command=item['command'], - icon=item['icon']) - if item['type'] == 'menuItem': - self.addMenuItem(item['parent'], - item['name'], - command=item['command'], - icon=item['icon']) - if item['type'] == 'subMenu': - self.addMenuItem(item['parent'], - item['name'], - command=item['command'], - icon=item['icon']) - - def addButon(self, label, icon="commandButton.png", - command=_null, doubleCommand=_null): - ''' - Adds a shelf button with the specified label, command, - double click command and image. - ''' - cmds.setParent(self.name) - if icon: - icon = os.path.join(self.iconPath, icon) - print(icon) - cmds.shelfButton(width=37, height=37, image=icon, label=label, - command=command, dcc=doubleCommand, - imageOverlayLabel=label, olb=self.labelBackground, - olc=self.labelColour) - - def addMenuItem(self, parent, label, command=_null, icon=""): - ''' - Adds a shelf button with the specified label, command, - double click command and image. - ''' - if icon: - icon = os.path.join(self.iconPath, icon) - print(icon) - return cmds.menuItem(p=parent, label=label, c=command, i="") - - def addSubMenu(self, parent, label, icon=None): - ''' - Adds a sub menu item with the specified label and icon to - the specified parent popup menu. - ''' - if icon: - icon = os.path.join(self.iconPath, icon) - print(icon) - return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1) - - def _cleanOldShelf(self): - ''' - Checks if the shelf exists and empties it if it does - or creates it if it does not. 
- ''' - if cmds.shelfLayout(self.name, ex=1): - if cmds.shelfLayout(self.name, q=1, ca=1): - for each in cmds.shelfLayout(self.name, q=1, ca=1): - cmds.deleteUI(each) - else: - cmds.shelfLayout(self.name, p="ShelfLayout") - - -def update_content_on_context_change(): - """ - This will update scene content to match new asset on context change - """ - scene_sets = cmds.listSets(allSets=True) - asset_doc = get_current_project_asset() - new_asset = asset_doc["name"] - new_data = asset_doc["data"] - for s in scene_sets: - try: - if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance": - attr = cmds.listAttr(s) - print(s) - if "asset" in attr: - print(" - setting asset to: [ {} ]".format(new_asset)) - cmds.setAttr("{}.asset".format(s), - new_asset, type="string") - if "frameStart" in attr: - cmds.setAttr("{}.frameStart".format(s), - new_data["frameStart"]) - if "frameEnd" in attr: - cmds.setAttr("{}.frameEnd".format(s), - new_data["frameEnd"],) - except ValueError: - pass - - -def show_message(title, msg): - from qtpy import QtWidgets - from openpype.widgets import message_window - - # Find maya main window - top_level_widgets = {w.objectName(): w for w in - QtWidgets.QApplication.topLevelWidgets()} - - parent = top_level_widgets.get("MayaWindow", None) - if parent is None: - pass - else: - message_window.message(title=title, message=msg, parent=parent) - - -def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None): - """Yield edits as a set of actions.""" - - attributes = relationships.get("attributes", []) - shader_data = relationships.get("relationships", {}) - - shading_engines = cmds.ls(shader_nodes, type="objectSet", long=True) - assert shading_engines, "Error in retrieving objectSets from reference" - - # region compute lookup - shading_engines_by_id = defaultdict(list) - for shad in shading_engines: - shading_engines_by_id[get_id(shad)].append(shad) - # endregion - - # region assign shading engines and other sets - for data in shader_data.values(): - # collect all unique IDs of the set members - shader_uuid = data["uuid"] - member_uuids = [ - (member["uuid"], member.get("components")) - for member in data["members"]] - - filtered_nodes = list() - for _uuid, components in member_uuids: - nodes = nodes_by_id.get(_uuid, None) - if nodes is None: - continue - - if components: - # Assign to the components - nodes = [".".join([node, components]) for node in nodes] - - filtered_nodes.extend(nodes) - - id_shading_engines = shading_engines_by_id[shader_uuid] - if not id_shading_engines: - log.error("{} - No shader found with cbId " - "'{}'".format(label, shader_uuid)) - continue - elif len(id_shading_engines) > 1: - log.error("{} - Skipping shader assignment. " - "More than one shader found with cbId " - "'{}'. 
(found: {})".format(label, shader_uuid, - id_shading_engines)) - continue - - if not filtered_nodes: - log.warning("{} - No nodes found for shading engine " - "'{}'".format(label, id_shading_engines[0])) - continue - - yield {"action": "assign", - "uuid": data["uuid"], - "nodes": filtered_nodes, - "shader": id_shading_engines[0]} - - for data in attributes: - nodes = nodes_by_id.get(data["uuid"], []) - attr_value = data["attributes"] - yield {"action": "setattr", - "uuid": data["uuid"], - "nodes": nodes, - "attributes": attr_value} - - -def set_colorspace(): - """Set Colorspace from project configuration""" - - project_name = get_current_project_name() - imageio = get_project_settings(project_name)["maya"]["imageio"] - - # ocio compatibility variables - ocio_v2_maya_version = 2022 - maya_version = int(cmds.about(version=True)) - ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version - is_ocio_set = bool(os.environ.get("OCIO")) - - use_workfile_settings = imageio.get("workfile", {}).get("enabled") - if use_workfile_settings: - root_dict = imageio["workfile"] - else: - # TODO: deprecated code from 3.15.5 - remove - # Maya 2022+ introduces new OCIO v2 color management settings that - # can override the old color management preferences. OpenPype has - # separate settings for both so we fall back when necessary. - use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"] - if use_ocio_v2 and not ocio_v2_support: - # Fallback to legacy behavior with a warning - log.warning( - "Color Management Preference v2 is enabled but not " - "supported by current Maya version: {} (< {}). Falling " - "back to legacy settings.".format( - maya_version, ocio_v2_maya_version) - ) - - if use_ocio_v2: - root_dict = imageio["colorManagementPreference_v2"] - else: - root_dict = imageio["colorManagementPreference"] - - if not isinstance(root_dict, dict): - msg = "set_colorspace(): argument should be dictionary" - log.error(msg) - return - - # backward compatibility - # TODO: deprecated code from 3.15.5 - remove with deprecated code above - view_name = root_dict.get("viewTransform") - if view_name is None: - view_name = root_dict.get("viewName") - - log.debug(">> root_dict: {}".format(pformat(root_dict))) - if not root_dict: - return - - # set color spaces for rendering space and view transforms - def _colormanage(**kwargs): - """Wrapper around `cmds.colorManagementPrefs`. - - This logs errors instead of raising an error so color management - settings get applied as much as possible. - - """ - assert len(kwargs) == 1, "Must receive one keyword argument" - try: - cmds.colorManagementPrefs(edit=True, **kwargs) - log.debug("Setting Color Management Preference: {}".format(kwargs)) - except RuntimeError as exc: - log.error(exc) - - # enable color management - cmds.colorManagementPrefs(edit=True, cmEnabled=True) - cmds.colorManagementPrefs(edit=True, ocioRulesEnabled=True) - - if use_ocio_v2: - log.info("Using Maya OCIO v2") - if not is_ocio_set: - # Set the Maya 2022+ default OCIO v2 config file path - log.info("Setting default Maya OCIO v2 config") - # Note: Setting "" as value also sets this default however - # introduces a bug where launching a file on startup will prompt - # to save the empty scene before it, so we set using the path. 
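-            # The "<MAYA_RESOURCES>" token in the path below is resolved by
-            # Maya to its installation resources directory (see
-            # get_color_management_preferences).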
-            # This value has been the same for 2022, 2023 and 2024
-            path = "<MAYA_RESOURCES>/OCIO-configs/Maya2022-default/config.ocio"
-            cmds.colorManagementPrefs(edit=True, configFilePath=path)
-
-        # set rendering space and view transform
-        _colormanage(renderingSpaceName=root_dict["renderSpace"])
-        _colormanage(viewName=view_name)
-        _colormanage(displayName=root_dict["displayName"])
-    else:
-        log.info("Using Maya OCIO v1 (legacy)")
-        if not is_ocio_set:
-            # Set the Maya default config file path
-            log.info("Setting default Maya OCIO v1 legacy config")
-            cmds.colorManagementPrefs(edit=True, configFilePath="legacy")
-
-        # set rendering space and view transform
-        _colormanage(renderingSpaceName=root_dict["renderSpace"])
-        _colormanage(viewTransformName=view_name)
-
-
-@contextlib.contextmanager
-def parent_nodes(nodes, parent=None):
-    # type: (list, str) -> list
-    """Context manager to un-parent provided nodes and return them back."""
-
-    def _as_mdagpath(node):
-        """Return MDagPath for node path."""
-        if not node:
-            return
-        sel = OpenMaya.MSelectionList()
-        sel.add(node)
-        return sel.getDagPath(0)
-
-    # We can only parent dag nodes so we ensure input contains only dag nodes
-    nodes = cmds.ls(nodes, type="dagNode", long=True)
-    if not nodes:
-        # opt-out early
-        yield
-        return
-
-    parent_node_path = None
-    delete_parent = False
-    if parent:
-        if not cmds.objExists(parent):
-            parent_node = cmds.createNode("transform",
-                                          name=parent,
-                                          skipSelect=False)
-            delete_parent = True
-        else:
-            parent_node = parent
-        parent_node_path = cmds.ls(parent_node, long=True)[0]
-
-    # Store original parents
-    node_parents = []
-    for node in nodes:
-        node_parent = get_node_parent(node)
-        node_parents.append((_as_mdagpath(node), _as_mdagpath(node_parent)))
-
-    try:
-        for node, node_parent in node_parents:
-            node_parent_path = node_parent.fullPathName() if node_parent else None  # noqa
-            if node_parent_path == parent_node_path:
-                # Already a child
-                continue
-
-            if parent_node_path:
-                cmds.parent(node.fullPathName(), parent_node_path)
-            else:
-                cmds.parent(node.fullPathName(), world=True)
-
-        yield
-    finally:
-        # Reparent to original parents
-        for node, original_parent in node_parents:
-            node_path = node.fullPathName()
-            if not node_path:
-                # Node must have been deleted
-                continue
-
-            node_parent_path = get_node_parent(node_path)
-
-            original_parent_path = None
-            if original_parent:
-                original_parent_path = original_parent.fullPathName()
-                if not original_parent_path:
-                    # Original parent node must have been deleted
-                    continue
-
-            if node_parent_path != original_parent_path:
-                if not original_parent_path:
-                    cmds.parent(node_path, world=True)
-                else:
-                    cmds.parent(node_path, original_parent_path)
-
-        if delete_parent:
-            cmds.delete(parent_node_path)
-
-
-@contextlib.contextmanager
-def maintained_time():
-    ct = cmds.currentTime(query=True)
-    try:
-        yield
-    finally:
-        cmds.currentTime(ct, edit=True)
-
-
-def iter_visible_nodes_in_range(nodes, start, end):
-    """Yield nodes that are visible in start-end frame range.
-
-    - Ignores intermediateObjects completely.
-    - Considers animated visibility attributes + upstream visibilities.
-
-    This is optimized for large scenes where some nodes in the parent
-    hierarchy might have some input connections to the visibilities,
-    e.g. key, driven keys, connections to other attributes, etc.
- - This only does a single time step to `start` if current frame is - not inside frame range since the assumption is made that changing - a frame isn't so slow that it beats querying all visibility - plugs through MDGContext on another frame. - - Args: - nodes (list): List of node names to consider. - start (int, float): Start frame. - end (int, float): End frame. - - Returns: - list: List of node names. These will be long full path names so - might have a longer name than the input nodes. - - """ - # States we consider per node - VISIBLE = 1 # always visible - INVISIBLE = 0 # always invisible - ANIMATED = -1 # animated visibility - - # Ensure integers - start = int(start) - end = int(end) - - # Consider only non-intermediate dag nodes and use the "long" names. - nodes = cmds.ls(nodes, long=True, noIntermediate=True, type="dagNode") - if not nodes: - return - - with maintained_time(): - # Go to first frame of the range if the current time is outside - # the queried range so can directly query all visible nodes on - # that frame. - current_time = cmds.currentTime(query=True) - if not (start <= current_time <= end): - cmds.currentTime(start) - - visible = cmds.ls(nodes, long=True, visible=True) - for node in visible: - yield node - if len(visible) == len(nodes) or start == end: - # All are visible on frame one, so they are at least visible once - # inside the frame range. - return - - # For the invisible ones check whether its visibility and/or - # any of its parents visibility attributes are animated. If so, it might - # get visible on other frames in the range. - def memodict(f): - """Memoization decorator for a function taking a single argument. - - See: http://code.activestate.com/recipes/ - 578231-probably-the-fastest-memoization-decorator-in-the-/ - """ - - class memodict(dict): - def __missing__(self, key): - ret = self[key] = f(key) - return ret - - return memodict().__getitem__ - - @memodict - def get_state(node): - plug = node + ".visibility" - connections = cmds.listConnections(plug, - source=True, - destination=False) - if connections: - return ANIMATED - else: - return VISIBLE if cmds.getAttr(plug) else INVISIBLE - - visible = set(visible) - invisible = [node for node in nodes if node not in visible] - always_invisible = set() - # Iterate over the nodes by short to long names to iterate the highest - # in hierarchy nodes first. So the collected data can be used from the - # cache for parent queries in next iterations. - node_dependencies = dict() - for node in sorted(invisible, key=len): - - state = get_state(node) - if state == INVISIBLE: - always_invisible.add(node) - continue - - # If not always invisible by itself we should go through and check - # the parents to see if any of them are always invisible. For those - # that are "ANIMATED" we consider that this node is dependent on - # that attribute, we store them as dependency. 
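-        # In other words, a "dependency" is any node whose animated
-        # visibility must be sampled per frame to decide whether this
-        # node is shown.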
- dependencies = set() - if state == ANIMATED: - dependencies.add(node) - - traversed_parents = list() - for parent in iter_parents(node): - - if parent in always_invisible or get_state(parent) == INVISIBLE: - # When parent is always invisible then consider this parent, - # this node we started from and any of the parents we - # have traversed in-between to be *always invisible* - always_invisible.add(parent) - always_invisible.add(node) - always_invisible.update(traversed_parents) - break - - # If we have traversed the parent before and its visibility - # was dependent on animated visibilities then we can just extend - # its dependencies for to those for this node and break further - # iteration upwards. - parent_dependencies = node_dependencies.get(parent, None) - if parent_dependencies is not None: - dependencies.update(parent_dependencies) - break - - state = get_state(parent) - if state == ANIMATED: - dependencies.add(parent) - - traversed_parents.append(parent) - - if node not in always_invisible and dependencies: - node_dependencies[node] = dependencies - - if not node_dependencies: - return - - # Now we only have to check the visibilities for nodes that have animated - # visibility dependencies upstream. The fastest way to check these - # visibility attributes across different frames is with Python api 2.0 - # so we do that. - @memodict - def get_visibility_mplug(node): - """Return api 2.0 MPlug with cached memoize decorator""" - sel = OpenMaya.MSelectionList() - sel.add(node) - dag = sel.getDagPath(0) - return OpenMaya.MFnDagNode(dag).findPlug("visibility", True) - - @contextlib.contextmanager - def dgcontext(mtime): - """MDGContext context manager""" - context = OpenMaya.MDGContext(mtime) - try: - previous = context.makeCurrent() - yield context - finally: - previous.makeCurrent() - - # We skip the first frame as we already used that frame to check for - # overall visibilities. And end+1 to include the end frame. - scene_units = OpenMaya.MTime.uiUnit() - for frame in range(start + 1, end + 1): - mtime = OpenMaya.MTime(frame, unit=scene_units) - - # Build little cache so we don't query the same MPlug's value - # again if it was checked on this frame and also is a dependency - # for another node - frame_visibilities = {} - with dgcontext(mtime) as context: - for node, dependencies in list(node_dependencies.items()): - for dependency in dependencies: - dependency_visible = frame_visibilities.get(dependency, - None) - if dependency_visible is None: - mplug = get_visibility_mplug(dependency) - dependency_visible = mplug.asBool(context) - frame_visibilities[dependency] = dependency_visible - - if not dependency_visible: - # One dependency is not visible, thus the - # node is not visible. - break - - else: - # All dependencies are visible. - yield node - # Remove node with dependencies for next frame iterations - # because it was visible at least once. - node_dependencies.pop(node) - - # If no more nodes to process break the frame iterations.. - if not node_dependencies: - break - - -def get_attribute_input(attr): - connections = cmds.listConnections(attr, plugs=True, destination=False) - return connections[0] if connections else None - - -def convert_to_maya_fps(fps): - """Convert any fps to supported Maya framerates.""" - float_framerates = [ - 23.976023976023978, - # WTF is 29.97 df vs fps? - 29.97002997002997, - 47.952047952047955, - 59.94005994005994 - ] - # 44100 fps evaluates as 41000.0. Why? Omitting for now. 
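-    # For example, an input of 23.976 snaps to the supported
-    # 23.976023976023978, while 24 is validated and returned as-is.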
-    int_framerates = [
-        2,
-        3,
-        4,
-        5,
-        6,
-        8,
-        10,
-        12,
-        15,
-        16,
-        20,
-        24,
-        25,
-        30,
-        40,
-        48,
-        50,
-        60,
-        75,
-        80,
-        90,
-        100,
-        120,
-        125,
-        150,
-        200,
-        240,
-        250,
-        300,
-        375,
-        400,
-        500,
-        600,
-        750,
-        1200,
-        1500,
-        2000,
-        3000,
-        6000,
-        48000
-    ]
-
-    # If input fps is a whole number we'll return it directly.
-    if float(fps).is_integer():
-        # Validate fps is part of Maya's fps selection.
-        if int(fps) not in int_framerates:
-            raise ValueError(
-                "Framerate \"{}\" is not supported in Maya".format(fps)
-            )
-        return int(fps)
-    else:
-        # Differences to supported float frame rates.
-        differences = []
-        for i in float_framerates:
-            differences.append(abs(i - fps))
-
-        # Validate difference does not stray too far from supported framerates.
-        min_difference = min(differences)
-        min_index = differences.index(min_difference)
-        supported_framerate = float_framerates[min_index]
-        if min_difference > 0.1:
-            raise ValueError(
-                "Framerate \"{}\" strays too far from any supported framerate"
-                " in Maya. Closest supported framerate is \"{}\"".format(
-                    fps, supported_framerate
-                )
-            )
-
-        return supported_framerate
-
-
-def write_xgen_file(data, filepath):
-    """Overwrites data in .xgen files.
-
-    Quite naive approach to mainly overwrite "xgDataPath" and "xgProjectPath".
-
-    Args:
-        data (dict): Dictionary of key, value. Key matches with xgen file.
-            For example:
-                {"xgDataPath": "some/path"}
-        filepath (string): Absolute path of .xgen file.
-    """
-    # Generate regex lookup for line to key; basically match any of the
-    # keys in `\t{key}\t\t`
-    keys = "|".join(re.escape(key) for key in data.keys())
-    re_keys = re.compile("^\t({})\t\t".format(keys))
-
-    lines = []
-    with open(filepath, "r") as f:
-        for line in f:
-            match = re_keys.match(line)
-            if match:
-                key = match.group(1)
-                value = data[key]
-                line = "\t{}\t\t{}\n".format(key, value)
-
-            lines.append(line)
-
-    with open(filepath, "w") as f:
-        f.writelines(lines)
-
-
-def get_color_management_preferences():
-    """Get and resolve OCIO preferences."""
-    data = {
-        # Is color management enabled.
-        "enabled": cmds.colorManagementPrefs(
-            query=True, cmEnabled=True
-        ),
-        "rendering_space": cmds.colorManagementPrefs(
-            query=True, renderingSpaceName=True
-        ),
-        "output_transform": cmds.colorManagementPrefs(
-            query=True, outputTransformName=True
-        ),
-        "output_transform_enabled": cmds.colorManagementPrefs(
-            query=True, outputTransformEnabled=True
-        ),
-        "view_transform": cmds.colorManagementPrefs(
-            query=True, viewTransformName=True
-        )
-    }
-
-    # Split view and display from view_transform. view_transform comes in
-    # format of "{view} ({display})".
-    regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
-    if int(cmds.about(version=True)) <= 2020:
-        # view_transform comes in format of "{view} {display}" in 2020.
-        regex = re.compile(r"^(?P<view>.+) (?P<display>.+)$")
-
-    match = regex.match(data["view_transform"])
-    if not match:
-        raise ValueError(
-            "Unable to parse view and display from Maya view transform: '{}' "
-            "using regex '{}'".format(data["view_transform"], regex.pattern)
-        )
-
-    data.update({
-        "display": match.group("display"),
-        "view": match.group("view")
-    })
-
-    # Get config absolute path.
-    path = cmds.colorManagementPrefs(
-        query=True, configFilePath=True
-    )
-
-    # The OCIO config supports a custom <MAYA_RESOURCES> token.
-
-
-def get_color_management_preferences():
- """Get and resolve OCIO preferences."""
- data = {
- # Is color management enabled.
- "enabled": cmds.colorManagementPrefs(
- query=True, cmEnabled=True
- ),
- "rendering_space": cmds.colorManagementPrefs(
- query=True, renderingSpaceName=True
- ),
- "output_transform": cmds.colorManagementPrefs(
- query=True, outputTransformName=True
- ),
- "output_transform_enabled": cmds.colorManagementPrefs(
- query=True, outputTransformEnabled=True
- ),
- "view_transform": cmds.colorManagementPrefs(
- query=True, viewTransformName=True
- )
- }
-
- # Split view and display from view_transform. view_transform comes in
- # format of "{view} ({display})".
- regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
- if int(cmds.about(version=True)) <= 2020:
- # view_transform comes in format of "{view} {display}" in 2020.
- regex = re.compile(r"^(?P<view>.+) (?P<display>.+)$")
-
- match = regex.match(data["view_transform"])
- if not match:
- raise ValueError(
- "Unable to parse view and display from Maya view transform: '{}' "
- "using regex '{}'".format(data["view_transform"], regex.pattern)
- )
-
- data.update({
- "display": match.group("display"),
- "view": match.group("view")
- })
-
- # Get config absolute path.
- path = cmds.colorManagementPrefs(
- query=True, configFilePath=True
- )
-
- # The OCIO config supports a custom <MAYA_RESOURCES> token.
- maya_resources_token = "<MAYA_RESOURCES>"
- maya_resources_path = OpenMaya.MGlobal.getAbsolutePathToResources()
- path = path.replace(maya_resources_token, maya_resources_path)
-
- data["config"] = path
-
- return data
-
-
-def get_color_management_output_transform():
- preferences = get_color_management_preferences()
- colorspace = preferences["rendering_space"]
- if preferences["output_transform_enabled"]:
- colorspace = preferences["output_transform"]
- return colorspace
-
-
-def image_info(file_path):
- # type: (str) -> dict
- """Based on the texture path, get its bit depth and format information.
- Take reference from makeTx.py in Arnold:
- ImageInfo(filename): Get Image Information for colorspace
- AiTextureGetFormat(filename): Get Texture Format
- AiTextureGetBitDepth(filename): Get Texture bit depth
- Args:
- file_path (str): Path to the texture file.
- Returns:
- dict: Dictionary with the information about the texture file.
- """
- from arnold import (
- AiTextureGetBitDepth,
- AiTextureGetFormat
- )
- # Get Texture Information
- img_info = {'filename': file_path}
- if os.path.isfile(file_path):
- img_info['bit_depth'] = AiTextureGetBitDepth(file_path) # noqa
- img_info['format'] = AiTextureGetFormat(file_path) # noqa
- else:
- img_info['bit_depth'] = 8
- img_info['format'] = "unknown"
- return img_info
-
-
-def guess_colorspace(img_info):
- # type: (dict) -> str
- """Guess the colorspace of the input image filename.
- Note:
- Reference from makeTx.py
- Args:
- img_info (dict): Image info generated by :func:`image_info`
- Returns:
- str: color space name used in the `--colorconvert`
- option of maketx.
- """
- from arnold import (
- AiTextureInvalidate,
- # types
- AI_TYPE_BYTE,
- AI_TYPE_INT,
- AI_TYPE_UINT
- )
- try:
- if img_info['bit_depth'] <= 16:
- if img_info['format'] in (AI_TYPE_BYTE, AI_TYPE_INT, AI_TYPE_UINT): # noqa
- return 'sRGB'
- else:
- return 'linear'
- # now discard the image file as AiTextureGetFormat has loaded it
- AiTextureInvalidate(img_info['filename']) # noqa
- except ValueError:
- print(("[maketx] Error: Could not guess "
- "colorspace for {}").format(img_info["filename"]))
- return "linear"
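An illustrative pairing of `image_info` and `guess_colorspace` as a maketx pre-pass might use them; the texture path is hypothetical and Arnold's Python bindings must be importable:

    info = image_info("/textures/diffuse.jpg")
    colorspace = guess_colorspace(info)
    # 8-bit integer formats are treated as sRGB, everything else as linear,
    # so a JPEG would typically report "sRGB" here.
    print(info["bit_depth"], info["format"], colorspace)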
-
-
-def len_flattened(components):
- """Return the length of the list as if it was flattened.
-
- Maya will return consecutive components as a single entry
- when requesting with `maya.cmds.ls` without the `flatten`
- flag. Though enabling `flatten` on a large list (e.g. millions)
- will result in a slow result. This command will return the amount
- of entries in a non-flattened list by parsing the result with
- regex.
-
- Args:
- components (list): The non-flattened components.
-
- Returns:
- int: The amount of entries.
-
- """
- assert isinstance(components, (list, tuple))
- n = 0
-
- pattern = re.compile(r"\[(\d+):(\d+)\]")
- for c in components:
- match = pattern.search(c)
- if match:
- start, end = match.groups()
- n += int(end) - int(start) + 1
- else:
- n += 1
- return n
-
-
-def get_all_children(nodes):
- """Return all children of `nodes` including each instanced child.
- Using maya.cmds.listRelatives(allDescendents=True) includes only the first
- instance. As such, this function acts as an optimal replacement with a
- focus on a fast query.
-
- """
-
- sel = OpenMaya.MSelectionList()
- traversed = set()
- iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst)
- for node in nodes:
-
- if node in traversed:
- # Ignore if already processed as a child before
- continue
-
- sel.clear()
- sel.add(node)
- dag = sel.getDagPath(0)
-
- iterator.reset(dag)
- # ignore self
- iterator.next() # noqa: B305
- while not iterator.isDone():
-
- path = iterator.fullPathName()
-
- if path in traversed:
- iterator.prune()
- iterator.next() # noqa: B305
- continue
-
- traversed.add(path)
- iterator.next() # noqa: B305
-
- return list(traversed)
-
-
-def get_capture_preset(task_name, task_type, subset, project_settings, log):
- """Get capture preset for playblasting.
-
- Logic for transitioning from old style capture preset to new capture
- preset profiles.
-
- Args:
- task_name (str): Task name.
- task_type (str): Task type.
- subset (str): Subset name.
- project_settings (dict): Project settings.
- log (object): Logging object.
- """
- capture_preset = None
- filtering_criteria = {
- "hosts": "maya",
- "families": "review",
- "task_names": task_name,
- "task_types": task_type,
- "subset": subset
- }
-
- plugin_settings = project_settings["maya"]["publish"]["ExtractPlayblast"]
- if plugin_settings["profiles"]:
- profile = filter_profiles(
- plugin_settings["profiles"],
- filtering_criteria,
- logger=log
- )
- capture_preset = profile.get("capture_preset")
- else:
- log.warning("No profiles present for Extract Playblast")
-
- # Backward compatibility for deprecated Extract Playblast settings
- # without profiles.
- if capture_preset is None:
- log.debug(
- "Falling back to deprecated Extract Playblast capture preset "
- "because no new style playblast profiles are defined."
- )
- capture_preset = plugin_settings["capture_preset"]
-
- return capture_preset or {}
-
-
-def get_reference_node(members, log=None):
- """Get the reference node from the container members
- Args:
- members: list of node names
-
- Returns:
- str: Reference node name.
-
- """
-
- # Collect the references without .placeHolderList[] attributes as
- # unique entries (objects only) and skipping the sharedReferenceNode.
- references = set()
- for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
-
- # Ignore any `:sharedReferenceNode`
- if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
- continue
-
- # Ignore _UNKNOWN_REF_NODE_ (PLN-160)
- if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
- continue
-
- references.add(ref)
-
- assert references, "No reference node found in container"
-
- # Get highest reference node (least parents)
- highest = min(references,
- key=lambda x: len(get_reference_node_parents(x)))
-
- # Warn the user when we're taking the highest reference node
- if len(references) > 1:
- if not log:
- log = logging.getLogger(__name__)
-
- log.warning("More than one reference node found in "
- "container, using highest reference node: "
- "%s (in: %s)", highest, list(references))
-
- return highest
-
-
-def get_reference_node_parents(ref):
- """Return all parent reference nodes of reference node
-
- Args:
- ref (str): reference node.
-
- Returns:
- list: The upstream parent reference nodes.
- - """ - parent = cmds.referenceQuery(ref, - referenceNode=True, - parent=True) - parents = [] - while parent: - parents.append(parent) - parent = cmds.referenceQuery(parent, - referenceNode=True, - parent=True) - return parents - - -def create_rig_animation_instance( - nodes, context, namespace, options=None, log=None -): - """Create an animation publish instance for loaded rigs. - - See the RecreateRigAnimationInstance inventory action on how to use this - for loaded rig containers. - - Arguments: - nodes (list): Member nodes of the rig instance. - context (dict): Representation context of the rig container - namespace (str): Namespace of the rig container - options (dict, optional): Additional loader data - log (logging.Logger, optional): Logger to log to if provided - - Returns: - None - - """ - if options is None: - options = {} - name = context["representation"]["name"] - output = next((node for node in nodes if - node.endswith("out_SET")), None) - controls = next((node for node in nodes if - node.endswith("controls_SET")), None) - if name != "fbx": - assert output, "No out_SET in rig, this is a bug." - assert controls, "No controls_SET in rig, this is a bug." - - anim_skeleton = next((node for node in nodes if - node.endswith("skeletonAnim_SET")), None) - skeleton_mesh = next((node for node in nodes if - node.endswith("skeletonMesh_SET")), None) - - # Find the roots amongst the loaded nodes - roots = ( - cmds.ls(nodes, assemblies=True, long=True) or - get_highest_in_hierarchy(nodes) - ) - assert roots, "No root nodes in rig, this is a bug." - - custom_subset = options.get("animationSubsetName") - if custom_subset: - formatting_data = { - "asset": context["asset"], - "subset": context['subset']['name'], - "family": ( - context['subset']['data'].get('family') or - context['subset']['data']['families'][0] - ) - } - namespace = get_custom_namespace( - custom_subset.format( - **formatting_data - ) - ) - - if log: - log.info("Creating subset: {}".format(namespace)) - - # Fill creator identifier - creator_identifier = "io.openpype.creators.maya.animation" - - host = registered_host() - create_context = CreateContext(host) - # Create the animation instance - rig_sets = [output, controls, anim_skeleton, skeleton_mesh] - # Remove sets that this particular rig does not have - rig_sets = [s for s in rig_sets if s is not None] - with maintained_selection(): - cmds.select(rig_sets + roots, noExpand=True) - create_context.create( - creator_identifier=creator_identifier, - variant=namespace, - pre_create_data={"use_selection": True} - ) diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py deleted file mode 100644 index b5b71a5a36..0000000000 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ /dev/null @@ -1,1453 +0,0 @@ -# -*- coding: utf-8 -*- -"""Module handling expected render output from Maya. - -This module is used in :mod:`collect_render` and :mod:`collect_vray_scene`. - -Note: - To implement new renderer, just create new class inheriting from - :class:`ARenderProducts` and add it to :func:`RenderProducts.get()`. - -Attributes: - R_SINGLE_FRAME (:class:`re.Pattern`): Find single frame number. - R_FRAME_RANGE (:class:`re.Pattern`): Find frame range. - R_FRAME_NUMBER (:class:`re.Pattern`): Find frame number in string. - R_LAYER_TOKEN (:class:`re.Pattern`): Find layer token in image prefixes. - R_AOV_TOKEN (:class:`re.Pattern`): Find AOV token in image prefixes. 
- R_SUBSTITUTE_AOV_TOKEN (:class:`re.Pattern`): Find and substitute AOV token
- in image prefixes.
- R_REMOVE_AOV_TOKEN (:class:`re.Pattern`): Find and remove AOV token in
- image prefixes.
- R_CLEAN_FRAME_TOKEN (:class:`re.Pattern`): Find and remove unfilled
- Renderman frame token in image prefix.
- R_CLEAN_EXT_TOKEN (:class:`re.Pattern`): Find and remove unfilled Renderman
- extension token in image prefix.
- R_SUBSTITUTE_LAYER_TOKEN (:class:`re.Pattern`): Find and substitute render
- layer token in image prefixes.
- R_SUBSTITUTE_SCENE_TOKEN (:class:`re.Pattern`): Find and substitute scene
- token in image prefixes.
- R_SUBSTITUTE_CAMERA_TOKEN (:class:`re.Pattern`): Find and substitute camera
- token in image prefixes.
- IMAGE_PREFIXES (dict): Mapping between renderers and their respective
- image prefix attribute names.
-
-Thanks:
- Roy Nieterau (BigRoy) / Colorbleed for overhaul of original
- *expected_files*.
-
-"""
-
-import logging
-import re
-import os
-from abc import ABCMeta, abstractmethod
-
-import six
-import attr
-
-from . import lib
-from . import lib_rendersetup
-from openpype.pipeline.colorspace import get_ocio_config_views
-
-from maya import cmds, mel
-
-log = logging.getLogger(__name__)
-
-R_SINGLE_FRAME = re.compile(r"^(-?)\d+$")
-R_FRAME_RANGE = re.compile(r"^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$")
-R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+")
-R_LAYER_TOKEN = re.compile(
- r".*((?:%l)|(?:<layer>)|(?:<renderlayer>)).*", re.IGNORECASE
-)
-R_AOV_TOKEN = re.compile(r".*%a.*|.*<aov>.*|.*<renderpass>.*", re.IGNORECASE)
-R_SUBSTITUTE_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)
-R_REMOVE_AOV_TOKEN = re.compile(
- r"_%a|\.%a|_<aov>|\.<aov>|_<renderpass>|\.<renderpass>", re.IGNORECASE)
-# to remove unused renderman tokens
-R_CLEAN_FRAME_TOKEN = re.compile(r"\.?<f\d>\.?", re.IGNORECASE)
-R_CLEAN_EXT_TOKEN = re.compile(r"\.?<ext>\.?", re.IGNORECASE)
-
-R_SUBSTITUTE_LAYER_TOKEN = re.compile(
- r"%l|<layer>|<renderlayer>", re.IGNORECASE
-)
-R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)
-R_SUBSTITUTE_SCENE_TOKEN = re.compile(r"%s|<scene>", re.IGNORECASE)
-
-# not sure about the renderman image prefix
-IMAGE_PREFIXES = {
- "vray": "vraySettings.fileNamePrefix",
- "arnold": "defaultRenderGlobals.imageFilePrefix",
- "renderman": "rmanGlobals.imageFileFormat",
- "redshift": "defaultRenderGlobals.imageFilePrefix",
- "mayahardware2": "defaultRenderGlobals.imageFilePrefix"
-}
-
-RENDERMAN_IMAGE_DIR = "<scene>/<layer>"
-
-
-def has_tokens(string, tokens):
- """Return whether any of tokens is in input string (case-insensitive)"""
- pattern = "({})".format("|".join(re.escape(token) for token in tokens))
- match = re.search(pattern, string, re.IGNORECASE)
- return bool(match)
-
-
-@attr.s
-class LayerMetadata(object):
- """Data class for Render Layer metadata."""
- frameStart = attr.ib()
- frameEnd = attr.ib()
- cameras = attr.ib()
- sceneName = attr.ib()
- layerName = attr.ib()
- renderer = attr.ib()
- defaultExt = attr.ib()
- filePrefix = attr.ib()
- frameStep = attr.ib(default=1)
- padding = attr.ib(default=4)
-
- # Render Products
- products = attr.ib(init=False, default=attr.Factory(list))
-
- # The AOV separator token. Note that not all renderers define an explicit
- # render separator but allow to put the AOV/RenderPass token anywhere in
- # the file path prefix. For those renderers we'll fall back to whatever
- # is between the last occurrences of the <RenderLayer> and <RenderPass>
- # tokens.
- aov_separator = attr.ib(default="_")
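A small sketch of how the substitution patterns above resolve a Maya image file prefix; the prefix string is invented for illustration:

    import re

    prefix = "renders/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
    prefix = re.sub(R_SUBSTITUTE_SCENE_TOKEN, "shot010", prefix)
    prefix = re.sub(R_SUBSTITUTE_LAYER_TOKEN, "char", prefix)
    prefix = re.sub(R_SUBSTITUTE_AOV_TOKEN, "diffuse", prefix)
    # -> "renders/shot010/char/char_diffuse"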
-
-
-@attr.s
-class RenderProduct(object):
- """Describes an image or other file-like artifact produced by a render.
-
- Warning:
- This currently does NOT return as a product PER render camera.
- A single Render Product will generate files per camera. E.g. with two
- cameras each render product generates two sequences on disk assuming
- the file path prefix correctly uses the tokens.
-
- """
- productName = attr.ib()
- ext = attr.ib() # extension
- colorspace = attr.ib() # colorspace
- aov = attr.ib(default=None) # source aov
- driver = attr.ib(default=None) # source driver
- multipart = attr.ib(default=False) # multichannel file
- camera = attr.ib(default=None) # used only when rendering
- # from multiple cameras
-
-
-def get(layer, render_instance=None):
- # type: (str, object) -> ARenderProducts
- """Get render details and products for given renderer and render layer.
-
- Args:
- layer (str): Name of render layer
- render_instance (pyblish.api.Instance): Publish instance.
- If not provided an empty mock instance is used.
-
- Returns:
- ARenderProducts: The correct RenderProducts instance for that
- renderlayer.
-
- Raises:
- :exc:`UnsupportedRendererException`: If requested renderer
- is not supported. It needs to be implemented by extending
- :class:`ARenderProducts` and added to this methods ``if``
- statement.
-
- """
-
- if render_instance is None:
- # For now produce a mock instance
- class Instance(object):
- data = {}
- render_instance = Instance()
-
- renderer_name = lib.get_attr_in_layer(
- "defaultRenderGlobals.currentRenderer",
- layer=layer
- )
-
- renderer = {
- "arnold": RenderProductsArnold,
- "vray": RenderProductsVray,
- "redshift": RenderProductsRedshift,
- "renderman": RenderProductsRenderman,
- "mayahardware2": RenderProductsMayaHardware
- }.get(renderer_name.lower(), None)
- if renderer is None:
- raise UnsupportedRendererException(
- "Unsupported renderer: {}".format(renderer_name)
- )
-
- return renderer(layer, render_instance)
-
-
-@six.add_metaclass(ABCMeta)
-class ARenderProducts:
- """Abstract class with common code for all renderers.
-
- Attributes:
- renderer (str): name of renderer.
-
- """
-
- renderer = None
-
- def __init__(self, layer, render_instance):
- """Constructor."""
- self.layer = layer
- self.render_instance = render_instance
- self.multipart = self.get_multipart()
-
- # Initialize
- self.layer_data = self._get_layer_data()
- self.layer_data.products = self.get_render_products()
-
- def get_multipart(self):
- raise NotImplementedError(
- "The render product implementation does not have a "
- "\"get_multipart\" method."
- )
-
- def has_camera_token(self):
- # type: () -> bool
- """Check if the camera token is in the image prefix.
-
- Returns:
- bool: True/False if camera token is present.
-
- """
- return "<camera>" in self.layer_data.filePrefix.lower()
-
- @abstractmethod
- def get_render_products(self):
- """To be implemented by renderer class.
-
- This should return a list of RenderProducts.
-
- Returns:
- list: List of RenderProduct
-
- """
-
- @staticmethod
- def sanitize_camera_name(camera):
- # type: (str) -> str
- """Sanitize camera name.
-
- Remove Maya illegal characters from camera name.
-
- Args:
- camera (str): Maya camera name.
-
- Returns:
- (str): sanitized camera name
-
- Example:
- >>> ARenderProducts.sanitize_camera_name('test:camera_01')
- test_camera_01
-
- """
- return re.sub('[^0-9a-zA-Z_]+', '_', camera)
-
- def get_renderer_prefix(self):
- # type: () -> str
- """Return prefix for specific renderer.
-
- This is for most renderers the same and can be overridden if needed.
-
- Returns:
- str: String with image prefix containing tokens
-
- Raises:
- :exc:`UnsupportedRendererException`: If we requested image
- prefix for renderer we know nothing about.
- See :data:`IMAGE_PREFIXES` for mapping of renderers and
- image prefixes.
-
- """
- try:
- prefix_attr = IMAGE_PREFIXES[self.renderer]
- except KeyError:
- raise UnsupportedRendererException(
- "Unsupported renderer {}".format(self.renderer)
- )
-
- # Note: When this attribute is never set (e.g. on maya launch) then
- # this can return None even though it is a string attribute
- prefix = self._get_attr(prefix_attr)
-
- if not prefix:
- # Fall back to scene name by default
- log.warning("Image prefix not set, using <Scene>")
- prefix = "<Scene>"
-
- return prefix
-
- def get_render_attribute(self, attribute):
- """Get attribute from render options.
-
- Args:
- attribute (str): name of attribute to be looked up.
-
- Returns:
- Attribute value
-
- """
- return self._get_attr("defaultRenderGlobals", attribute)
-
- def _get_attr(self, node_attr, attribute=None):
- """Return the value of the attribute in the renderlayer
-
- For readability this allows passing in the attribute in two ways.
-
- As a single argument:
- _get_attr("node.attr")
- Or as two arguments:
- _get_attr("node", "attr")
-
- Returns:
- Value of the attribute inside the layer this instance is set to.
-
- """
-
- if attribute is None:
- plug = node_attr
- else:
- plug = "{}.{}".format(node_attr, attribute)
-
- return lib.get_attr_in_layer(plug, layer=self.layer)
-
- @staticmethod
- def extract_separator(file_prefix):
- """Extract AOV separator character from the prefix.
-
- Default behavior extracts the part between the
- last occurrences of <RenderLayer> and <RenderPass>
-
- Todo:
- This code also triggers for V-Ray which overrides it explicitly
- so this code will invalidly debug log it couldn't extract the
- AOV separator even though it does set it in RenderProductsVray.
-
- Args:
- file_prefix (str): File prefix with tokens.
-
- Returns:
- str or None: prefix character if it can be extracted.
- """
- layer_tokens = ["<renderlayer>", "<layer>"]
- aov_tokens = ["<aov>", "<renderpass>"]
-
- def match_last(tokens, text):
- """regex match the last occurrence from a list of tokens"""
- pattern = "(?:.*)({})".format("|".join(tokens))
- return re.search(pattern, text, re.IGNORECASE)
-
- layer_match = match_last(layer_tokens, file_prefix)
- aov_match = match_last(aov_tokens, file_prefix)
- separator = None
- if layer_match and aov_match:
- matches = sorted((layer_match, aov_match),
- key=lambda match: match.end(1))
- separator = file_prefix[matches[0].end(1):matches[1].start(1)]
- return separator
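For illustration, with a made-up prefix the extraction above picks up the character sitting between the two tokens:

    sep = ARenderProducts.extract_separator(
        "maya/<Scene>/<RenderLayer>_<RenderPass>"
    )
    assert sep == "_"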
-
- def _get_layer_data(self):
- # type: () -> LayerMetadata
- _, scene_basename = os.path.split(cmds.file(q=True, loc=True))
- scene_name, _ = os.path.splitext(scene_basename)
- kwargs = {}
- file_prefix = self.get_renderer_prefix()
-
- # If the Render Layer belongs to a Render Setup layer then the
- # output name is based on the Render Setup Layer name without
- # the `rs_` prefix.
- layer_name = self.layer
- rs_layer = lib_rendersetup.get_rendersetup_layer(layer_name)
- if rs_layer:
- layer_name = rs_layer
-
- if self.layer == "defaultRenderLayer":
- # defaultRenderLayer renders as masterLayer
- layer_name = "masterLayer"
-
- separator = self.extract_separator(file_prefix)
- if separator:
- kwargs["aov_separator"] = separator
- else:
- log.debug("Couldn't extract aov separator from "
- "file prefix: {}".format(file_prefix))
-
- # todo: Support Custom Frames sequences 0,5-10,100-120
- # Deadline allows submitting renders with a custom frame list
- # to support those cases we might want to allow 'custom frames'
- # to be overridden to `ExpectFiles` class?
- return LayerMetadata(
- frameStart=int(self.get_render_attribute("startFrame")),
- frameEnd=int(self.get_render_attribute("endFrame")),
- frameStep=int(self.get_render_attribute("byFrameStep")),
- padding=int(self.get_render_attribute("extensionPadding")),
- # if we have a <camera> token in the prefix path we expect output
- # for every renderable camera in the layer.
- cameras=self.get_renderable_cameras(),
- sceneName=scene_name,
- layerName=layer_name,
- renderer=self.renderer,
- defaultExt=self._get_attr("defaultRenderGlobals.imfPluginKey"),
- filePrefix=file_prefix,
- **kwargs
- )
-
- def _generate_file_sequence(
- self, layer_data,
- force_aov_name=None,
- force_ext=None,
- force_cameras=None):
- # type: (LayerMetadata, str, str, list) -> list
- expected_files = []
- cameras = force_cameras or layer_data.cameras
- ext = force_ext or layer_data.defaultExt
- for cam in cameras:
- file_prefix = layer_data.filePrefix
- mappings = (
- (R_SUBSTITUTE_SCENE_TOKEN, layer_data.sceneName),
- (R_SUBSTITUTE_LAYER_TOKEN, layer_data.layerName),
- (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)),
- # this is required to remove unfilled aov token, for example
- # in Redshift
- (R_REMOVE_AOV_TOKEN, "") if not force_aov_name \
- else (R_SUBSTITUTE_AOV_TOKEN, force_aov_name),
-
- (R_CLEAN_FRAME_TOKEN, ""),
- (R_CLEAN_EXT_TOKEN, ""),
- )
-
- for regex, value in mappings:
- file_prefix = re.sub(regex, value, file_prefix)
-
- for frame in range(
- int(layer_data.frameStart),
- int(layer_data.frameEnd) + 1,
- int(layer_data.frameStep),
- ):
- frame_str = str(frame).rjust(layer_data.padding, "0")
- expected_files.append(
- "{}.{}.{}".format(file_prefix, frame_str, ext)
- )
- return expected_files
-
- def get_files(self, product):
- # type: (RenderProduct) -> list
- """Return list of expected files.
-
- It will translate render token strings ('<RenderPass>', etc.) to
- their values. This task is tricky as every renderer deals with this
- differently. That's why we expose `get_files` as a method on the
- Renderer class so it can be overridden for complex cases.
-
- Args:
- product (RenderProduct): Render product to be used for file
- generation.
-
- Returns:
- List of files
-
- """
- return self._generate_file_sequence(
- self.layer_data,
- force_aov_name=product.productName,
- force_ext=product.ext,
- force_cameras=[product.camera]
- )
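A hypothetical driver for the file generation above, assuming a Render Setup layer named "rs_char" exists in the open scene; one path is printed per camera, product and frame:

    layer_products = get("rs_char")
    for product in layer_products.layer_data.products:
        for path in layer_products.get_files(product):
            print(path)  # e.g. renders/shot010/char/char_diffuse.0001.exr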
-
- def get_renderable_cameras(self):
- # type: () -> list
- """Get all renderable camera transforms.
-
- Returns:
- list: list of renderable cameras.
-
- """
-
- renderable_cameras = [
- cam for cam in cmds.ls(cameras=True)
- if self._get_attr(cam, "renderable")
- ]
-
- # The output produces a sanitized name for <camera> using its
- # shortest unique path of the transform so we'll return
- # at least that unique path. This could include a parent
- # name too when two cameras have the same name but are
- # in a different hierarchy, e.g. "group1|cam" and "group2|cam"
- def get_name(camera):
- return cmds.ls(cmds.listRelatives(camera,
- parent=True,
- fullPath=True))[0]
-
- return [get_name(cam) for cam in renderable_cameras]
-
-
-class RenderProductsArnold(ARenderProducts):
- """Render products for Arnold renderer.
-
- References:
- mtoa.utils.getFileName()
- mtoa.utils.ui.common.updateArnoldTargetFilePreview()
-
- Notes:
- - Output Denoising AOVs are not currently included.
- - Only Frame/Animation ext: name.#.ext is supported.
- - Use Custom extension is not supported.
- - <RenderPass> and <RenderPassType> tokens not tested
- - With Merge AOVs but <RenderPass> in File Name Prefix Arnold
- will still NOT merge the aovs. This class correctly resolves
- it - but user should be aware.
- - File Path Prefix overrides per AOV driver are not implemented
-
- Attributes:
- aiDriverExtension (dict): Arnold AOV driver extension mapping.
- Is there a better way?
- renderer (str): name of renderer.
-
- """
- renderer = "arnold"
- aiDriverExtension = {
- "jpeg": "jpg",
- "exr": "exr",
- "deepexr": "exr",
- "png": "png",
- "tiff": "tif",
- "mtoa_shaders": "ass", # TODO: research what those last two should be
- "maya": "",
- }
-
- def get_renderer_prefix(self):
-
- prefix = super(RenderProductsArnold, self).get_renderer_prefix()
- merge_aovs = self._get_attr("defaultArnoldDriver.mergeAOVs")
- if not merge_aovs and "<renderpass>" not in prefix.lower():
- # When Merge AOVs is disabled and the <renderpass> token is not
- # present, Arnold prepends <RenderPass>/ to the output path.
- # todo: It's untested what happens if AOV driver has an
- # explicit override path prefix.
- prefix = "<RenderPass>/" + prefix
-
- return prefix
-
- def get_multipart(self):
- multipart = False
- multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
- merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
- if multilayer or merge_AOVs:
- multipart = True
-
- return multipart
-
- def _get_aov_render_products(self, aov, cameras=None):
- """Return all render products for the AOV"""
-
- products = []
- aov_name = self._get_attr(aov, "name")
- ai_drivers = cmds.listConnections("{}.outputs".format(aov),
- source=True,
- destination=False,
- type="aiAOVDriver") or []
- if not cameras:
- cameras = [
- self.sanitize_camera_name(
- self.get_renderable_cameras()[0]
- )
- ]
-
- for ai_driver in ai_drivers:
- colorspace = self._get_colorspace(
- ai_driver + ".colorManagement"
- )
- # todo: check aiAOVDriver.prefix as it could have
- # a custom path prefix set for this driver
-
- # Skip Drivers set only for GUI
- # 0: GUI, 1: Batch, 2: GUI and Batch
- output_mode = self._get_attr(ai_driver, "outputMode")
- if output_mode == 0: # GUI only
- log.warning("%s has Output Mode set to GUI, "
- "skipping...", ai_driver)
- continue
-
- ai_translator = self._get_attr(ai_driver, "aiTranslator")
- try:
- ext = self.aiDriverExtension[ai_translator]
- except KeyError:
- raise AOVError(
- "Unrecognized arnold driver format "
- "for AOV - {}".format(aov_name)
- )
-
- # If aov RGBA is selected, arnold will translate it to `beauty`
- name = aov_name
- if name == "RGBA":
- name = "beauty"
-
- # Support Arnold light groups for AOVs
- # Global AOV: When disabled the main layer is
- # not written: `{pass}`
- # All Light Groups: When enabled, a `{pass}_lgroups` file is
- # written and is always merged into a
- # single file
- # Light Groups List: When set, a product per light
- # group is written
- # e.g. {pass}_front, {pass}_rim
- global_aov = self._get_attr(aov, "globalAov")
- if global_aov:
- for camera in cameras:
- product = RenderProduct(
- productName=name,
- ext=ext,
- aov=aov_name,
- driver=ai_driver,
- multipart=self.multipart,
- camera=camera,
- colorspace=colorspace
- )
- products.append(product)
-
- all_light_groups = self._get_attr(aov, "lightGroups")
- if all_light_groups:
- # All light groups is enabled. A single multipart
- # Render Product
- for camera in cameras:
- product = RenderProduct(
- productName=name + "_lgroups",
- ext=ext,
- aov=aov_name,
- driver=ai_driver,
- # Always multichannel output
- multipart=True,
- camera=camera,
- colorspace=colorspace
- )
- products.append(product)
- else:
- value = self._get_attr(aov, "lightGroupsList")
- if not value:
- continue
- selected_light_groups = value.strip().split()
- for light_group in selected_light_groups:
- # Render Product per selected light group
- aov_light_group_name = "{}_{}".format(name, light_group)
- for camera in cameras:
- product = RenderProduct(
- productName=aov_light_group_name,
- aov=aov_name,
- driver=ai_driver,
- ext=ext,
- camera=camera,
- colorspace=colorspace
- )
- products.append(product)
-
- return products
-
- def _get_colorspace(self, attribute):
- """Resolve colorspace from Arnold settings."""
-
- def _view_transform():
- preferences = lib.get_color_management_preferences()
- views_data = get_ocio_config_views(preferences["config"])
- view_data = views_data[
- "{}/{}".format(preferences["display"], preferences["view"])
- ]
- return view_data["colorspace"]
-
- def _raw():
- preferences = lib.get_color_management_preferences()
- return preferences["rendering_space"]
-
- resolved_values = {
- "Raw": _raw,
- "Use View Transform": _view_transform,
- # Default. Same as Maya Preferences.
- "Use Output Transform": lib.get_color_management_output_transform
- }
- return resolved_values[self._get_attr(attribute)]()
-
- def get_render_products(self):
- """Get all AOVs.
-
- See Also:
- :func:`ARenderProducts.get_render_products()`
-
- Raises:
- :class:`AOVError`: If AOV cannot be determined.
-
- """
-
- if not cmds.ls("defaultArnoldRenderOptions", type="aiOptions"):
- # this occurs when the Render Settings window was not opened yet.
- # In that case there are no Arnold options created, so querying
- # for AOVs would fail. We terminate here as there are no AOVs
- # specified then. This state will most probably fail later on
- # some validator anyway.
- return []
-
- # check if camera token is in prefix. If so, and we have a list of
- # renderable cameras, generate a render product for each and every
- # one of them.
- cameras = [
- self.sanitize_camera_name(c)
- for c in self.get_renderable_cameras()
- ]
-
- default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
- colorspace = self._get_colorspace(
- "defaultArnoldDriver.colorManagement"
- )
- beauty_products = [
- RenderProduct(
- productName="beauty",
- ext=default_ext,
- driver="defaultArnoldDriver",
- camera=camera,
- colorspace=colorspace
- ) for camera in cameras
- ]
-
- # AOVs > Legacy > Maya Render View > Mode
- aovs_enabled = bool(
- self._get_attr("defaultArnoldRenderOptions.aovMode")
- )
- if not aovs_enabled:
- return beauty_products
-
- # Common > File Output > Merge AOVs or <RenderPass>
- # We don't need to check for Merge AOVs due to overridden
- # `get_renderer_prefix()` behavior which forces <renderpass>
- has_renderpass_token = (
- "<renderpass>" in self.layer_data.filePrefix.lower()
- )
- if not has_renderpass_token:
- for product in beauty_products:
- product.multipart = True
- return beauty_products
-
- # AOVs are set to be rendered separately. We should expect a
- # <RenderPass> token in the path.
- # handle aovs from references
- use_ref_aovs = self.render_instance.data.get(
- "useReferencedAovs", False) or False
-
- aovs = cmds.ls(type="aiAOV")
- if not use_ref_aovs:
- ref_aovs = cmds.ls(type="aiAOV", referencedNodes=True)
- aovs = list(set(aovs) - set(ref_aovs))
-
- products = []
-
- # Append the AOV products
- for aov in aovs:
- enabled = self._get_attr(aov, "enabled")
- if not enabled:
- continue
-
- # For now stick to the legacy output format.
- aov_products = self._get_aov_render_products(aov, cameras)
- products.extend(aov_products)
-
- if all(product.aov != "RGBA" for product in products):
- # Append default 'beauty' as this is arnolds default.
- # However, it is excluded whenever a RGBA pass is enabled.
- # For legibility add the beauty layer as first entry
- products += beauty_products
-
- # TODO: Output Denoising AOVs?
-
- return products
-
-
-class RenderProductsVray(ARenderProducts):
- """Expected files for V-Ray renderer.
-
- Notes:
- - "Disabled" animation incorrectly returns frames in filename
- - "Renumber Frames" is not supported
-
- Reference:
- vrayAddRenderElementImpl() in vrayCreateRenderElementsTab.mel
-
- """
- # todo: detect whether rendering with V-Ray GPU + whether AOV is supported
-
- renderer = "vray"
-
- def get_multipart(self):
- multipart = False
- image_format = self._get_attr("vraySettings.imageFormatStr")
- if image_format == "exr (multichannel)":
- multipart = True
-
- return multipart
-
- def get_renderer_prefix(self):
- # type: () -> str
- """Get image prefix for V-Ray.
-
- This overrides :func:`ARenderProducts.get_renderer_prefix()` as
- we must add the `<aov>` token manually. This is done only for
- non-multipart outputs, where the `<aov>` token doesn't make sense.
-
- See also:
- :func:`ARenderProducts.get_renderer_prefix()`
-
- """
- prefix = super(RenderProductsVray, self).get_renderer_prefix()
- if self.multipart:
- return prefix
- aov_separator = self._get_aov_separator()
- prefix = "{}{}<aov>".format(prefix, aov_separator)
- return prefix
-
- def _get_aov_separator(self):
- # type: () -> str
- """Return the V-Ray AOV/Render Elements separator"""
- return self._get_attr(
- "vraySettings.fileNameRenderElementSeparator"
- )
-
- def _get_layer_data(self):
- # type: () -> LayerMetadata
- """Override to get vray specific extension."""
- layer_data = super(RenderProductsVray, self)._get_layer_data()
-
- default_ext = self._get_attr("vraySettings.imageFormatStr")
- if default_ext in ["exr (multichannel)", "exr (deep)"]:
- default_ext = "exr"
- layer_data.defaultExt = default_ext
- layer_data.padding = self._get_attr("vraySettings.fileNamePadding")
-
- layer_data.aov_separator = self._get_aov_separator()
-
- return layer_data
-
- def get_render_products(self):
- """Get all AOVs.
-
- See Also:
- :func:`ARenderProducts.get_render_products()`
-
- """
- if not cmds.ls("vraySettings", type="VRaySettingsNode"):
- # this occurs when the Render Settings window was not opened yet.
- # In that case there are no V-Ray options created, so querying
- # for AOVs would fail. We terminate here as there are no AOVs
- # specified then. This state will most probably fail later on
- # some validator anyway.
- return []
-
- cameras = [
- self.sanitize_camera_name(c)
- for c in self.get_renderable_cameras()
- ]
-
- image_format_str = self._get_attr("vraySettings.imageFormatStr")
- default_ext = image_format_str
- if default_ext in {"exr (multichannel)", "exr (deep)"}:
- default_ext = "exr"
-
- colorspace = lib.get_color_management_output_transform()
- products = []
-
- # add beauty as default when not disabled
- dont_save_rgb = self._get_attr("vraySettings.dontSaveRgbChannel")
- if not dont_save_rgb:
- for camera in cameras:
- products.append(
- RenderProduct(
- productName="",
- ext=default_ext,
- camera=camera,
- colorspace=colorspace,
- multipart=self.multipart
- )
- )
-
- # separate alpha file
- separate_alpha = self._get_attr("vraySettings.separateAlpha")
- if separate_alpha:
- for camera in cameras:
- products.append(
- RenderProduct(
- productName="Alpha",
- ext=default_ext,
- camera=camera,
- colorspace=colorspace,
- multipart=self.multipart
- )
- )
- if self.multipart:
- # AOVs are merged in the multi-channel file, only the main layer
- # is rendered
- return products
-
- # handle aovs from references
- use_ref_aovs = self.render_instance.data.get(
- "useReferencedAovs", False) or False
-
- # this will have a list of all aovs no matter if they are coming from
- # reference or not.
- aov_types = ["VRayRenderElement", "VRayRenderElementSet"] - aovs = cmds.ls(type=aov_types) - if not use_ref_aovs: - ref_aovs = cmds.ls(type=aov_types, referencedNodes=True) or [] - aovs = list(set(aovs) - set(ref_aovs)) - - for aov in aovs: - enabled = self._get_attr(aov, "enabled") - if not enabled: - continue - - class_type = self._get_attr(aov + ".vrayClassType") - if class_type == "LightMixElement": - # Special case which doesn't define a name by itself but - # instead seems to output multiple Render Products, - # specifically "Self_Illumination" and "Environment" - product_names = ["Self_Illumination", "Environment"] - for camera in cameras: - for name in product_names: - product = RenderProduct(productName=name, - ext=default_ext, - aov=aov, - camera=camera, - colorspace=colorspace) - products.append(product) - # Continue as we've processed this special case AOV - continue - - aov_name = self._get_vray_aov_name(aov) - for camera in cameras: - product = RenderProduct( - productName=aov_name, - ext=default_ext, - aov=aov, - camera=camera, - colorspace=colorspace - ) - products.append(product) - - return products - - def _get_vray_aov_attr(self, node, prefix): - """Get value for attribute that starts with key in name - - V-Ray AOVs have attribute names that include the type - of AOV in the attribute name, for example: - - vray_filename_rawdiffuse - - vray_filename_velocity - - vray_name_gi - - vray_explicit_name_extratex - - To simplify querying the "vray_filename" or "vray_name" - attributes we just find the first attribute that has - that particular "{prefix}_" in the attribute name. - - Args: - node (str): AOV node name - prefix (str): Prefix of the attribute name. - - Returns: - Value of the attribute if it exists, else None - - """ - attrs = cmds.listAttr(node, string="{}_*".format(prefix)) - if not attrs: - return None - - assert len(attrs) == 1, "Found more than one attribute: %s" % attrs - attr = attrs[0] - - return self._get_attr(node, attr) - - def _get_vray_aov_name(self, node): - """Get AOVs name from Vray. - - Args: - node (str): aov node name. - - Returns: - str: aov name. 
-
- """
-
- vray_explicit_name = self._get_vray_aov_attr(node,
- "vray_explicit_name")
- vray_filename = self._get_vray_aov_attr(node, "vray_filename")
- vray_name = self._get_vray_aov_attr(node, "vray_name")
- final_name = vray_explicit_name or vray_filename or vray_name or None
-
- class_type = self._get_attr(node, "vrayClassType")
- if not vray_explicit_name:
- # Explicit name takes precedence and overrides completely
- # otherwise add the connected node names to the special cases
- # Any namespace colon ':' gets replaced to underscore '_'
- # so we sanitize using `sanitize_camera_name`
- def _get_source_name(node, attr):
- """Return sanitized name of input connection to attribute"""
- plug = "{}.{}".format(node, attr)
- connections = cmds.listConnections(plug,
- source=True,
- destination=False)
- if connections:
- return self.sanitize_camera_name(connections[0])
-
- if class_type == "MaterialSelectElement":
- # Name suffix is based on the connected material or set
- attrs = [
- "vray_mtllist_mtlselect",
- "vray_mtl_mtlselect"
- ]
- for attribute in attrs:
- name = _get_source_name(node, attribute)
- if name:
- final_name += '_{}'.format(name)
- break
- else:
- log.warning("Material Select Element has no "
- "selected materials: %s", node)
-
- elif class_type == "ExtraTexElement":
- # Name suffix is based on the connected textures
- extratex_type = self._get_attr(node, "vray_type_extratex")
- attr = {
- 0: "vray_texture_extratex",
- 1: "vray_float_texture_extratex",
- 2: "vray_int_texture_extratex",
- }.get(extratex_type)
- name = _get_source_name(node, attr)
- if name:
- final_name += '_{}'.format(name)
- else:
- log.warning("Extratex Element has no incoming texture")
-
- assert final_name, "Output filename not defined for AOV: %s" % node
-
- return final_name
-
-
-class RenderProductsRedshift(ARenderProducts):
- """Expected files for Redshift renderer.
-
- Notes:
- - `get_files()` only supports rendering with frames, like "animation"
-
- Attributes:
-
- unmerged_aovs (list): Name of aovs that are not merged into resulting
- exr and we need them specified in Render Products output.
-
- """
-
- renderer = "redshift"
- unmerged_aovs = {"Cryptomatte"}
-
- def get_files(self, product):
- # When outputting AOVs we need to replace Redshift specific AOV tokens
- # with Maya render tokens for generating file sequences. We validate to
- # a specific AOV fileprefix so we only need to account for one
- # replacement.
- if not product.multipart and product.driver:
- file_prefix = self._get_attr(product.driver + ".filePrefix")
- self.layer_data.filePrefix = file_prefix.replace(
- "<BeautyPath>/<BeautyFile>",
- "<Scene>/<RenderLayer>/<RenderLayer>"
- )
-
- return super(RenderProductsRedshift, self).get_files(product)
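To illustrate the prefix rewrite in `get_files` above, with an invented Redshift AOV driver prefix:

    file_prefix = "<BeautyPath>/<BeautyFile>_<aov>"
    file_prefix = file_prefix.replace(
        "<BeautyPath>/<BeautyFile>",
        "<Scene>/<RenderLayer>/<RenderLayer>"
    )
    # -> "<Scene>/<RenderLayer>/<RenderLayer>_<aov>"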
-
- def get_multipart(self):
- # For Redshift we don't directly return upon forcing multilayer
- # due to some AOVs still being written into separate files,
- # like Cryptomatte.
- # AOVs are merged in the multi-channel file
- multipart = False
- force_layer = bool(
- self._get_attr("redshiftOptions.exrForceMultilayer")
- )
- if force_layer:
- multipart = True
-
- return multipart
-
- def get_renderer_prefix(self):
- """Get image prefix for Redshift.
-
- This overrides :func:`ARenderProducts.get_renderer_prefix()` as
- we must add the `<aov>` token manually. This is done only for
- non-multipart outputs, where the `<aov>` token doesn't make sense.
-
- See also:
- :func:`ARenderProducts.get_renderer_prefix()`
-
- """
- prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
- if self.multipart:
- return prefix
- separator = self.extract_separator(prefix)
- prefix = "{}{}<aov>".format(prefix, separator or "_")
- return prefix
-
- def get_render_products(self):
- """Get all AOVs.
-
- See Also:
- :func:`ARenderProducts.get_render_products()`
-
- """
-
- if not cmds.ls("redshiftOptions", type="RedshiftOptions"):
- # this occurs when the Render Settings window was not opened yet.
- # In that case there are no Redshift options created, so querying
- # for AOVs would fail. We terminate here as there are no AOVs
- # specified then. This state will most probably fail later on
- # some validator anyway.
- return []
-
- cameras = [
- self.sanitize_camera_name(c)
- for c in self.get_renderable_cameras()
- ]
-
- # Get Redshift Extension from image format
- image_format = self._get_attr("redshiftOptions.imageFormat") # integer
- ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
-
- use_ref_aovs = self.render_instance.data.get(
- "useReferencedAovs", False) or False
-
- aovs = cmds.ls(type="RedshiftAOV")
- if not use_ref_aovs:
- ref_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=True)
- aovs = list(set(aovs) - set(ref_aovs))
-
- products = []
- light_groups_enabled = False
- has_beauty_aov = False
- colorspace = lib.get_color_management_output_transform()
- for aov in aovs:
- enabled = self._get_attr(aov, "enabled")
- if not enabled:
- continue
-
- aov_type = self._get_attr(aov, "aovType")
- if self.multipart and aov_type not in self.unmerged_aovs:
- continue
-
- # Any AOVs that still get processed, like Cryptomatte
- # by themselves are not multipart files.
-
- # Redshift skips rendering of masterlayer without AOV suffix
- # when a Beauty AOV is rendered. It overrides the main layer.
- if aov_type == "Beauty":
- has_beauty_aov = True
-
- aov_name = self._get_attr(aov, "name")
-
- # Support light Groups
- light_groups = []
- if self._get_attr(aov, "supportsLightGroups"):
- all_light_groups = self._get_attr(aov, "allLightGroups")
- if all_light_groups:
- # All light groups is enabled
- light_groups = self._get_redshift_light_groups()
- else:
- value = self._get_attr(aov, "lightGroupList")
- # note: string value can return None when never set
- if value:
- selected_light_groups = value.strip().split()
- light_groups = selected_light_groups
-
- for light_group in light_groups:
- aov_light_group_name = "{}_{}".format(aov_name,
- light_group)
- for camera in cameras:
- product = RenderProduct(
- productName=aov_light_group_name,
- aov=aov_name,
- ext=ext,
- multipart=False,
- camera=camera,
- driver=aov,
- colorspace=colorspace)
- products.append(product)
-
- if light_groups:
- light_groups_enabled = True
-
- # Redshift AOV Light Select always renders the global AOV
- # even when light groups are present so we don't need to
- # exclude it when light groups are active
- for camera in cameras:
- product = RenderProduct(productName=aov_name,
- aov=aov_name,
- ext=ext,
- multipart=False,
- camera=camera,
- driver=aov,
- colorspace=colorspace)
- products.append(product)
-
- # When a Beauty AOV is added manually, it will be rendered as
- # 'Beauty_other' in the file name and the "standard" beauty will
- # have 'Beauty' in its name. When disabled, standard output will be
- # without `Beauty`. Except when using light groups.
- if light_groups_enabled: - return products - - beauty_name = "BeautyAux" if has_beauty_aov else "" - for camera in cameras: - products.insert(0, - RenderProduct(productName=beauty_name, - ext=ext, - multipart=self.multipart, - camera=camera, - colorspace=colorspace)) - - return products - - @staticmethod - def _get_redshift_light_groups(): - return sorted(mel.eval("redshiftAllAovLightGroups")) - - -class RenderProductsRenderman(ARenderProducts): - """Expected files for Renderman renderer. - - Warning: - This is very rudimentary and needs more love and testing. - """ - - renderer = "renderman" - unmerged_aovs = {"PxrCryptomatte"} - - def get_multipart(self): - # Implemented as display specific in "get_render_products". - return False - - def get_render_products(self): - """Get all AOVs. - - See Also: - :func:`ARenderProducts.get_render_products()` - - """ - from rfm2.api.displays import get_displays # noqa - - colorspace = lib.get_color_management_output_transform() - - cameras = [ - self.sanitize_camera_name(c) - for c in self.get_renderable_cameras() - ] - - if not cameras: - cameras = [ - self.sanitize_camera_name( - self.get_renderable_cameras()[0]) - ] - products = [] - - # NOTE: This is guessing extensions from renderman display types. - # Some of them are just framebuffers, d_texture format can be - # set in display setting. We set those now to None, but it - # should be handled more gracefully. - display_types = { - "d_deepexr": "exr", - "d_it": None, - "d_null": None, - "d_openexr": "exr", - "d_png": "png", - "d_pointcloud": "ptc", - "d_targa": "tga", - "d_texture": None, - "d_tiff": "tif" - } - - displays = get_displays(override_dst="render")["displays"] - for name, display in displays.items(): - enabled = display["params"]["enable"]["value"] - if not enabled: - continue - - # Skip display types not producing any file output. - # Is there a better way to do it? - if not display_types.get(display["driverNode"]["type"]): - continue - - has_cryptomatte = cmds.ls(type=self.unmerged_aovs) - matte_enabled = False - if has_cryptomatte: - for cryptomatte in has_cryptomatte: - cryptomatte_aov = cryptomatte - matte_name = "cryptomatte" - rman_globals = cmds.listConnections(cryptomatte + - ".message") - if rman_globals: - matte_enabled = True - - aov_name = name - if aov_name == "rmanDefaultDisplay": - aov_name = "beauty" - - extensions = display_types.get( - display["driverNode"]["type"], "exr") - - for camera in cameras: - # Create render product and set it as multipart only on - # display types supporting it. In all other cases, Renderman - # will create separate output per channel. - if display["driverNode"]["type"] in ["d_openexr", "d_deepexr", "d_tiff"]: # noqa - product = RenderProduct( - productName=aov_name, - ext=extensions, - camera=camera, - multipart=True, - colorspace=colorspace - ) - - if has_cryptomatte and matte_enabled: - cryptomatte = RenderProduct( - productName=matte_name, - aov=cryptomatte_aov, - ext=extensions, - camera=camera, - multipart=True, - colorspace=colorspace - ) - else: - # this code should handle the case where no multipart - # capable format is selected. But since it involves - # shady logic to determine what channel become what - # lets not do that as all productions will use exr anyway. 
- """
- for channel in display['params']['displayChannels']['value']: # noqa
- product = RenderProduct(
- productName="{}_{}".format(aov_name, channel),
- ext=extensions,
- camera=camera,
- multipart=False
- )
- """
- raise UnsupportedImageFormatException(
- "Only exr, deep exr and tiff formats are supported.")
-
- products.append(product)
-
- if has_cryptomatte and matte_enabled:
- products.append(cryptomatte)
-
- return products
-
- def get_files(self, product):
- """Get expected files.
-
- """
- files = super(RenderProductsRenderman, self).get_files(product)
-
- layer_data = self.layer_data
- new_files = []
-
- resolved_image_dir = re.sub("<scene>", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE) # noqa: E501
- resolved_image_dir = re.sub("<layer>", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE) # noqa: E501
- for file in files:
- new_file = "{}/{}".format(resolved_image_dir, file)
- new_files.append(new_file)
-
- return new_files
-
-
-class RenderProductsMayaHardware(ARenderProducts):
- """Expected files for MayaHardware renderer."""
-
- renderer = "mayahardware2"
-
- extensions = [
- {"label": "JPEG", "index": 8, "extension": "jpg"},
- {"label": "PNG", "index": 32, "extension": "png"},
- {"label": "EXR(exr)", "index": 40, "extension": "exr"}
- ]
-
- def get_multipart(self):
- # MayaHardware does not support multipart EXRs.
- return False
-
- def _get_extension(self, value):
- result = None
- if isinstance(value, int):
- extensions = {
- extension["index"]: extension["extension"]
- for extension in self.extensions
- }
- try:
- result = extensions[value]
- except KeyError:
- raise NotImplementedError(
- "Could not find extension for {}".format(value)
- )
-
- if isinstance(value, six.string_types):
- extensions = {
- extension["label"]: extension["extension"]
- for extension in self.extensions
- }
- try:
- result = extensions[value]
- except KeyError:
- raise NotImplementedError(
- "Could not find extension for {}".format(value)
- )
-
- if not result:
- raise NotImplementedError(
- "Could not find extension for {}".format(value)
- )
-
- return result
-
- def get_render_products(self):
- """Get all AOVs.
-
- See Also:
- :func:`ARenderProducts.get_render_products()`
- """
- ext = self._get_extension(
- self._get_attr("defaultRenderGlobals.imageFormat")
- )
-
- products = []
- for cam in self.get_renderable_cameras():
- product = RenderProduct(
- productName="beauty",
- ext=ext,
- camera=cam,
- colorspace=lib.get_color_management_output_transform()
- )
- products.append(product)
-
- return products
-
-
-class AOVError(Exception):
- """Custom exception for determining AOVs."""
-
-
-class UnsupportedRendererException(Exception):
- """Custom exception.
-
- Raised when requesting data from unsupported renderer.
- """ - - -class UnsupportedImageFormatException(Exception): - """Custom exception to report unsupported output image format.""" diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py deleted file mode 100644 index 1f964589a9..0000000000 --- a/openpype/hosts/maya/api/lib_rendersettings.py +++ /dev/null @@ -1,408 +0,0 @@ -# -*- coding: utf-8 -*- -"""Class for handling Render Settings.""" -import six -import sys - -from openpype.lib import Logger -from openpype.settings import get_project_settings - -from openpype.pipeline import CreatorError, get_current_project_name -from openpype.pipeline.context_tools import get_current_project_asset -from openpype.hosts.maya.api.lib import reset_frame_range - - -class RenderSettings(object): - - _image_prefix_nodes = { - 'vray': 'vraySettings.fileNamePrefix', - 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'rmanGlobals.imageFileFormat', - 'redshift': 'defaultRenderGlobals.imageFilePrefix', - 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix' - } - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - - log = Logger.get_logger("RenderSettings") - - @classmethod - def get_image_prefix_attr(cls, renderer): - return cls._image_prefix_nodes[renderer] - - @staticmethod - def get_padding_attr(renderer): - """Return attribute for renderer that defines frame padding amount""" - if renderer == "vray": - return "vraySettings.fileNamePadding" - else: - return "defaultRenderGlobals.extensionPadding" - - def __init__(self, project_settings=None): - if not project_settings: - project_settings = get_project_settings( - get_current_project_name() - ) - render_settings = project_settings["maya"]["RenderSettings"] - image_prefixes = { - "vray": render_settings["vray_renderer"]["image_prefix"], - "arnold": render_settings["arnold_renderer"]["image_prefix"], - "renderman": render_settings["renderman_renderer"]["image_prefix"], - "redshift": render_settings["redshift_renderer"]["image_prefix"] - } - - # TODO probably should be stored to more explicit attribute - # Renderman only - renderman_settings = render_settings["renderman_renderer"] - _image_dir = { - "renderman": renderman_settings["image_dir"], - "cryptomatte": renderman_settings["cryptomatte_dir"], - "imageDisplay": renderman_settings["imageDisplay_dir"], - "watermark": renderman_settings["watermark_dir"] - } - self._image_prefixes = image_prefixes - self._image_dir = _image_dir - self._project_settings = project_settings - - def set_default_renderer_settings(self, renderer=None): - """Set basic settings based on renderer.""" - # Not all hosts can import this module. 
- from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - if not renderer: - renderer = cmds.getAttr( - 'defaultRenderGlobals.currentRenderer').lower() - - asset_doc = get_current_project_asset() - # project_settings/maya/create/CreateRender/aov_separator - try: - aov_separator = self._aov_chars[( - self._project_settings["maya"] - ["RenderSettings"] - ["aov_separator"] - )] - except KeyError: - aov_separator = "_" - reset_frame = self._project_settings["maya"]["RenderSettings"]["reset_current_frame"] # noqa - - if reset_frame: - start_frame = cmds.getAttr("defaultRenderGlobals.startFrame") - cmds.currentTime(start_frame, edit=True) - - if renderer in self._image_prefix_nodes: - prefix = self._image_prefixes[renderer] - prefix = prefix.replace("{aov_separator}", aov_separator) - cmds.setAttr(self._image_prefix_nodes[renderer], - prefix, type="string") # noqa - else: - print("{0} isn't a supported renderer to autoset settings.".format(renderer)) # noqa - # TODO: handle not having res values in the doc - width = asset_doc["data"].get("resolutionWidth") - height = asset_doc["data"].get("resolutionHeight") - - if renderer == "arnold": - # set renderer settings for Arnold from project settings - self._set_arnold_settings(width, height) - - if renderer == "vray": - self._set_vray_settings(aov_separator, width, height) - - if renderer == "redshift": - self._set_redshift_settings(width, height) - mel.eval("redshiftUpdateActiveAovList") - - if renderer == "renderman": - image_dir = self._image_dir["renderman"] - cmds.setAttr("rmanGlobals.imageOutputDir", - image_dir, type="string") - self._set_renderman_settings(width, height, - aov_separator) - - def _set_arnold_settings(self, width, height): - """Sets settings for Arnold.""" - from mtoa.core import createOptions # noqa - from mtoa.aovs import AOVInterface # noqa - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - createOptions() - render_settings = self._project_settings["maya"]["RenderSettings"] - arnold_render_presets = render_settings["arnold_renderer"] # noqa - # Force resetting settings and AOV list to avoid having to deal with - # AOV checking logic, for now. - # This is a work around because the standard - # function to revert render settings does not reset AOVs list in MtoA - # Fetch current aovs in case there's any. 
- current_aovs = AOVInterface().getAOVs() - remove_aovs = render_settings["remove_aovs"] - if remove_aovs: - # Remove fetched AOVs - AOVInterface().removeAOVs(current_aovs) - mel.eval("unifiedRenderGlobalsRevertToDefault") - img_ext = arnold_render_presets["image_format"] - img_prefix = arnold_render_presets["image_prefix"] - aovs = arnold_render_presets["aov_list"] - img_tiled = arnold_render_presets["tiled"] - multi_exr = arnold_render_presets["multilayer_exr"] - additional_options = arnold_render_presets["additional_options"] - for aov in aovs: - if aov in current_aovs and not remove_aovs: - continue - AOVInterface('defaultArnoldRenderOptions').addAOV(aov) - - cmds.setAttr("defaultResolution.width", width) - cmds.setAttr("defaultResolution.height", height) - - self._set_global_output_settings() - - cmds.setAttr( - "defaultRenderGlobals.imageFilePrefix", img_prefix, type="string") - - cmds.setAttr( - "defaultArnoldDriver.ai_translator", img_ext, type="string") - - cmds.setAttr( - "defaultArnoldDriver.exrTiled", img_tiled) - - cmds.setAttr( - "defaultArnoldDriver.mergeAOVs", multi_exr) - self._additional_attribs_setter(additional_options) - reset_frame_range(playback=False, fps=False, render=True) - - def _set_redshift_settings(self, width, height): - """Sets settings for Redshift.""" - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - render_settings = self._project_settings["maya"]["RenderSettings"] - redshift_render_presets = render_settings["redshift_renderer"] - - remove_aovs = render_settings["remove_aovs"] - all_rs_aovs = cmds.ls(type='RedshiftAOV') - if remove_aovs: - for aov in all_rs_aovs: - enabled = cmds.getAttr("{}.enabled".format(aov)) - if enabled: - cmds.delete(aov) - - redshift_aovs = redshift_render_presets["aov_list"] - # list all the aovs - all_rs_aovs = cmds.ls(type='RedshiftAOV') - for rs_aov in redshift_aovs: - rs_layername = "rsAov_{}".format(rs_aov.replace(" ", "")) - if rs_layername in all_rs_aovs: - continue - cmds.rsCreateAov(type=rs_aov) - # update the AOV list - mel.eval("redshiftUpdateActiveAovList") - - rs_p_engine = redshift_render_presets["primary_gi_engine"] - rs_s_engine = redshift_render_presets["secondary_gi_engine"] - - if int(rs_p_engine) or int(rs_s_engine) != 0: - cmds.setAttr("redshiftOptions.GIEnabled", 1) - if int(rs_p_engine) == 0: - # reset the primary GI Engine as default - cmds.setAttr("redshiftOptions.primaryGIEngine", 4) - if int(rs_s_engine) == 0: - # reset the secondary GI Engine as default - cmds.setAttr("redshiftOptions.secondaryGIEngine", 2) - else: - cmds.setAttr("redshiftOptions.GIEnabled", 0) - - cmds.setAttr("redshiftOptions.primaryGIEngine", int(rs_p_engine)) - cmds.setAttr("redshiftOptions.secondaryGIEngine", int(rs_s_engine)) - - additional_options = redshift_render_presets["additional_options"] - ext = redshift_render_presets["image_format"] - img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"] - img_ext = img_exts.index(ext) - - self._set_global_output_settings() - cmds.setAttr("redshiftOptions.imageFormat", img_ext) - cmds.setAttr("defaultResolution.width", width) - cmds.setAttr("defaultResolution.height", height) - self._additional_attribs_setter(additional_options) - - def _set_renderman_settings(self, width, height, aov_separator): - """Sets settings for Renderman""" - # Not all hosts can import this module. 
-        from maya import cmds  # noqa: F401
-        import maya.mel as mel  # noqa: F401
-
-        rman_render_presets = (
-            self._project_settings
-            ["maya"]
-            ["RenderSettings"]
-            ["renderman_renderer"]
-        )
-        display_filters = rman_render_presets["display_filters"]
-        d_filters_number = len(display_filters)
-        for i in range(d_filters_number):
-            d_node = cmds.ls(typ=display_filters[i])
-            if len(d_node) > 0:
-                filter_nodes = d_node[0]
-            else:
-                filter_nodes = cmds.createNode(display_filters[i])
-
-            cmds.connectAttr(filter_nodes + ".message",
-                             "rmanGlobals.displayFilters[%i]" % i,
-                             force=True)
-            if filter_nodes.startswith("PxrImageDisplayFilter"):
-                imageDisplay_dir = self._image_dir["imageDisplay"]
-                imageDisplay_dir = imageDisplay_dir.replace("{aov_separator}",
-                                                            aov_separator)
-                cmds.setAttr(filter_nodes + ".filename",
-                             imageDisplay_dir, type="string")
-
-        sample_filters = rman_render_presets["sample_filters"]
-        s_filters_number = len(sample_filters)
-        for n in range(s_filters_number):
-            s_node = cmds.ls(typ=sample_filters[n])
-            if len(s_node) > 0:
-                filter_nodes = s_node[0]
-            else:
-                filter_nodes = cmds.createNode(sample_filters[n])
-
-            cmds.connectAttr(filter_nodes + ".message",
-                             "rmanGlobals.sampleFilters[%i]" % n,
-                             force=True)
-
-            if filter_nodes.startswith("PxrCryptomatte"):
-                matte_dir = self._image_dir["cryptomatte"]
-                matte_dir = matte_dir.replace("{aov_separator}",
-                                              aov_separator)
-                cmds.setAttr(filter_nodes + ".filename",
-                             matte_dir, type="string")
-            elif filter_nodes.startswith("PxrWatermarkFilter"):
-                watermark_dir = self._image_dir["watermark"]
-                watermark_dir = watermark_dir.replace("{aov_separator}",
-                                                      aov_separator)
-                cmds.setAttr(filter_nodes + ".filename",
-                             watermark_dir, type="string")
-
-        additional_options = rman_render_presets["additional_options"]
-
-        self._set_global_output_settings()
-        cmds.setAttr("defaultResolution.width", width)
-        cmds.setAttr("defaultResolution.height", height)
-        self._additional_attribs_setter(additional_options)
-
-    def _set_vray_settings(self, aov_separator, width, height):
-        # type: (str, int, int) -> None
-        """Sets important settings for V-Ray."""
-        # Not all hosts can import this module.
-        from maya import cmds  # noqa: F401
-        import maya.mel as mel  # noqa: F401
-
-        settings = cmds.ls(type="VRaySettingsNode")
-        node = settings[0] if settings else cmds.createNode("VRaySettingsNode")
-        render_settings = self._project_settings["maya"]["RenderSettings"]
-        vray_render_presets = render_settings["vray_renderer"]
-        # vrayRenderElement
-        remove_aovs = render_settings["remove_aovs"]
-        all_vray_aovs = cmds.ls(type='VRayRenderElement')
-        lightSelect_aovs = cmds.ls(type='VRayRenderElementSet')
-        if remove_aovs:
-            for aov in all_vray_aovs:
-                # remove all AOVs except LightSelect
-                enabled = cmds.getAttr("{}.enabled".format(aov))
-                if enabled:
-                    cmds.delete(aov)
-            # remove enabled LightSelect AOVs one by one
-            for light_aov in lightSelect_aovs:
-                light_enabled = cmds.getAttr("{}.enabled".format(light_aov))
-                if light_enabled:
-                    cmds.delete(light_aov)
-
-        vray_aovs = vray_render_presets["aov_list"]
-        for renderlayer in vray_aovs:
-            renderElement = "vrayAddRenderElement {}".format(renderlayer)
-            RE_name = mel.eval(renderElement)
-            # If the same render element already exists, V-Ray returns a
-            # name with a trailing "1"; delete that duplicate.
-            if RE_name.endswith("1"):
-                cmds.delete(RE_name)
-        # Set the AOV separator.
-        # First we need to explicitly set the UI items in Render Settings,
-        # because that is also what V-Ray syncs to when the Render Settings
-        # UI was initialized before and refreshes again.
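# (Illustrative aside, an assumption rather than original code: Maya only
# exposes option menu labels one item at a time, so the block below queries
# them with the pattern
#
#     items = cmds.optionMenuGrp(menu, query=True, itemListLong=True)
#     labels = [cmds.menuItem(i, query=True, label=True) for i in items]
#
# where `ill` in the code below is the short form of `itemListLong`.)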
- MENU = "vrayRenderElementSeparator" - if cmds.optionMenuGrp(MENU, query=True, exists=True): - items = cmds.optionMenuGrp(MENU, query=True, ill=True) - separators = [cmds.menuItem(i, query=True, label=True) for i in items] # noqa: E501 - try: - sep_idx = separators.index(aov_separator) - except ValueError: - six.reraise( - CreatorError, - CreatorError( - "AOV character {} not in {}".format( - aov_separator, separators)), - sys.exc_info()[2]) - - cmds.optionMenuGrp(MENU, edit=True, select=sep_idx + 1) - - # Set the render element attribute as string. This is also what V-Ray - # sets whenever the `vrayRenderElementSeparator` menu items switch - cmds.setAttr( - "{}.fileNameRenderElementSeparator".format(node), - aov_separator, - type="string" - ) - - # Set render file format to exr - ext = vray_render_presets["image_format"] - cmds.setAttr("{}.imageFormatStr".format(node), ext, type="string") - - # animType - cmds.setAttr("{}.animType".format(node), 1) - - # resolution - cmds.setAttr("{}.width".format(node), width) - cmds.setAttr("{}.height".format(node), height) - - additional_options = vray_render_presets["additional_options"] - - self._additional_attribs_setter(additional_options) - - @staticmethod - def _set_global_output_settings(): - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - # enable animation - cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) - cmds.setAttr("defaultRenderGlobals.animation", 1) - cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) - cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) - - def _additional_attribs_setter(self, additional_attribs): - # Not all hosts can import this module. - from maya import cmds # noqa: F401 - import maya.mel as mel # noqa: F401 - - for item in additional_attribs: - attribute, value = item - attribute = str(attribute) # ensure str conversion from settings - attribute_type = cmds.getAttr(attribute, type=True) - if attribute_type in {"long", "bool"}: - cmds.setAttr(attribute, int(value)) - elif attribute_type == "string": - cmds.setAttr(attribute, str(value), type="string") - elif attribute_type in {"double", "doubleAngle", "doubleLinear"}: - cmds.setAttr(attribute, float(value)) - else: - self.log.error( - "Attribute {attribute} can not be set due to unsupported " - "type: {attribute_type}".format( - attribute=attribute, - attribute_type=attribute_type) - ) diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py deleted file mode 100644 index 18a4ea0e9a..0000000000 --- a/openpype/hosts/maya/api/menu.py +++ /dev/null @@ -1,261 +0,0 @@ -import os -import logging -from functools import partial - -from qtpy import QtWidgets, QtGui - -import maya.utils -import maya.cmds as cmds - -from openpype.pipeline import ( - get_current_asset_name, - get_current_task_name -) -from openpype.pipeline.workfile import BuildWorkfile -from openpype.tools.utils import host_tools -from openpype.hosts.maya.api import lib, lib_rendersettings -from .lib import get_main_window, IS_HEADLESS -from ..tools import show_look_assigner - -from .workfile_template_builder import ( - create_placeholder, - update_placeholder, - build_workfile_template, - update_workfile_template, -) - -log = logging.getLogger(__name__) - -MENU_NAME = "op_maya_menu" - - -def _get_menu(menu_name=None): - """Return the menu instance if it currently exists in Maya""" - if menu_name is None: - menu_name = MENU_NAME - - widgets = {w.objectName(): w for w in 
QtWidgets.QApplication.allWidgets()} - return widgets.get(menu_name) - - -def get_context_label(): - return "{}, {}".format( - get_current_asset_name(), - get_current_task_name() - ) - - -def install(project_settings): - if cmds.about(batch=True): - log.info("Skipping openpype.menu initialization in batch mode..") - return - - def add_menu(): - pyblish_icon = host_tools.get_pyblish_icon() - parent_widget = get_main_window() - cmds.menu( - MENU_NAME, - label=os.environ.get("AVALON_LABEL") or "OpenPype", - tearOff=True, - parent="MayaWindow" - ) - - # Create context menu - cmds.menuItem( - "currentContext", - label=get_context_label(), - parent=MENU_NAME, - enable=False - ) - - cmds.setParent("..", menu=True) - - cmds.menuItem(divider=True) - - cmds.menuItem( - "Create...", - command=lambda *args: host_tools.show_publisher( - parent=parent_widget, - tab="create" - ) - ) - - cmds.menuItem( - "Load...", - command=lambda *args: host_tools.show_loader( - parent=parent_widget, - use_context=True - ) - ) - - cmds.menuItem( - "Publish...", - command=lambda *args: host_tools.show_publisher( - parent=parent_widget, - tab="publish" - ), - image=pyblish_icon - ) - - cmds.menuItem( - "Manage...", - command=lambda *args: host_tools.show_scene_inventory( - parent=parent_widget - ) - ) - - cmds.menuItem( - "Library...", - command=lambda *args: host_tools.show_library_loader( - parent=parent_widget - ) - ) - - cmds.menuItem(divider=True) - - cmds.menuItem( - "Work Files...", - command=lambda *args: host_tools.show_workfiles( - parent=parent_widget - ), - ) - - cmds.menuItem( - "Set Frame Range", - command=lambda *args: lib.reset_frame_range() - ) - - cmds.menuItem( - "Set Resolution", - command=lambda *args: lib.reset_scene_resolution() - ) - - cmds.menuItem( - "Set Colorspace", - command=lambda *args: lib.set_colorspace(), - ) - - cmds.menuItem( - "Set Render Settings", - command=lambda *args: lib_rendersettings.RenderSettings().set_default_renderer_settings() # noqa - ) - - cmds.menuItem(divider=True, parent=MENU_NAME) - cmds.menuItem( - "Build First Workfile", - parent=MENU_NAME, - command=lambda *args: BuildWorkfile().process() - ) - - cmds.menuItem( - "Look assigner...", - command=lambda *args: show_look_assigner( - parent_widget - ) - ) - - cmds.menuItem( - "Experimental tools...", - command=lambda *args: host_tools.show_experimental_tools_dialog( - parent_widget - ) - ) - - builder_menu = cmds.menuItem( - "Template Builder", - subMenu=True, - tearOff=True, - parent=MENU_NAME - ) - cmds.menuItem( - "Create Placeholder", - parent=builder_menu, - command=create_placeholder - ) - cmds.menuItem( - "Update Placeholder", - parent=builder_menu, - command=update_placeholder - ) - cmds.menuItem( - "Build Workfile from template", - parent=builder_menu, - command=build_workfile_template - ) - cmds.menuItem( - "Update Workfile from template", - parent=builder_menu, - command=update_workfile_template - ) - - cmds.setParent(MENU_NAME, menu=True) - - def add_scripts_menu(project_settings): - try: - import scriptsmenu.launchformaya as launchformaya - except ImportError: - log.warning( - "Skipping studio.menu install, because " - "'scriptsmenu' module seems unavailable." 
- ) - return - - config = project_settings["maya"]["scriptsmenu"]["definition"] - _menu = project_settings["maya"]["scriptsmenu"]["name"] - - if not config: - log.warning("Skipping studio menu, no definition found.") - return - - # run the launcher for Maya menu - studio_menu = launchformaya.main( - title=_menu.title(), - objectName=_menu.title().lower().replace(" ", "_") - ) - - # apply configuration - studio_menu.build_from_configuration(studio_menu, config) - - # Allow time for uninstallation to finish. - # We use Maya's executeDeferred instead of QTimer.singleShot - # so that it only gets called after Maya UI has initialized too. - # This is crucial with Maya 2020+ which initializes without UI - # first as a QCoreApplication - maya.utils.executeDeferred(add_menu) - cmds.evalDeferred(partial(add_scripts_menu, project_settings), - lowestPriority=True) - - -def uninstall(): - menu = _get_menu() - if menu: - log.info("Attempting to uninstall ...") - - try: - menu.deleteLater() - del menu - except Exception as e: - log.error(e) - - -def popup(): - """Pop-up the existing menu near the mouse cursor.""" - menu = _get_menu() - cursor = QtGui.QCursor() - point = cursor.pos() - menu.exec_(point) - - -def update_menu_task_label(): - """Update the task label in Avalon menu to current session""" - - if IS_HEADLESS: - return - - object_name = "{}|currentContext".format(MENU_NAME) - if not cmds.menuItem(object_name, query=True, exists=True): - log.warning("Can't find menuItem: {}".format(object_name)) - return - - label = get_context_label() - cmds.menuItem(object_name, edit=True, label=label) diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py deleted file mode 100644 index 1ecfdfaa40..0000000000 --- a/openpype/hosts/maya/api/pipeline.py +++ /dev/null @@ -1,767 +0,0 @@ -import json -import base64 -import os -import errno -import logging -import contextlib -import shutil - -from maya import utils, cmds, OpenMaya -import maya.api.OpenMaya as om - -import pyblish.api - -from openpype.settings import get_project_settings -from openpype.host import ( - HostBase, - IWorkfileHost, - ILoadHost, - IPublishHost, - HostDirmap, -) -from openpype.tools.utils import host_tools -from openpype.tools.workfiles.lock_dialog import WorkfileLockDialog -from openpype.lib import ( - register_event_callback, - emit_event -) -from openpype.pipeline import ( - legacy_io, - get_current_project_name, - register_loader_plugin_path, - register_inventory_action_path, - register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_inventory_action_path, - deregister_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.pipeline.load import any_outdated_containers -from openpype.pipeline.workfile.lock_workfile import ( - create_workfile_lock, - remove_workfile_lock, - is_workfile_locked, - is_workfile_lock_enabled -) -from openpype.hosts.maya import MAYA_ROOT_DIR -from openpype.hosts.maya.lib import create_workspace_mel - -from . 
import menu, lib -from .workfile_template_builder import MayaPlaceholderLoadPlugin -from .workio import ( - open_file, - save_file, - file_extensions, - has_unsaved_changes, - work_root, - current_file -) - -log = logging.getLogger("openpype.hosts.maya") - -PLUGINS_DIR = os.path.join(MAYA_ROOT_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - -AVALON_CONTAINERS = ":AVALON_CONTAINERS" - - -class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "maya" - - def __init__(self): - super(MayaHost, self).__init__() - self._op_events = {} - - def install(self): - project_name = get_current_project_name() - project_settings = get_project_settings(project_name) - # process path mapping - dirmap_processor = MayaDirmap("maya", project_name, project_settings) - dirmap_processor.process_dirmap() - - pyblish.api.register_plugin_path(PUBLISH_PATH) - pyblish.api.register_host("mayabatch") - pyblish.api.register_host("mayapy") - pyblish.api.register_host("maya") - - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - self.log.info(PUBLISH_PATH) - - self.log.info("Installing callbacks ... ") - register_event_callback("init", on_init) - - _set_project() - - if lib.IS_HEADLESS: - self.log.info(( - "Running in headless mode, skipping Maya save/open/new" - " callback installation.." - )) - - return - - self._register_callbacks() - - menu.install(project_settings) - - register_event_callback("save", on_save) - register_event_callback("open", on_open) - register_event_callback("new", on_new) - register_event_callback("before.save", on_before_save) - register_event_callback("after.save", on_after_save) - register_event_callback("before.close", on_before_close) - register_event_callback("before.file.open", before_file_open) - register_event_callback("taskChanged", on_task_changed) - register_event_callback("workfile.open.before", before_workfile_open) - register_event_callback("workfile.save.before", before_workfile_save) - register_event_callback( - "workfile.save.before", workfile_save_before_xgen - ) - register_event_callback("workfile.save.after", after_workfile_save) - - def open_workfile(self, filepath): - return open_file(filepath) - - def save_workfile(self, filepath=None): - return save_file(filepath) - - def work_root(self, session): - return work_root(session) - - def get_current_workfile(self): - return current_file() - - def workfile_has_unsaved_changes(self): - return has_unsaved_changes() - - def get_workfile_extensions(self): - return file_extensions() - - def get_containers(self): - return ls() - - def get_workfile_build_placeholder_plugins(self): - return [ - MayaPlaceholderLoadPlugin - ] - - @contextlib.contextmanager - def maintained_selection(self): - with lib.maintained_selection(): - yield - - def get_context_data(self): - data = cmds.fileInfo("OpenPypeContext", query=True) - if not data: - return {} - - data = data[0] # Maya seems to return a list - decoded = base64.b64decode(data).decode("utf-8") - return json.loads(decoded) - - def update_context_data(self, data, changes): - json_str = json.dumps(data) - encoded = base64.b64encode(json_str.encode("utf-8")) - return cmds.fileInfo("OpenPypeContext", encoded) - - def _register_callbacks(self): - for handler, event in self._op_events.copy().items(): - if event is None: - 
continue - - try: - OpenMaya.MMessage.removeCallback(event) - self._op_events[handler] = None - except RuntimeError as exc: - self.log.info(exc) - - self._op_events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save - ) - - self._op_events[_after_scene_save] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterSave, - _after_scene_save - ) - ) - - self._op_events[_before_scene_save] = ( - OpenMaya.MSceneMessage.addCheckCallback( - OpenMaya.MSceneMessage.kBeforeSaveCheck, - _before_scene_save - ) - ) - - self._op_events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterNew, _on_scene_new - ) - - self._op_events[_on_maya_initialized] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kMayaInitialized, - _on_maya_initialized - ) - ) - - self._op_events[_on_scene_open] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kAfterOpen, - _on_scene_open - ) - ) - - self._op_events[_before_scene_open] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kBeforeOpen, - _before_scene_open - ) - ) - - self._op_events[_before_close_maya] = ( - OpenMaya.MSceneMessage.addCallback( - OpenMaya.MSceneMessage.kMayaExiting, - _before_close_maya - ) - ) - - self.log.info("Installed event handler _on_scene_save..") - self.log.info("Installed event handler _before_scene_save..") - self.log.info("Installed event handler _on_after_save..") - self.log.info("Installed event handler _on_scene_new..") - self.log.info("Installed event handler _on_maya_initialized..") - self.log.info("Installed event handler _on_scene_open..") - self.log.info("Installed event handler _check_lock_file..") - self.log.info("Installed event handler _before_close_maya..") - - -def _set_project(): - """Sets the maya project to the current Session's work directory. - - Returns: - None - - """ - workdir = legacy_io.Session["AVALON_WORKDIR"] - - try: - os.makedirs(workdir) - except OSError as e: - # An already existing working directory is fine. - if e.errno == errno.EEXIST: - pass - else: - raise - - cmds.workspace(workdir, openWorkspace=True) - - -def _on_maya_initialized(*args): - emit_event("init") - - if cmds.about(batch=True): - log.warning("Running batch mode ...") - return - - # Keep reference to the main Window, once a main window exists. - lib.get_main_window() - - -def _on_scene_new(*args): - emit_event("new") - - -def _after_scene_save(*arg): - emit_event("after.save") - - -def _on_scene_save(*args): - emit_event("save") - - -def _on_scene_open(*args): - emit_event("open") - - -def _before_close_maya(*args): - emit_event("before.close") - - -def _before_scene_open(*args): - emit_event("before.file.open") - - -def _before_scene_save(return_code, client_data): - - # Default to allowing the action. Registered - # callbacks can optionally set this to False - # in order to block the operation. 
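# (Editor's sketch, assuming the event API used elsewhere in this module
# rather than quoting original code: a registered "before.save" listener can
# veto the save by flipping the check-callback's return code, e.g.
#
#     from openpype.lib import register_event_callback
#
#     def _block_save(event):
#         OpenMaya.MScriptUtil.setBool(event["return_code"], False)
#
#     register_event_callback("before.save", _block_save)
#
# mirroring how `_before_scene_save` below defaults the code to True.)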
- OpenMaya.MScriptUtil.setBool(return_code, True) - - emit_event( - "before.save", - {"return_code": return_code} - ) - - -def _remove_workfile_lock(): - """Remove workfile lock on current file""" - if not handle_workfile_locks(): - return - filepath = current_file() - log.info("Removing lock on current file {}...".format(filepath)) - if filepath: - remove_workfile_lock(filepath) - - -def handle_workfile_locks(): - if lib.IS_HEADLESS: - return False - project_name = get_current_project_name() - return is_workfile_lock_enabled(MayaHost.name, project_name) - - -def uninstall(): - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - pyblish.api.deregister_host("mayabatch") - pyblish.api.deregister_host("mayapy") - pyblish.api.deregister_host("maya") - - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - deregister_inventory_action_path(INVENTORY_PATH) - - menu.uninstall() - - -def parse_container(container): - """Return the container node's full container data. - - Args: - container (str): A container node name. - - Returns: - dict: The container schema data for this container node. - - """ - data = lib.read(container) - - # Backwards compatibility pre-schemas for containers - data["schema"] = data.get("schema", "openpype:container-1.0") - - # Append transient data - data["objectName"] = container - - return data - - -def _ls(): - """Yields Avalon container node names. - - Used by `ls()` to retrieve the nodes and then query the full container's - data. - - Yields: - str: Avalon container node name (objectSet) - - """ - - def _maya_iterate(iterator): - """Helper to iterate a maya iterator""" - while not iterator.isDone(): - yield iterator.thisNode() - iterator.next() - - ids = {AVALON_CONTAINER_ID, - # Backwards compatibility - "pyblish.mindbender.container"} - - # Iterate over all 'set' nodes in the scene to detect whether - # they have the avalon container ".id" attribute. - fn_dep = om.MFnDependencyNode() - iterator = om.MItDependencyNodes(om.MFn.kSet) - for mobject in _maya_iterate(iterator): - if mobject.apiTypeStr != "kSet": - # Only match by exact type - continue - - fn_dep.setObject(mobject) - if not fn_dep.hasAttribute("id"): - continue - - plug = fn_dep.findPlug("id", True) - value = plug.asString() - if value in ids: - yield fn_dep.name() - - -def ls(): - """Yields containers from active Maya scene - - This is the host-equivalent of api.ls(), but instead of listing - assets on disk, it lists assets already loaded in Maya; once loaded - they are called 'containers' - - Yields: - dict: container - - """ - container_names = _ls() - for container in sorted(container_names): - yield parse_container(container) - - -def containerise(name, - namespace, - nodes, - context, - loader=None, - suffix="CON"): - """Bundle `nodes` into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - nodes (list): Long names of nodes to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. 
- - Returns: - container (str): Name of container assembly - - """ - container = cmds.sets(nodes, name="%s_%s_%s" % (namespace, name, suffix)) - - data = [ - ("schema", "openpype:container-2.0"), - ("id", AVALON_CONTAINER_ID), - ("name", name), - ("namespace", namespace), - ("loader", loader), - ("representation", context["representation"]["_id"]), - ] - - for key, value in data: - cmds.addAttr(container, longName=key, dataType="string") - cmds.setAttr(container + "." + key, str(value), type="string") - - main_container = cmds.ls(AVALON_CONTAINERS, type="objectSet") - if not main_container: - main_container = cmds.sets(empty=True, name=AVALON_CONTAINERS) - - # Implement #399: Maya 2019+ hide AVALON_CONTAINERS on creation.. - if cmds.attributeQuery("hiddenInOutliner", - node=main_container, - exists=True): - cmds.setAttr(main_container + ".hiddenInOutliner", True) - else: - main_container = main_container[0] - - cmds.sets(container, addElement=main_container) - - # Implement #399: Maya 2019+ hide containers in outliner - if cmds.attributeQuery("hiddenInOutliner", - node=container, - exists=True): - cmds.setAttr(container + ".hiddenInOutliner", True) - - return container - - -def on_init(): - log.info("Running callback on init..") - - def safe_deferred(fn): - """Execute deferred the function in a try-except""" - - def _fn(): - """safely call in deferred callback""" - try: - fn() - except Exception as exc: - print(exc) - - try: - utils.executeDeferred(_fn) - except Exception as exc: - print(exc) - - # Force load Alembic so referenced alembics - # work correctly on scene open - cmds.loadPlugin("AbcImport", quiet=True) - cmds.loadPlugin("AbcExport", quiet=True) - - # Force load objExport plug-in (requested by artists) - cmds.loadPlugin("objExport", quiet=True) - - if not lib.IS_HEADLESS: - launch_workfiles = os.environ.get("WORKFILES_STARTUP") - if launch_workfiles: - safe_deferred(host_tools.show_workfiles) - - from .customize import ( - override_component_mask_commands, - override_toolbox_ui - ) - safe_deferred(override_component_mask_commands) - safe_deferred(override_toolbox_ui) - - -def on_before_save(): - """Run validation for scene's FPS prior to saving""" - return lib.validate_fps() - - -def on_after_save(): - """Check if there is a lockfile after save""" - check_lock_on_current_file() - - -def check_lock_on_current_file(): - - """Check if there is a user opening the file""" - if not handle_workfile_locks(): - return - log.info("Running callback on checking the lock file...") - - # add the lock file when opening the file - filepath = current_file() - # Skip if current file is 'untitled' - if not filepath: - return - - if is_workfile_locked(filepath): - # add lockfile dialog - workfile_dialog = WorkfileLockDialog(filepath) - if not workfile_dialog.exec_(): - cmds.file(new=True) - return - - create_workfile_lock(filepath) - - -def on_before_close(): - """Delete the lock file after user quitting the Maya Scene""" - log.info("Closing Maya...") - # delete the lock file - filepath = current_file() - if handle_workfile_locks(): - remove_workfile_lock(filepath) - - -def before_file_open(): - """check lock file when the file changed""" - # delete the lock file - _remove_workfile_lock() - - -def on_save(): - """Automatically add IDs to new nodes - - Any transform of a mesh, without an existing ID, is given one - automatically on file save. 
- """ - log.info("Running callback on save..") - # remove lockfile if users jumps over from one scene to another - _remove_workfile_lock() - - # Generate ids of the current context on nodes in the scene - nodes = lib.get_id_required_nodes(referenced_nodes=False) - for node, new_id in lib.generate_ids(nodes): - lib.set_id(node, new_id, overwrite=False) - - -def on_open(): - """On scene open let's assume the containers have changed.""" - - from openpype.widgets import popup - - # Validate FPS after update_task_from_path to - # ensure it is using correct FPS for the asset - lib.validate_fps() - lib.fix_incompatible_containers() - - if any_outdated_containers(): - log.warning("Scene has outdated content.") - - # Find maya main window - parent = lib.get_main_window() - if parent is None: - log.info("Skipping outdated content pop-up " - "because Maya window can't be found.") - else: - - # Show outdated pop-up - def _on_show_inventory(): - host_tools.show_scene_inventory(parent=parent) - - dialog = popup.Popup(parent=parent) - dialog.setWindowTitle("Maya scene has outdated content") - dialog.setMessage("There are outdated containers in " - "your Maya scene.") - dialog.on_clicked.connect(_on_show_inventory) - dialog.show() - - # create lock file for the maya scene - check_lock_on_current_file() - - -def on_new(): - """Set project resolution and fps when create a new file""" - log.info("Running callback on new..") - with lib.suspended_refresh(): - lib.set_context_settings() - - _remove_workfile_lock() - - -def on_task_changed(): - """Wrapped function of app initialize and maya's on task changed""" - # Run - menu.update_menu_task_label() - - workdir = legacy_io.Session["AVALON_WORKDIR"] - if os.path.exists(workdir): - log.info("Updating Maya workspace for task change to %s", workdir) - _set_project() - - # Set Maya fileDialog's start-dir to /scenes - frule_scene = cmds.workspace(fileRuleEntry="scene") - cmds.optionVar(stringValue=("browserLocationmayaBinaryscene", - workdir + "/" + frule_scene)) - - else: - log.warning(( - "Can't set project for new context because path does not exist: {}" - ).format(workdir)) - - with lib.suspended_refresh(): - lib.set_context_settings() - lib.update_content_on_context_change() - - -def before_workfile_open(): - if handle_workfile_locks(): - _remove_workfile_lock() - - -def before_workfile_save(event): - project_name = get_current_project_name() - if handle_workfile_locks(): - _remove_workfile_lock() - workdir_path = event["workdir_path"] - if workdir_path: - create_workspace_mel(workdir_path, project_name) - - -def workfile_save_before_xgen(event): - """Manage Xgen external files when switching context. - - Xgen has various external files that needs to be unique and relative to the - workfile, so we need to copy and potentially overwrite these files when - switching context. 
- - Args: - event (Event) - openpype/lib/events.py - """ - if not cmds.pluginInfo("xgenToolkit", query=True, loaded=True): - return - - import xgenm - - current_work_dir = legacy_io.Session["AVALON_WORKDIR"].replace("\\", "/") - expected_work_dir = event.data["workdir_path"].replace("\\", "/") - if current_work_dir == expected_work_dir: - return - - palettes = cmds.ls(type="xgmPalette", long=True) - if not palettes: - return - - transfers = [] - overwrites = [] - attribute_changes = {} - attrs = ["xgFileName", "xgBaseFile"] - for palette in palettes: - sanitized_palette = palette.replace("|", "") - project_path = xgenm.getAttr("xgProjectPath", sanitized_palette) - _, maya_extension = os.path.splitext(event.data["filename"]) - - for attr in attrs: - node_attr = "{}.{}".format(palette, attr) - attr_value = cmds.getAttr(node_attr) - - if not attr_value: - continue - - source = os.path.join(project_path, attr_value) - - attr_value = event.data["filename"].replace( - maya_extension, - "__{}{}".format( - sanitized_palette.replace(":", "__"), - os.path.splitext(attr_value)[1] - ) - ) - target = os.path.join(expected_work_dir, attr_value) - - transfers.append((source, target)) - attribute_changes[node_attr] = attr_value - - relative_path = xgenm.getAttr( - "xgDataPath", sanitized_palette - ).split(os.pathsep)[0] - absolute_path = relative_path.replace("${PROJECT}", project_path) - for root, _, files in os.walk(absolute_path): - for f in files: - source = os.path.join(root, f).replace("\\", "/") - target = source.replace(project_path, expected_work_dir + "/") - transfers.append((source, target)) - if os.path.exists(target): - overwrites.append(target) - - # Ask user about overwriting files. - if overwrites: - log.warning( - "WARNING! Potential loss of data.\n\n" - "Found duplicate Xgen files in new context.\n{}".format( - "\n".join(overwrites) - ) - ) - return - - for source, destination in transfers: - if not os.path.exists(os.path.dirname(destination)): - os.makedirs(os.path.dirname(destination)) - shutil.copy(source, destination) - - for attribute, value in attribute_changes.items(): - cmds.setAttr(attribute, value, type="string") - - -def after_workfile_save(event): - workfile_name = event["filename"] - if ( - handle_workfile_locks() - and workfile_name - and not is_workfile_locked(workfile_name) - ): - create_workfile_lock(workfile_name) - - -class MayaDirmap(HostDirmap): - def on_enable_dirmap(self): - cmds.dirmap(en=True) - - def dirmap_routine(self, source_path, destination_path): - cmds.dirmap(m=(source_path, destination_path)) - cmds.dirmap(m=(destination_path, source_path)) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py deleted file mode 100644 index e684a91fe2..0000000000 --- a/openpype/hosts/maya/api/plugin.py +++ /dev/null @@ -1,961 +0,0 @@ -import json -import os -from abc import ABCMeta - -import qargparse -import six -from maya import cmds -from maya.app.renderSetup.model import renderSetup - -from openpype import AYON_SERVER_ENABLED -from openpype.lib import BoolDef, Logger -from openpype.settings import get_project_settings -from openpype.pipeline import ( - AVALON_CONTAINER_ID, - Anatomy, - - CreatedInstance, - Creator as NewCreator, - AutoCreator, - HiddenCreator, - - CreatorError, - LegacyCreator, - LoaderPlugin, - get_representation_path, -) -from openpype.pipeline.load import LoadError -from openpype.client import get_asset_by_name -from openpype.pipeline.create import get_subset_name - -from . 
import lib
-from .lib import imprint, read
-from .pipeline import containerise
-
-log = Logger.get_logger()
-
-
-def _get_attr(node, attr, default=None):
-    """Helper to get attribute which allows attribute to not exist."""
-    if not cmds.attributeQuery(attr, node=node, exists=True):
-        return default
-    return cmds.getAttr("{}.{}".format(node, attr))
-
-
-# Backwards compatibility: these functions have been moved to lib.
-def get_reference_node(*args, **kwargs):
-    """Get the reference node from the container members.
-
-    Deprecated:
-        This function was moved and will be removed in 3.16.x.
-    """
-    msg = "Function 'get_reference_node' has been moved."
-    log.warning(msg)
-    cmds.warning(msg)
-    return lib.get_reference_node(*args, **kwargs)
-
-
-def get_reference_node_parents(*args, **kwargs):
-    """
-    Deprecated:
-        This function was moved and will be removed in 3.16.x.
-    """
-    msg = "Function 'get_reference_node_parents' has been moved."
-    log.warning(msg)
-    cmds.warning(msg)
-    return lib.get_reference_node_parents(*args, **kwargs)
-
-
-class Creator(LegacyCreator):
-    defaults = ['Main']
-
-    def process(self):
-        nodes = list()
-
-        with lib.undo_chunk():
-            if (self.options or {}).get("useSelection"):
-                nodes = cmds.ls(selection=True)
-
-            instance = cmds.sets(nodes, name=self.name)
-            lib.imprint(instance, self.data)
-
-        return instance
-
-
-@six.add_metaclass(ABCMeta)
-class MayaCreatorBase(object):
-
-    @staticmethod
-    def cache_subsets(shared_data):
-        """Cache instances for Creators to shared data.
-
-        Create `maya_cached_subsets` key when needed in shared data and
-        fill it with all collected instances from the scene under its
-        respective creator identifiers.
-
-        If legacy instances are detected in the scene, create
-        `maya_cached_legacy_subsets` there and fill it with
-        all legacy subsets under family as a key.
-
-        Args:
-            shared_data (Dict[str, Any]): Shared data.
-
-        Returns:
-            Dict[str, Any]: Shared data dictionary.
-
-        """
-        if shared_data.get("maya_cached_subsets") is None:
-            cache = dict()
-            cache_legacy = dict()
-
-            for node in cmds.ls(type="objectSet"):
-
-                if _get_attr(node, attr="id") != "pyblish.avalon.instance":
-                    continue
-
-                creator_id = _get_attr(node, attr="creator_identifier")
-                if creator_id is not None:
-                    # creator instance
-                    cache.setdefault(creator_id, []).append(node)
-                else:
-                    # legacy instance
-                    family = _get_attr(node, attr="family")
-                    if family is None:
-                        # must be a broken instance
-                        continue
-
-                    cache_legacy.setdefault(family, []).append(node)
-
-            shared_data["maya_cached_subsets"] = cache
-            shared_data["maya_cached_legacy_subsets"] = cache_legacy
-        return shared_data
-
-    def get_publish_families(self):
-        """Return families for the instances of this creator.
-
-        Allow a Creator to define multiple families so that a creator can
-        e.g. specify `usd` and `usdMaya` and another USD creator can also
-        specify `usd` but apply different extractors like `usdMultiverse`.
-
-        There is no need to override this method if you only have the
-        primary family defined by the `family` property as that will always
-        be set.
-
-        Returns:
-            list: families for instances of this creator
-
-        """
-        return []
-
-    def imprint_instance_node(self, node, data):
-
-        # We never store the instance_node as value on the node since
-        # it's the node name itself
-        data.pop("instance_node", None)
-        data.pop("instance_id", None)
-
-        # Don't store `families` since it's up to the creator itself
-        # to define the initial publish families - not a stored attribute of
-        # `families`
-        data.pop("families", None)
-
-        # We store creator attributes at the root level and assume they
-        # will not clash in names with `subset`, `task`, etc. and other
-        # default names. This is just so these attributes in many cases
-        # are still editable in the maya UI by artists.
-        # note: pop to move to end of dict to sort attributes last on the node
-        creator_attributes = data.pop("creator_attributes", {})
-
-        # We only flatten value types which `imprint` function supports
-        json_creator_attributes = {}
-        for key, value in dict(creator_attributes).items():
-            if isinstance(value, (list, tuple, dict)):
-                creator_attributes.pop(key)
-                json_creator_attributes[key] = value
-
-        # Flatten remaining creator attributes to the node itself
-        data.update(creator_attributes)
-
-        # We know the "publish_attributes" will be complex data of
-        # settings per plugins, we'll store this as a flattened json structure
-        # pop to move to end of dict to sort attributes last on the node
-        data["publish_attributes"] = json.dumps(
-            data.pop("publish_attributes", {})
-        )
-
-        # Persist the non-flattened creator attributes (special value types,
-        # like multiselection EnumDef)
-        data["creator_attributes"] = json.dumps(json_creator_attributes)
-
-        # Since we flattened the data structure for creator attributes we want
-        # to correctly detect which flattened attributes should end back in
-        # the creator attributes when reading the data from the node, so we
-        # store the relevant keys as a string
-        data["__creator_attributes_keys"] = ",".join(creator_attributes.keys())
-
-        # Kill any existing attributes just so we can imprint cleanly again
-        for attr in data.keys():
-            if cmds.attributeQuery(attr, node=node, exists=True):
-                cmds.deleteAttr("{}.{}".format(node, attr))
-
-        return imprint(node, data)
-
-    def read_instance_node(self, node):
-        node_data = read(node)
-
-        # Never care about a cbId attribute on the object set
-        # being read as 'data'
-        node_data.pop("cbId", None)
-
-        # Make sure we convert any creator attributes from the json string
-        creator_attributes = node_data.get("creator_attributes")
-        if creator_attributes:
-            node_data["creator_attributes"] = json.loads(creator_attributes)
-        else:
-            node_data["creator_attributes"] = {}
-
-        # Move the relevant attributes into "creator_attributes" that
-        # we flattened originally
-        creator_attribute_keys = node_data.pop("__creator_attributes_keys",
-                                               "").split(",")
-        for key in creator_attribute_keys:
-            if key in node_data:
-                node_data["creator_attributes"][key] = node_data.pop(key)
-
-        # Make sure we convert any publish attributes from the json string
-        publish_attributes = node_data.get("publish_attributes")
-        if publish_attributes:
-            node_data["publish_attributes"] = json.loads(publish_attributes)
-
-        # Explicitly re-parse the node name
-        node_data["instance_node"] = node
-        node_data["instance_id"] = node
-
-        # If the creator plug-in specifies publish families, store them
-        families = self.get_publish_families()
-        if families:
-            node_data["families"] = families
-
-        return node_data
-
-    def _default_collect_instances(self):
self.cache_subsets(self.collection_shared_data) - cached_subsets = self.collection_shared_data["maya_cached_subsets"] - for node in cached_subsets.get(self.identifier, []): - node_data = self.read_instance_node(node) - - created_instance = CreatedInstance.from_existing(node_data, self) - self._add_instance_to_context(created_instance) - - def _default_update_instances(self, update_list): - for created_inst, _changes in update_list: - data = created_inst.data_to_store() - node = data.get("instance_node") - - self.imprint_instance_node(node, data) - - def _default_remove_instances(self, instances): - """Remove specified instance from the scene. - - This is only removing `id` parameter so instance is no longer - instance, because it might contain valuable data for artist. - - """ - for instance in instances: - node = instance.data.get("instance_node") - if node: - cmds.delete(node) - - self._remove_instance_from_context(instance) - - -@six.add_metaclass(ABCMeta) -class MayaCreator(NewCreator, MayaCreatorBase): - - settings_category = "maya" - - def create(self, subset_name, instance_data, pre_create_data): - - members = list() - if pre_create_data.get("use_selection"): - members = cmds.ls(selection=True) - - # Allow a Creator to define multiple families - publish_families = self.get_publish_families() - if publish_families: - families = instance_data.setdefault("families", []) - for family in self.get_publish_families(): - if family not in families: - families.append(family) - - with lib.undo_chunk(): - instance_node = cmds.sets(members, name=subset_name) - instance_data["instance_node"] = instance_node - instance = CreatedInstance( - self.family, - subset_name, - instance_data, - self) - self._add_instance_to_context(instance) - - self.imprint_instance_node(instance_node, - data=instance.data_to_store()) - return instance - - def collect_instances(self): - return self._default_collect_instances() - - def update_instances(self, update_list): - return self._default_update_instances(update_list) - - def remove_instances(self, instances): - return self._default_remove_instances(instances) - - def get_pre_create_attr_defs(self): - return [ - BoolDef("use_selection", - label="Use selection", - default=True) - ] - - -class MayaAutoCreator(AutoCreator, MayaCreatorBase): - """Automatically triggered creator for Maya. - - The plugin is not visible in UI, and 'create' method does not expect - any arguments. - """ - - settings_category = "maya" - - def collect_instances(self): - return self._default_collect_instances() - - def update_instances(self, update_list): - return self._default_update_instances(update_list) - - def remove_instances(self, instances): - return self._default_remove_instances(instances) - - -class MayaHiddenCreator(HiddenCreator, MayaCreatorBase): - """Hidden creator for Maya. - - The plugin is not visible in UI, and it does not have strictly defined - arguments for 'create' method. - """ - - settings_category = "maya" - - def create(self, *args, **kwargs): - return MayaCreator.create(self, *args, **kwargs) - - def collect_instances(self): - return self._default_collect_instances() - - def update_instances(self, update_list): - return self._default_update_instances(update_list) - - def remove_instances(self, instances): - return self._default_remove_instances(instances) - - -def ensure_namespace(namespace): - """Make sure the namespace exists. - - Args: - namespace (str): The preferred namespace name. 
- - Returns: - str: The generated or existing namespace - - """ - exists = cmds.namespace(exists=namespace) - if exists: - return namespace - else: - return cmds.namespace(add=namespace) - - -class RenderlayerCreator(NewCreator, MayaCreatorBase): - """Creator which creates an instance per renderlayer in the workfile. - - Create and manages renderlayer subset per renderLayer in workfile. - This generates a singleton node in the scene which, if it exists, tells the - Creator to collect Maya rendersetup renderlayers as individual instances. - As such, triggering create doesn't actually create the instance node per - layer but only the node which tells the Creator it may now collect - an instance per renderlayer. - - """ - - # These are required to be overridden in subclass - singleton_node_name = "" - - # These are optional to be overridden in subclass - layer_instance_prefix = None - - def _get_singleton_node(self, return_all=False): - nodes = lib.lsattr("pre_creator_identifier", self.identifier) - if nodes: - return nodes if return_all else nodes[0] - - def create(self, subset_name, instance_data, pre_create_data): - # A Renderlayer is never explicitly created using the create method. - # Instead, renderlayers from the scene are collected. Thus "create" - # would only ever be called to say, 'hey, please refresh collect' - self.create_singleton_node() - - # if no render layers are present, create default one with - # asterisk selector - rs = renderSetup.instance() - if not rs.getRenderLayers(): - render_layer = rs.createRenderLayer("Main") - collection = render_layer.createCollection("defaultCollection") - collection.getSelector().setPattern('*') - - # By RenderLayerCreator.create we make it so that the renderlayer - # instances directly appear even though it just collects scene - # renderlayers. This doesn't actually 'create' any scene contents. - self.collect_instances() - - def create_singleton_node(self): - if self._get_singleton_node(): - raise CreatorError("A Render instance already exists - only " - "one can be configured.") - - with lib.undo_chunk(): - node = cmds.sets(empty=True, name=self.singleton_node_name) - lib.imprint(node, data={ - "pre_creator_identifier": self.identifier - }) - - return node - - def collect_instances(self): - - # We only collect if the global render instance exists - if not self._get_singleton_node(): - return - - rs = renderSetup.instance() - layers = rs.getRenderLayers() - for layer in layers: - layer_instance_node = self.find_layer_instance_node(layer) - if layer_instance_node: - data = self.read_instance_node(layer_instance_node) - instance = CreatedInstance.from_existing(data, creator=self) - else: - # No existing scene instance node for this layer. Note that - # this instance will not have the `instance_node` data yet - # until it's been saved/persisted at least once. 
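# (Editor's aside, a minimal sketch of the renderSetup API this collector
# walks via `rs.getRenderLayers()` above; not part of the original file:
#
#     from maya.app.renderSetup.model import renderSetup
#
#     rs = renderSetup.instance()
#     for layer in rs.getRenderLayers():
#         print(layer.name())   # one publish instance per layer
#
# each such `layer` object is what ends up in
# `instance.transient_data["layer"]` below.)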
- project_name = self.create_context.get_current_project_name() - asset_name = self.create_context.get_current_asset_name() - instance_data = { - "task": self.create_context.get_current_task_name(), - "variant": layer.name(), - } - if AYON_SERVER_ENABLED: - instance_data["folderPath"] = asset_name - else: - instance_data["asset"] = asset_name - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - layer.name(), - instance_data["task"], - asset_doc, - project_name) - - instance = CreatedInstance( - family=self.family, - subset_name=subset_name, - data=instance_data, - creator=self - ) - - instance.transient_data["layer"] = layer - self._add_instance_to_context(instance) - - def find_layer_instance_node(self, layer): - connected_sets = cmds.listConnections( - "{}.message".format(layer.name()), - source=False, - destination=True, - type="objectSet" - ) or [] - - for node in connected_sets: - if not cmds.attributeQuery("creator_identifier", - node=node, - exists=True): - continue - - creator_identifier = cmds.getAttr(node + ".creator_identifier") - if creator_identifier == self.identifier: - self.log.info("Found node: {}".format(node)) - return node - - def _create_layer_instance_node(self, layer): - - # We only collect if a CreateRender instance exists - create_render_set = self._get_singleton_node() - if not create_render_set: - raise CreatorError("Creating a renderlayer instance node is not " - "allowed if no 'CreateRender' instance exists") - - namespace = "_{}".format(self.singleton_node_name) - namespace = ensure_namespace(namespace) - - name = "{}:{}".format(namespace, layer.name()) - render_set = cmds.sets(name=name, empty=True) - - # Keep an active link with the renderlayer so we can retrieve it - # later by a physical maya connection instead of relying on the layer - # name - cmds.addAttr(render_set, longName="renderlayer", at="message") - cmds.connectAttr("{}.message".format(layer.name()), - "{}.renderlayer".format(render_set), force=True) - - # Add the set to the 'CreateRender' set. - cmds.sets(render_set, forceElement=create_render_set) - - return render_set - - def update_instances(self, update_list): - # We only generate the persisting layer data into the scene once - # we save with the UI on e.g. validate or publish - for instance, _changes in update_list: - instance_node = instance.data.get("instance_node") - - # Ensure a node exists to persist the data to - if not instance_node: - layer = instance.transient_data["layer"] - instance_node = self._create_layer_instance_node(layer) - instance.data["instance_node"] = instance_node - - self.imprint_instance_node(instance_node, - data=instance.data_to_store()) - - def imprint_instance_node(self, node, data): - # Do not ever try to update the `renderlayer` since it'll try - # to remove the attribute and recreate it but fail to keep it a - # message attribute link. We only ever imprint that on the initial - # node creation. - # TODO: Improve how this is handled - data.pop("renderlayer", None) - data.get("creator_attributes", {}).pop("renderlayer", None) - - return super(RenderlayerCreator, self).imprint_instance_node(node, - data=data) - - def remove_instances(self, instances): - """Remove specified instances from the scene. - - This is only removing `id` parameter so instance is no longer - instance, because it might contain valuable data for artist. 
- - """ - # Instead of removing the single instance or renderlayers we instead - # remove the CreateRender node this creator relies on to decide whether - # it should collect anything at all. - nodes = self._get_singleton_node(return_all=True) - if nodes: - cmds.delete(nodes) - - # Remove ALL the instances even if only one gets deleted - for instance in list(self.create_context.instances): - if instance.get("creator_identifier") == self.identifier: - self._remove_instance_from_context(instance) - - # Remove the stored settings per renderlayer too - node = instance.data.get("instance_node") - if node and cmds.objExists(node): - cmds.delete(node) - - def get_subset_name( - self, - variant, - task_name, - asset_doc, - project_name, - host_name=None, - instance=None - ): - # creator.family != 'render' as expected - return get_subset_name(self.layer_instance_prefix, - variant, - task_name, - asset_doc, - project_name) - - -class Loader(LoaderPlugin): - hosts = ["maya"] - - load_settings = {} # defined in settings - - @classmethod - def apply_settings(cls, project_settings, system_settings): - super(Loader, cls).apply_settings(project_settings, system_settings) - cls.load_settings = project_settings['maya']['load'] - - def get_custom_namespace_and_group(self, context, options, loader_key): - """Queries Settings to get custom template for namespace and group. - - Group template might be empty >> this forces to not wrap imported items - into separate group. - - Args: - context (dict) - options (dict): artist modifiable options from dialog - loader_key (str): key to get separate configuration from Settings - ('reference_loader'|'import_loader') - """ - - options["attach_to_root"] = True - custom_naming = self.load_settings[loader_key] - - if not custom_naming['namespace']: - raise LoadError("No namespace specified in " - "Maya ReferenceLoader settings") - elif not custom_naming['group_name']: - self.log.debug("No custom group_name, no group will be created.") - options["attach_to_root"] = False - - asset = context['asset'] - subset = context['subset'] - formatting_data = { - "asset_name": asset['name'], - "asset_type": asset['type'], - "folder": { - "name": asset["name"], - }, - "subset": subset['name'], - "family": ( - subset['data'].get('family') or - subset['data']['families'][0] - ) - } - - custom_namespace = custom_naming['namespace'].format( - **formatting_data - ) - - custom_group_name = custom_naming['group_name'].format( - **formatting_data - ) - - return custom_group_name, custom_namespace, options - - -class ReferenceLoader(Loader): - """A basic ReferenceLoader for Maya - - This will implement the basic behavior for a loader to inherit from that - will containerize the reference and will implement the `remove` and - `update` logic. - - """ - - options = [ - qargparse.Integer( - "count", - label="Count", - default=1, - min=1, - help="How many times to load?" - ), - qargparse.Double3( - "offset", - label="Position Offset", - help="Offset loaded models for easier selection." - ), - qargparse.Boolean( - "attach_to_root", - label="Group imported asset", - default=True, - help="Should a group be created to encapsulate" - " imported representation ?" - ) - ] - - def load( - self, - context, - name=None, - namespace=None, - options=None - ): - path = self.filepath_from_context(context) - assert os.path.exists(path), "%s does not exist." 
% path - - custom_group_name, custom_namespace, options = \ - self.get_custom_namespace_and_group(context, options, - "reference_loader") - - count = options.get("count") or 1 - - loaded_containers = [] - for c in range(0, count): - namespace = lib.get_custom_namespace(custom_namespace) - group_name = "{}:{}".format( - namespace, - custom_group_name - ) - - options['group_name'] = group_name - - # Offset loaded subset - if "offset" in options: - offset = [i * c for i in options["offset"]] - options["translate"] = offset - - self.log.info(options) - - self.process_reference( - context=context, - name=name, - namespace=namespace, - options=options - ) - - # Only containerize if any nodes were loaded by the Loader - nodes = self[:] - if not nodes: - return - - ref_node = lib.get_reference_node(nodes, self.log) - container = containerise( - name=name, - namespace=namespace, - nodes=[ref_node], - context=context, - loader=self.__class__.__name__ - ) - loaded_containers.append(container) - self._organize_containers(nodes, container) - c += 1 - - return loaded_containers - - def process_reference(self, context, name, namespace, options): - """To be implemented by subclass""" - raise NotImplementedError("Must be implemented by subclass") - - def update(self, container, representation): - from maya import cmds - - from openpype.hosts.maya.api.lib import get_container_members - - node = container["objectName"] - - path = get_representation_path(representation) - - # Get reference node from container members - members = get_container_members(node) - reference_node = lib.get_reference_node(members, self.log) - namespace = cmds.referenceQuery(reference_node, namespace=True) - - file_type = { - "ma": "mayaAscii", - "mb": "mayaBinary", - "abc": "Alembic", - "fbx": "FBX", - "usd": "USD Import" - }.get(representation["name"]) - - assert file_type, "Unsupported representation: %s" % representation - - assert os.path.exists(path), "%s does not exist." % path - - # Need to save alembic settings and reapply, cause referencing resets - # them to incoming data. - alembic_attrs = ["speed", "offset", "cycleType", "time"] - alembic_data = {} - if representation["name"] == "abc": - alembic_nodes = cmds.ls( - "{}:*".format(namespace), type="AlembicNode" - ) - if alembic_nodes: - for attr in alembic_attrs: - node_attr = "{}.{}".format(alembic_nodes[0], attr) - data = { - "input": lib.get_attribute_input(node_attr), - "value": cmds.getAttr(node_attr) - } - - alembic_data[attr] = data - else: - self.log.debug("No alembic nodes found in {}".format(members)) - - try: - path = self.prepare_root_value(path, - representation["context"] - ["project"] - ["name"]) - content = cmds.file(path, - loadReference=reference_node, - type=file_type, - returnNewNodes=True) - except RuntimeError as exc: - # When changing a reference to a file that has load errors the - # command will raise an error even if the file is still loaded - # correctly (e.g. when raising errors on Arnold attributes) - # When the file is loaded and has content, we consider it's fine. - if not cmds.referenceQuery(reference_node, isLoaded=True): - raise - - content = cmds.referenceQuery(reference_node, - nodes=True, - dagPath=True) - if not content: - raise - - self.log.warning("Ignoring file read error:\n%s", exc) - - self._organize_containers(content, container["objectName"]) - - # Reapply alembic settings. 
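# (Illustrative aside, not original code: re-pointing a Maya reference to a
# new file boils down to
#
#     cmds.file(new_path, loadReference=reference_node,
#               type="mayaAscii", returnNewNodes=True)
#
# where `reference_node` is resolved from the container members via
# cmds.referenceQuery; the try/except above tolerates Maya raising on files
# that nevertheless load with content. The block below then restores the
# Alembic attributes captured before the swap.)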
- if representation["name"] == "abc" and alembic_data: - alembic_nodes = cmds.ls( - "{}:*".format(namespace), type="AlembicNode" - ) - if alembic_nodes: - alembic_node = alembic_nodes[0] # assume single AlembicNode - for attr, data in alembic_data.items(): - node_attr = "{}.{}".format(alembic_node, attr) - input = lib.get_attribute_input(node_attr) - if data["input"]: - if data["input"] != input: - cmds.connectAttr( - data["input"], node_attr, force=True - ) - else: - if input: - cmds.disconnectAttr(input, node_attr) - cmds.setAttr(node_attr, data["value"]) - - # Fix PLN-40 for older containers created with Avalon that had the - # `.verticesOnlySet` set to True. - if cmds.getAttr("{}.verticesOnlySet".format(node)): - self.log.info("Setting %s.verticesOnlySet to False", node) - cmds.setAttr("{}.verticesOnlySet".format(node), False) - - # Remove any placeHolderList attribute entries from the set that - # are remaining from nodes being removed from the referenced file. - members = cmds.sets(node, query=True) - invalid = [x for x in members if ".placeHolderList" in x] - if invalid: - cmds.sets(invalid, remove=node) - - # Update metadata - cmds.setAttr("{}.representation".format(node), - str(representation["_id"]), - type="string") - - # When an animation or pointcache gets connected to an Xgen container, - # the compound attribute "xgenContainers" gets created. When animation - # containers gets updated we also need to update the cacheFileName on - # the Xgen collection. - compound_name = "xgenContainers" - if cmds.objExists("{}.{}".format(node, compound_name)): - import xgenm - container_amount = cmds.getAttr( - "{}.{}".format(node, compound_name), size=True - ) - # loop through all compound children - for i in range(container_amount): - attr = "{}.{}[{}].container".format(node, compound_name, i) - objectset = cmds.listConnections(attr)[0] - reference_node = cmds.sets(objectset, query=True)[0] - palettes = cmds.ls( - cmds.referenceQuery(reference_node, nodes=True), - type="xgmPalette" - ) - for palette in palettes: - for description in xgenm.descriptions(palette): - xgenm.setAttr( - "cacheFileName", - path.replace("\\", "/"), - palette, - description, - "SplinePrimitive" - ) - - # Refresh UI and viewport. - de = xgenm.xgGlobal.DescriptionEditor - de.refresh("Full") - - def remove(self, container): - """Remove an existing `container` from Maya scene - - Deprecated; this functionality is replaced by `api.remove()` - - Arguments: - container (openpype:container-1.0): Which container - to remove from scene. - - """ - from maya import cmds - - node = container["objectName"] - - # Assume asset has been referenced - members = cmds.sets(node, query=True) - reference_node = lib.get_reference_node(members, self.log) - - assert reference_node, ("Imported container not supported; " - "container must be referenced.") - - self.log.info("Removing '%s' from Maya.." % container["name"]) - - namespace = cmds.referenceQuery(reference_node, namespace=True) - fname = cmds.referenceQuery(reference_node, filename=True) - cmds.file(fname, removeReference=True) - - try: - cmds.delete(node) - except ValueError: - # Already implicitly deleted by Maya upon removing reference - pass - - try: - # If container is not automatically cleaned up by May (issue #118) - cmds.namespace(removeNamespace=namespace, - deleteNamespaceContent=True) - except RuntimeError: - pass - - def prepare_root_value(self, file_url, project_name): - """Replace root value with env var placeholder. 
- - Use ${OPENPYPE_ROOT_WORK} (or any other root) instead of proper root - value when storing referenced url into a workfile. - Useful for remote workflows with SiteSync. - - Args: - file_url (str) - project_name (dict) - Returns: - (str) - """ - settings = get_project_settings(project_name) - use_env_var_as_root = (settings["maya"] - ["maya-dirmap"] - ["use_env_var_as_root"]) - if use_env_var_as_root: - anatomy = Anatomy(project_name) - file_url = anatomy.replace_root_with_env_key(file_url, '${{{}}}') - - return file_url - - @staticmethod - def _organize_containers(nodes, container): - # type: (list, str) -> None - """Put containers in loaded data to correct hierarchy.""" - for node in nodes: - id_attr = "{}.id".format(node) - if not cmds.attributeQuery("id", node=node, exists=True): - continue - if cmds.getAttr(id_attr) == AVALON_CONTAINER_ID: - cmds.sets(node, forceElement=container) diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py deleted file mode 100644 index d1ba3cc306..0000000000 --- a/openpype/hosts/maya/api/workfile_template_builder.py +++ /dev/null @@ -1,375 +0,0 @@ -import json - -from maya import cmds - -from openpype.pipeline import registered_host, get_current_asset_name -from openpype.pipeline.workfile.workfile_template_builder import ( - TemplateAlreadyImported, - AbstractTemplateBuilder, - PlaceholderPlugin, - LoadPlaceholderItem, - PlaceholderLoadMixin, -) -from openpype.tools.workfile_template_build import ( - WorkfileBuildPlaceholderDialog, -) - -from .lib import read, imprint, get_reference_node, get_main_window - -PLACEHOLDER_SET = "PLACEHOLDERS_SET" - - -class MayaTemplateBuilder(AbstractTemplateBuilder): - """Concrete implementation of AbstractTemplateBuilder for maya""" - - use_legacy_creators = True - - def import_template(self, path): - """Import template into current scene. - Block if a template is already loaded. 
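# Editorial sketch: the guard-and-import pattern implemented below --
# refuse to build twice by checking for the sentinel set, then import the
# template file and return the newly created nodes. PLACEHOLDER_SET and
# TemplateAlreadyImported are the names defined in this module.
def import_template_once(path):
    if cmds.objExists(PLACEHOLDER_SET):
        raise TemplateAlreadyImported("Build template already loaded")
    cmds.sets(name=PLACEHOLDER_SET, empty=True)
    return cmds.file(
        path,
        i=True,
        returnNewNodes=True,
        preserveReferences=True,
        loadReferenceDepth="all",
    )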
- - Args: - path (str): A path to current template (usually given by - get_template_preset implementation) - - Returns: - bool: Whether the template was successfully imported or not - """ - - if cmds.objExists(PLACEHOLDER_SET): - raise TemplateAlreadyImported(( - "Build template already loaded\n" - "Clean scene if needed (File > New Scene)" - )) - - cmds.sets(name=PLACEHOLDER_SET, empty=True) - new_nodes = cmds.file( - path, - i=True, - returnNewNodes=True, - preserveReferences=True, - loadReferenceDepth="all", - ) - - # make default cameras non-renderable - default_cameras = [cam for cam in cmds.ls(cameras=True) - if cmds.camera(cam, query=True, startupCamera=True)] - for cam in default_cameras: - if not cmds.attributeQuery("renderable", node=cam, exists=True): - self.log.debug( - "Camera {} has no attribute 'renderable'".format(cam) - ) - continue - cmds.setAttr("{}.renderable".format(cam), 0) - - cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True) - - imported_sets = cmds.ls(new_nodes, set=True) - if not imported_sets: - return True - - # update imported sets information - asset_name = get_current_asset_name() - for node in imported_sets: - if not cmds.attributeQuery("id", node=node, exists=True): - continue - if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance": - continue - if not cmds.attributeQuery("asset", node=node, exists=True): - continue - - cmds.setAttr( - "{}.asset".format(node), asset_name, type="string") - - return True - - -class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): - identifier = "maya.load" - label = "Maya load" - - def _collect_scene_placeholders(self): - # Cache placeholder data to shared data - placeholder_nodes = self.builder.get_shared_populate_data( - "placeholder_nodes" - ) - if placeholder_nodes is None: - attributes = cmds.ls("*.plugin_identifier", long=True) - placeholder_nodes = {} - for attribute in attributes: - node_name = attribute.rpartition(".")[0] - placeholder_nodes[node_name] = ( - self._parse_placeholder_node_data(node_name) - ) - - self.builder.set_shared_populate_data( - "placeholder_nodes", placeholder_nodes - ) - return placeholder_nodes - - def _parse_placeholder_node_data(self, node_name): - placeholder_data = read(node_name) - parent_name = ( - cmds.getAttr(node_name + ".parent", asString=True) - or node_name.rpartition("|")[0] - or "" - ) - if parent_name: - siblings = cmds.listRelatives(parent_name, children=True) - else: - siblings = cmds.ls(assemblies=True) - node_shortname = node_name.rpartition("|")[2] - current_index = cmds.getAttr(node_name + ".index", asString=True) - if current_index < 0: - current_index = siblings.index(node_shortname) - - placeholder_data.update({ - "parent": parent_name, - "index": current_index - }) - return placeholder_data - - def _create_placeholder_name(self, placeholder_data): - placeholder_name_parts = placeholder_data["builder_type"].split("_") - - pos = 1 - # add family in any - placeholder_family = placeholder_data["family"] - if placeholder_family: - placeholder_name_parts.insert(pos, placeholder_family) - pos += 1 - - # add loader arguments if any - loader_args = placeholder_data["loader_args"] - if loader_args: - loader_args = json.loads(loader_args.replace('\'', '\"')) - values = [v for v in loader_args.values()] - for value in values: - placeholder_name_parts.insert(pos, value) - pos += 1 - - placeholder_name = "_".join(placeholder_name_parts) - - return placeholder_name.capitalize() - - def _get_loaded_repre_ids(self): - loaded_representation_ids = 
self.builder.get_shared_populate_data(
-            "loaded_representation_ids"
-        )
-        if loaded_representation_ids is None:
-            try:
-                containers = cmds.sets("AVALON_CONTAINERS", q=True)
-            except ValueError:
-                containers = []
-
-            loaded_representation_ids = {
-                cmds.getAttr(container + ".representation")
-                for container in containers
-            }
-            self.builder.set_shared_populate_data(
-                "loaded_representation_ids", loaded_representation_ids
-            )
-        return loaded_representation_ids
-
-    def create_placeholder(self, placeholder_data):
-        selection = cmds.ls(selection=True)
-        if len(selection) > 1:
-            raise ValueError("More than one item is selected")
-
-        parent = selection[0] if selection else None
-
-        placeholder_data["plugin_identifier"] = self.identifier
-
-        placeholder_name = self._create_placeholder_name(placeholder_data)
-
-        placeholder = cmds.spaceLocator(name=placeholder_name)[0]
-        if parent:
-            placeholder = cmds.parent(placeholder, selection[0])[0]
-
-        imprint(placeholder, placeholder_data)
-
-        # Add helper attributes to keep placeholder info
-        cmds.addAttr(
-            placeholder,
-            longName="parent",
-            hidden=True,
-            dataType="string"
-        )
-        cmds.addAttr(
-            placeholder,
-            longName="index",
-            hidden=True,
-            attributeType="short",
-            defaultValue=-1
-        )
-
-        cmds.setAttr(placeholder + ".parent", "", type="string")
-
-    def update_placeholder(self, placeholder_item, placeholder_data):
-        node_name = placeholder_item.scene_identifier
-        new_values = {}
-        for key, value in placeholder_data.items():
-            placeholder_value = placeholder_item.data.get(key)
-            if value != placeholder_value:
-                new_values[key] = value
-                placeholder_item.data[key] = value
-
-        for key in new_values.keys():
-            cmds.deleteAttr(node_name + "." + key)
-
-        imprint(node_name, new_values)
-
-    def collect_placeholders(self):
-        output = []
-        scene_placeholders = self._collect_scene_placeholders()
-        for node_name, placeholder_data in scene_placeholders.items():
-            if placeholder_data.get("plugin_identifier") != self.identifier:
-                continue
-
-            # TODO do data validations and maybe upgrades if they are invalid
-            output.append(
-                LoadPlaceholderItem(node_name, placeholder_data, self)
-            )
-
-        return output
-
-    def populate_placeholder(self, placeholder):
-        self.populate_load_placeholder(placeholder)
-
-    def repopulate_placeholder(self, placeholder):
-        repre_ids = self._get_loaded_repre_ids()
-        self.populate_load_placeholder(placeholder, repre_ids)
-
-    def get_placeholder_options(self, options=None):
-        return self.get_load_plugin_options(options)
-
-    def post_placeholder_process(self, placeholder, failed):
-        """Cleanup placeholder after load of its corresponding representations.
-
-        Args:
-            placeholder (PlaceholderItem): Item which was just used to load
-                representation.
-            failed (bool): Loading of representation failed.
-        """
-        # Hide placeholder and add it to the placeholder set
-        node = placeholder.scene_identifier
-
-        cmds.sets(node, addElement=PLACEHOLDER_SET)
-        cmds.hide(node)
-        cmds.setAttr(node + ".hiddenInOutliner", True)
-
-    def delete_placeholder(self, placeholder):
-        """Remove placeholder if building was successful"""
-        cmds.delete(placeholder.scene_identifier)
-
-    def load_succeed(self, placeholder, container):
-        self._parent_in_hierarchy(placeholder, container)
-
-    def _parent_in_hierarchy(self, placeholder, container):
-        """Parent loaded container to placeholder's parent.
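# Editorial sketch: how _parent_in_hierarchy below snaps a loaded node to
# the placeholder -- copy its world matrix, then move it to the recorded
# outliner index. All calls are plain maya.cmds, as used in the method.
def snap_to_placeholder(node, placeholder_node, index):
    matrix = cmds.xform(
        placeholder_node, query=True, matrix=True, worldSpace=True
    )
    cmds.reorder(node, front=True)
    cmds.reorder(node, relative=index)
    cmds.xform(node, matrix=matrix, worldSpace=True)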
- - ie : Set loaded content as placeholder's sibling - - Args: - container (str): Placeholder loaded containers - """ - - if not container: - return - - roots = cmds.sets(container, q=True) - ref_node = None - try: - ref_node = get_reference_node(roots) - except AssertionError as e: - self.log.info(e.args[0]) - - nodes_to_parent = [] - for root in roots: - if ref_node: - ref_root = cmds.referenceQuery(root, nodes=True)[0] - ref_root = ( - cmds.listRelatives(ref_root, parent=True, path=True) or - [ref_root] - ) - nodes_to_parent.extend(ref_root) - continue - if root.endswith("_RN"): - # Backwards compatibility for hardcoded reference names. - refRoot = cmds.referenceQuery(root, n=True)[0] - refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot] - nodes_to_parent.extend(refRoot) - elif root not in cmds.listSets(allSets=True): - nodes_to_parent.append(root) - - elif not cmds.sets(root, q=True): - return - - # Move loaded nodes to correct index in outliner hierarchy - placeholder_form = cmds.xform( - placeholder.scene_identifier, - q=True, - matrix=True, - worldSpace=True - ) - scene_parent = cmds.listRelatives( - placeholder.scene_identifier, parent=True, fullPath=True - ) - for node in set(nodes_to_parent): - cmds.reorder(node, front=True) - cmds.reorder(node, relative=placeholder.data["index"]) - cmds.xform(node, matrix=placeholder_form, ws=True) - if scene_parent: - cmds.parent(node, scene_parent) - else: - cmds.parent(node, world=True) - - holding_sets = cmds.listSets(object=placeholder.scene_identifier) - if not holding_sets: - return - for holding_set in holding_sets: - cmds.sets(roots, forceElement=holding_set) - - -def build_workfile_template(*args): - builder = MayaTemplateBuilder(registered_host()) - builder.build_template() - - -def update_workfile_template(*args): - builder = MayaTemplateBuilder(registered_host()) - builder.rebuild_template() - - -def create_placeholder(*args): - host = registered_host() - builder = MayaTemplateBuilder(host) - window = WorkfileBuildPlaceholderDialog(host, builder, - parent=get_main_window()) - window.show() - - -def update_placeholder(*args): - host = registered_host() - builder = MayaTemplateBuilder(host) - placeholder_items_by_id = { - placeholder_item.scene_identifier: placeholder_item - for placeholder_item in builder.get_placeholders() - } - placeholder_items = [] - for node_name in cmds.ls(selection=True, long=True): - if node_name in placeholder_items_by_id: - placeholder_items.append(placeholder_items_by_id[node_name]) - - # TODO show UI at least - if len(placeholder_items) == 0: - raise ValueError("No node selected") - - if len(placeholder_items) > 1: - raise ValueError("Too many selected nodes") - - placeholder_item = placeholder_items[0] - window = WorkfileBuildPlaceholderDialog(host, builder, - parent=get_main_window()) - window.set_update_mode(placeholder_item) - window.exec_() diff --git a/openpype/hosts/maya/lib.py b/openpype/hosts/maya/lib.py deleted file mode 100644 index 765c60381b..0000000000 --- a/openpype/hosts/maya/lib.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from openpype.settings import get_project_settings -from openpype.lib import Logger - - -def create_workspace_mel(workdir, project_name, project_settings=None): - dst_filepath = os.path.join(workdir, "workspace.mel") - if os.path.exists(dst_filepath): - return - - if not os.path.exists(workdir): - os.makedirs(workdir) - - if not project_settings: - project_settings = get_project_settings(project_name) - mel_script = 
project_settings["maya"].get("mel_workspace") - - # Skip if mel script in settings is empty - if not mel_script: - log = Logger.get_logger("create_workspace_mel") - log.debug("File 'workspace.mel' not created. Settings value is empty.") - return - - with open(dst_filepath, "w") as mel_file: - mel_file.write(mel_script) diff --git a/openpype/hosts/maya/plugins/create/convert_legacy.py b/openpype/hosts/maya/plugins/create/convert_legacy.py deleted file mode 100644 index cd8faf291b..0000000000 --- a/openpype/hosts/maya/plugins/create/convert_legacy.py +++ /dev/null @@ -1,178 +0,0 @@ -from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin -from openpype.hosts.maya.api import plugin -from openpype.hosts.maya.api.lib import read - -from openpype.client import get_asset_by_name - -from maya import cmds -from maya.app.renderSetup.model import renderSetup - - -class MayaLegacyConvertor(SubsetConvertorPlugin, - plugin.MayaCreatorBase): - """Find and convert any legacy subsets in the scene. - - This Convertor will find all legacy subsets in the scene and will - transform them to the current system. Since the old subsets doesn't - retain any information about their original creators, the only mapping - we can do is based on their families. - - Its limitation is that you can have multiple creators creating subset - of the same family and there is no way to handle it. This code should - nevertheless cover all creators that came with OpenPype. - - """ - identifier = "io.openpype.creators.maya.legacy" - - # Cases where the identifier or new family doesn't correspond to the - # original family on the legacy instances - special_family_conversions = { - "rendering": "io.openpype.creators.maya.renderlayer", - } - - def find_instances(self): - - self.cache_subsets(self.collection_shared_data) - legacy = self.collection_shared_data.get("maya_cached_legacy_subsets") - if not legacy: - return - - self.add_convertor_item("Convert legacy instances") - - def convert(self): - self.remove_convertor_item() - - # We can't use the collected shared data cache here - # we re-query it here directly to convert all found. - cache = {} - self.cache_subsets(cache) - legacy = cache.get("maya_cached_legacy_subsets") - if not legacy: - return - - # From all current new style manual creators find the mapping - # from family to identifier - family_to_id = {} - for identifier, creator in self.create_context.creators.items(): - family = getattr(creator, "family", None) - if not family: - continue - - if family in family_to_id: - # We have a clash of family -> identifier. Multiple - # new style creators use the same family - self.log.warning("Clash on family->identifier: " - "{}".format(identifier)) - family_to_id[family] = identifier - - family_to_id.update(self.special_family_conversions) - - # We also embed the current 'task' into the instance since legacy - # instances didn't store that data on the instances. The old style - # logic was thus to be live to the current task to begin with. 
- data = dict() - data["task"] = self.create_context.get_current_task_name() - for family, instance_nodes in legacy.items(): - if family not in family_to_id: - self.log.warning( - "Unable to convert legacy instance with family '{}'" - " because there is no matching new creator's family" - "".format(family) - ) - continue - - creator_id = family_to_id[family] - creator = self.create_context.creators[creator_id] - data["creator_identifier"] = creator_id - - if isinstance(creator, plugin.RenderlayerCreator): - self._convert_per_renderlayer(instance_nodes, data, creator) - else: - self._convert_regular(instance_nodes, data) - - def _convert_regular(self, instance_nodes, data): - # We only imprint the creator identifier for it to identify - # as the new style creator - for instance_node in instance_nodes: - self.imprint_instance_node(instance_node, - data=data.copy()) - - def _convert_per_renderlayer(self, instance_nodes, data, creator): - # Split the instance into an instance per layer - rs = renderSetup.instance() - layers = rs.getRenderLayers() - if not layers: - self.log.error( - "Can't convert legacy renderlayer instance because no existing" - " renderSetup layers exist in the scene." - ) - return - - creator_attribute_names = { - attr_def.key for attr_def in creator.get_instance_attr_defs() - } - - for instance_node in instance_nodes: - - # Ensure we have the new style singleton node generated - # TODO: Make function public - singleton_node = creator._get_singleton_node() - if singleton_node: - self.log.error( - "Can't convert legacy renderlayer instance '{}' because" - " new style instance '{}' already exists".format( - instance_node, - singleton_node - ) - ) - continue - - creator.create_singleton_node() - - # We are creating new nodes to replace the original instance - # Copy the attributes of the original instance to the new node - original_data = read(instance_node) - - # The family gets converted to the new family (this is due to - # "rendering" family being converted to "renderlayer" family) - original_data["family"] = creator.family - - # recreate subset name as without it would be - # `renderingMain` vs correct `renderMain` - project_name = self.create_context.get_current_project_name() - asset_doc = get_asset_by_name(project_name, - original_data["asset"]) - subset_name = creator.get_subset_name( - original_data["variant"], - data["task"], - asset_doc, - project_name) - original_data["subset"] = subset_name - - # Convert to creator attributes when relevant - creator_attributes = {} - for key in list(original_data.keys()): - # Iterate in order of the original attributes to preserve order - # in the output creator attributes - if key in creator_attribute_names: - creator_attributes[key] = original_data.pop(key) - original_data["creator_attributes"] = creator_attributes - - # For layer in maya layers - for layer in layers: - layer_instance_node = creator.find_layer_instance_node(layer) - if not layer_instance_node: - # TODO: Make function public - layer_instance_node = creator._create_layer_instance_node( - layer - ) - - # Transfer the main attributes of the original instance - layer_data = original_data.copy() - layer_data.update(data) - - self.imprint_instance_node(layer_instance_node, - data=layer_data) - - # Delete the legacy instance node - cmds.delete(instance_node) diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py deleted file mode 100644 index 115c73c0d3..0000000000 --- 
a/openpype/hosts/maya/plugins/create/create_animation.py +++ /dev/null @@ -1,89 +0,0 @@ -from openpype.hosts.maya.api import ( - lib, - plugin -) -from openpype.lib import ( - BoolDef, - TextDef -) - - -class CreateAnimation(plugin.MayaHiddenCreator): - """Animation output for character rigs - - We hide the animation creator from the UI since the creation of it is - automated upon loading a rig. There's an inventory action to recreate it - for loaded rigs if by chance someone deleted the animation instance. - """ - identifier = "io.openpype.creators.maya.animation" - name = "animationDefault" - label = "Animation" - family = "animation" - icon = "male" - - write_color_sets = False - write_face_sets = False - include_parent_hierarchy = False - include_user_defined_attributes = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=self.write_color_sets), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=self.write_face_sets), - BoolDef("writeNormals", - label="Write normals", - tooltip="Write normals with the deforming geometry", - default=True), - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=self.include_parent_hierarchy), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - default=self.include_user_defined_attributes), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1, prefix2") - ]) - - # TODO: Implement these on a Deadline plug-in instead? - """ - # Default to not send to farm. - self.data["farm"] = False - self.data["priority"] = 50 - """ - - return defs - - def apply_settings(self, project_settings): - super(CreateAnimation, self).apply_settings(project_settings) - # Hardcoding creator to be enabled due to existing settings would - # disable the creator causing the creator plugin to not be - # discoverable. 
- self.enabled = True diff --git a/openpype/hosts/maya/plugins/create/create_camera.py b/openpype/hosts/maya/plugins/create/create_camera.py deleted file mode 100644 index 0219f56330..0000000000 --- a/openpype/hosts/maya/plugins/create/create_camera.py +++ /dev/null @@ -1,36 +0,0 @@ -from openpype.hosts.maya.api import ( - lib, - plugin -) -from openpype.lib import BoolDef - - -class CreateCamera(plugin.MayaCreator): - """Single baked camera""" - - identifier = "io.openpype.creators.maya.camera" - label = "Camera" - family = "camera" - icon = "video-camera" - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("bakeToWorldSpace", - label="Bake to World-Space", - tooltip="Bake to World-Space", - default=True), - ]) - - return defs - - -class CreateCameraRig(plugin.MayaCreator): - """Complex hierarchy with camera.""" - - identifier = "io.openpype.creators.maya.camerarig" - label = "Camera Rig" - family = "camerarig" - icon = "video-camera" diff --git a/openpype/hosts/maya/plugins/create/create_layout.py b/openpype/hosts/maya/plugins/create/create_layout.py deleted file mode 100644 index 168743d4dc..0000000000 --- a/openpype/hosts/maya/plugins/create/create_layout.py +++ /dev/null @@ -1,21 +0,0 @@ -from openpype.hosts.maya.api import plugin -from openpype.lib import BoolDef - - -class CreateLayout(plugin.MayaCreator): - """A grouped package of loaded content""" - - identifier = "io.openpype.creators.maya.layout" - label = "Layout" - family = "layout" - icon = "cubes" - - def get_instance_attr_defs(self): - - return [ - BoolDef("groupLoadedAssets", - label="Group Loaded Assets", - tooltip="Enable this when you want to publish group of " - "loaded asset", - default=False) - ] diff --git a/openpype/hosts/maya/plugins/create/create_look.py b/openpype/hosts/maya/plugins/create/create_look.py deleted file mode 100644 index 11a69151fd..0000000000 --- a/openpype/hosts/maya/plugins/create/create_look.py +++ /dev/null @@ -1,47 +0,0 @@ -from openpype.hosts.maya.api import ( - plugin, - lib -) -from openpype.lib import ( - BoolDef, - TextDef -) - - -class CreateLook(plugin.MayaCreator): - """Shader connections defining shape look""" - - identifier = "io.openpype.creators.maya.look" - label = "Look" - family = "look" - icon = "paint-brush" - - make_tx = True - rs_tex = False - - def get_instance_attr_defs(self): - - return [ - # TODO: This value should actually get set on create! - TextDef("renderLayer", - # TODO: Bug: Hidden attribute's label is still shown in UI? 
- hidden=True, - default=lib.get_current_renderlayer(), - label="Renderlayer", - tooltip="Renderlayer to extract the look from"), - BoolDef("maketx", - label="MakeTX", - tooltip="Whether to generate .tx files for your textures", - default=self.make_tx), - BoolDef("rstex", - label="Convert textures to .rstex", - tooltip="Whether to generate Redshift .rstex files for " - "your textures", - default=self.rs_tex) - ] - - def get_pre_create_attr_defs(self): - # Show same attributes on create but include use selection - defs = super(CreateLook, self).get_pre_create_attr_defs() - defs.extend(self.get_instance_attr_defs()) - return defs diff --git a/openpype/hosts/maya/plugins/create/create_model.py b/openpype/hosts/maya/plugins/create/create_model.py deleted file mode 100644 index 5c3dd04af0..0000000000 --- a/openpype/hosts/maya/plugins/create/create_model.py +++ /dev/null @@ -1,43 +0,0 @@ -from openpype.hosts.maya.api import plugin -from openpype.lib import ( - BoolDef, - TextDef -) - - -class CreateModel(plugin.MayaCreator): - """Polygonal static geometry""" - - identifier = "io.openpype.creators.maya.model" - label = "Model" - family = "model" - icon = "cube" - default_variants = ["Main", "Proxy", "_MD", "_HD", "_LD"] - - write_color_sets = False - write_face_sets = False - - def get_instance_attr_defs(self): - - return [ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=self.write_color_sets), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=self.write_face_sets), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=False), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1, prefix2") - ] diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py deleted file mode 100644 index f4e8cbfc9a..0000000000 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ /dev/null @@ -1,88 +0,0 @@ -from maya import cmds - -from openpype.hosts.maya.api import ( - lib, - plugin -) -from openpype.lib import ( - BoolDef, - TextDef -) - - -class CreatePointCache(plugin.MayaCreator): - """Alembic pointcache for animated data""" - - identifier = "io.openpype.creators.maya.pointcache" - label = "Pointcache" - family = "pointcache" - icon = "gears" - write_color_sets = False - write_face_sets = False - include_user_defined_attributes = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=False), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=False), - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=False), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - 
BoolDef("refresh", - label="Refresh viewport during export", - default=False), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - default=self.include_user_defined_attributes), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - default="", - placeholder="prefix1, prefix2") - ]) - - # TODO: Implement these on a Deadline plug-in instead? - """ - # Default to not send to farm. - self.data["farm"] = False - self.data["priority"] = 50 - """ - - return defs - - def create(self, subset_name, instance_data, pre_create_data): - - instance = super(CreatePointCache, self).create( - subset_name, instance_data, pre_create_data - ) - instance_node = instance.get("instance_node") - - # For Arnold standin proxy - proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets(proxy_set, forceElement=instance_node) diff --git a/openpype/hosts/maya/plugins/create/create_redshift_proxy.py b/openpype/hosts/maya/plugins/create/create_redshift_proxy.py deleted file mode 100644 index 2490738e8f..0000000000 --- a/openpype/hosts/maya/plugins/create/create_redshift_proxy.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator of Redshift proxy subset types.""" - -from openpype.hosts.maya.api import plugin, lib -from openpype.lib import BoolDef - - -class CreateRedshiftProxy(plugin.MayaCreator): - """Create instance of Redshift Proxy subset.""" - - identifier = "io.openpype.creators.maya.redshiftproxy" - label = "Redshift Proxy" - family = "redshiftproxy" - icon = "gears" - - def get_instance_attr_defs(self): - - defs = [ - BoolDef("animation", - label="Export animation", - default=False) - ] - - defs.extend(lib.collect_animation_defs()) - return defs diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py deleted file mode 100644 index 6266689af4..0000000000 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -"""Create ``Render`` instance in Maya.""" - -from openpype.hosts.maya.api import ( - lib_rendersettings, - plugin -) -from openpype.pipeline import CreatorError -from openpype.lib import ( - BoolDef, - NumberDef, -) - - -class CreateRenderlayer(plugin.RenderlayerCreator): - """Create and manages renderlayer subset per renderLayer in workfile. - - This generates a single node in the scene which tells the Creator to if - it exists collect Maya rendersetup renderlayers as individual instances. - As such, triggering create doesn't actually create the instance node per - layer but only the node which tells the Creator it may now collect - the renderlayers. 
- - """ - - identifier = "io.openpype.creators.maya.renderlayer" - family = "renderlayer" - label = "Render" - icon = "eye" - - layer_instance_prefix = "render" - singleton_node_name = "renderingMain" - - render_settings = {} - - @classmethod - def apply_settings(cls, project_settings): - cls.render_settings = project_settings["maya"]["RenderSettings"] - - def create(self, subset_name, instance_data, pre_create_data): - # Only allow a single render instance to exist - if self._get_singleton_node(): - raise CreatorError("A Render instance already exists - only " - "one can be configured.") - - # Apply default project render settings on create - if self.render_settings.get("apply_render_settings"): - lib_rendersettings.RenderSettings().set_default_renderer_settings() - - super(CreateRenderlayer, self).create(subset_name, - instance_data, - pre_create_data) - - def get_instance_attr_defs(self): - """Create instance settings.""" - - return [ - BoolDef("review", - label="Review", - tooltip="Mark as reviewable", - default=True), - BoolDef("extendFrames", - label="Extend Frames", - tooltip="Extends the frames on top of the previous " - "publish.\nIf the previous was 1001-1050 and you " - "would now submit 1020-1070 only the new frames " - "1051-1070 would be rendered and published " - "together with the previously rendered frames.\n" - "If 'overrideExistingFrame' is enabled it *will* " - "render any existing frames.", - default=False), - BoolDef("overrideExistingFrame", - label="Override Existing Frame", - tooltip="Override existing rendered frames " - "(if they exist).", - default=True), - - # TODO: Should these move to submit_maya_deadline plugin? - # Tile rendering - BoolDef("tileRendering", - label="Enable tiled rendering", - default=False), - NumberDef("tilesX", - label="Tiles X", - default=2, - minimum=1, - decimals=0), - NumberDef("tilesY", - label="Tiles Y", - default=2, - minimum=1, - decimals=0), - - # Additional settings - BoolDef("convertToScanline", - label="Convert to Scanline", - tooltip="Convert the output images to scanline images", - default=False), - BoolDef("useReferencedAovs", - label="Use Referenced AOVs", - tooltip="Consider the AOVs from referenced scenes as well", - default=False), - - BoolDef("renderSetupIncludeLights", - label="Render Setup Include Lights", - default=self.render_settings.get("enable_all_lights", - False)) - ] diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py deleted file mode 100644 index 18d661b186..0000000000 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ /dev/null @@ -1,147 +0,0 @@ -import json - -from maya import cmds - -from openpype import AYON_SERVER_ENABLED -from openpype.hosts.maya.api import ( - lib, - plugin -) -from openpype.lib import ( - BoolDef, - NumberDef, - EnumDef -) -from openpype.pipeline import CreatedInstance -from openpype.client import get_asset_by_name - -TRANSPARENCIES = [ - "preset", - "simple", - "object sorting", - "weighted average", - "depth peeling", - "alpha cut" -] - - -class CreateReview(plugin.MayaCreator): - """Playblast reviewable""" - - identifier = "io.openpype.creators.maya.review" - label = "Review" - family = "review" - icon = "video-camera" - - useMayaTimeline = True - panZoom = False - - # Overriding "create" method to prefill values from settings. 
- def create(self, subset_name, instance_data, pre_create_data): - - members = list() - if pre_create_data.get("use_selection"): - members = cmds.ls(selection=True) - - project_name = self.project_name - if AYON_SERVER_ENABLED: - asset_name = instance_data["folderPath"] - else: - asset_name = instance_data["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - task_name = instance_data["task"] - preset = lib.get_capture_preset( - task_name, - asset_doc["data"]["tasks"][task_name]["type"], - subset_name, - self.project_settings, - self.log - ) - self.log.debug( - "Using preset: {}".format( - json.dumps(preset, indent=4, sort_keys=True) - ) - ) - - with lib.undo_chunk(): - instance_node = cmds.sets(members, name=subset_name) - instance_data["instance_node"] = instance_node - instance = CreatedInstance( - self.family, - subset_name, - instance_data, - self) - - creator_attribute_defs_by_key = { - x.key: x for x in instance.creator_attribute_defs - } - mapping = { - "review_width": preset["Resolution"]["width"], - "review_height": preset["Resolution"]["height"], - "isolate": preset["Generic"]["isolate_view"], - "imagePlane": preset["Viewport Options"]["imagePlane"], - "panZoom": preset["Generic"]["pan_zoom"] - } - for key, value in mapping.items(): - creator_attribute_defs_by_key[key].default = value - - self._add_instance_to_context(instance) - - self.imprint_instance_node(instance_node, - data=instance.data_to_store()) - return instance - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - # Option for using Maya or asset frame range in settings. - if not self.useMayaTimeline: - # Update the defaults to be the asset frame range - frame_range = lib.get_frame_range() - defs_by_key = {attr_def.key: attr_def for attr_def in defs} - for key, value in frame_range.items(): - if key not in defs_by_key: - raise RuntimeError("Attribute definition not found to be " - "updated for key: {}".format(key)) - attr_def = defs_by_key[key] - attr_def.default = value - - defs.extend([ - NumberDef("review_width", - label="Review width", - tooltip="A value of zero will use the asset resolution.", - decimals=0, - minimum=0, - default=0), - NumberDef("review_height", - label="Review height", - tooltip="A value of zero will use the asset resolution.", - decimals=0, - minimum=0, - default=0), - BoolDef("keepImages", - label="Keep Images", - tooltip="Whether to also publish along the image sequence " - "next to the video reviewable.", - default=False), - BoolDef("isolate", - label="Isolate render members of instance", - tooltip="When enabled only the members of the instance " - "will be included in the playblast review.", - default=False), - BoolDef("imagePlane", - label="Show Image Plane", - default=True), - EnumDef("transparency", - label="Transparency", - items=TRANSPARENCIES), - BoolDef("panZoom", - label="Enable camera pan/zoom", - default=True), - EnumDef("displayLights", - label="Display Lights", - items=lib.DISPLAY_LIGHTS_ENUM), - ]) - - return defs diff --git a/openpype/hosts/maya/plugins/create/create_rig.py b/openpype/hosts/maya/plugins/create/create_rig.py deleted file mode 100644 index acd5c98f89..0000000000 --- a/openpype/hosts/maya/plugins/create/create_rig.py +++ /dev/null @@ -1,32 +0,0 @@ -from maya import cmds - -from openpype.hosts.maya.api import plugin - - -class CreateRig(plugin.MayaCreator): - """Artist-friendly rig with controls to direct motion""" - - identifier = "io.openpype.creators.maya.rig" - label = "Rig" - family = "rig" - icon = "wheelchair" - - 
def create(self, subset_name, instance_data, pre_create_data): - - instance = super(CreateRig, self).create(subset_name, - instance_data, - pre_create_data) - - instance_node = instance.get("instance_node") - - self.log.info("Creating Rig instance set up ...") - # TODO๏ผšchange name (_controls_SET -> _rigs_SET) - controls = cmds.sets(name=subset_name + "_controls_SET", empty=True) - # TODO๏ผšchange name (_out_SET -> _geo_SET) - pointcache = cmds.sets(name=subset_name + "_out_SET", empty=True) - skeleton = cmds.sets( - name=subset_name + "_skeletonAnim_SET", empty=True) - skeleton_mesh = cmds.sets( - name=subset_name + "_skeletonMesh_SET", empty=True) - cmds.sets([controls, pointcache, - skeleton, skeleton_mesh], forceElement=instance_node) diff --git a/openpype/hosts/maya/plugins/create/create_workfile.py b/openpype/hosts/maya/plugins/create/create_workfile.py deleted file mode 100644 index 198f9c4a36..0000000000 --- a/openpype/hosts/maya/plugins/create/create_workfile.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- -"""Creator plugin for creating workfiles.""" -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import CreatedInstance, AutoCreator -from openpype.client import get_asset_by_name, get_asset_name_identifier -from openpype.hosts.maya.api import plugin -from maya import cmds - - -class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator): - """Workfile auto-creator.""" - identifier = "io.openpype.creators.maya.workfile" - label = "Workfile" - family = "workfile" - icon = "fa5.file" - - default_variant = "Main" - - def create(self): - - variant = self.default_variant - current_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), None) - - project_name = self.project_name - asset_name = self.create_context.get_current_asset_name() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - if current_instance is None: - current_instance_asset = None - elif AYON_SERVER_ENABLED: - current_instance_asset = current_instance["folderPath"] - else: - current_instance_asset = current_instance["asset"] - - if current_instance is None: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - data = { - "task": task_name, - "variant": variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - - data.update( - self.get_dynamic_data( - variant, task_name, asset_doc, - project_name, host_name, current_instance) - ) - self.log.info("Auto-creating workfile instance...") - current_instance = CreatedInstance( - self.family, subset_name, data, self - ) - self._add_instance_to_context(current_instance) - elif ( - current_instance_asset != asset_name - or current_instance["task"] != task_name - ): - # Update instance context if is not the same - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - asset_name = get_asset_name_identifier(asset_doc) - - if AYON_SERVER_ENABLED: - current_instance["folderPath"] = asset_name - else: - current_instance["asset"] = asset_name - current_instance["task"] = task_name - current_instance["subset"] = subset_name - - def collect_instances(self): - self.cache_subsets(self.collection_shared_data) - cached_subsets = self.collection_shared_data["maya_cached_subsets"] 
- for node in cached_subsets.get(self.identifier, []): - node_data = self.read_instance_node(node) - - created_instance = CreatedInstance.from_existing(node_data, self) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, _changes in update_list: - data = created_inst.data_to_store() - node = data.get("instance_node") - if not node: - node = self.create_node() - created_inst["instance_node"] = node - data = created_inst.data_to_store() - - self.imprint_instance_node(node, data) - - def create_node(self): - node = cmds.sets(empty=True, name="workfileMain") - cmds.setAttr(node + ".hiddenInOutliner", True) - return node diff --git a/openpype/hosts/maya/plugins/inventory/select_containers.py b/openpype/hosts/maya/plugins/inventory/select_containers.py deleted file mode 100644 index f85bf17ab0..0000000000 --- a/openpype/hosts/maya/plugins/inventory/select_containers.py +++ /dev/null @@ -1,46 +0,0 @@ -from maya import cmds - -from openpype.pipeline import InventoryAction, registered_host -from openpype.hosts.maya.api.lib import get_container_members - - -class SelectInScene(InventoryAction): - """Select nodes in the scene from selected containers in scene inventory""" - - label = "Select in scene" - icon = "search" - color = "#888888" - order = 99 - - def process(self, containers): - - all_members = [] - for container in containers: - members = get_container_members(container) - all_members.extend(members) - cmds.select(all_members, replace=True, noExpand=True) - - -class HighlightBySceneSelection(InventoryAction): - """Select containers in scene inventory from the current scene selection""" - - label = "Highlight by scene selection" - icon = "search" - color = "#888888" - order = 100 - - def process(self, containers): - - selection = set(cmds.ls(selection=True, long=True, objectsOnly=True)) - host = registered_host() - - to_select = [] - for container in host.get_containers(): - members = get_container_members(container) - if any(member in selection for member in members): - to_select.append(container["objectName"]) - - return { - "objectNames": to_select, - "options": {"clear": True} - } diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py deleted file mode 100644 index d347ef0d08..0000000000 --- a/openpype/hosts/maya/plugins/load/actions.py +++ /dev/null @@ -1,186 +0,0 @@ -"""A module containing generic loader actions that will display in the Loader. 
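# Editorial sketch: the membership test behind HighlightBySceneSelection
# above -- intersect the current scene selection with each container's
# members. `get_container_members` is the lib helper used there.
def containers_matching_selection(containers):
    selection = set(cmds.ls(selection=True, long=True, objectsOnly=True))
    return [
        container["objectName"]
        for container in containers
        if selection.intersection(get_container_members(container))
    ]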
- -""" -import qargparse -from openpype.pipeline import load -from openpype.hosts.maya.api.lib import ( - maintained_selection, - get_custom_namespace -) -import openpype.hosts.maya.api.plugin - - -class SetFrameRangeLoader(load.LoaderPlugin): - """Set frame range excluding pre- and post-handles""" - - families = ["animation", - "camera", - "proxyAbc", - "pointcache"] - representations = ["abc"] - - label = "Set frame range" - order = 11 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import maya.cmds as cmds - - version = context['version'] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - if start is None or end is None: - print("Skipping setting frame range because start or " - "end frame data is missing..") - return - - cmds.playbackOptions(minTime=start, - maxTime=end, - animationStartTime=start, - animationEndTime=end) - - -class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Set frame range including pre- and post-handles""" - - families = ["animation", - "camera", - "proxyAbc", - "pointcache"] - representations = ["abc"] - - label = "Set frame range (with handles)" - order = 12 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - import maya.cmds as cmds - - version = context['version'] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - if start is None or end is None: - print("Skipping setting frame range because start or " - "end frame data is missing..") - return - - # Include handles - start -= version_data.get("handleStart", 0) - end += version_data.get("handleEnd", 0) - - cmds.playbackOptions(minTime=start, - maxTime=end, - animationStartTime=start, - animationEndTime=end) - - -class ImportMayaLoader(openpype.hosts.maya.api.plugin.Loader): - """Import action for Maya (unmanaged) - - Warning: - The loaded content will be unmanaged and is *not* visible in the - scene inventory. It's purely intended to merge content into your scene - so you could also use it as a new base. - - """ - representations = ["ma", "mb", "obj"] - families = [ - "model", - "pointcache", - "proxyAbc", - "animation", - "mayaAscii", - "mayaScene", - "setdress", - "layout", - "camera", - "rig", - "camerarig", - "staticMesh", - "workfile" - ] - - label = "Import" - order = 10 - icon = "arrow-circle-down" - color = "#775555" - - options = [ - qargparse.Boolean( - "clean_import", - label="Clean import", - default=False, - help="Should all occurrences of cbId be purged?" 
-        )
-    ]
-
-    def load(self, context, name=None, namespace=None, data=None):
-        import maya.cmds as cmds
-
-        choice = self.display_warning()
-        if choice is False:
-            return
-
-        custom_group_name, custom_namespace, options = \
-            self.get_custom_namespace_and_group(context, data,
-                                                "import_loader")
-
-        namespace = get_custom_namespace(custom_namespace)
-
-        if not options.get("attach_to_root", True):
-            custom_group_name = namespace
-
-        path = self.filepath_from_context(context)
-        with maintained_selection():
-            nodes = cmds.file(path,
-                              i=True,
-                              preserveReferences=True,
-                              namespace=namespace,
-                              returnNewNodes=True,
-                              groupReference=options.get("attach_to_root",
-                                                         True),
-                              groupName=custom_group_name)
-
-        if data.get("clean_import", False):
-            remove_attributes = ["cbId"]
-            for node in nodes:
-                for attr in remove_attributes:
-                    if cmds.attributeQuery(attr, node=node, exists=True):
-                        full_attr = "{}.{}".format(node, attr)
-                        print("Removing {}".format(full_attr))
-                        cmds.deleteAttr(full_attr)
-
-        # We do not containerize imported content; it remains unmanaged
-        return
-
-    def display_warning(self):
-        """Show warning to ensure the user can't import models by accident
-
-        Returns:
-            bool
-
-        """
-
-        from qtpy import QtWidgets
-
-        accept = QtWidgets.QMessageBox.Ok
-        buttons = accept | QtWidgets.QMessageBox.Cancel
-
-        message = "Are you sure you want to import this"
-        state = QtWidgets.QMessageBox.warning(None,
-                                              "Are you sure?",
-                                              message,
-                                              buttons=buttons,
-                                              defaultButton=accept)
-
-        return state == accept
diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py
deleted file mode 100644
index 90cadb31b1..0000000000
--- a/openpype/hosts/maya/plugins/load/load_audio.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from maya import cmds, mel
-
-from openpype.pipeline import (
-    load,
-    get_representation_path,
-)
-from openpype.hosts.maya.api.pipeline import containerise
-from openpype.hosts.maya.api.lib import unique_namespace, get_container_members
-
-
-class AudioLoader(load.LoaderPlugin):
-    """Specific loader of audio."""
-
-    families = ["audio"]
-    label = "Load audio"
-    representations = ["wav"]
-    icon = "volume-up"
-    color = "orange"
-
-    def load(self, context, name, namespace, data):
-
-        start_frame = cmds.playbackOptions(query=True, min=True)
-        sound_node = cmds.sound(
-            file=self.filepath_from_context(context), offset=start_frame
-        )
-        cmds.timeControl(
-            mel.eval("$gPlayBackSlider=$gPlayBackSlider"),
-            edit=True,
-            sound=sound_node,
-            displaySound=True
-        )
-
-        asset = context["asset"]["name"]
-        namespace = namespace or unique_namespace(
-            asset + "_",
-            prefix="_" if asset[0].isdigit() else "",
-            suffix="_",
-        )
-
-        return containerise(
-            name=name,
-            namespace=namespace,
-            nodes=[sound_node],
-            context=context,
-            loader=self.__class__.__name__
-        )
-
-    def update(self, container, representation):
-
-        members = get_container_members(container)
-        audio_nodes = cmds.ls(members, type="audio")
-
-        assert audio_nodes is not None, "Audio node not found."
-        audio_node = audio_nodes[0]
-
-        current_sound = cmds.timeControl(
-            mel.eval("$gPlayBackSlider=$gPlayBackSlider"),
-            query=True,
-            sound=True
-        )
-        activate_sound = current_sound == audio_node
-
-        path = get_representation_path(representation)
-
-        cmds.sound(
-            audio_node,
-            edit=True,
-            file=path
-        )
-
-        # The source start + end does not automatically update itself to the
-        # length of the new audio file, even though Maya does do that when
-        # creating a new audio node. So to update we compute it manually.
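# Editorial sketch of the arithmetic used below: an audio node's length in
# workfile frames is (frameCount / sampleRate) * fps. For example, 96000
# samples at 48000 Hz is 2 seconds of audio, i.e. 50 frames at 25 fps.
def audio_source_end(frame_count, sample_rate, fps):
    return (float(frame_count) / sample_rate) * fps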
- # This would however override any source start and source end a user - # might have done on the original audio node after load. - audio_frame_count = cmds.getAttr("{}.frameCount".format(audio_node)) - audio_sample_rate = cmds.getAttr("{}.sampleRate".format(audio_node)) - duration_in_seconds = audio_frame_count / audio_sample_rate - fps = mel.eval('currentTimeUnitToFPS()') # workfile FPS - source_start = 0 - source_end = (duration_in_seconds * fps) - cmds.setAttr("{}.sourceStart".format(audio_node), source_start) - cmds.setAttr("{}.sourceEnd".format(audio_node), source_end) - - if activate_sound: - # maya by default deactivates it from timeline on file change - cmds.timeControl( - mel.eval("$gPlayBackSlider=$gPlayBackSlider"), - edit=True, - sound=audio_node, - displaySound=True - ) - - cmds.setAttr( - container["objectName"] + ".representation", - str(representation["_id"]), - type="string" - ) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/openpype/hosts/maya/plugins/load/load_image.py b/openpype/hosts/maya/plugins/load/load_image.py deleted file mode 100644 index 27c9ec7118..0000000000 --- a/openpype/hosts/maya/plugins/load/load_image.py +++ /dev/null @@ -1,338 +0,0 @@ -import os -import copy - -from openpype.lib import EnumDef -from openpype.pipeline import ( - load, - get_representation_context, - get_current_host_name, -) -from openpype.pipeline.load.utils import get_representation_path_from_context -from openpype.pipeline.colorspace import ( - get_imageio_file_rules_colorspace_from_filepath, - get_imageio_config, - get_imageio_file_rules -) -from openpype.settings import get_project_settings - -from openpype.hosts.maya.api.pipeline import containerise -from openpype.hosts.maya.api.lib import ( - unique_namespace, - namespaced -) - -from maya import cmds - - -def create_texture(): - """Create place2dTexture with file node with uv connections - - Mimics Maya "file [Texture]" creation. - """ - - place = cmds.shadingNode("place2dTexture", asUtility=True, name="place2d") - file = cmds.shadingNode("file", asTexture=True, name="file") - - connections = ["coverage", "translateFrame", "rotateFrame", "rotateUV", - "mirrorU", "mirrorV", "stagger", "wrapV", "wrapU", - "repeatUV", "offset", "noiseUV", "vertexUvThree", - "vertexUvTwo", "vertexUvOne", "vertexCameraOne"] - for attr in connections: - src = "{}.{}".format(place, attr) - dest = "{}.{}".format(file, attr) - cmds.connectAttr(src, dest) - - cmds.connectAttr(place + '.outUV', file + '.uvCoord') - cmds.connectAttr(place + '.outUvFilterSize', file + '.uvFilterSize') - - return file, place - - -def create_projection(): - """Create texture with place3dTexture and projection - - Mimics Maya "file [Projection]" creation. 
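# Editorial sketch: typical use of create_texture() above -- build the
# place2dTexture/file pair, then point the file node at a texture. The
# path here is hypothetical.
file_node, place2d = create_texture()
cmds.setAttr(
    file_node + ".fileTextureName", "/textures/diffuse.exr", type="string"
)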
- """ - - file, place = create_texture() - projection = cmds.shadingNode("projection", asTexture=True, - name="projection") - place3d = cmds.shadingNode("place3dTexture", asUtility=True, - name="place3d") - - cmds.connectAttr(place3d + '.worldInverseMatrix[0]', - projection + ".placementMatrix") - cmds.connectAttr(file + '.outColor', projection + ".image") - - return file, place, projection, place3d - - -def create_stencil(): - """Create texture with extra place2dTexture offset and stencil - - Mimics Maya "file [Stencil]" creation. - """ - - file, place = create_texture() - - place_stencil = cmds.shadingNode("place2dTexture", asUtility=True, - name="place2d_stencil") - stencil = cmds.shadingNode("stencil", asTexture=True, name="stencil") - - for src_attr, dest_attr in [ - ("outUV", "uvCoord"), - ("outUvFilterSize", "uvFilterSize") - ]: - src_plug = "{}.{}".format(place_stencil, src_attr) - cmds.connectAttr(src_plug, "{}.{}".format(place, dest_attr)) - cmds.connectAttr(src_plug, "{}.{}".format(stencil, dest_attr)) - - return file, place, stencil, place_stencil - - -class FileNodeLoader(load.LoaderPlugin): - """File node loader.""" - - families = ["image", "plate", "render"] - label = "Load file node" - representations = ["exr", "tif", "png", "jpg"] - icon = "image" - color = "orange" - order = 2 - - options = [ - EnumDef( - "mode", - items={ - "texture": "Texture", - "projection": "Projection", - "stencil": "Stencil" - }, - default="texture", - label="Texture Mode" - ) - ] - - def load(self, context, name, namespace, data): - - asset = context['asset']['name'] - namespace = namespace or unique_namespace( - asset + "_", - prefix="_" if asset[0].isdigit() else "", - suffix="_", - ) - - with namespaced(namespace, new=True) as namespace: - # Create the nodes within the namespace - nodes = { - "texture": create_texture, - "projection": create_projection, - "stencil": create_stencil - }[data.get("mode", "texture")]() - - file_node = cmds.ls(nodes, type="file")[0] - - self._apply_representation_context(context, file_node) - - # For ease of access for the user select all the nodes and select - # the file node last so that UI shows its attributes by default - cmds.select(list(nodes) + [file_node], replace=True) - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__ - ) - - def update(self, container, representation): - - members = cmds.sets(container['objectName'], query=True) - file_node = cmds.ls(members, type="file")[0] - - context = get_representation_context(representation) - self._apply_representation_context(context, file_node) - - # Update representation - cmds.setAttr( - container["objectName"] + ".representation", - str(representation["_id"]), - type="string" - ) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass - - def _apply_representation_context(self, context, file_node): - """Update the file node to match the context. 
- - This sets the file node's attributes for: - - file path - - udim tiling mode (if it is an udim tile) - - use frame extension (if it is a sequence) - - colorspace - - """ - - repre_context = context["representation"]["context"] - has_frames = repre_context.get("frame") is not None - has_udim = repre_context.get("udim") is not None - - # Set UV tiling mode if UDIM tiles - if has_udim: - cmds.setAttr(file_node + ".uvTilingMode", 3) # UDIM-tiles - else: - cmds.setAttr(file_node + ".uvTilingMode", 0) # off - - # Enable sequence if publish has `startFrame` and `endFrame` and - # `startFrame != endFrame` - if has_frames and self._is_sequence(context): - # When enabling useFrameExtension maya automatically - # connects an expression to .frameExtension to set - # the current frame. However, this expression is generated - # with some delay and thus it'll show a warning if frame 0 - # doesn't exist because we're explicitly setting the - # token. - cmds.setAttr(file_node + ".useFrameExtension", True) - else: - cmds.setAttr(file_node + ".useFrameExtension", False) - - # Set the file node path attribute - path = self._format_path(context) - cmds.setAttr(file_node + ".fileTextureName", path, type="string") - - # Set colorspace - colorspace = self._get_colorspace(context) - if colorspace: - cmds.setAttr(file_node + ".colorSpace", colorspace, type="string") - else: - self.log.debug("Unknown colorspace - setting colorspace skipped.") - - def _is_sequence(self, context): - """Check whether frameStart and frameEnd are not the same.""" - version = context.get("version", {}) - representation = context.get("representation", {}) - - for doc in [representation, version]: - # Frame range can be set on version or representation. - # When set on representation it overrides version data. - data = doc.get("data", {}) - start = data.get("frameStartHandle", data.get("frameStart", None)) - end = data.get("frameEndHandle", data.get("frameEnd", None)) - - if start is None or end is None: - continue - - if start != end: - return True - else: - return False - - return False - - def _get_colorspace(self, context): - """Return colorspace of the file to load. - - Retrieves the explicit colorspace from the publish. If no colorspace - data is stored with published content then project imageio settings - are used to make an assumption of the colorspace based on the file - rules. If no file rules match then None is returned. - - Returns: - str or None: The colorspace of the file or None if not detected. 
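# Editorial sketch of _is_sequence above as a standalone check:
# representation data takes precedence over version data, and a publish
# counts as a sequence only when its start and end frames differ.
def is_sequence(version_data, representation_data):
    for data in (representation_data, version_data):
        start = data.get("frameStartHandle", data.get("frameStart"))
        end = data.get("frameEndHandle", data.get("frameEnd"))
        if start is None or end is None:
            continue
        return start != end
    return False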
-
-        """
-
-        # We can't apply color spaces if management is not enabled
-        if not cmds.colorManagementPrefs(query=True, cmEnabled=True):
-            return
-
-        representation = context["representation"]
-        colorspace_data = representation.get("data", {}).get("colorspaceData")
-        if colorspace_data:
-            return colorspace_data["colorspace"]
-
-        # Assume colorspace from filepath based on project settings
-        project_name = context["project"]["name"]
-        host_name = get_current_host_name()
-        project_settings = get_project_settings(project_name)
-
-        config_data = get_imageio_config(
-            project_name, host_name,
-            project_settings=project_settings
-        )
-
-        # ignore if host imageio is not enabled
-        if not config_data:
-            return
-
-        file_rules = get_imageio_file_rules(
-            project_name, host_name,
-            project_settings=project_settings
-        )
-
-        path = get_representation_path_from_context(context)
-        colorspace = get_imageio_file_rules_colorspace_from_filepath(
-            path,
-            host_name,
-            project_name,
-            config_data=config_data,
-            file_rules=file_rules,
-            project_settings=project_settings
-        )
-
-        return colorspace
-
-    def _format_path(self, context):
-        """Format the path with correct tokens for frames and udim tiles."""
-
-        context = copy.deepcopy(context)
-        representation = context["representation"]
-        template = representation.get("data", {}).get("template")
-        if not template:
-            # No template to find token locations for
-            return get_representation_path_from_context(context)
-
-        def _placeholder(key):
-            # Substitute with a long placeholder value so that potential
-            # custom formatting with padding doesn't find its way into
-            # our formatting, so that it wouldn't be padded as 0
-            return "___{}___".format(key)
-
-        # We format UDIM and Frame numbers with their specific tokens. To do so
-        # we in-place change the representation context data to format the path
-        # with our own data
-        tokens = {
-            "frame": "<frame>",
-            "udim": "<UDIM>"
-        }
-        has_tokens = False
-        repre_context = representation["context"]
-        for key, _token in tokens.items():
-            if key in repre_context:
-                repre_context[key] = _placeholder(key)
-                has_tokens = True
-
-        # Replace with our custom template that has the tokens set
-        representation["data"]["template"] = template
-        path = get_representation_path_from_context(context)
-
-        if has_tokens:
-            for key, token in tokens.items():
-                if key in repre_context:
-                    path = path.replace(_placeholder(key), token)
-
-        return path
diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py
deleted file mode 100644
index 20617c77bf..0000000000
--- a/openpype/hosts/maya/plugins/load/load_look.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Look loader."""
-import json
-from collections import defaultdict
-
-from qtpy import QtWidgets
-
-from openpype.client import get_representation_by_name
-from openpype.pipeline import (
-    get_current_project_name,
-    get_representation_path,
-)
-import openpype.hosts.maya.api.plugin
-from openpype.hosts.maya.api import lib
-from openpype.widgets.message_window import ScrollMessageBox
-
-from openpype.hosts.maya.api.lib import get_reference_node
-
-
-class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
-    """Specific loader for lookdev"""
-
-    families = ["look"]
-    representations = ["ma"]
-
-    label = "Reference look"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    def process_reference(self, context, name, namespace, options):
-        from maya import cmds
-
-        with lib.maintained_selection():
-            file_url = self.prepare_root_value(
-
file_url=self.filepath_from_context(context), - project_name=context["project"]["name"] - ) - nodes = cmds.file(file_url, - namespace=namespace, - reference=True, - returnNewNodes=True) - - self[:] = nodes - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """ - Called by Scene Inventory when look should be updated to current - version. - If any reference edits cannot be applied, eg. shader renamed and - material not present, reference is unloaded and cleaned. - All failed edits are highlighted to the user via message box. - - Args: - container: object that has look to be updated - representation: (dict): relationship data to get proper - representation from DB and persisted - data in .json - Returns: - None - """ - from maya import cmds - - # Get reference node from container members - members = lib.get_container_members(container) - reference_node = get_reference_node(members, log=self.log) - - shader_nodes = cmds.ls(members, type='shadingEngine') - orig_nodes = set(self._get_nodes_with_shader(shader_nodes)) - - # Trigger the regular reference update on the ReferenceLoader - super(LookLoader, self).update(container, representation) - - # get new applied shaders and nodes from new version - shader_nodes = cmds.ls(members, type='shadingEngine') - nodes = set(self._get_nodes_with_shader(shader_nodes)) - - project_name = get_current_project_name() - json_representation = get_representation_by_name( - project_name, "json", representation["parent"] - ) - - # Load relationships - shader_relation = get_representation_path(json_representation) - with open(shader_relation, "r") as f: - json_data = json.load(f) - - # update of reference could result in failed edits - material is not - # present because of renaming etc. 
If so highlight failed edits to user - failed_edits = cmds.referenceQuery(reference_node, - editStrings=True, - failedEdits=True, - successfulEdits=False) - if failed_edits: - # clean references - removes failed reference edits - cmds.file(cr=reference_node) # cleanReference - - # reapply shading groups from json representation on orig nodes - lib.apply_shaders(json_data, shader_nodes, orig_nodes) - - msg = ["During reference update some edits failed.", - "All successful edits were kept intact.\n", - "Failed and removed edits:"] - msg.extend(failed_edits) - - msg = ScrollMessageBox(QtWidgets.QMessageBox.Warning, - "Some reference edit failed", - msg) - msg.exec_() - - attributes = json_data.get("attributes", []) - - # region compute lookup - nodes_by_id = defaultdict(list) - for node in nodes: - nodes_by_id[lib.get_id(node)].append(node) - lib.apply_attributes(attributes, nodes_by_id) - - def _get_nodes_with_shader(self, shader_nodes): - """ - Returns list of nodes belonging to specific shaders - Args: - shader_nodes: of Shader groups - Returns - node names - """ - from maya import cmds - - for shader in shader_nodes: - future = cmds.listHistory(shader, future=True) - connections = cmds.listConnections(future, - type='mesh') - if connections: - # Ensure unique entries only to optimize query and results - connections = list(set(connections)) - return cmds.listRelatives(connections, - shapes=True, - fullPath=True) or [] - return [] diff --git a/openpype/hosts/maya/plugins/load/load_matchmove.py b/openpype/hosts/maya/plugins/load/load_matchmove.py deleted file mode 100644 index 46d1be8300..0000000000 --- a/openpype/hosts/maya/plugins/load/load_matchmove.py +++ /dev/null @@ -1,30 +0,0 @@ -from maya import mel -from openpype.pipeline import load - -class MatchmoveLoader(load.LoaderPlugin): - """ - This will run matchmove script to create track in scene. 
- - Supported script types are .py and .mel - """ - - families = ["matchmove"] - representations = ["py", "mel"] - defaults = ["Camera", "Object", "Mocap"] - - label = "Run matchmove script" - icon = "empire" - color = "orange" - - def load(self, context, name, namespace, data): - path = self.filepath_from_context(context) - if path.lower().endswith(".py"): - exec(open(path).read()) - - elif path.lower().endswith(".mel"): - mel.eval('source "{}"'.format(path)) - - else: - self.log.error("Unsupported script type") - - return True diff --git a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py deleted file mode 100644 index 40385f34d6..0000000000 --- a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -"""Loader for Redshift proxy.""" -import os -import clique - -import maya.cmds as cmds - -from openpype.settings import get_project_settings -from openpype.pipeline import ( - load, - get_representation_path -) -from openpype.hosts.maya.api.lib import ( - namespaced, - maintained_selection, - unique_namespace -) -from openpype.hosts.maya.api.pipeline import containerise - - -class RedshiftProxyLoader(load.LoaderPlugin): - """Load Redshift proxy""" - - families = ["redshiftproxy"] - representations = ["rs"] - - label = "Import Redshift Proxy" - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name=None, namespace=None, options=None): - """Plugin entry point.""" - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "redshiftproxy" - - asset_name = context['asset']["name"] - namespace = namespace or unique_namespace( - asset_name + "_", - prefix="_" if asset_name[0].isdigit() else "", - suffix="_", - ) - - # Ensure Redshift for Maya is loaded. 
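# A minimal sketch of the unique-namespace pattern used by the loaders above,
# assuming only maya.cmds. Maya identifiers cannot start with a digit, hence
# the "_" prefix fallback. `unique_namespace_sketch` is an illustrative
# stand-in for the pipeline's `unique_namespace` helper, not its actual
# implementation.
import itertools

from maya import cmds


def unique_namespace_sketch(base, prefix="", suffix=""):
    """Return the first "<prefix><base><NN><suffix>" namespace not in scene."""
    for counter in itertools.count(1):
        candidate = "{}{}{:02d}{}".format(prefix, base, counter, suffix)
        if not cmds.namespace(exists=candidate):
            return candidate

# For an asset named "01_car" this yields names like "_01_car_01_".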
- cmds.loadPlugin("redshift4maya", quiet=True) - - path = self.filepath_from_context(context) - with maintained_selection(): - cmds.namespace(addNamespace=namespace) - with namespaced(namespace, new=False): - nodes, group_node = self.create_rs_proxy(name, path) - - self[:] = nodes - if not nodes: - return - - # colour the group node - project_name = context["project"]["name"] - settings = get_project_settings(project_name) - colors = settings['maya']['load']['colors'] - c = colors.get(family) - if c is not None: - cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1) - cmds.setAttr("{0}.outlinerColor".format(group_node), - c[0], c[1], c[2]) - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, representation): - - node = container['objectName'] - assert cmds.objExists(node), "Missing container" - - members = cmds.sets(node, query=True) or [] - rs_meshes = cmds.ls(members, type="RedshiftProxyMesh") - assert rs_meshes, "Cannot find RedshiftProxyMesh in container" - - filename = get_representation_path(representation) - - for rs_mesh in rs_meshes: - cmds.setAttr("{}.fileName".format(rs_mesh), - filename, - type="string") - - # Update metadata - cmds.setAttr("{}.representation".format(node), - str(representation["_id"]), - type="string") - - def remove(self, container): - - # Delete container and its contents - if cmds.objExists(container['objectName']): - members = cmds.sets(container['objectName'], query=True) or [] - cmds.delete([container['objectName']] + members) - - # Remove the namespace, if empty - namespace = container['namespace'] - if cmds.namespace(exists=namespace): - members = cmds.namespaceInfo(namespace, listNamespace=True) - if not members: - cmds.namespace(removeNamespace=namespace) - else: - self.log.warning("Namespace not deleted because it " - "still has members: %s", namespace) - - def switch(self, container, representation): - self.update(container, representation) - - def create_rs_proxy(self, name, path): - """Creates Redshift Proxies showing a proxy object. - - Args: - name (str): Proxy name. - path (str): Path to proxy file. - - Returns: - (str, str): Name of mesh with Redshift proxy and its parent - transform. 
- - """ - rs_mesh = cmds.createNode( - 'RedshiftProxyMesh', name="{}_RS".format(name)) - mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) - - cmds.setAttr("{}.fileName".format(rs_mesh), - path, - type="string") - - cmds.connectAttr("{}.outMesh".format(rs_mesh), - "{}.inMesh".format(mesh_shape)) - - # TODO: use the assigned shading group as shaders if existed - # assign default shader to redshift proxy - if cmds.ls("initialShadingGroup", type="shadingEngine"): - cmds.sets(mesh_shape, forceElement="initialShadingGroup") - - group_node = cmds.group(empty=True, name="{}_GRP".format(name)) - mesh_transform = cmds.listRelatives(mesh_shape, - parent=True, fullPath=True) - cmds.parent(mesh_transform, group_node) - nodes = [rs_mesh, mesh_shape, group_node] - - # determine if we need to enable animation support - files_in_folder = os.listdir(os.path.dirname(path)) - collections, remainder = clique.assemble(files_in_folder) - - if collections: - cmds.setAttr("{}.useFrameExtension".format(rs_mesh), 1) - - return nodes, group_node diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py deleted file mode 100644 index a4ab6c79c1..0000000000 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ /dev/null @@ -1,352 +0,0 @@ -import os -import difflib -import contextlib - -from maya import cmds -import qargparse - -from openpype.settings import get_project_settings -import openpype.hosts.maya.api.plugin -from openpype.hosts.maya.api.lib import ( - maintained_selection, - get_container_members, - parent_nodes, - create_rig_animation_instance -) - - -@contextlib.contextmanager -def preserve_modelpanel_cameras(container, log=None): - """Preserve camera members of container in the modelPanels. - - This is used to ensure a camera remains in the modelPanels after updating - to a new version. - - """ - - # Get the modelPanels that used the old camera - members = get_container_members(container) - old_cameras = set(cmds.ls(members, type="camera", long=True)) - if not old_cameras: - # No need to manage anything - yield - return - - panel_cameras = {} - for panel in cmds.getPanel(type="modelPanel"): - cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True), - long=True)[0] - - # Often but not always maya returns the transform from the - # modelPanel as opposed to the camera shape, so we convert it - # to explicitly be the camera shape - if cmds.nodeType(cam) != "camera": - cam = cmds.listRelatives(cam, - children=True, - fullPath=True, - type="camera")[0] - if cam in old_cameras: - panel_cameras[panel] = cam - - if not panel_cameras: - # No need to manage anything - yield - return - - try: - yield - finally: - new_members = get_container_members(container) - new_cameras = set(cmds.ls(new_members, type="camera", long=True)) - if not new_cameras: - return - - for panel, cam_name in panel_cameras.items(): - new_camera = None - if cam_name in new_cameras: - new_camera = cam_name - elif len(new_cameras) == 1: - new_camera = next(iter(new_cameras)) - else: - # Multiple cameras in the updated container but not an exact - # match detected by name. 
Find the closest match - matches = difflib.get_close_matches(word=cam_name, - possibilities=new_cameras, - n=1) - if matches: - new_camera = matches[0] # best match - if log: - log.info("Camera in '{}' restored with " - "closest match camera: {} (before: {})" - .format(panel, new_camera, cam_name)) - - if not new_camera: - # Unable to find the camera to re-apply in the modelpanel - continue - - cmds.modelPanel(panel, edit=True, camera=new_camera) - - -class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): - """Reference file""" - - families = ["model", - "pointcache", - "proxyAbc", - "animation", - "mayaAscii", - "mayaScene", - "setdress", - "layout", - "camera", - "rig", - "camerarig", - "staticMesh", - "skeletalMesh", - "mvLook", - "matchmove"] - - representations = ["ma", "abc", "fbx", "mb"] - - label = "Reference" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, options): - import maya.cmds as cmds - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "model" - - project_name = context["project"]["name"] - # True by default to keep legacy behaviours - attach_to_root = options.get("attach_to_root", True) - group_name = options["group_name"] - - # no group shall be created - if not attach_to_root: - group_name = namespace - - kwargs = {} - if "file_options" in options: - kwargs["options"] = options["file_options"] - if "file_type" in options: - kwargs["type"] = options["file_type"] - - path = self.filepath_from_context(context) - with maintained_selection(): - cmds.loadPlugin("AbcImport.mll", quiet=True) - - file_url = self.prepare_root_value(path, project_name) - nodes = cmds.file(file_url, - namespace=namespace, - sharedReferenceFile=False, - reference=True, - returnNewNodes=True, - groupReference=attach_to_root, - groupName=group_name, - **kwargs) - - shapes = cmds.ls(nodes, shapes=True, long=True) - - new_nodes = (list(set(nodes) - set(shapes))) - - # if there are cameras, try to lock their transforms - self._lock_camera_transforms(new_nodes) - - current_namespace = cmds.namespaceInfo(currentNamespace=True) - - if current_namespace != ":": - group_name = current_namespace + ":" + group_name - - self[:] = new_nodes - - if attach_to_root: - group_name = "|" + group_name - roots = cmds.listRelatives(group_name, - children=True, - fullPath=True) or [] - - if family not in {"layout", "setdress", - "mayaAscii", "mayaScene"}: - # QUESTION Why do we need to exclude these families? 
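# For context on the helper used below: `parent_nodes` temporarily re-parents
# nodes so that zeroing the group's transform pivots does not drag the
# children along. A simplified, hedged sketch of such a context manager -- it
# ignores the DAG-path changes that re-parenting causes, which the real
# helper must account for:
import contextlib

from maya import cmds


@contextlib.contextmanager
def parent_nodes_sketch(nodes, parent=None):
    """Temporarily parent `nodes` under `parent` (or the world root)."""
    original = {
        node: cmds.listRelatives(node, parent=True, fullPath=True)
        for node in nodes
    }
    for node in nodes:
        if parent:
            cmds.parent(node, parent)
        else:
            cmds.parent(node, world=True)
    try:
        yield
    finally:
        for node, old_parent in original.items():
            if old_parent:
                cmds.parent(node, old_parent[0])
            else:
                cmds.parent(node, world=True)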
- with parent_nodes(roots, parent=None): - cmds.xform(group_name, zeroTransformPivots=True) - - settings = get_project_settings(project_name) - - display_handle = settings['maya']['load'].get( - 'reference_loader', {} - ).get('display_handle', True) - cmds.setAttr( - "{}.displayHandle".format(group_name), display_handle - ) - - colors = settings['maya']['load']['colors'] - c = colors.get(family) - if c is not None: - cmds.setAttr("{}.useOutlinerColor".format(group_name), 1) - cmds.setAttr("{}.outlinerColor".format(group_name), - (float(c[0]) / 255), - (float(c[1]) / 255), - (float(c[2]) / 255)) - - cmds.setAttr( - "{}.displayHandle".format(group_name), display_handle - ) - # get bounding box - bbox = cmds.exactWorldBoundingBox(group_name) - # get pivot position on world space - pivot = cmds.xform(group_name, q=True, sp=True, ws=True) - # center of bounding box - cx = (bbox[0] + bbox[3]) / 2 - cy = (bbox[1] + bbox[4]) / 2 - cz = (bbox[2] + bbox[5]) / 2 - # add pivot position to calculate offset - cx = cx + pivot[0] - cy = cy + pivot[1] - cz = cz + pivot[2] - # set selection handle offset to center of bounding box - cmds.setAttr("{}.selectHandleX".format(group_name), cx) - cmds.setAttr("{}.selectHandleY".format(group_name), cy) - cmds.setAttr("{}.selectHandleZ".format(group_name), cz) - - if family == "rig": - self._post_process_rig(namespace, context, options) - else: - if "translate" in options: - if not attach_to_root and new_nodes: - root_nodes = cmds.ls(new_nodes, assemblies=True, - long=True) - # we assume only a single root is ever loaded - group_name = root_nodes[0] - cmds.setAttr("{}.translate".format(group_name), - *options["translate"]) - return new_nodes - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - with preserve_modelpanel_cameras(container, log=self.log): - super(ReferenceLoader, self).update(container, representation) - - # We also want to lock camera transforms on any new cameras in the - # reference or for a camera which might have changed names. 
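# One caveat for the `_lock_camera_transforms` method further below:
# `cmds.about(version=True)` returns strings like "2016", "2016 Extension 2"
# or "2022", so a bare int() cast can raise ValueError on extension releases,
# and the `>= 2016` check is looser than the "2016.5 Ext 2" requirement its
# comment states. A slightly more defensive version-parsing sketch:
import re

from maya import cmds


def maya_major_version():
    """Return the leading year of Maya's version string, e.g. 2016."""
    match = re.match(r"\d+", cmds.about(version=True))
    return int(match.group(0)) if match else 0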
- members = get_container_members(container) - self._lock_camera_transforms(members) - - def _post_process_rig(self, namespace, context, options): - - nodes = self[:] - create_rig_animation_instance( - nodes, context, namespace, options=options, log=self.log - ) - - def _lock_camera_transforms(self, nodes): - cameras = cmds.ls(nodes, type="camera") - if not cameras: - return - - # Check the Maya version, lockTransform has been introduced since - # Maya 2016.5 Ext 2 - version = int(cmds.about(version=True)) - if version >= 2016: - for camera in cameras: - cmds.camera(camera, edit=True, lockTransform=True) - else: - self.log.warning("This version of Maya does not support locking of" - " transforms of cameras.") - - -class MayaUSDReferenceLoader(ReferenceLoader): - """Reference USD file to native Maya nodes using MayaUSDImport reference""" - - label = "Reference Maya USD" - families = ["usd"] - representations = ["usd"] - extensions = {"usd", "usda", "usdc"} - - options = ReferenceLoader.options + [ - qargparse.Boolean( - "readAnimData", - label="Load anim data", - default=True, - help="Load animation data from USD file" - ), - qargparse.Boolean( - "useAsAnimationCache", - label="Use as animation cache", - default=True, - help=( - "Imports geometry prims with time-sampled point data using a " - "point-based deformer that references the imported " - "USD file.\n" - "This provides better import and playback performance when " - "importing time-sampled geometry from USD, and should " - "reduce the weight of the resulting Maya scene." - ) - ), - qargparse.Boolean( - "importInstances", - label="Import instances", - default=True, - help=( - "Import USD instanced geometries as Maya instanced shapes. " - "Will flatten the scene otherwise." - ) - ), - qargparse.String( - "primPath", - label="Prim Path", - default="/", - help=( - "Name of the USD scope where traversing will begin.\n" - "The prim at the specified primPath (including the prim) will " - "be imported.\n" - "Specifying the pseudo-root (/) means you want " - "to import everything in the file.\n" - "If the passed prim path is empty, it will first try to " - "import the defaultPrim for the rootLayer if it exists.\n" - "Otherwise, it will behave as if the pseudo-root was passed " - "in." 
- ) - ) - ] - - file_type = "USD Import" - - def process_reference(self, context, name, namespace, options): - cmds.loadPlugin("mayaUsdPlugin", quiet=True) - - def bool_option(key, default): - # Shorthand for getting optional boolean file option from options - value = int(bool(options.get(key, default))) - return "{}={}".format(key, value) - - def string_option(key, default): - # Shorthand for getting optional string file option from options - value = str(options.get(key, default)) - return "{}={}".format(key, value) - - options["file_options"] = ";".join([ - string_option("primPath", default="/"), - bool_option("importInstances", default=True), - bool_option("useAsAnimationCache", default=True), - bool_option("readAnimData", default=True), - # TODO: Expose more parameters - # "preferredMaterial=none", - # "importRelativeTextures=Automatic", - # "useCustomFrameRange=0", - # "startTime=0", - # "endTime=0", - # "importUSDZTextures=0" - ]) - options["file_type"] = self.file_type - - return super(MayaUSDReferenceLoader, self).process_reference( - context, name, namespace, options - ) diff --git a/openpype/hosts/maya/plugins/publish/collect_inputs.py b/openpype/hosts/maya/plugins/publish/collect_inputs.py deleted file mode 100644 index 30ed21da9c..0000000000 --- a/openpype/hosts/maya/plugins/publish/collect_inputs.py +++ /dev/null @@ -1,213 +0,0 @@ -import copy - -from maya import cmds -import maya.api.OpenMaya as om -import pyblish.api - -from openpype.pipeline import registered_host -from openpype.hosts.maya.api.lib import get_container_members -from openpype.hosts.maya.api.lib_rendersetup import get_shader_in_layer - - -def iter_history(nodes, - filter=om.MFn.kInvalid, - direction=om.MItDependencyGraph.kUpstream): - """Iterate unique upstream history for list of nodes. - - This acts as a replacement to maya.cmds.listHistory. - It's faster by about 2x-3x. It returns less than - maya.cmds.listHistory as it excludes the input nodes - from the output (unless an input node was history - for another input node). It also excludes duplicates. - - Args: - nodes (list): Maya node names to start search from. - filter (om.MFn.Type): Filter to only specific types. - e.g. to dag nodes using om.MFn.kDagNode - direction (om.MItDependencyGraph.Direction): Direction to traverse in. - Defaults to upstream. - - Yields: - str: Node names in upstream history. - - """ - if not nodes: - return - - sel = om.MSelectionList() - for node in nodes: - sel.add(node) - - it = om.MItDependencyGraph(sel.getDependNode(0)) # init iterator - handle = om.MObjectHandle - - traversed = set() - fn_dep = om.MFnDependencyNode() - fn_dag = om.MFnDagNode() - for i in range(sel.length()): - - start_node = sel.getDependNode(i) - start_node_hash = handle(start_node).hashCode() - if start_node_hash in traversed: - continue - - it.resetTo(start_node, - filter=filter, - direction=direction) - while not it.isDone(): - - node = it.currentNode() - node_hash = handle(node).hashCode() - - if node_hash in traversed: - it.prune() - it.next() # noqa: B305 - continue - - traversed.add(node_hash) - - if node.hasFn(om.MFn.kDagNode): - fn_dag.setObject(node) - yield fn_dag.fullPathName() - else: - fn_dep.setObject(node) - yield fn_dep.name() - - it.next() # noqa: B305 - - -def collect_input_containers(containers, nodes): - """Collect containers that contain any of the node in `nodes`. - - This will return any loaded Avalon container that contains at least one of - the nodes. As such, the Avalon container is an input for it. 
Or in short, - there are member nodes of that container. - - Returns: - list: Input avalon containers - - """ - # Assume the containers have collected their cached '_members' data - # in the collector. - return [container for container in containers - if any(node in container["_members"] for node in nodes)] - - -class CollectUpstreamInputs(pyblish.api.InstancePlugin): - """Collect input source inputs for this publish. - - This will include `inputs` data of which loaded publishes were used in the - generation of this publish. This leaves an upstream trace to what was used - as input. - - """ - - label = "Collect Inputs" - order = pyblish.api.CollectorOrder + 0.34 - hosts = ["maya"] - - def process(self, instance): - - # For large scenes the querying of "host.ls()" can be relatively slow - # e.g. up to a second. Many instances calling it easily slows this - # down. As such, we cache it so we trigger it only once. - # todo: Instead of hidden cache make "CollectContainers" plug-in - cache_key = "__cache_containers" - scene_containers = instance.context.data.get(cache_key, None) - if scene_containers is None: - # Query the scenes' containers if there's no cache yet - host = registered_host() - scene_containers = list(host.ls()) - for container in scene_containers: - # Embed the members into the container dictionary - container_members = set(get_container_members(container)) - container["_members"] = container_members - instance.context.data["__cache_containers"] = scene_containers - - # Collect the relevant input containers for this instance - if "renderlayer" in set(instance.data.get("families", [])): - # Special behavior for renderlayers - self.log.debug("Collecting renderlayer inputs....") - containers = self._collect_renderlayer_inputs(scene_containers, - instance) - - else: - # Basic behavior - nodes = instance[:] - - # Include any input connections of history with long names - # For optimization purposes only trace upstream from shape nodes - # looking for used dag nodes. This way having just a constraint - # on a transform is also ignored which tended to give irrelevant - # inputs for the majority of our use cases. We tend to care more - # about geometry inputs. - shapes = cmds.ls(nodes, - type=("mesh", "nurbsSurface", "nurbsCurve"), - noIntermediate=True) - if shapes: - history = list(iter_history(shapes, filter=om.MFn.kShape)) - history = cmds.ls(history, long=True) - - # Include the transforms in the collected history as shapes - # are excluded from containers - transforms = cmds.listRelatives(cmds.ls(history, shapes=True), - parent=True, - fullPath=True, - type="transform") - if transforms: - history.extend(transforms) - - if history: - nodes = list(set(nodes + history)) - - # Collect containers for the given set of nodes - containers = collect_input_containers(scene_containers, - nodes) - - inputs = [c["representation"] for c in containers] - instance.data["inputRepresentations"] = inputs - self.log.debug("Collected inputs: %s" % inputs) - - def _collect_renderlayer_inputs(self, scene_containers, instance): - """Collects inputs from nodes in renderlayer, incl. 
shaders + camera""" - - # Get the renderlayer - renderlayer = instance.data.get("renderlayer") - - if renderlayer == "defaultRenderLayer": - # Assume all loaded containers in the scene are inputs - # for the masterlayer - return copy.deepcopy(scene_containers) - else: - # Get the members of the layer - members = cmds.editRenderLayerMembers(renderlayer, - query=True, - fullNames=True) or [] - - # In some cases invalid objects are returned from - # `editRenderLayerMembers` so we filter them out - members = cmds.ls(members, long=True) - - # Include all children - children = cmds.listRelatives(members, - allDescendents=True, - fullPath=True) or [] - members.extend(children) - - # Include assigned shaders in renderlayer - shapes = cmds.ls(members, shapes=True, long=True) - shaders = set() - for shape in shapes: - shape_shaders = get_shader_in_layer(shape, layer=renderlayer) - if not shape_shaders: - continue - shaders.update(shape_shaders) - members.extend(shaders) - - # Explicitly include the camera being rendered in renderlayer - cameras = instance.data.get("cameras") - members.extend(cameras) - - containers = collect_input_containers(scene_containers, members) - - return containers diff --git a/openpype/hosts/maya/plugins/publish/collect_instances.py b/openpype/hosts/maya/plugins/publish/collect_instances.py deleted file mode 100644 index 5058da3d01..0000000000 --- a/openpype/hosts/maya/plugins/publish/collect_instances.py +++ /dev/null @@ -1,110 +0,0 @@ -from maya import cmds - -import pyblish.api -from openpype.hosts.maya.api.lib import get_all_children - - -class CollectNewInstances(pyblish.api.InstancePlugin): - """Gather members for instances and pre-defined attribute - - This collector takes into account assets that are associated with - an objectSet and marked with a unique identifier; - - Identifier: - id (str): "pyblish.avalon.instance" - - Limitations: - - Does not take into account nodes connected to those - within an objectSet. Extractors are assumed to export - with history preserved, but this limits what they will - be able to achieve and the amount of data available - to validators. An additional collector could also - append this input data into the instance, as we do - for `pype.rig` with collect_history. 
- - """ - - label = "Collect New Instance Data" - order = pyblish.api.CollectorOrder - hosts = ["maya"] - - valid_empty_families = {"workfile", "renderlayer"} - - def process(self, instance): - - objset = instance.data.get("instance_node") - if not objset: - self.log.debug("Instance has no `instance_node` data") - - # TODO: We might not want to do this in the future - # Merge creator attributes into instance.data just backwards compatible - # code still runs as expected - creator_attributes = instance.data.get("creator_attributes", {}) - if creator_attributes: - instance.data.update(creator_attributes) - - members = cmds.sets(objset, query=True) or [] - if members: - # Collect members - members = cmds.ls(members, long=True) or [] - - dag_members = cmds.ls(members, type="dagNode", long=True) - children = get_all_children(dag_members) - children = cmds.ls(children, noIntermediate=True, long=True) - parents = ( - self.get_all_parents(members) - if creator_attributes.get("includeParentHierarchy", True) - else [] - ) - members_hierarchy = list(set(members + children + parents)) - - instance[:] = members_hierarchy - - elif instance.data["family"] not in self.valid_empty_families: - self.log.warning("Empty instance: \"%s\" " % objset) - # Store the exact members of the object set - instance.data["setMembers"] = members - - # TODO: This might make more sense as a separate collector - # Convert frame values to integers - for attr_name in ( - "handleStart", "handleEnd", "frameStart", "frameEnd", - ): - value = instance.data.get(attr_name) - if value is not None: - instance.data[attr_name] = int(value) - - # Append start frame and end frame to label if present - if "frameStart" in instance.data and "frameEnd" in instance.data: - # Take handles from context if not set locally on the instance - for key in ["handleStart", "handleEnd"]: - if key not in instance.data: - value = instance.context.data[key] - if value is not None: - value = int(value) - instance.data[key] = value - - instance.data["frameStartHandle"] = int( - instance.data["frameStart"] - instance.data["handleStart"] - ) - instance.data["frameEndHandle"] = int( - instance.data["frameEnd"] + instance.data["handleEnd"] - ) - - def get_all_parents(self, nodes): - """Get all parents by using string operations (optimization) - - Args: - nodes (list): the nodes which are found in the objectSet - - Returns: - list - """ - - parents = [] - for node in nodes: - splitted = node.split("|") - items = ["|".join(splitted[0:i]) for i in range(2, len(splitted))] - parents.extend(items) - - return list(set(parents)) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py deleted file mode 100644 index 886c2b4caa..0000000000 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ /dev/null @@ -1,340 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect render data. - -This collector will go through render layers in maya and prepare all data -needed to create instances and their representations for submission and -publishing on farm. 
- -Requires: - instance -> families - instance -> setMembers - - context -> currentFile - context -> workspaceDir - context -> user - - session -> AVALON_ASSET - -Optional: - -Provides: - instance -> label - instance -> subset - instance -> attachTo - instance -> setMembers - instance -> publish - instance -> frameStart - instance -> frameEnd - instance -> byFrameStep - instance -> renderer - instance -> family - instance -> families - instance -> asset - instance -> time - instance -> author - instance -> source - instance -> expectedFiles - instance -> resolutionWidth - instance -> resolutionHeight - instance -> pixelAspect -""" - -import os -import platform -import json - -from maya import cmds - -import pyblish.api - -from openpype.pipeline import KnownPublishError -from openpype.lib import get_formatted_current_time -from openpype.hosts.maya.api.lib_renderproducts import ( - get as get_layer_render_products, - UnsupportedRendererException -) -from openpype.hosts.maya.api import lib - - -class CollectMayaRender(pyblish.api.InstancePlugin): - """Gather all publishable render layers from renderSetup.""" - - order = pyblish.api.CollectorOrder + 0.01 - hosts = ["maya"] - families = ["renderlayer"] - label = "Collect Render Layers" - sync_workfile_version = False - - _aov_chars = { - "dot": ".", - "dash": "-", - "underscore": "_" - } - - def process(self, instance): - - # TODO: Re-add force enable of workfile instance? - # TODO: Re-add legacy layer support with LAYER_ prefix but in Creator - # TODO: Set and collect active state of RenderLayer in Creator using - # renderlayer.isRenderable() - context = instance.context - - layer = instance.data["transientData"]["layer"] - objset = instance.data.get("instance_node") - filepath = context.data["currentFile"].replace("\\", "/") - workspace = context.data["workspaceDir"] - - # check if layer is renderable - if not layer.isRenderable(): - msg = "Render layer [ {} ] is not " "renderable".format( - layer.name() - ) - self.log.warning(msg) - - # detect if there are sets (subsets) to attach render to - sets = cmds.sets(objset, query=True) or [] - attach_to = [] - for s in sets: - if not cmds.attributeQuery("family", node=s, exists=True): - continue - - attach_to.append( - { - "version": None, # we need integrator for that - "subset": s, - "family": cmds.getAttr("{}.family".format(s)), - } - ) - self.log.debug(" -> attach render to: {}".format(s)) - - layer_name = layer.name() - - # collect all frames we are expecting to be rendered - # return all expected files for all cameras and aovs in given - # frame range - try: - layer_render_products = get_layer_render_products(layer.name()) - except UnsupportedRendererException as exc: - raise KnownPublishError(exc) - render_products = layer_render_products.layer_data.products - assert render_products, "no render products generated" - expected_files = [] - multipart = False - for product in render_products: - if product.multipart: - multipart = True - product_name = product.productName - if product.camera and layer_render_products.has_camera_token(): - product_name = "{}{}".format( - product.camera, - "_{}".format(product_name) if product_name else "") - expected_files.append( - { - product_name: layer_render_products.get_files( - product) - }) - - has_cameras = any(product.camera for product in render_products) - assert has_cameras, "No render cameras found." 
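# For orientation, the `expected_files` list assembled above holds one
# single-key dict per render product; the file names here are hypothetical:
#
#     [
#         {"beauty": ["sh010_beauty.1001.exr", "sh010_beauty.1002.exr"]},
#         {"persp_diffuse": ["sh010_persp_diffuse.1001.exr"]},
#     ]
#
# The camera-token naming rule above reduces to this small helper:
def compose_product_name(camera, product_name, has_camera_token):
    """Mirror the rule above: "<camera>_<product>" when a camera token is
    present, or just the camera when the product name is empty."""
    if camera and has_camera_token:
        return "{}{}".format(
            camera, "_{}".format(product_name) if product_name else "")
    return product_name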
- - self.log.debug("multipart: {}".format( - multipart)) - assert expected_files, "no file names were generated, this is a bug" - self.log.debug( - "expected files: {}".format( - json.dumps(expected_files, indent=4, sort_keys=True) - ) - ) - - # if we want to attach render to subset, check if we have AOV's - # in expectedFiles. If so, raise error as we cannot attach AOV - # (considered to be subset on its own) to another subset - if attach_to: - assert isinstance(expected_files, list), ( - "attaching multiple AOVs or renderable cameras to " - "subset is not supported" - ) - - # append full path - aov_dict = {} - image_directory = os.path.join( - cmds.workspace(query=True, rootDirectory=True), - cmds.workspace(fileRuleEntry="images") - ) - # replace relative paths with absolute. Render products are - # returned as list of dictionaries. - publish_meta_path = None - for aov in expected_files: - full_paths = [] - aov_first_key = list(aov.keys())[0] - for file in aov[aov_first_key]: - full_path = os.path.join(image_directory, file) - full_path = full_path.replace("\\", "/") - full_paths.append(full_path) - publish_meta_path = os.path.dirname(full_path) - aov_dict[aov_first_key] = full_paths - full_exp_files = [aov_dict] - self.log.debug(full_exp_files) - - if publish_meta_path is None: - raise KnownPublishError("Unable to detect any expected output " - "images for: {}. Make sure you have a " - "renderable camera and a valid frame " - "range set for your renderlayer." - "".format(instance.name)) - - frame_start_render = int(self.get_render_attribute( - "startFrame", layer=layer_name)) - frame_end_render = int(self.get_render_attribute( - "endFrame", layer=layer_name)) - - if (int(context.data["frameStartHandle"]) == frame_start_render - and int(context.data["frameEndHandle"]) == frame_end_render): # noqa: W503, E501 - - handle_start = context.data["handleStart"] - handle_end = context.data["handleEnd"] - frame_start = context.data["frameStart"] - frame_end = context.data["frameEnd"] - frame_start_handle = context.data["frameStartHandle"] - frame_end_handle = context.data["frameEndHandle"] - else: - handle_start = 0 - handle_end = 0 - frame_start = frame_start_render - frame_end = frame_end_render - frame_start_handle = frame_start_render - frame_end_handle = frame_end_render - - # find common path to store metadata - # so if image prefix is branching to many directories - # metadata file will be located in top-most common - # directory. 
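# The walk below climbs from the drive root and stops at the render layer's
# folder. An equivalent upward-walking sketch (independent of the
# `os.path.commonpath()` mentioned in the TODO below, which needs multiple
# paths); like the loop below, it returns the full input path when
# `layer_name` never occurs in it:
import os


def common_meta_path_sketch(publish_meta_path, layer_name):
    """Trim `publish_meta_path` so it ends at the `layer_name` directory."""
    path = os.path.normpath(publish_meta_path)
    while os.path.basename(path) != layer_name:
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without finding the layer folder.
            return os.path.normpath(publish_meta_path)
        path = parent
    return path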
- # TODO: use `os.path.commonpath()` after switch to Python 3 - publish_meta_path = os.path.normpath(publish_meta_path) - common_publish_meta_path = os.path.splitdrive( - publish_meta_path)[0] - if common_publish_meta_path: - common_publish_meta_path += os.path.sep - for part in publish_meta_path.replace( - common_publish_meta_path, "").split(os.path.sep): - common_publish_meta_path = os.path.join( - common_publish_meta_path, part) - if part == layer_name: - break - - # TODO: replace this terrible linux hotfix with real solution :) - if platform.system().lower() in ["linux", "darwin"]: - common_publish_meta_path = "/" + common_publish_meta_path - - self.log.debug( - "Publish meta path: {}".format(common_publish_meta_path)) - - # Get layer specific settings, might be overrides - colorspace_data = lib.get_color_management_preferences() - data = { - "farm": True, - "attachTo": attach_to, - - "multipartExr": multipart, - "review": instance.data.get("review") or False, - - # Frame range - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartHandle": frame_start_handle, - "frameEndHandle": frame_end_handle, - "byFrameStep": int( - self.get_render_attribute("byFrameStep", - layer=layer_name)), - - # Renderlayer - "renderer": self.get_render_attribute( - "currentRenderer", layer=layer_name).lower(), - "setMembers": layer._getLegacyNodeName(), # legacy renderlayer - "renderlayer": layer_name, - - # todo: is `time` and `author` still needed? - "time": get_formatted_current_time(), - "author": context.data["user"], - - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath, - "expectedFiles": full_exp_files, - "publishRenderMetadataFolder": common_publish_meta_path, - "renderProducts": layer_render_products, - "resolutionWidth": lib.get_attr_in_layer( - "defaultResolution.width", layer=layer_name - ), - "resolutionHeight": lib.get_attr_in_layer( - "defaultResolution.height", layer=layer_name - ), - "pixelAspect": lib.get_attr_in_layer( - "defaultResolution.pixelAspect", layer=layer_name - ), - - # todo: Following are likely not needed due to collecting from the - # instance itself if they are attribute definitions - "tileRendering": instance.data.get("tileRendering") or False, # noqa: E501 - "tilesX": instance.data.get("tilesX") or 2, - "tilesY": instance.data.get("tilesY") or 2, - "convertToScanline": instance.data.get( - "convertToScanline") or False, - "useReferencedAovs": instance.data.get( - "useReferencedAovs") or instance.data.get( - "vrayUseReferencedAovs") or False, - "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 - "renderSetupIncludeLights": instance.data.get( - "renderSetupIncludeLights" - ), - "colorspaceConfig": colorspace_data["config"], - "colorspaceDisplay": colorspace_data["display"], - "colorspaceView": colorspace_data["view"], - } - - rr_settings = ( - context.data["system_settings"]["modules"]["royalrender"] - ) - if rr_settings["enabled"]: - data["rrPathName"] = instance.data.get("rrPathName") - self.log.debug(data["rrPathName"]) - - if self.sync_workfile_version: - data["version"] = context.data["version"] - for _instance in context: - if _instance.data['family'] == "workfile": - _instance.data["version"] = context.data["version"] - - # Define nice label - label = "{0} ({1})".format(layer_name, instance.data["asset"]) - label += " [{0}-{1}]".format( - int(data["frameStartHandle"]), int(data["frameEndHandle"]) - ) - 
data["label"] = label - - # Override frames should be False if extendFrames is False. This is - # to ensure it doesn't go off doing crazy unpredictable things - extend_frames = instance.data.get("extendFrames", False) - if not extend_frames: - instance.data["overrideExistingFrame"] = False - - # Update the instace - instance.data.update(data) - - @staticmethod - def get_render_attribute(attr, layer): - """Get attribute from render options. - - Args: - attr (str): name of attribute to be looked up - layer (str): name of render layer - - Returns: - Attribute value - - """ - return lib.get_attr_in_layer( - "defaultRenderGlobals.{}".format(attr), layer=layer - ) diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py deleted file mode 100644 index 0930da8f27..0000000000 --- a/openpype/hosts/maya/plugins/publish/collect_review.py +++ /dev/null @@ -1,186 +0,0 @@ -from maya import cmds, mel - -import pyblish.api - -from openpype.client import get_subset_by_name -from openpype.pipeline import KnownPublishError -from openpype.hosts.maya.api import lib - - -class CollectReview(pyblish.api.InstancePlugin): - """Collect Review data - - """ - - order = pyblish.api.CollectorOrder + 0.3 - label = 'Collect Review Data' - families = ["review"] - - def process(self, instance): - - # Get panel. - instance.data["panel"] = cmds.playblast( - activeEditor=True - ).rsplit("|", 1)[-1] - - # get cameras - members = instance.data['setMembers'] - self.log.debug('members: {}'.format(members)) - cameras = cmds.ls(members, long=True, dag=True, cameras=True) - camera = cameras[0] if cameras else None - - context = instance.context - objectset = { - i.data.get("instance_node") for i in context - } - - # Collect display lights. - display_lights = instance.data.get("displayLights", "default") - if display_lights == "project_settings": - settings = instance.context.data["project_settings"] - settings = settings["maya"]["publish"]["ExtractPlayblast"] - settings = settings["capture_preset"]["Viewport Options"] - display_lights = settings["displayLights"] - - # Collect camera focal length. - burninDataMembers = instance.data.get("burninDataMembers", {}) - if camera is not None: - attr = camera + ".focalLength" - if lib.get_attribute_input(attr): - start = instance.data["frameStart"] - end = instance.data["frameEnd"] + 1 - time_range = range(int(start), int(end)) - focal_length = [cmds.getAttr(attr, time=t) for t in time_range] - else: - focal_length = cmds.getAttr(attr) - - burninDataMembers["focalLength"] = focal_length - - # Account for nested instances like model. - reviewable_subsets = list(set(members) & objectset) - if reviewable_subsets: - if len(reviewable_subsets) > 1: - raise KnownPublishError( - "Multiple attached subsets for review are not supported. 
" - "Attached: {}".format(", ".join(reviewable_subsets)) - ) - - reviewable_subset = reviewable_subsets[0] - self.log.debug( - "Subset attached to review: {}".format(reviewable_subset) - ) - - # Find the relevant publishing instance in the current context - reviewable_inst = next(inst for inst in context - if inst.name == reviewable_subset) - data = reviewable_inst.data - - self.log.debug( - 'Adding review family to {}'.format(reviewable_subset) - ) - if data.get('families'): - data['families'].append('review') - else: - data['families'] = ['review'] - - data["cameras"] = cameras - data['review_camera'] = camera - data['frameStartFtrack'] = instance.data["frameStartHandle"] - data['frameEndFtrack'] = instance.data["frameEndHandle"] - data['frameStartHandle'] = instance.data["frameStartHandle"] - data['frameEndHandle'] = instance.data["frameEndHandle"] - data['handleStart'] = instance.data["handleStart"] - data['handleEnd'] = instance.data["handleEnd"] - data["frameStart"] = instance.data["frameStart"] - data["frameEnd"] = instance.data["frameEnd"] - data['step'] = instance.data['step'] - # this (with other time related data) should be set on - # representations. Once plugins like Extract Review start - # using representations, this should be removed from here - # as Extract Playblast is already adding fps to representation. - data['fps'] = context.data['fps'] - data['review_width'] = instance.data['review_width'] - data['review_height'] = instance.data['review_height'] - data["isolate"] = instance.data["isolate"] - data["panZoom"] = instance.data.get("panZoom", False) - data["panel"] = instance.data["panel"] - data["displayLights"] = display_lights - data["burninDataMembers"] = burninDataMembers - - for key, value in instance.data["publish_attributes"].items(): - data["publish_attributes"][key] = value - - # The review instance must be active - cmds.setAttr(str(instance) + '.active', 1) - - instance.data['remove'] = True - - else: - project_name = instance.context.data["projectName"] - asset_doc = instance.context.data['assetEntity'] - task = instance.context.data["task"] - legacy_subset_name = task + 'Review' - subset_doc = get_subset_by_name( - project_name, - legacy_subset_name, - asset_doc["_id"], - fields=["_id"] - ) - if subset_doc: - self.log.debug("Existing subsets found, keep legacy name.") - instance.data['subset'] = legacy_subset_name - - instance.data["cameras"] = cameras - instance.data['review_camera'] = camera - instance.data['frameStartFtrack'] = \ - instance.data["frameStartHandle"] - instance.data['frameEndFtrack'] = \ - instance.data["frameEndHandle"] - instance.data["displayLights"] = display_lights - instance.data["burninDataMembers"] = burninDataMembers - # this (with other time related data) should be set on - # representations. Once plugins like Extract Review start - # using representations, this should be removed from here - # as Extract Playblast is already adding fps to representation. 
- instance.data["fps"] = instance.context.data["fps"] - - # make ftrack publishable - instance.data.setdefault("families", []).append('ftrack') - - cmds.setAttr(str(instance) + '.active', 1) - - # Collect audio - playback_slider = mel.eval('$tmpVar=$gPlayBackSlider') - audio_name = cmds.timeControl(playback_slider, - query=True, - sound=True) - display_sounds = cmds.timeControl( - playback_slider, query=True, displaySound=True - ) - - def get_audio_node_data(node): - return { - "offset": cmds.getAttr("{}.offset".format(node)), - "filename": cmds.getAttr("{}.filename".format(node)) - } - - audio_data = [] - - if audio_name: - audio_data.append(get_audio_node_data(audio_name)) - - elif display_sounds: - start_frame = int(cmds.playbackOptions(query=True, min=True)) - end_frame = int(cmds.playbackOptions(query=True, max=True)) - - for node in cmds.ls(type="audio"): - # Check if frame range and audio range intersections, - # for whether to include this audio node or not. - duration = cmds.getAttr("{}.duration".format(node)) - start_audio = cmds.getAttr("{}.offset".format(node)) - end_audio = start_audio + duration - - if start_audio <= end_frame and end_audio > start_frame: - audio_data.append(get_audio_node_data(node)) - - instance.data["audio"] = audio_data diff --git a/openpype/hosts/maya/plugins/publish/extract_fbx.py b/openpype/hosts/maya/plugins/publish/extract_fbx.py deleted file mode 100644 index 4f7eaf57bf..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_fbx.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -from maya import cmds # noqa -import maya.mel as mel # noqa -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection -from openpype.hosts.maya.api import fbx - - -class ExtractFBX(publish.Extractor): - """Extract FBX from Maya. - - This extracts reproducible FBX exports ignoring any of the - settings set on the local machine in the FBX export options window. 
- - """ - order = pyblish.api.ExtractorOrder - label = "Extract FBX" - families = ["fbx"] - - def process(self, instance): - fbx_exporter = fbx.FBXExtractor(log=self.log) - - # Define output path - staging_dir = self.staging_dir(instance) - filename = "{0}.fbx".format(instance.name) - path = os.path.join(staging_dir, filename) - - # The export requires forward slashes because we need - # to format it into a string in a mel expression - path = path.replace('\\', '/') - - self.log.debug("Extracting FBX to: {0}".format(path)) - - members = instance.data["setMembers"] - self.log.debug("Members: {0}".format(members)) - self.log.debug("Instance: {0}".format(instance[:])) - - fbx_exporter.set_options_from_instance(instance) - - # Export - with maintained_selection(): - fbx_exporter.export(members, path) - cmds.select(members, r=1, noExpand=True) - mel.eval('FBXExport -f "{}" -s'.format(path)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extract FBX successful to: {0}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_fbx_animation.py b/openpype/hosts/maya/plugins/publish/extract_fbx_animation.py deleted file mode 100644 index 756158d4f0..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_fbx_animation.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -from maya import cmds # noqa -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.maya.api import fbx -from openpype.hosts.maya.api.lib import ( - namespaced, get_namespace, strip_namespace -) - - -class ExtractFBXAnimation(publish.Extractor): - """Extract Rig in FBX format from Maya. - - This extracts the rig in fbx with the constraints - and referenced asset content included. - This also optionally extract animated rig in fbx with - geometries included. 
- - """ - order = pyblish.api.ExtractorOrder - label = "Extract Animation (FBX)" - hosts = ["maya"] - families = ["animation.fbx"] - - def process(self, instance): - # Define output path - staging_dir = self.staging_dir(instance) - filename = "{0}.fbx".format(instance.name) - path = os.path.join(staging_dir, filename) - path = path.replace("\\", "/") - - fbx_exporter = fbx.FBXExtractor(log=self.log) - out_members = instance.data.get("animated_skeleton", []) - # Export - instance.data["constraints"] = True - instance.data["skeletonDefinitions"] = True - instance.data["referencedAssetsContent"] = True - fbx_exporter.set_options_from_instance(instance) - # Export from the rig's namespace so that the exported - # FBX does not include the namespace but preserves the node - # names as existing in the rig workfile - if not out_members: - skeleton_set = [ - i for i in instance - if i.endswith("skeletonAnim_SET") - ] - self.log.debug( - "Top group of animated skeleton not found in " - "{}.\nSkipping fbx animation extraction.".format(skeleton_set)) - return - - namespace = get_namespace(out_members[0]) - relative_out_members = [ - strip_namespace(node, namespace) for node in out_members - ] - with namespaced( - ":" + namespace, - new=False, - relative_names=True - ) as namespace: - fbx_exporter.export(relative_out_members, path) - - representations = instance.data.setdefault("representations", []) - representations.append({ - 'name': 'fbx', - 'ext': 'fbx', - 'files': filename, - "stagingDir": staging_dir - }) - - self.log.debug( - "Extracted FBX animation to: {0}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_layout.py b/openpype/hosts/maya/plugins/publish/extract_layout.py deleted file mode 100644 index 75920b44a2..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_layout.py +++ /dev/null @@ -1,163 +0,0 @@ -import math -import os -import json - -from maya import cmds -from maya.api import OpenMaya as om - -from openpype.client import get_representation_by_id -from openpype.pipeline import publish - - -class ExtractLayout(publish.Extractor): - """Extract a layout.""" - - label = "Extract Layout" - hosts = ["maya"] - families = ["layout"] - project_container = "AVALON_CONTAINERS" - optional = True - - def process(self, instance): - # Define extract output file path - stagingdir = self.staging_dir(instance) - - # Perform extraction - self.log.debug("Performing extraction..") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - json_data = [] - # TODO representation queries can be refactored to be faster - project_name = instance.context.data["projectName"] - - for asset in cmds.sets(str(instance), query=True): - # Find the container - project_container = self.project_container - container_list = cmds.ls(project_container) - if len(container_list) == 0: - self.log.warning("Project container is not found!") - self.log.warning("The asset(s) may not be properly loaded after published") # noqa - continue - - grp_loaded_ass = instance.data.get("groupLoadedAssets", False) - if grp_loaded_ass: - asset_list = cmds.listRelatives(asset, children=True) - for asset in asset_list: - grp_name = asset.split(':')[0] - else: - grp_name = asset.split(':')[0] - containers = cmds.ls("{}*_CON".format(grp_name)) - if len(containers) == 0: - self.log.warning("{} isn't from the loader".format(asset)) - self.log.warning("It may not be properly loaded after published") # noqa - continue - container = containers[0] - - representation_id = cmds.getAttr( - 
"{}.representation".format(container)) - - representation = get_representation_by_id( - project_name, - representation_id, - fields=["parent", "context.family"] - ) - - self.log.debug(representation) - - version_id = representation.get("parent") - family = representation.get("context").get("family") - - json_element = { - "family": family, - "instance_name": cmds.getAttr( - "{}.namespace".format(container)), - "representation": str(representation_id), - "version": str(version_id) - } - - loc = cmds.xform(asset, query=True, translation=True) - rot = cmds.xform(asset, query=True, rotation=True, euler=True) - scl = cmds.xform(asset, query=True, relative=True, scale=True) - - json_element["transform"] = { - "translation": { - "x": loc[0], - "y": loc[1], - "z": loc[2] - }, - "rotation": { - "x": math.radians(rot[0]), - "y": math.radians(rot[1]), - "z": math.radians(rot[2]) - }, - "scale": { - "x": scl[0], - "y": scl[1], - "z": scl[2] - } - } - - row_length = 4 - t_matrix_list = cmds.xform(asset, query=True, matrix=True) - - transform_mm = om.MMatrix(t_matrix_list) - transform = om.MTransformationMatrix(transform_mm) - - t = transform.translation(om.MSpace.kWorld) - t = om.MVector(t.x, t.z, -t.y) - transform.setTranslation(t, om.MSpace.kWorld) - transform.rotateBy( - om.MEulerRotation(math.radians(-90), 0, 0), om.MSpace.kWorld) - transform.scaleBy([1.0, 1.0, -1.0], om.MSpace.kObject) - - t_matrix_list = list(transform.asMatrix()) - - t_matrix = [] - for i in range(0, len(t_matrix_list), row_length): - t_matrix.append(t_matrix_list[i:i + row_length]) - - json_element["transform_matrix"] = [ - list(row) - for row in t_matrix - ] - - basis_list = [ - 1, 0, 0, 0, - 0, 1, 0, 0, - 0, 0, -1, 0, - 0, 0, 0, 1 - ] - - basis_mm = om.MMatrix(basis_list) - basis = om.MTransformationMatrix(basis_mm) - - b_matrix_list = list(basis.asMatrix()) - b_matrix = [] - - for i in range(0, len(b_matrix_list), row_length): - b_matrix.append(b_matrix_list[i:i + row_length]) - - json_element["basis"] = [] - for row in b_matrix: - json_element["basis"].append(list(row)) - - json_data.append(json_element) - - json_filename = "{}.json".format(instance.name) - json_path = os.path.join(stagingdir, json_filename) - - with open(json_path, "w+") as file: - json.dump(json_data, fp=file, indent=2) - - json_representation = { - 'name': 'json', - 'ext': 'json', - 'files': json_filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(json_representation) - - self.log.debug("Extracted instance '%s' to: %s", - instance.name, json_representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py deleted file mode 100644 index 635c2c425c..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ /dev/null @@ -1,896 +0,0 @@ -# -*- coding: utf-8 -*- -"""Maya look extractor.""" -import sys -from abc import ABCMeta, abstractmethod -from collections import OrderedDict -import contextlib -import json -import logging -import os -import tempfile -import six -import attr - -import pyblish.api - -from maya import cmds # noqa - -from openpype.lib import ( - find_executable, - source_hash, - run_subprocess, - get_oiio_tool_args, - ToolNotFoundError, -) - -from openpype.pipeline import legacy_io, publish, KnownPublishError -from openpype.hosts.maya.api import lib -from openpype import AYON_SERVER_ENABLED - -# Modes for transfer -COPY = 1 -HARDLINK = 2 - - -@attr.s -class TextureResult(object): - """The resulting texture of a processed file for 
a resource""" - # Path to the file - path = attr.ib() - # Colorspace of the resulting texture. This might not be the input - # colorspace of the texture if a TextureProcessor has processed the file. - colorspace = attr.ib() - # Hash generated for the texture using openpype.lib.source_hash - file_hash = attr.ib() - # The transfer mode, e.g. COPY or HARDLINK - transfer_mode = attr.ib() - - -def find_paths_by_hash(texture_hash): - """Find the texture hash key in the dictionary. - - All paths that originate from it. - - Args: - texture_hash (str): Hash of the texture. - - Return: - str: path to texture if found. - - """ - if AYON_SERVER_ENABLED: - raise KnownPublishError( - "This is a bug. \"find_paths_by_hash\" is not compatible with " - "AYON." - ) - - key = "data.sourceHashes.{0}".format(texture_hash) - return legacy_io.distinct(key, {"type": "version"}) - - -@contextlib.contextmanager -def no_workspace_dir(): - """Force maya to a fake temporary workspace directory. - - Note: This is not maya.cmds.workspace 'rootDirectory' but the 'directory' - - This helps to avoid Maya automatically remapping image paths to files - relative to the currently set directory. - - """ - - # Store current workspace - original = cmds.workspace(query=True, directory=True) - - # Set a fake workspace - fake_workspace_dir = tempfile.mkdtemp() - cmds.workspace(directory=fake_workspace_dir) - - try: - yield - finally: - try: - cmds.workspace(directory=original) - except RuntimeError: - # If the original workspace directory didn't exist either - # ignore the fact that it fails to reset it to the old path - pass - - # Remove the temporary directory - os.rmdir(fake_workspace_dir) - - -@six.add_metaclass(ABCMeta) -class TextureProcessor: - - extension = None - - def __init__(self, log=None): - if log is None: - log = logging.getLogger(self.__class__.__name__) - self.log = log - - def apply_settings(self, system_settings, project_settings): - """Apply OpenPype system/project settings to the TextureProcessor - - Args: - system_settings (dict): OpenPype system settings - project_settings (dict): OpenPype project settings - - Returns: - None - - """ - pass - - @abstractmethod - def process(self, - source, - colorspace, - color_management, - staging_dir): - """Process the `source` texture. - - Must be implemented on inherited class. - - This must always return a TextureResult even when it does not generate - a texture. If it doesn't generate a texture then it should return a - TextureResult using the input path and colorspace. - - Args: - source (str): Path to source file. - colorspace (str): Colorspace of the source file. - color_management (dict): Maya Color management data from - `lib.get_color_management_preferences` - staging_dir (str): Output directory to write to. - - Returns: - TextureResult: The resulting texture information. 
- - """ - pass - - def __repr__(self): - # Log instance as class name - return self.__class__.__name__ - - -class MakeRSTexBin(TextureProcessor): - """Make `.rstexbin` using `redshiftTextureProcessor`""" - - extension = ".rstexbin" - - def process(self, - source, - colorspace, - color_management, - staging_dir): - - texture_processor_path = self.get_redshift_tool( - "redshiftTextureProcessor" - ) - if not texture_processor_path: - raise KnownPublishError("Must have Redshift available.") - - subprocess_args = [ - texture_processor_path, - source - ] - - # if color management is enabled we pass color space information - if color_management["enabled"]: - config_path = color_management["config"] - if not os.path.exists(config_path): - raise RuntimeError("OCIO config not found at: " - "{}".format(config_path)) - - if not os.getenv("OCIO"): - self.log.debug( - "OCIO environment variable not set." - "Setting it with OCIO config from Maya." - ) - os.environ["OCIO"] = config_path - - self.log.debug("converting colorspace {0} to redshift render " - "colorspace".format(colorspace)) - subprocess_args.extend(["-cs", colorspace]) - - hash_args = ["rstex"] - texture_hash = source_hash(source, *hash_args) - - # Redshift stores the output texture next to the input but with - # the extension replaced to `.rstexbin` - basename, ext = os.path.splitext(source) - destination = "{}{}".format(basename, self.extension) - - self.log.debug(" ".join(subprocess_args)) - try: - run_subprocess(subprocess_args, logger=self.log) - except Exception: - self.log.error("Texture .rstexbin conversion failed", - exc_info=True) - six.reraise(*sys.exc_info()) - - return TextureResult( - path=destination, - file_hash=texture_hash, - colorspace=colorspace, - transfer_mode=COPY - ) - - @staticmethod - def get_redshift_tool(tool_name): - """Path to redshift texture processor. - - On Windows it adds .exe extension if missing from tool argument. - - Args: - tool_name (string): Tool name. - - Returns: - str: Full path to redshift texture processor executable. - """ - if "REDSHIFT_COREDATAPATH" not in os.environ: - raise RuntimeError("Must have Redshift available.") - - redshift_tool_path = os.path.join( - os.environ["REDSHIFT_COREDATAPATH"], - "bin", - tool_name - ) - - return find_executable(redshift_tool_path) - - -class MakeTX(TextureProcessor): - """Make `.tx` using `maketx` with some default settings. - - Some hardcoded arguments passed to `maketx` are based on the defaults used - in Arnold's txManager tool. - - """ - - extension = ".tx" - - def __init__(self, log=None): - super(MakeTX, self).__init__(log=log) - self.extra_args = [] - - def apply_settings(self, system_settings, project_settings): - # Allow extra maketx arguments from project settings - args_settings = ( - project_settings["maya"]["publish"] - .get("ExtractLook", {}).get("maketx_arguments", []) - ) - extra_args = [] - for arg_data in args_settings: - argument = arg_data["argument"] - parameters = arg_data["parameters"] - if not argument: - self.log.debug("Ignoring empty parameter from " - "`maketx_arguments` setting..") - continue - - extra_args.append(argument) - extra_args.extend(parameters) - - self.extra_args = extra_args - - def process(self, - source, - colorspace, - color_management, - staging_dir): - """Process the texture. - - This function requires the `maketx` executable to be available in an - OpenImageIO toolset detectable by OpenPype. - - Args: - source (str): Path to source file. - colorspace (str): Colorspace of the source file. 
- color_management (dict): Maya Color management data from - `lib.get_color_management_preferences` - staging_dir (str): Output directory to write to. - - Returns: - TextureResult: The resulting texture information. - - """ - - try: - maketx_args = get_oiio_tool_args("maketx") - except ToolNotFoundError: - raise KnownPublishError( - "OpenImageIO is not available on the machine") - - # Define .tx filepath in staging if source file is not .tx - fname, ext = os.path.splitext(os.path.basename(source)) - if ext == ".tx": - # Do nothing if the source file is already a .tx file. - return TextureResult( - path=source, - file_hash=source_hash(source), - colorspace=colorspace, - transfer_mode=COPY - ) - - # Hardcoded default arguments for maketx conversion based on Arnold's - # txManager in Maya - args = [ - # unpremultiply before conversion (recommended when alpha present) - "--unpremult", - # use oiio-optimized settings for tile-size, planarconfig, metadata - "--oiio", - "--filter", "lanczos3", - ] - if color_management["enabled"]: - config_path = color_management["config"] - if not os.path.exists(config_path): - raise RuntimeError("OCIO config not found at: " - "{}".format(config_path)) - - render_colorspace = color_management["rendering_space"] - - self.log.debug("tx: converting colorspace {0} " - "-> {1}".format(colorspace, - render_colorspace)) - args.extend(["--colorconvert", colorspace, render_colorspace]) - args.extend(["--colorconfig", config_path]) - - else: - # Maya Color management is disabled. We cannot rely on an OCIO - self.log.debug("tx: Maya color management is disabled. No color " - "conversion will be applied to .tx conversion for: " - "{}".format(source)) - # Assume linear - render_colorspace = "linear" - - # Note: The texture hash is only reliable if we include any potential - # conversion arguments provide to e.g. `maketx` - hash_args = ["maketx"] + args + self.extra_args - texture_hash = source_hash(source, *hash_args) - - # Ensure folder exists - resources_dir = os.path.join(staging_dir, "resources") - if not os.path.exists(resources_dir): - os.makedirs(resources_dir) - - self.log.debug("Generating .tx file for %s .." % source) - - subprocess_args = maketx_args + [ - "-v", # verbose - "-u", # update mode - # --checknan doesn't influence the output file but aborts the - # conversion if it finds any. So we can avoid it for the file hash - "--checknan", - source - ] - - subprocess_args.extend(args) - if self.extra_args: - subprocess_args.extend(self.extra_args) - - # Add source hash attribute after other arguments for log readability - # Note: argument is excluded from the hash since it is the hash itself - subprocess_args.extend([ - "--sattrib", - "sourceHash", - texture_hash - ]) - - destination = os.path.join(resources_dir, fname + ".tx") - subprocess_args.extend(["-o", destination]) - - # We want to make sure we are explicit about what OCIO config gets - # used. So when we supply no --colorconfig flag that no fallback to - # an OCIO env var occurs. 
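The explicit-OCIO handling that follows is a pattern worth calling out on its own: copy the environment and drop `OCIO` so the child process cannot silently fall back to an ambient config. A minimal standalone sketch of the same idea (the `maketx` arguments here are illustrative, not this plugin's exact command line):

```python
import os
import subprocess

# Copy the environment and remove OCIO so the child process cannot
# fall back to an ambient OCIO config; color handling must come
# entirely from the flags we pass explicitly.
env = os.environ.copy()
env.pop("OCIO", None)
subprocess.check_call(
    ["maketx", "-v", "input.exr", "-o", "output.tx"],
    env=env,
)
```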
- env = os.environ.copy() - env.pop("OCIO", None) - - self.log.debug(" ".join(subprocess_args)) - try: - run_subprocess(subprocess_args, env=env) - except Exception: - self.log.error("Texture maketx conversion failed", - exc_info=True) - raise - - return TextureResult( - path=destination, - file_hash=texture_hash, - colorspace=render_colorspace, - transfer_mode=COPY - ) - - @staticmethod - def _has_arnold(): - """Return whether the arnold package is available and importable.""" - try: - import arnold # noqa: F401 - return True - except (ImportError, ModuleNotFoundError): - return False - - -class ExtractLook(publish.Extractor): - """Extract Look (Maya Scene + JSON) - - Only extracts the sets (shadingEngines and alike) alongside a .json file - that stores it relationships for the sets and "attribute" data for the - instance members. - - """ - - label = "Extract Look (Maya Scene + JSON)" - hosts = ["maya"] - families = ["look", "mvLook"] - order = pyblish.api.ExtractorOrder + 0.2 - scene_type = "ma" - look_data_type = "json" - - def get_maya_scene_type(self, instance): - """Get Maya scene type from settings. - - Args: - instance (pyblish.api.Instance): Instance with collected - project settings. - - """ - ext_mapping = ( - instance.context.data["project_settings"]["maya"]["ext_mapping"] - ) - if ext_mapping: - self.log.debug("Looking in settings for scene type ...") - # use extension mapping for first family found - for family in self.families: - try: - self.scene_type = ext_mapping[family] - self.log.debug( - "Using {} as scene type".format(self.scene_type)) - break - except KeyError: - # no preset found - pass - - return "mayaAscii" if self.scene_type == "ma" else "mayaBinary" - - def process(self, instance): - """Plugin entry point. - - Args: - instance: Instance to process. 
- - """ - _scene_type = self.get_maya_scene_type(instance) - - # Define extract output file path - dir_path = self.staging_dir(instance) - maya_fname = "{0}.{1}".format(instance.name, self.scene_type) - json_fname = "{0}.{1}".format(instance.name, self.look_data_type) - maya_path = os.path.join(dir_path, maya_fname) - json_path = os.path.join(dir_path, json_fname) - - # Remove all members of the sets so they are not included in the - # exported file by accident - self.log.debug("Processing sets..") - lookdata = instance.data["lookData"] - relationships = lookdata["relationships"] - sets = list(relationships.keys()) - if not sets: - self.log.debug("No sets found for the look") - return - - # Specify texture processing executables to activate - # TODO: Load these more dynamically once we support more processors - processors = [] - context = instance.context - for key, Processor in { - # Instance data key to texture processor mapping - "maketx": MakeTX, - "rstex": MakeRSTexBin - }.items(): - if instance.data.get(key, False): - processor = Processor(log=self.log) - processor.apply_settings(context.data["system_settings"], - context.data["project_settings"]) - processors.append(processor) - - if processors: - self.log.debug("Collected texture processors: " - "{}".format(processors)) - - self.log.debug("Processing resources..") - results = self.process_resources(instance, - staging_dir=dir_path, - processors=processors) - transfers = results["fileTransfers"] - hardlinks = results["fileHardlinks"] - hashes = results["fileHashes"] - remap = results["attrRemap"] - - # Extract in correct render layer - self.log.debug("Extracting look maya scene file: {}".format(maya_path)) - layer = instance.data.get("renderlayer", "defaultRenderLayer") - with lib.renderlayer(layer): - # TODO: Ensure membership edits don't become renderlayer overrides - with lib.empty_sets(sets, force=True): - # To avoid Maya trying to automatically remap the file - # textures relative to the `workspace -directory` we force - # it to a fake temporary workspace. This fixes textures - # getting incorrectly remapped. 
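The extraction below stacks several context managers (`lib.renderlayer`, `lib.empty_sets`, `no_workspace_dir`, `lib.attribute_values`, `lib.maintained_selection`). For reference, the same nesting can be flattened with `contextlib.ExitStack`; a sketch using this plugin's manager names, not the actual implementation:

```python
from contextlib import ExitStack

# Equivalent composition of the nested with-blocks below; each manager
# is entered in order and exited in reverse order on leaving the block.
with ExitStack() as stack:
    stack.enter_context(lib.renderlayer(layer))
    stack.enter_context(lib.empty_sets(sets, force=True))
    stack.enter_context(no_workspace_dir())
    stack.enter_context(lib.attribute_values(remap))
    stack.enter_context(lib.maintained_selection())
    cmds.select(sets, noExpand=True)
    # ... cmds.file(...) export as in the code below ...
```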
- with no_workspace_dir(): - with lib.attribute_values(remap): - with lib.maintained_selection(): - cmds.select(sets, noExpand=True) - cmds.file( - maya_path, - force=True, - typ=_scene_type, - exportSelected=True, - preserveReferences=False, - channels=True, - constraints=True, - expressions=True, - constructionHistory=True, - ) - - # Write the JSON data - data = { - "attributes": lookdata["attributes"], - "relationships": relationships - } - - self.log.debug("Extracting json file: {}".format(json_path)) - with open(json_path, "w") as f: - json.dump(data, f) - - if "files" not in instance.data: - instance.data["files"] = [] - if "hardlinks" not in instance.data: - instance.data["hardlinks"] = [] - if "transfers" not in instance.data: - instance.data["transfers"] = [] - - instance.data["files"].append(maya_fname) - instance.data["files"].append(json_fname) - - if instance.data.get("representations") is None: - instance.data["representations"] = [] - - instance.data["representations"].append( - { - "name": self.scene_type, - "ext": self.scene_type, - "files": os.path.basename(maya_fname), - "stagingDir": os.path.dirname(maya_fname), - } - ) - instance.data["representations"].append( - { - "name": self.look_data_type, - "ext": self.look_data_type, - "files": os.path.basename(json_fname), - "stagingDir": os.path.dirname(json_fname), - } - ) - - # Set up the resources transfers/links for the integrator - instance.data["transfers"].extend(transfers) - instance.data["hardlinks"].extend(hardlinks) - - # Source hash for the textures - instance.data["sourceHashes"] = hashes - - self.log.debug("Extracted instance '%s' to: %s" % (instance.name, - maya_path)) - - def _set_resource_result_colorspace(self, resource, colorspace): - """Update resource resulting colorspace after texture processing""" - if "result_color_space" in resource: - if resource["result_color_space"] == colorspace: - return - - self.log.warning( - "Resource already has a resulting colorspace but is now " - "being overridden to a new one: {} -> {}".format( - resource["result_color_space"], colorspace - ) - ) - resource["result_color_space"] = colorspace - - def process_resources(self, instance, staging_dir, processors): - """Process all resources in the instance. - - It is assumed that all resources are nodes using file textures. - - Extract the textures to transfer, possibly convert with maketx and - remap the node paths to the destination path. Note that a source - might be included more than once amongst the resources as they could - be the input file to multiple nodes. - - """ - - resources = instance.data["resources"] - color_management = lib.get_color_management_preferences() - - # TODO: Temporary disable all hardlinking, due to the feature not being - # used or properly working. - self.log.info( - "Forcing copy instead of hardlink." - ) - force_copy = True - - if not force_copy and platform.system().lower() == "windows": - # Temporary fix to NOT create hardlinks on windows machines - self.log.warning( - "Forcing copy instead of hardlink due to issues on Windows..." 
- ) - force_copy = True - - destinations_cache = {} - - def get_resource_destination_cached(path): - """Get resource destination with cached result per filepath""" - if path not in destinations_cache: - destination = self.get_resource_destination( - path, instance.data["resourcesDir"], processors) - destinations_cache[path] = destination - return destinations_cache[path] - - # Process all resource's individual files - processed_files = {} - transfers = [] - hardlinks = [] - hashes = {} - remap = OrderedDict() - for resource in resources: - colorspace = resource["color_space"] - - for filepath in resource["files"]: - filepath = os.path.normpath(filepath) - - if filepath in processed_files: - # The file was already processed, likely due to usage by - # another resource in the scene. We confirm here it - # didn't do color spaces different than the current - # resource. - processed_file = processed_files[filepath] - self.log.debug( - "File was already processed. Likely used by another " - "resource too: {}".format(filepath) - ) - - if colorspace != processed_file["color_space"]: - self.log.warning( - "File '{}' was already processed using colorspace " - "'{}' instead of the current resource's " - "colorspace '{}'. The already processed texture " - "result's colorspace '{}' will be used." - "".format(filepath, - colorspace, - processed_file["color_space"], - processed_file["result_color_space"])) - - self._set_resource_result_colorspace( - resource, - colorspace=processed_file["result_color_space"] - ) - continue - - texture_result = self._process_texture( - filepath, - processors=processors, - staging_dir=staging_dir, - force_copy=force_copy, - color_management=color_management, - colorspace=colorspace - ) - - # Set the resulting color space on the resource - self._set_resource_result_colorspace( - resource, colorspace=texture_result.colorspace - ) - - processed_files[filepath] = { - "color_space": colorspace, - "result_color_space": texture_result.colorspace, - } - - source = texture_result.path - destination = get_resource_destination_cached(source) - if force_copy or texture_result.transfer_mode == COPY: - transfers.append((source, destination)) - self.log.debug('file will be copied {} -> {}'.format( - source, destination)) - elif texture_result.transfer_mode == HARDLINK: - hardlinks.append((source, destination)) - self.log.debug('file will be hardlinked {} -> {}'.format( - source, destination)) - - # Store the hashes from hash to destination to include in the - # database - hashes[texture_result.file_hash] = destination - - # Set up remapping attributes for the node during the publish - # The order of these can be important if one attribute directly - # affects another, e.g. we set colorspace after filepath because - # maya sometimes tries to guess the colorspace when changing - # filepaths (which is avoidable, but we don't want to have those - # attributes changed in the resulting publish) - # Remap filepath to publish destination - # TODO It would be much better if we could use the destination path - # from the actual processed texture results, but since the - # attribute will need to preserve tokens like , etc for - # now we will define the output path from the attribute value - # including the tokens to persist them. 
- filepath_attr = resource["attribute"] - remap[filepath_attr] = get_resource_destination_cached( - resource["source"] - ) - - # Preserve color space values (force value after filepath change) - # This will also trigger in the same order at end of context to - # ensure after context it's still the original value. - node = resource["node"] - if cmds.attributeQuery("colorSpace", node=node, exists=True): - color_space_attr = "{}.colorSpace".format(node) - remap[color_space_attr] = resource["result_color_space"] - - self.log.debug("Finished remapping destinations ...") - - return { - "fileTransfers": transfers, - "fileHardlinks": hardlinks, - "fileHashes": hashes, - "attrRemap": remap, - } - - def get_resource_destination(self, filepath, resources_dir, processors): - """Get resource destination path. - - This is utility function to change path if resource file name is - changed by some external tool like `maketx`. - - Args: - filepath (str): Resource source path - resources_dir (str): Destination dir for resources in publish. - processors (list): Texture processors converting resource. - - Returns: - str: Path to resource file - - """ - # Compute destination location - basename, ext = os.path.splitext(os.path.basename(filepath)) - - # Get extension from the last processor - for processor in reversed(processors): - processor_ext = processor.extension - if processor_ext and ext != processor_ext: - self.log.debug("Processor {} overrides extension to '{}' " - "for path: {}".format(processor, - processor_ext, - filepath)) - ext = processor_ext - break - - return os.path.join( - resources_dir, basename + ext - ) - - def _get_existing_hashed_texture(self, texture_hash): - """Return the first found filepath from a texture hash""" - - # If source has been published before with the same settings, - # then don't reprocess but hardlink from the original - existing = find_paths_by_hash(texture_hash) - if existing: - source = next((p for p in existing if os.path.exists(p)), None) - if source: - return source - else: - self.log.warning( - "Paths not found on disk, " - "skipping hardlink: {}".format(existing) - ) - - def _process_texture(self, - filepath, - processors, - staging_dir, - force_copy, - color_management, - colorspace): - """Process a single texture file on disk for publishing. - - This will: - 1. Check whether it's already published, if so it will do hardlink - (if the texture hash is found and force copy is not enabled) - 2. It will process the texture using the supplied texture - processors like MakeTX and MakeRSTexBin if enabled. - 3. Compute the destination path for the source file. - - Args: - filepath (str): The source file path to process. - processors (list): List of TextureProcessor processing the texture - staging_dir (str): The staging directory to write to. - force_copy (bool): Whether to force a copy even if a file hash - might have existed already in the project, otherwise - hardlinking the existing file is allowed. - color_management (dict): Maya's Color Management settings from - `lib.get_color_management_preferences` - colorspace (str): The source colorspace of the resources this - texture belongs to. - - Returns: - TextureResult: The texture result information. - """ - - if len(processors) > 1: - raise KnownPublishError( - "More than one texture processor not supported. 
" - "Current processors enabled: {}".format(processors) - ) - - for processor in processors: - self.log.debug("Processing texture {} with processor {}".format( - filepath, processor - )) - - processed_result = processor.process(filepath, - colorspace, - color_management, - staging_dir) - if not processed_result: - raise RuntimeError("Texture Processor {} returned " - "no result.".format(processor)) - self.log.debug("Generated processed " - "texture: {}".format(processed_result.path)) - - # TODO: Currently all processors force copy instead of allowing - # hardlinks using source hashes. This should be refactored - return processed_result - - # No texture processing for this file - texture_hash = source_hash(filepath) - if not force_copy: - existing = self._get_existing_hashed_texture(filepath) - if existing: - self.log.debug("Found hash in database, preparing hardlink..") - return TextureResult( - path=filepath, - file_hash=texture_hash, - colorspace=colorspace, - transfer_mode=HARDLINK - ) - - return TextureResult( - path=filepath, - file_hash=texture_hash, - colorspace=colorspace, - transfer_mode=COPY - ) - - -class ExtractModelRenderSets(ExtractLook): - """Extract model render attribute sets as model metadata - - Only extracts the render attrib sets (NO shadingEngines) alongside - a .json file that stores it relationships for the sets and "attribute" - data for the instance members. - - """ - - label = "Model Render Sets" - hosts = ["maya"] - families = ["model"] - scene_type_prefix = "meta.render." - look_data_type = "meta.render.json" - - def get_maya_scene_type(self, instance): - typ = super(ExtractModelRenderSets, self).get_maya_scene_type(instance) - # add prefix - self.scene_type = self.scene_type_prefix + self.scene_type - - return typ diff --git a/openpype/hosts/maya/plugins/publish/extract_model.py b/openpype/hosts/maya/plugins/publish/extract_model.py deleted file mode 100644 index 29c952ebbc..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_model.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract model as Maya Scene.""" -import os - -from maya import cmds - -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib - - -class ExtractModel(publish.Extractor, - publish.OptionalPyblishPluginMixin): - """Extract as Model (Maya Scene). - - Only extracts contents based on the original "setMembers" data to ensure - publishing the least amount of required shapes. From that it only takes - the shapes that are not intermediateObjects - - During export it sets a temporary context to perform a clean extraction. 
- The context ensures: - - Smooth preview is turned off for the geometry - - Default shader is assigned (no materials are exported) - - Remove display layers - - """ - - label = "Model (Maya Scene)" - hosts = ["maya"] - families = ["model"] - scene_type = "ma" - optional = True - - def process(self, instance): - """Plugin entry point.""" - if not self.is_active(instance.data): - return - - ext_mapping = ( - instance.context.data["project_settings"]["maya"]["ext_mapping"] - ) - if ext_mapping: - self.log.debug("Looking in settings for scene type ...") - # use extension mapping for first family found - for family in self.families: - try: - self.scene_type = ext_mapping[family] - self.log.debug( - "Using {} as scene type".format(self.scene_type)) - break - except KeyError: - # no preset found - pass - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}.{1}".format(instance.name, self.scene_type) - path = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.debug("Performing extraction ...") - - # Get only the shape contents we need in such a way that we avoid - # taking along intermediateObjects - members = instance.data("setMembers") - members = cmds.ls(members, - dag=True, - shapes=True, - type=("mesh", "nurbsCurve"), - noIntermediate=True, - long=True) - - with lib.no_display_layers(instance): - with lib.displaySmoothness(members, - divisionsU=0, - divisionsV=0, - pointsWire=4, - pointsShaded=1, - polygonObject=1): - with lib.shader(members, - shadingEngine="initialShadingGroup"): - with lib.maintained_selection(): - cmds.select(members, noExpand=True) - cmds.file(path, - force=True, - typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 - exportSelected=True, - preserveReferences=False, - channels=False, - constraints=False, - expressions=False, - constructionHistory=False) - - # Store reference for integration - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': self.scene_type, - 'ext': self.scene_type, - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s" % (instance.name, - path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py deleted file mode 100644 index 507229a7b3..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ /dev/null @@ -1,106 +0,0 @@ -import os - -import clique - -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib - -from maya import cmds - - -class ExtractPlayblast(publish.Extractor): - """Extract viewport playblast. - - Takes review camera and creates review Quicktime video based on viewport - capture. 
- - """ - - label = "Extract Playblast" - hosts = ["maya"] - families = ["review"] - optional = True - capture_preset = {} - profiles = None - - def process(self, instance): - self.log.debug("Extracting playblast..") - - # get scene fps - fps = instance.data.get("fps") or instance.context.data.get("fps") - - # if start and end frames cannot be determined, get them - # from Maya timeline - start = instance.data.get("frameStartFtrack") - end = instance.data.get("frameEndFtrack") - if start is None: - start = cmds.playbackOptions(query=True, animationStartTime=True) - if end is None: - end = cmds.playbackOptions(query=True, animationEndTime=True) - - self.log.debug("start: {}, end: {}".format(start, end)) - task_data = instance.data["anatomyData"].get("task", {}) - capture_preset = lib.get_capture_preset( - task_data.get("name"), - task_data.get("type"), - instance.data["subset"], - instance.context.data["project_settings"], - self.log - ) - stagingdir = self.staging_dir(instance) - filename = instance.name - path = os.path.join(stagingdir, filename) - self.log.debug("Outputting images to %s" % path) - # get cameras - camera = instance.data["review_camera"] - preset = lib.generate_capture_preset( - instance, camera, path, - start=start, end=end, - capture_preset=capture_preset) - lib.render_capture_preset(preset) - - # Find playblast sequence - collected_files = os.listdir(stagingdir) - patterns = [clique.PATTERNS["frames"]] - collections, remainder = clique.assemble(collected_files, - minimum_items=1, - patterns=patterns) - - self.log.debug("Searching playblast collection for: %s", path) - frame_collection = None - for collection in collections: - filebase = collection.format("{head}").rstrip(".") - self.log.debug("Checking collection head: %s", filebase) - if filebase in path: - frame_collection = collection - self.log.debug( - "Found playblast collection: %s", frame_collection - ) - - tags = ["review"] - if not instance.data.get("keepImages"): - tags.append("delete") - - # Add camera node name to representation data - camera_node_name = cmds.listRelatives(camera, parent=True)[0] - - collected_files = list(frame_collection) - # single frame file shouldn't be in list, only as a string - if len(collected_files) == 1: - collected_files = collected_files[0] - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": capture_preset["Codec"]["compression"], - "ext": capture_preset["Codec"]["compression"], - "files": collected_files, - "stagingDir": stagingdir, - "frameStart": int(start), - "frameEnd": int(end), - "fps": fps, - "tags": tags, - "camera_name": camera_node_name - } - instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py deleted file mode 100644 index 7fc8760a70..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -"""Redshift Proxy extractor.""" -import os - -from maya import cmds - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection - - -class ExtractRedshiftProxy(publish.Extractor): - """Extract the content of the instance to a redshift proxy file.""" - - label = "Redshift Proxy (.rs)" - hosts = ["maya"] - families = ["redshiftproxy"] - - def process(self, instance): - """Extractor entry point.""" - - staging_dir = self.staging_dir(instance) - file_name = 
"{}.rs".format(instance.name) - file_path = os.path.join(staging_dir, file_name) - - anim_on = instance.data["animation"] - rs_options = "exportConnectivity=0;enableCompression=1;keepUnused=0;" - repr_files = file_name - - if not anim_on: - # Remove animation information because it is not required for - # non-animated subsets - keys = ["frameStart", - "frameEnd", - "handleStart", - "handleEnd", - "frameStartHandle", - "frameEndHandle"] - for key in keys: - instance.data.pop(key, None) - - else: - start_frame = instance.data["frameStartHandle"] - end_frame = instance.data["frameEndHandle"] - rs_options = "{}startFrame={};endFrame={};frameStep={};".format( - rs_options, start_frame, - end_frame, instance.data["step"] - ) - - root, ext = os.path.splitext(file_path) - # Padding is taken from number of digits of the end_frame. - # Not sure where Redshift is taking it. - repr_files = [ - "{}.{}{}".format(os.path.basename(root), str(frame).rjust(4, "0"), ext) # noqa: E501 - for frame in range( - int(start_frame), - int(end_frame) + 1, - int(instance.data["step"]) - )] - # vertex_colors = instance.data.get("vertexColors", False) - - # Write out rs file - self.log.debug("Writing: '%s'" % file_path) - with maintained_selection(): - cmds.select(instance.data["setMembers"], noExpand=True) - cmds.file(file_path, - pr=False, - force=True, - type="Redshift Proxy", - exportSelected=True, - options=rs_options) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - self.log.debug("Files: {}".format(repr_files)) - - representation = { - 'name': 'rs', - 'ext': 'rs', - 'files': repr_files, - "stagingDir": staging_dir, - } - instance.data["representations"].append(representation) - - self.log.debug("Extracted instance '%s' to: %s" - % (instance.name, staging_dir)) diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py deleted file mode 100644 index 28362b355c..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,120 +0,0 @@ -import os -import glob -import tempfile - -from openpype.pipeline import publish -from openpype.hosts.maya.api import lib - - -class ExtractThumbnail(publish.Extractor): - """Extract viewport thumbnail. - - Takes review camera and creates a thumbnail based on viewport - capture. 
- - """ - - label = "Thumbnail" - hosts = ["maya"] - families = ["review"] - - def process(self, instance): - self.log.debug("Extracting thumbnail..") - - camera = instance.data["review_camera"] - - task_data = instance.data["anatomyData"].get("task", {}) - capture_preset = lib.get_capture_preset( - task_data.get("name"), - task_data.get("type"), - instance.data["subset"], - instance.context.data["project_settings"], - self.log - ) - - # Create temp directory for thumbnail - # - this is to avoid "override" of source file - dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_thumbnail") - self.log.debug( - "Create temp directory {} for thumbnail".format(dst_staging) - ) - # Store new staging to cleanup paths - filename = instance.name - path = os.path.join(dst_staging, filename) - - self.log.debug("Outputting images to %s" % path) - - preset = lib.generate_capture_preset( - instance, camera, path, - start=1, end=1, - capture_preset=capture_preset) - - preset["camera_options"].update({ - "displayGateMask": False, - "displayResolution": False, - "displayFilmGate": False, - "displayFieldChart": False, - "displaySafeAction": False, - "displaySafeTitle": False, - "displayFilmPivot": False, - "displayFilmOrigin": False, - "overscan": 1.0, - }) - path = lib.render_capture_preset(preset) - - playblast = self._fix_playblast_output_path(path) - - _, thumbnail = os.path.split(playblast) - - self.log.debug("file list {}".format(thumbnail)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": "thumbnail", - "ext": "jpg", - "files": thumbnail, - "stagingDir": dst_staging, - "thumbnail": True - } - instance.data["representations"].append(representation) - - def _fix_playblast_output_path(self, filepath): - """Workaround a bug in maya.cmds.playblast to return correct filepath. - - When the `viewer` argument is set to False and maya.cmds.playblast - does not automatically open the playblasted file the returned - filepath does not have the file's extension added correctly. - - To workaround this we just glob.glob() for any file extensions and - assume the latest modified file is the correct file and return it. - - """ - # Catch cancelled playblast - if filepath is None: - self.log.warning("Playblast did not result in output path. " - "Playblast is probably interrupted.") - return None - - # Fix: playblast not returning correct filename (with extension) - # Lets assume the most recently modified file is the correct one. 
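The recovery logic below reduces to "glob for sibling files, take the newest". In isolation, with a made-up staging path:

```python
import glob
import os

# Hypothetical staging output; Maya may have written "review.0001.png"
# while reporting only "review" without the padded frame and extension.
files = glob.glob("/tmp/playblasts/review.*")
if files:
    filepath = max(files, key=os.path.getmtime)  # newest file wins
```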
- if not os.path.exists(filepath): - directory = os.path.dirname(filepath) - filename = os.path.basename(filepath) - # check if the filepath is has frame based filename - # example : capture.####.png - parts = filename.split(".") - if len(parts) == 3: - query = os.path.join(directory, "{}.*.{}".format(parts[0], - parts[-1])) - files = glob.glob(query) - else: - files = glob.glob("{}.*".format(filepath)) - - if not files: - raise RuntimeError("Couldn't find playblast from: " - "{0}".format(filepath)) - filepath = max(files, key=os.path.getmtime) - - return filepath diff --git a/openpype/hosts/maya/plugins/publish/save_scene.py b/openpype/hosts/maya/plugins/publish/save_scene.py deleted file mode 100644 index 495c339731..0000000000 --- a/openpype/hosts/maya/plugins/publish/save_scene.py +++ /dev/null @@ -1,35 +0,0 @@ -import pyblish.api -from openpype.pipeline.workfile.lock_workfile import ( - is_workfile_lock_enabled, - remove_workfile_lock -) - - -class SaveCurrentScene(pyblish.api.ContextPlugin): - """Save current scene - - """ - - label = "Save current file" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["maya"] - families = ["renderlayer", "workfile"] - - def process(self, context): - import maya.cmds as cmds - - current = cmds.file(query=True, sceneName=True) - assert context.data['currentFile'] == current - - # If file has no modifications, skip forcing a file save - if not cmds.file(query=True, modified=True): - self.log.debug("Skipping file save as there " - "are no modifications..") - return - project_name = context.data["projectName"] - project_settings = context.data["project_settings"] - # remove lockfile before saving - if is_workfile_lock_enabled("maya", project_name, project_settings): - remove_workfile_lock(current) - self.log.info("Saving current file: {}".format(current)) - cmds.file(save=True, force=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_attributes.py b/openpype/hosts/maya/plugins/publish/validate_attributes.py deleted file mode 100644 index c76d979fbf..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_attributes.py +++ /dev/null @@ -1,112 +0,0 @@ -from collections import defaultdict - -import pyblish.api -from maya import cmds - -from openpype.hosts.maya.api.lib import set_attribute -from openpype.pipeline.publish import ( - OptionalPyblishPluginMixin, PublishValidationError, RepairAction, - ValidateContentsOrder) - - -class ValidateAttributes(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Ensure attributes are consistent. - - Attributes to validate and their values comes from the - "maya/attributes.json" preset, which needs this structure: - { - "family": { - "node_name.attribute_name": attribute_value - } - } - """ - - order = ValidateContentsOrder - label = "Attributes" - hosts = ["maya"] - actions = [RepairAction] - optional = True - - attributes = None - - def process(self, instance): - if not self.is_active(instance.data): - return - - # Check for preset existence. - if not self.attributes: - return - - invalid = self.get_invalid(instance, compute=True) - if invalid: - raise PublishValidationError( - "Found attributes with invalid values: {}".format(invalid) - ) - - @classmethod - def get_invalid(cls, instance, compute=False): - if compute: - return cls.get_invalid_attributes(instance) - else: - return instance.data.get("invalid_attributes", []) - - @classmethod - def get_invalid_attributes(cls, instance): - invalid_attributes = [] - - # Filter families. 
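As a reference for the preset structure the class docstring above describes, a hypothetical `attributes` value (node and attribute names are made up, not OpenPype defaults):

```python
# "maya/attributes.json" maps family -> {"node_name.attribute_name": value}
attributes = {
    "model": {
        # node_name.attribute_name: expected value
        "body_GEO.castsShadows": True,
        "body_GEO.doubleSided": False,
    }
}
```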
- families = [instance.data["family"]] - families += instance.data.get("families", []) - families = set(families) & set(cls.attributes.keys()) - if not families: - return [] - - # Get all attributes to validate. - attributes = defaultdict(dict) - for family in families: - if family not in cls.attributes: - # No attributes to validate for family - continue - - for preset_attr, preset_value in cls.attributes[family].items(): - node_name, attribute_name = preset_attr.split(".", 1) - attributes[node_name][attribute_name] = preset_value - - if not attributes: - return [] - - # Get invalid attributes. - nodes = cmds.ls(long=True) - for node in nodes: - node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - if node_name not in attributes: - continue - - for attr_name, expected in attributes[node_name].items(): - - # Skip if attribute does not exist - if not cmds.attributeQuery(attr_name, node=node, exists=True): - continue - - plug = "{}.{}".format(node, attr_name) - value = cmds.getAttr(plug) - if value != expected: - invalid_attributes.append( - { - "attribute": plug, - "expected": expected, - "current": value - } - ) - - instance.data["invalid_attributes"] = invalid_attributes - return invalid_attributes - - @classmethod - def repair(cls, instance): - invalid = cls.get_invalid(instance) - for data in invalid: - node, attr = data["attribute"].split(".", 1) - value = data["expected"] - set_attribute(node=node, attribute=attr, value=value) diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py b/openpype/hosts/maya/plugins/publish/validate_camera_contents.py deleted file mode 100644 index 767ac55718..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_camera_contents.py +++ /dev/null @@ -1,78 +0,0 @@ -import pyblish.api -from maya import cmds - -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( - PublishValidationError, ValidateContentsOrder) - - -class ValidateCameraContents(pyblish.api.InstancePlugin): - """Validates Camera instance contents. - - A Camera instance may only hold a SINGLE camera's transform, nothing else. - - It may hold a "locator" as shape, but different shapes are down the - hierarchy. - - """ - - order = ValidateContentsOrder - families = ['camera'] - hosts = ['maya'] - label = 'Camera Contents' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] - validate_shapes = True - - @classmethod - def get_invalid(cls, instance): - - # get cameras - members = instance.data['setMembers'] - shapes = cmds.ls(members, dag=True, shapes=True, long=True) - - # single camera - invalid = [] - cameras = cmds.ls(shapes, type='camera', long=True) - if len(cameras) != 1: - cls.log.error("Camera instance must have a single camera. 
" - "Found {0}: {1}".format(len(cameras), cameras)) - invalid.extend(cameras) - - # We need to check this edge case because returning an extended - # list when there are no actual cameras results in - # still an empty 'invalid' list - if len(cameras) < 1: - if members: - # If there are members in the instance return all of - # them as 'invalid' so the user can still select invalid - cls.log.error("No cameras found in instance " - "members: {}".format(members)) - return members - - raise PublishValidationError( - "No cameras found in empty instance.") - - if not cls.validate_shapes: - cls.log.debug("Not validating shapes in the camera content" - " because 'validate shapes' is disabled") - return invalid - - # non-camera shapes - valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True) - shapes = set(shapes) - set(valid_shapes) - if shapes: - shapes = list(shapes) - cls.log.error("Camera instance should only contain camera " - "shapes. Found: {0}".format(shapes)) - invalid.extend(shapes) - - invalid = list(set(invalid)) - return invalid - - def process(self, instance): - """Process all the nodes in the instance""" - - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Invalid camera contents: " - "{0}".format(invalid)) diff --git a/openpype/hosts/maya/plugins/publish/validate_frame_range.py b/openpype/hosts/maya/plugins/publish/validate_frame_range.py deleted file mode 100644 index a7043b8407..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_frame_range.py +++ /dev/null @@ -1,204 +0,0 @@ -import pyblish.api - -from maya import cmds -from openpype.pipeline.publish import ( - RepairAction, - ValidateContentsOrder, - PublishValidationError, - OptionalPyblishPluginMixin -) -from openpype.hosts.maya.api.lib_rendersetup import ( - get_attr_overrides, - get_attr_in_layer, -) -from maya.app.renderSetup.model.override import AbsOverride - - -class ValidateFrameRange(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validates the frame ranges. - - This is an optional validator checking if the frame range on instance - matches the frame range specified for the asset. - - It also validates render frame ranges of render layers. - - Repair action will change everything to match the asset frame range. - - This can be turned off by the artist to allow custom ranges. - """ - - label = "Validate Frame Range" - order = ValidateContentsOrder - families = ["animation", - "pointcache", - "camera", - "proxyAbc", - "renderlayer", - "review", - "yeticache"] - optional = True - actions = [RepairAction] - exclude_families = [] - - def process(self, instance): - if not self.is_active(instance.data): - return - - context = instance.context - if instance.data.get("tileRendering"): - self.log.debug( - "Skipping frame range validation because " - "tile rendering is enabled." 
- ) - return - - frame_start_handle = int(context.data.get("frameStartHandle")) - frame_end_handle = int(context.data.get("frameEndHandle")) - handle_start = int(context.data.get("handleStart")) - handle_end = int(context.data.get("handleEnd")) - frame_start = int(context.data.get("frameStart")) - frame_end = int(context.data.get("frameEnd")) - - inst_start = int(instance.data.get("frameStartHandle")) - inst_end = int(instance.data.get("frameEndHandle")) - inst_frame_start = int(instance.data.get("frameStart")) - inst_frame_end = int(instance.data.get("frameEnd")) - inst_handle_start = int(instance.data.get("handleStart")) - inst_handle_end = int(instance.data.get("handleEnd")) - - # basic sanity checks - assert frame_start_handle <= frame_end_handle, ( - "start frame is lower then end frame") - - # compare with data on instance - errors = [] - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: - return - if (inst_start != frame_start_handle): - errors.append("Instance start frame [ {} ] doesn't " - "match the one set on asset [ {} ]: " - "{}/{}/{}/{} (handle/start/end/handle)".format( - inst_start, - frame_start_handle, - handle_start, frame_start, frame_end, handle_end - )) - - if (inst_end != frame_end_handle): - errors.append("Instance end frame [ {} ] doesn't " - "match the one set on asset [ {} ]: " - "{}/{}/{}/{} (handle/start/end/handle)".format( - inst_end, - frame_end_handle, - handle_start, frame_start, frame_end, handle_end - )) - - checks = { - "frame start": (frame_start, inst_frame_start), - "frame end": (frame_end, inst_frame_end), - "handle start": (handle_start, inst_handle_start), - "handle end": (handle_end, inst_handle_end) - } - for label, values in checks.items(): - if values[0] != values[1]: - errors.append( - "{} on instance ({}) does not match with the asset " - "({}).".format(label.title(), values[1], values[0]) - ) - - if errors: - report = "Frame range settings are incorrect.\n\n" - for error in errors: - report += "- {}\n\n".format(error) - - raise PublishValidationError(report, title="Frame Range incorrect") - - @classmethod - def repair(cls, instance): - """ - Repair instance container to match asset data. 
- """ - - if "renderlayer" in instance.data.get("families"): - # Special behavior for renderlayers - cls.repair_renderlayer(instance) - return - - node = instance.data["name"] - context = instance.context - - frame_start_handle = int(context.data.get("frameStartHandle")) - frame_end_handle = int(context.data.get("frameEndHandle")) - handle_start = int(context.data.get("handleStart")) - handle_end = int(context.data.get("handleEnd")) - frame_start = int(context.data.get("frameStart")) - frame_end = int(context.data.get("frameEnd")) - - # Start - if cmds.attributeQuery("handleStart", node=node, exists=True): - cmds.setAttr("{}.handleStart".format(node), handle_start) - cmds.setAttr("{}.frameStart".format(node), frame_start) - else: - # Include start handle in frame start if no separate handleStart - # attribute exists on the node - cmds.setAttr("{}.frameStart".format(node), frame_start_handle) - - # End - if cmds.attributeQuery("handleEnd", node=node, exists=True): - cmds.setAttr("{}.handleEnd".format(node), handle_end) - cmds.setAttr("{}.frameEnd".format(node), frame_end) - else: - # Include end handle in frame end if no separate handleEnd - # attribute exists on the node - cmds.setAttr("{}.frameEnd".format(node), frame_end_handle) - - @classmethod - def repair_renderlayer(cls, instance): - """Apply frame range in render settings""" - - layer = instance.data["renderlayer"] - context = instance.context - - start_attr = "defaultRenderGlobals.startFrame" - end_attr = "defaultRenderGlobals.endFrame" - - frame_start_handle = int(context.data.get("frameStartHandle")) - frame_end_handle = int(context.data.get("frameEndHandle")) - - cls._set_attr_in_layer(start_attr, layer, frame_start_handle) - cls._set_attr_in_layer(end_attr, layer, frame_end_handle) - - @classmethod - def _set_attr_in_layer(cls, node_attr, layer, value): - - if get_attr_in_layer(node_attr, layer=layer) == value: - # Already ok. This can happen if you have multiple renderlayers - # validated and there are no frame range overrides. 
The first - # layer's repair would have fixed the global value already - return - - overrides = list(get_attr_overrides(node_attr, layer=layer)) - if overrides: - # We set the last absolute override if it is an absolute override - # otherwise we'll add an Absolute override - last_override = overrides[-1][1] - if not isinstance(last_override, AbsOverride): - collection = last_override.parent() - node, attr = node_attr.split(".", 1) - last_override = collection.createAbsoluteOverride(node, attr) - - cls.log.debug("Setting {attr} absolute override in " - "layer '{layer}': {value}".format(layer=layer, - attr=node_attr, - value=value)) - cmds.setAttr(last_override.name() + ".attrValue", value) - - else: - # Set the attribute directly - # (Note that this will set the global attribute) - cls.log.debug("Setting global {attr}: {value}".format( - attr=node_attr, - value=value - )) - cmds.setAttr(node_attr, value) diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py deleted file mode 100644 index 7234f5a025..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py +++ /dev/null @@ -1,39 +0,0 @@ -import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishValidationError -) - - -class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): - """Validates instance objectSet has *any* members.""" - - order = ValidateContentsOrder - hosts = ["maya"] - label = 'Instance has members' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] - - @classmethod - def get_invalid(cls, instance): - invalid = list() - if not instance.data.get("setMembers"): - objectset_name = instance.data['name'] - invalid.append(objectset_name) - - return invalid - - def process(self, instance): - # Allow renderlayer, rendersetup and workfile to be empty - skip_families = {"workfile", "renderlayer", "rendersetup"} - if instance.data.get("family") in skip_families: - return - - invalid = self.get_invalid(instance) - if invalid: - # Invalid will always be a single entry, we log the single name - name = invalid[0] - raise PublishValidationError( - title="Empty instance", - message="Instance '{0}' is empty".format(name) - ) diff --git a/openpype/hosts/maya/plugins/publish/validate_loaded_plugin.py b/openpype/hosts/maya/plugins/publish/validate_loaded_plugin.py deleted file mode 100644 index eac13053db..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_loaded_plugin.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import pyblish.api -import maya.cmds as cmds - -from openpype.pipeline.publish import ( - RepairContextAction, - PublishValidationError -) - - -class ValidateLoadedPlugin(pyblish.api.ContextPlugin): - """Ensure there are no unauthorized loaded plugins""" - - label = "Loaded Plugin" - order = pyblish.api.ValidatorOrder - host = ["maya"] - actions = [RepairContextAction] - - @classmethod - def get_invalid(cls, context): - - invalid = [] - loaded_plugin = cmds.pluginInfo(query=True, listPlugins=True) - # get variable from OpenPype settings - whitelist_native_plugins = cls.whitelist_native_plugins - authorized_plugins = cls.authorized_plugins or [] - - for plugin in loaded_plugin: - if not whitelist_native_plugins and os.getenv('MAYA_LOCATION') \ - in cmds.pluginInfo(plugin, query=True, path=True): - continue - if plugin not in authorized_plugins: - invalid.append(plugin) - - return invalid - - def process(self, 
context):
-
-        invalid = self.get_invalid(context)
-        if invalid:
-            raise PublishValidationError(
-                "Found forbidden plugin name: {}".format(", ".join(invalid))
-            )
-
-    @classmethod
-    def repair(cls, context):
-        """Unload forbidden plugins"""
-
-        for plugin in cls.get_invalid(context):
-            cmds.pluginInfo(plugin, edit=True, autoload=False)
-            cmds.unloadPlugin(plugin, force=True)
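The whitelist check and forced unload this deleted validator performed can be reproduced standalone in the Script Editor; a minimal sketch (the plugin names in `authorized_plugins` are placeholders, not OpenPype defaults):

```python
from maya import cmds

authorized_plugins = {"AbcExport", "AbcImport", "mtoa"}  # example whitelist

loaded = cmds.pluginInfo(query=True, listPlugins=True) or []
for plugin in loaded:
    if plugin not in authorized_plugins:
        # Disable autoload first so the plugin stays off after a restart
        cmds.pluginInfo(plugin, edit=True, autoload=False)
        cmds.unloadPlugin(plugin, force=True)
```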
".join( - " - {}".format(node) for node in invalid - ) - - raise PublishValidationError( - title="Mesh has missing UVs", - message="Model meshes are required to have UVs.

" - "Meshes detected with invalid or missing UVs:
" - "{0}".format(names) - ) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py deleted file mode 100644 index 48b4d0f557..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_no_negative_scale.py +++ /dev/null @@ -1,64 +0,0 @@ -from maya import cmds - -import pyblish.api -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( - ValidateMeshOrder, - PublishValidationError -) - - -def _as_report_list(values, prefix="- ", suffix="\n"): - """Return list as bullet point list for a report""" - if not values: - return "" - return prefix + (suffix + prefix).join(values) - - -class ValidateMeshNoNegativeScale(pyblish.api.Validator): - """Ensure that meshes don't have a negative scale. - - Using negatively scaled proxies in a VRayMesh results in inverted - normals. As such we want to avoid this. - - We also avoid this on the rig or model because these are often the - previous steps for those that are cached to proxies so we can catch this - issue early. - - """ - - order = ValidateMeshOrder - hosts = ['maya'] - families = ['model'] - label = 'Mesh No Negative Scale' - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] - - @staticmethod - def get_invalid(instance): - meshes = cmds.ls(instance, - type='mesh', - long=True, - noIntermediate=True) - - invalid = [] - for mesh in meshes: - transform = cmds.listRelatives(mesh, parent=True, fullPath=True)[0] - scale = cmds.getAttr("{0}.scale".format(transform))[0] - - if any(x < 0 for x in scale): - invalid.append(mesh) - - return invalid - - def process(self, instance): - """Process all the nodes in the instance 'objectSet'""" - - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError( - "Meshes found with negative scale:\n\n{0}".format( - _as_report_list(sorted(invalid)) - ), - title="Negative scale" - ) diff --git a/openpype/hosts/maya/plugins/publish/validate_resources.py b/openpype/hosts/maya/plugins/publish/validate_resources.py deleted file mode 100644 index 7d894a2bef..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_resources.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -from collections import defaultdict - -import pyblish.api -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishValidationError -) - - -class ValidateResources(pyblish.api.InstancePlugin): - """Validates mapped resources. - - These are external files to the current application, for example - these could be textures, image planes, cache files or other linked - media. 
- - This validates: - - The resources have unique filenames (without extension) - - """ - - order = ValidateContentsOrder - label = "Resources Unique" - - def process(self, instance): - - resources = instance.data.get("resources", []) - if not resources: - self.log.debug("No resources to validate..") - return - - basenames = defaultdict(set) - - for resource in resources: - files = resource.get("files", []) - for filename in files: - - # Use normalized paths in comparison and ignore case - # sensitivity - filename = os.path.normpath(filename).lower() - - basename = os.path.splitext(os.path.basename(filename))[0] - basenames[basename].add(filename) - - invalid_resources = list() - for basename, sources in basenames.items(): - if len(sources) > 1: - invalid_resources.extend(sources) - - self.log.error( - "Non-unique resource name: {0}" - "{0} (sources: {1})".format( - basename, - list(sources) - ) - ) - - if invalid_resources: - raise PublishValidationError("Invalid resources in instance.") diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py deleted file mode 100644 index 906ff17ec9..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py +++ /dev/null @@ -1,78 +0,0 @@ -from maya import cmds - -import pyblish.api - -import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishValidationError -) - - -class ValidateTransformZero(pyblish.api.Validator): - """Transforms can't have any values - - To solve this issue, try freezing the transforms. So long - as the transforms, rotation and scale values are zero, - you're all good. - - """ - - order = ValidateContentsOrder - hosts = ["maya"] - families = ["model"] - label = "Transform Zero (Freeze)" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] - - _identity = [1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0] - _tolerance = 1e-30 - - @classmethod - def get_invalid(cls, instance): - """Returns the invalid transforms in the instance. - - This is the same as checking: - - translate == [0, 0, 0] and rotate == [0, 0, 0] and - scale == [1, 1, 1] and shear == [0, 0, 0] - - .. note:: - This will also catch camera transforms if those - are in the instances. - - Returns: - list: Transforms that are not identity matrix - - """ - - transforms = cmds.ls(instance, type="transform") - - invalid = [] - for transform in transforms: - if ('_LOC' in transform) or ('_loc' in transform): - continue - mat = cmds.xform(transform, q=1, matrix=True, objectSpace=True) - if not all(abs(x-y) < cls._tolerance - for x, y in zip(cls._identity, mat)): - invalid.append(transform) - - return invalid - - def process(self, instance): - """Process all the nodes in the instance "objectSet""" - - invalid = self.get_invalid(instance) - if invalid: - - names = "
".join( - " - {}".format(node) for node in invalid - ) - - raise PublishValidationError( - title="Transform Zero", - message="The model publish allows no transformations. You must" - " freeze transformations to continue.

" - "Nodes found with transform values: " - "{0}".format(names)) diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py deleted file mode 100644 index 42d3dc3ac8..0000000000 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ /dev/null @@ -1,152 +0,0 @@ -# -*- coding: utf-8 -*- -"""Validator for correct naming of Static Meshes.""" -import re - -import pyblish.api - -import openpype.hosts.maya.api.action -from openpype.pipeline import legacy_io -from openpype.settings import get_project_settings -from openpype.pipeline.publish import ( - ValidateContentsOrder, - OptionalPyblishPluginMixin, - PublishValidationError -) - - -class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validate name of Unreal Static Mesh - - Unreals naming convention states that staticMesh should start with `SM` - prefix - SM_[Name]_## (Eg. SM_sube_01).These prefixes can be configured - in Settings UI. This plugin also validates other types of - meshes - collision meshes: - - UBX_[RenderMeshName]*: - Boxes are created with the Box objects type in - Max or with the Cube polygonal primitive in Maya. - You cannot move the vertices around or deform it - in any way to make it something other than a - rectangular prism, or else it will not work. - - UCP_[RenderMeshName]*: - Capsules are created with the Capsule object type. - The capsule does not need to have many segments - (8 is a good number) at all because it is - converted into a true capsule for collision. Like - boxes, you should not move the individual - vertices around. - - USP_[RenderMeshName]*: - Spheres are created with the Sphere object type. - The sphere does not need to have many segments - (8 is a good number) at all because it is - converted into a true sphere for collision. Like - boxes, you should not move the individual - vertices around. - - UCX_[RenderMeshName]*: - Convex objects can be any completely closed - convex 3D shape. For example, a box can also be - a convex object - - This validator also checks if collision mesh [RenderMeshName] matches one - of SM_[RenderMeshName]. 
- - """ - optional = True - order = ValidateContentsOrder - hosts = ["maya"] - families = ["staticMesh"] - label = "Unreal Static Mesh Name" - actions = [openpype.hosts.maya.api.action.SelectInvalidAction] - regex_mesh = r"(?P.*))" - regex_collision = r"(?P.*)" - - @classmethod - def get_invalid(cls, instance): - - invalid = [] - - collision_prefixes = ( - instance.context.data["project_settings"] - ["maya"] - ["create"] - ["CreateUnrealStaticMesh"] - ["collision_prefixes"] - ) - - if cls.validate_mesh: - # compile regex for testing names - regex_mesh = "{}{}".format( - ("_" + cls.static_mesh_prefix) or "", cls.regex_mesh - ) - sm_r = re.compile(regex_mesh) - if not sm_r.match(instance.data.get("subset")): - cls.log.error("Mesh doesn't comply with name validation.") - return True - - if cls.validate_collision: - collision_set = instance.data.get("collisionMembers", None) - # soft-fail is there are no collision objects - if not collision_set: - cls.log.warning("No collision objects to validate.") - return False - - regex_collision = "{}{}_(\\d+)".format( - "(?P({}))_".format( - "|".join("{0}".format(p) for p in collision_prefixes) - ) or "", cls.regex_collision - ) - - cl_r = re.compile(regex_collision) - - asset_name = instance.data["assetEntity"]["name"] - mesh_name = "{}{}".format(asset_name, - instance.data.get("variant", [])) - - for obj in collision_set: - cl_m = cl_r.match(obj) - if not cl_m: - cls.log.error("{} is invalid".format(obj)) - invalid.append(obj) - else: - expected_collision = "{}_{}".format( - cl_m.group("prefix"), - mesh_name - ) - - if not obj.startswith(expected_collision): - - cls.log.error( - "Collision object name doesn't match " - "static mesh name" - ) - cls.log.error("{}_{} != {}_{}*".format( - cl_m.group("prefix"), - cl_m.group("renderName"), - cl_m.group("prefix"), - mesh_name, - )) - invalid.append(obj) - - return invalid - - def process(self, instance): - if not self.is_active(instance.data): - return - - if not self.validate_mesh and not self.validate_collision: - self.log.debug("Validation of both mesh and collision names" - "is disabled.") - return - - if not instance.data.get("collisionMembers", None): - self.log.debug("There are no collision objects to validate") - return - - invalid = self.get_invalid(instance) - - if invalid: - raise PublishValidationError("Model naming is invalid. See log.") diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py deleted file mode 100644 index f2899cdb37..0000000000 --- a/openpype/hosts/maya/startup/userSetup.py +++ /dev/null @@ -1,69 +0,0 @@ -import os - -from openpype.settings import get_project_settings -from openpype.pipeline import install_host, get_current_project_name -from openpype.hosts.maya.api import MayaHost - -from maya import cmds - - -host = MayaHost() -install_host(host) - -print("Starting OpenPype usersetup...") - -project_name = get_current_project_name() -settings = get_project_settings(project_name) - -# Loading plugins explicitly. -explicit_plugins_loading = settings["maya"]["explicit_plugins_loading"] -if explicit_plugins_loading["enabled"]: - def _explicit_load_plugins(): - for plugin in explicit_plugins_loading["plugins_to_load"]: - if plugin["enabled"]: - print("Loading plug-in: " + plugin["name"]) - try: - cmds.loadPlugin(plugin["name"], quiet=True) - except RuntimeError as e: - print(e) - - # We need to load plugins deferred as loading them directly does not work - # correctly due to Maya's initialization. 
- cmds.evalDeferred( - _explicit_load_plugins, - lowestPriority=True - ) - -# Open Workfile Post Initialization. -key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION" -if bool(int(os.environ.get(key, "0"))): - def _log_and_open(): - path = os.environ["AVALON_LAST_WORKFILE"] - print("Opening \"{}\"".format(path)) - cmds.file(path, open=True, force=True) - cmds.evalDeferred( - _log_and_open, - lowestPriority=True - ) - -# Build a shelf. -shelf_preset = settings['maya'].get('project_shelf') -if shelf_preset: - icon_path = os.path.join( - os.environ['OPENPYPE_PROJECT_SCRIPTS'], - project_name, - "icons") - icon_path = os.path.abspath(icon_path) - - for i in shelf_preset['imports']: - import_string = "from {} import {}".format(project_name, i) - print(import_string) - exec(import_string) - - cmds.evalDeferred( - "mlib.shelf(name=shelf_preset['name'], iconPath=icon_path," - " preset=shelf_preset)" - ) - - -print("Finished OpenPype usersetup.") diff --git a/openpype/hosts/maya/tools/__init__.py b/openpype/hosts/maya/tools/__init__.py deleted file mode 100644 index bd1e302cd2..0000000000 --- a/openpype/hosts/maya/tools/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -from openpype.tools.utils.host_tools import qt_app_context - - -class MayaToolsSingleton: - _look_assigner = None - - -def get_look_assigner_tool(parent): - """Create, cache and return look assigner tool window.""" - if MayaToolsSingleton._look_assigner is None: - from .mayalookassigner import MayaLookAssignerWindow - mayalookassigner_window = MayaLookAssignerWindow(parent) - MayaToolsSingleton._look_assigner = mayalookassigner_window - return MayaToolsSingleton._look_assigner - - -def show_look_assigner(parent=None): - """Look manager is Maya specific tool for look management.""" - - with qt_app_context(): - look_assigner_tool = get_look_assigner_tool(parent) - look_assigner_tool.show() - - # Pull window to the front. - look_assigner_tool.raise_() - look_assigner_tool.activateWindow() - look_assigner_tool.showNormal() diff --git a/openpype/hosts/maya/tools/mayalookassigner/app.py b/openpype/hosts/maya/tools/mayalookassigner/app.py deleted file mode 100644 index b5ce7ada34..0000000000 --- a/openpype/hosts/maya/tools/mayalookassigner/app.py +++ /dev/null @@ -1,309 +0,0 @@ -import sys -import time -import logging - -from qtpy import QtWidgets, QtCore - -from openpype import style -from openpype.client import get_last_version_by_subset_id -from openpype.pipeline import get_current_project_name -from openpype.tools.utils.lib import qt_app_context -from openpype.hosts.maya.api.lib import ( - assign_look_by_version, - get_main_window -) - -from maya import cmds -# old api for MFileIO -import maya.OpenMaya -import maya.api.OpenMaya as om - -from .widgets import ( - AssetOutliner, - LookOutliner -) -from .commands import ( - get_workfile, - remove_unused_looks -) -from .vray_proxies import vrayproxy_assign_look -from . 
import arnold_standin - -module = sys.modules[__name__] -module.window = None - - -class MayaLookAssignerWindow(QtWidgets.QWidget): - - def __init__(self, parent=None): - super(MayaLookAssignerWindow, self).__init__(parent=parent) - - self.log = logging.getLogger(__name__) - - # Store callback references - self._callbacks = [] - self._connections_set_up = False - - filename = get_workfile() - - self.setObjectName("lookManager") - self.setWindowTitle("Look Manager 1.4.0 - [{}]".format(filename)) - self.setWindowFlags(QtCore.Qt.Window) - self.setParent(parent) - - self.resize(750, 500) - - self.setup_ui() - - # Force refresh check on initialization - self._on_renderlayer_switch() - - def setup_ui(self): - """Build the UI""" - - main_splitter = QtWidgets.QSplitter(self) - - # Assets (left) - asset_outliner = AssetOutliner(main_splitter) - - # Looks (right) - looks_widget = QtWidgets.QWidget(main_splitter) - - look_outliner = LookOutliner(looks_widget) # Database look overview - - assign_selected = QtWidgets.QCheckBox( - "Assign to selected only", looks_widget - ) - assign_selected.setToolTip("Whether to assign only to selected nodes " - "or to the full asset") - remove_unused_btn = QtWidgets.QPushButton( - "Remove Unused Looks", looks_widget - ) - - looks_layout = QtWidgets.QVBoxLayout(looks_widget) - looks_layout.addWidget(look_outliner) - looks_layout.addWidget(assign_selected) - looks_layout.addWidget(remove_unused_btn) - - main_splitter.addWidget(asset_outliner) - main_splitter.addWidget(looks_widget) - main_splitter.setSizes([350, 200]) - - # Footer - status = QtWidgets.QStatusBar(self) - status.setSizeGripEnabled(False) - status.setFixedHeight(25) - warn_layer = QtWidgets.QLabel( - "Current Layer is not defaultRenderLayer", self - ) - warn_layer.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) - warn_layer.setStyleSheet("color: #DD5555; font-weight: bold;") - warn_layer.setFixedHeight(25) - - footer = QtWidgets.QHBoxLayout() - footer.setContentsMargins(0, 0, 0, 0) - footer.addWidget(status) - footer.addWidget(warn_layer) - - # Build up widgets - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setSpacing(0) - main_layout.addWidget(main_splitter) - main_layout.addLayout(footer) - - # Set column width - asset_outliner.view.setColumnWidth(0, 200) - look_outliner.view.setColumnWidth(0, 150) - - asset_outliner.selection_changed.connect( - self.on_asset_selection_changed) - - asset_outliner.refreshed.connect( - lambda: self.echo("Loaded assets..") - ) - - look_outliner.menu_apply_action.connect(self.on_process_selected) - remove_unused_btn.clicked.connect(remove_unused_looks) - - # Open widgets - self.asset_outliner = asset_outliner - self.look_outliner = look_outliner - self.status = status - self.warn_layer = warn_layer - - # Buttons - self.remove_unused = remove_unused_btn - self.assign_selected = assign_selected - - self._first_show = True - - def setup_connections(self): - """Connect interactive widgets with actions""" - if self._connections_set_up: - return - - # Maya renderlayer switch callback - callback = om.MEventMessage.addEventCallback( - "renderLayerManagerChange", - self._on_renderlayer_switch - ) - self._callbacks.append(callback) - self._connections_set_up = True - - def remove_connection(self): - # Delete callbacks - for callback in self._callbacks: - om.MMessage.removeCallback(callback) - - self._callbacks = [] - self._connections_set_up = False - - def showEvent(self, event): - self.setup_connections() - super(MayaLookAssignerWindow, self).showEvent(event) 
- if self._first_show: - self._first_show = False - self.setStyleSheet(style.load_stylesheet()) - - def closeEvent(self, event): - self.remove_connection() - super(MayaLookAssignerWindow, self).closeEvent(event) - - def _on_renderlayer_switch(self, *args): - """Callback that updates on Maya renderlayer switch""" - - if maya.OpenMaya.MFileIO.isNewingFile(): - # Don't perform a check during file open or file new as - # the renderlayers will not be in a valid state yet. - return - - layer = cmds.editRenderLayerGlobals(query=True, - currentRenderLayer=True) - if layer != "defaultRenderLayer": - self.warn_layer.show() - else: - self.warn_layer.hide() - - def echo(self, message): - self.status.showMessage(message, 1500) - - def refresh(self): - """Refresh the content""" - - # Get all containers and information - self.asset_outliner.clear() - found_items = self.asset_outliner.get_all_assets() - if not found_items: - self.look_outliner.clear() - - def on_asset_selection_changed(self): - """Get selected items from asset loader and fill look outliner""" - - items = self.asset_outliner.get_selected_items() - self.look_outliner.clear() - self.look_outliner.add_items(items) - - def on_process_selected(self): - """Process all selected looks for the selected assets""" - - assets = self.asset_outliner.get_selected_items() - assert assets, "No asset selected" - - # Collect the looks we want to apply (by name) - look_items = self.look_outliner.get_selected_items() - looks = {look["subset"] for look in look_items} - - selection = self.assign_selected.isChecked() - asset_nodes = self.asset_outliner.get_nodes(selection=selection) - - project_name = get_current_project_name() - start = time.time() - for i, (asset, item) in enumerate(asset_nodes.items()): - - # Label prefix - prefix = "({}/{})".format(i + 1, len(asset_nodes)) - - # Assign the first matching look relevant for this asset - # (since assigning multiple to the same nodes makes no sense) - assign_look = next((subset for subset in item["looks"] - if subset["name"] in looks), None) - if not assign_look: - self.echo( - "{} No matching selected look for {}".format(prefix, asset) - ) - continue - - # Get the latest version of this asset's look subset - version = get_last_version_by_subset_id( - project_name, assign_look["_id"], fields=["_id"] - ) - - subset_name = assign_look["name"] - self.echo("{} Assigning {} to {}\t".format( - prefix, subset_name, asset - )) - nodes = item["nodes"] - - # Assign Vray Proxy look. - if cmds.pluginInfo('vrayformaya', query=True, loaded=True): - self.echo("Getting vray proxy nodes ...") - vray_proxies = set(cmds.ls(type="VRayProxy", long=True)) - - for vp in vray_proxies: - if vp in nodes: - vrayproxy_assign_look(vp, subset_name) - - nodes = list(set(nodes).difference(vray_proxies)) - else: - self.echo( - "Could not assign to VRayProxy because vrayformaya plugin " - "is not loaded." - ) - - # Assign Arnold Standin look. - if cmds.pluginInfo("mtoa", query=True, loaded=True): - arnold_standins = set(cmds.ls(type="aiStandIn", long=True)) - - for standin in arnold_standins: - if standin in nodes: - arnold_standin.assign_look(standin, subset_name) - - nodes = list(set(nodes).difference(arnold_standins)) - else: - self.echo( - "Could not assign to aiStandIn because mtoa plugin is not " - "loaded." - ) - - # Assign look - if nodes: - assign_look_by_version(nodes, version_id=version["_id"]) - - end = time.time() - - self.echo("Finished assigning.. 
({0:.3f}s)".format(end - start)) - - -def show(): - """Display Loader GUI - - Arguments: - debug (bool, optional): Run loader in debug-mode, - defaults to False - - """ - - try: - module.window.close() - del module.window - except (RuntimeError, AttributeError): - pass - - # Get Maya main window - mainwindow = get_main_window() - - with qt_app_context(): - window = MayaLookAssignerWindow(parent=mainwindow) - window.show() - - module.window = window diff --git a/openpype/hosts/maya/tools/mayalookassigner/commands.py b/openpype/hosts/maya/tools/mayalookassigner/commands.py deleted file mode 100644 index 86df502ecd..0000000000 --- a/openpype/hosts/maya/tools/mayalookassigner/commands.py +++ /dev/null @@ -1,197 +0,0 @@ -import os -import logging -from collections import defaultdict - -import maya.cmds as cmds - -from openpype.client import get_assets, get_asset_name_identifier -from openpype.pipeline import ( - remove_container, - registered_host, - get_current_project_name, -) -from openpype.hosts.maya.api import lib - -from .vray_proxies import get_alembic_ids_cache -from . import arnold_standin - -log = logging.getLogger(__name__) - - -def get_workfile(): - path = cmds.file(query=True, sceneName=True) or "untitled" - return os.path.basename(path) - - -def get_workfolder(): - return os.path.dirname(cmds.file(query=True, sceneName=True)) - - -def select(nodes): - cmds.select(nodes) - - -def get_namespace_from_node(node): - """Get the namespace from the given node - - Args: - node (str): name of the node - - Returns: - namespace (str) - - """ - parts = node.rsplit("|", 1)[-1].rsplit(":", 1) - return parts[0] if len(parts) > 1 else u":" - - -def get_selected_nodes(): - """Get information from current selection""" - - selection = cmds.ls(selection=True, long=True) - hierarchy = lib.get_all_children(selection) - return list(set(selection + hierarchy)) - - -def get_all_asset_nodes(): - """Get all assets from the scene, container based - - Returns: - list: list of dictionaries - """ - return cmds.ls(dag=True, noIntermediate=True, long=True) - - -def create_asset_id_hash(nodes): - """Create a hash based on cbId attribute value - Args: - nodes (list): a list of nodes - - Returns: - dict - """ - node_id_hash = defaultdict(list) - for node in nodes: - # iterate over content of reference node - if cmds.nodeType(node) == "reference": - ref_hashes = create_asset_id_hash( - list(set(cmds.referenceQuery(node, nodes=True, dp=True)))) - for asset_id, ref_nodes in ref_hashes.items(): - node_id_hash[asset_id] += ref_nodes - elif cmds.pluginInfo('vrayformaya', query=True, - loaded=True) and cmds.nodeType( - node) == "VRayProxy": - path = cmds.getAttr("{}.fileName".format(node)) - ids = get_alembic_ids_cache(path) - for k, _ in ids.items(): - id = k.split(":")[0] - node_id_hash[id].append(node) - elif cmds.nodeType(node) == "aiStandIn": - for id, _ in arnold_standin.get_nodes_by_id(node).items(): - id = id.split(":")[0] - node_id_hash[id].append(node) - else: - value = lib.get_id(node) - if value is None: - continue - - asset_id = value.split(":")[0] - node_id_hash[asset_id].append(node) - - return dict(node_id_hash) - - -def create_items_from_nodes(nodes): - """Create an item for the view based the container and content of it - - It fetches the look document based on the asset ID found in the content. - The item will contain all important information for the tool to work. - - If there is an asset ID which is not registered in the project's collection - it will log a warning message. 
- - Args: - nodes (list): list of maya nodes - - Returns: - list of dicts - - """ - - asset_view_items = [] - - id_hashes = create_asset_id_hash(nodes) - - if not id_hashes: - log.warning("No id hashes") - return asset_view_items - - project_name = get_current_project_name() - asset_ids = set(id_hashes.keys()) - fields = {"_id", "name", "data.parents"} - asset_docs = get_assets(project_name, asset_ids, fields=fields) - asset_docs_by_id = { - str(asset_doc["_id"]): asset_doc - for asset_doc in asset_docs - } - - for asset_id, id_nodes in id_hashes.items(): - asset_doc = asset_docs_by_id.get(asset_id) - # Skip if asset id is not found - if not asset_doc: - log.warning( - "Id found on {num} nodes for which no asset is found database," - " skipping '{asset_id}'".format( - num=len(nodes), - asset_id=asset_id - ) - ) - continue - - # Collect available look subsets for this asset - looks = lib.list_looks(project_name, asset_doc["_id"]) - - # Collect namespaces the asset is found in - namespaces = set() - for node in id_nodes: - namespace = get_namespace_from_node(node) - namespaces.add(namespace) - - label = get_asset_name_identifier(asset_doc) - asset_view_items.append({ - "label": label, - "asset": asset_doc, - "looks": looks, - "namespaces": namespaces - }) - - return asset_view_items - - -def remove_unused_looks(): - """Removes all loaded looks for which none of the shaders are used. - - This will cleanup all loaded "LookLoader" containers that are unused in - the current scene. - - """ - - host = registered_host() - - unused = [] - for container in host.ls(): - if container['loader'] == "LookLoader": - members = lib.get_container_members(container['objectName']) - look_sets = cmds.ls(members, type="objectSet") - for look_set in look_sets: - # If the set is used than we consider this look *in use* - if cmds.sets(look_set, query=True): - break - else: - unused.append(container) - - for container in unused: - log.info("Removing unused look container: %s", container['objectName']) - remove_container(container) - - log.info("Finished removing unused looks. (see log for details)") diff --git a/openpype/hosts/maya/tools/mayalookassigner/lib.py b/openpype/hosts/maya/tools/mayalookassigner/lib.py deleted file mode 100644 index fddaf6112d..0000000000 --- a/openpype/hosts/maya/tools/mayalookassigner/lib.py +++ /dev/null @@ -1,87 +0,0 @@ -import json -import logging - -from openpype.pipeline import ( - legacy_io, - get_representation_path, - registered_host, - discover_loader_plugins, - loaders_from_representation, - load_container -) -from openpype.client import get_representation_by_name -from openpype.hosts.maya.api import lib - - -log = logging.getLogger(__name__) - - -def get_look_relationships(version_id): - # type: (str) -> dict - """Get relations for the look. - - Args: - version_id (str): Parent version Id. - - Returns: - dict: Dictionary of relations. - """ - - project_name = legacy_io.active_project() - json_representation = get_representation_by_name( - project_name, representation_name="json", version_id=version_id - ) - - # Load relationships - shader_relation = get_representation_path(json_representation) - with open(shader_relation, "r") as f: - relationships = json.load(f) - - return relationships - - -def load_look(version_id): - # type: (str) -> list - """Load look from version. - - Get look from version and invoke Loader for it. - - Args: - version_id (str): Version ID - - Returns: - list of shader nodes. 
- - """ - - project_name = legacy_io.active_project() - # Get representations of shader file and relationships - look_representation = get_representation_by_name( - project_name, representation_name="ma", version_id=version_id - ) - - # See if representation is already loaded, if so reuse it. - host = registered_host() - representation_id = str(look_representation['_id']) - for container in host.ls(): - if (container['loader'] == "LookLoader" and - container['representation'] == representation_id): - log.info("Reusing loaded look ...") - container_node = container['objectName'] - break - else: - log.info("Using look for the first time ...") - - # Load file - all_loaders = discover_loader_plugins() - loaders = loaders_from_representation(all_loaders, representation_id) - loader = next( - (i for i in loaders if i.__name__ == "LookLoader"), None) - if loader is None: - raise RuntimeError("Could not find LookLoader, this is a bug") - - # Reference the look file - with lib.maintained_selection(): - container_node = load_container(loader, look_representation)[0] - - return lib.get_container_members(container_node), container_node diff --git a/openpype/hosts/maya/tools/mayalookassigner/models.py b/openpype/hosts/maya/tools/mayalookassigner/models.py deleted file mode 100644 index ed6a68bee0..0000000000 --- a/openpype/hosts/maya/tools/mayalookassigner/models.py +++ /dev/null @@ -1,129 +0,0 @@ -from collections import defaultdict - -from qtpy import QtCore -import qtawesome - -from openpype.tools.utils import models -from openpype.style import get_default_entity_icon_color - - -class AssetModel(models.TreeModel): - - Columns = ["label"] - - def __init__(self, *args, **kwargs): - super(AssetModel, self).__init__(*args, **kwargs) - - self._icon_color = get_default_entity_icon_color() - - def add_items(self, items): - """ - Add items to model with needed data - Args: - items(list): collection of item data - - Returns: - None - """ - - self.beginResetModel() - - # Add the items sorted by label - sorter = lambda x: x["label"] - - for item in sorted(items, key=sorter): - - asset_item = models.Item() - asset_item.update(item) - asset_item["icon"] = "folder" - - # Add namespace children - namespaces = item["namespaces"] - for namespace in sorted(namespaces): - child = models.Item() - child.update(item) - child.update({ - "label": (namespace if namespace != ":" - else "(no namespace)"), - "namespace": namespace, - "looks": item["looks"], - "icon": "folder-o" - }) - asset_item.add_child(child) - - self.add_child(asset_item) - - self.endResetModel() - - def data(self, index, role): - - if not index.isValid(): - return - - if role == models.TreeModel.ItemRole: - node = index.internalPointer() - return node - - # Add icon - if role == QtCore.Qt.DecorationRole: - if index.column() == 0: - node = index.internalPointer() - icon = node.get("icon") - if icon: - return qtawesome.icon( - "fa.{0}".format(icon), - color=self._icon_color - ) - - return super(AssetModel, self).data(index, role) - - -class LookModel(models.TreeModel): - """Model displaying a list of looks and matches for assets""" - - Columns = ["label", "match"] - - def add_items(self, items): - """Add items to model with needed data - - An item exists of: - { - "subset": 'name of subset', - "asset": asset_document - } - - Args: - items(list): collection of item data - - Returns: - None - """ - - self.beginResetModel() - - # Collect the assets per look name (from the items of the AssetModel) - look_subsets = defaultdict(list) - for asset_item in items: - 
asset = asset_item["asset"] - for look in asset_item["looks"]: - look_subsets[look["name"]].append(asset) - - for subset in sorted(look_subsets.keys()): - assets = look_subsets[subset] - - # Define nice label without "look" prefix for readability - label = subset if not subset.startswith("look") else subset[4:] - - item_node = models.Item() - item_node["label"] = label - item_node["subset"] = subset - - # Amount of matching assets for this look - item_node["match"] = len(assets) - - # Store the assets that have this subset available - item_node["assets"] = assets - - self.add_child(item_node) - - self.endResetModel() diff --git a/openpype/hosts/maya/tools/mayalookassigner/widgets.py b/openpype/hosts/maya/tools/mayalookassigner/widgets.py deleted file mode 100644 index ef29a4c726..0000000000 --- a/openpype/hosts/maya/tools/mayalookassigner/widgets.py +++ /dev/null @@ -1,256 +0,0 @@ -import logging -from collections import defaultdict - -from qtpy import QtWidgets, QtCore - -from openpype.client import get_asset_name_identifier -from openpype.tools.utils.models import TreeModel -from openpype.tools.utils.lib import ( - preserve_expanded_rows, - preserve_selection, -) - -from .models import ( - AssetModel, - LookModel -) -from . import commands -from .views import View - -from maya import cmds - - -class AssetOutliner(QtWidgets.QWidget): - refreshed = QtCore.Signal() - selection_changed = QtCore.Signal() - - def __init__(self, parent=None): - super(AssetOutliner, self).__init__(parent) - - title = QtWidgets.QLabel("Assets", self) - title.setAlignment(QtCore.Qt.AlignCenter) - title.setStyleSheet("font-weight: bold; font-size: 12px") - - model = AssetModel() - view = View(self) - view.setModel(model) - view.customContextMenuRequested.connect(self.right_mouse_menu) - view.setSortingEnabled(False) - view.setHeaderHidden(True) - view.setIndentation(10) - - from_all_asset_btn = QtWidgets.QPushButton( - "Get All Assets", self - ) - from_selection_btn = QtWidgets.QPushButton( - "Get Assets From Selection", self - ) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(title) - layout.addWidget(from_all_asset_btn) - layout.addWidget(from_selection_btn) - layout.addWidget(view) - - # Build connections - from_selection_btn.clicked.connect(self.get_selected_assets) - from_all_asset_btn.clicked.connect(self.get_all_assets) - - selection_model = view.selectionModel() - selection_model.selectionChanged.connect(self.selection_changed) - - self.view = view - self.model = model - - self.log = logging.getLogger(__name__) - - def clear(self): - self.model.clear() - - # fix looks remaining visible when no items present after "refresh" - # todo: figure out why this workaround is needed. 
- self.selection_changed.emit() - - def add_items(self, items): - """Add new items to the outliner""" - - self.model.add_items(items) - self.refreshed.emit() - - def get_selected_items(self): - """Get current selected items from view - - Returns: - list: list of dictionaries - """ - - selection_model = self.view.selectionModel() - return [row.data(TreeModel.ItemRole) - for row in selection_model.selectedRows(0)] - - def get_all_assets(self): - """Add all items from the current scene""" - - with preserve_expanded_rows(self.view): - with preserve_selection(self.view): - self.clear() - nodes = commands.get_all_asset_nodes() - items = commands.create_items_from_nodes(nodes) - self.add_items(items) - return len(items) > 0 - - def get_selected_assets(self): - """Add all selected items from the current scene""" - - with preserve_expanded_rows(self.view): - with preserve_selection(self.view): - self.clear() - nodes = commands.get_selected_nodes() - items = commands.create_items_from_nodes(nodes) - self.add_items(items) - - def get_nodes(self, selection=False): - """Find the nodes in the current scene per asset.""" - - items = self.get_selected_items() - - # Collect all nodes by hash (optimization) - if not selection: - nodes = cmds.ls(dag=True, long=True) - else: - nodes = commands.get_selected_nodes() - id_nodes = commands.create_asset_id_hash(nodes) - - # Collect the asset item entries per asset - # and collect the namespaces we'd like to apply - assets = {} - asset_namespaces = defaultdict(set) - for item in items: - asset_id = str(item["asset"]["_id"]) - asset_name = get_asset_name_identifier(item["asset"]) - asset_namespaces[asset_name].add(item.get("namespace")) - - if asset_name in assets: - continue - - assets[asset_name] = item - assets[asset_name]["nodes"] = id_nodes.get(asset_id, []) - - # Filter nodes to namespace (if only namespaces were selected) - for asset_name in assets: - namespaces = asset_namespaces[asset_name] - - # When None is present there should be no filtering - if None in namespaces: - continue - - # Else only namespaces are selected and *not* the top entry so - # we should filter to only those namespaces. 
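The namespace filtering that follows leans on `commands.get_namespace_from_node` shown earlier; for reference, that parsing in isolation:

```python
def get_namespace_from_node(node):
    # Strip the DAG path, then split off the namespace if there is one
    parts = node.rsplit("|", 1)[-1].rsplit(":", 1)
    return parts[0] if len(parts) > 1 else ":"

print(get_namespace_from_node("|grp|ns01:model_GEO"))  # ns01
print(get_namespace_from_node("|grp|model_GEO"))       # :
```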
- nodes = assets[asset_name]["nodes"] - nodes = [node for node in nodes if - commands.get_namespace_from_node(node) in namespaces] - assets[asset_name]["nodes"] = nodes - - return assets - - def select_asset_from_items(self): - """Select nodes from listed asset""" - - items = self.get_nodes(selection=False) - nodes = [] - for item in items.values(): - nodes.extend(item["nodes"]) - - commands.select(nodes) - - def right_mouse_menu(self, pos): - """Build RMB menu for asset outliner""" - - active = self.view.currentIndex() # index under mouse - active = active.sibling(active.row(), 0) # get first column - globalpos = self.view.viewport().mapToGlobal(pos) - - menu = QtWidgets.QMenu(self.view) - - # Direct assignment - apply_action = QtWidgets.QAction(menu, text="Select nodes") - apply_action.triggered.connect(self.select_asset_from_items) - - if not active.isValid(): - apply_action.setEnabled(False) - - menu.addAction(apply_action) - - menu.exec_(globalpos) - - -class LookOutliner(QtWidgets.QWidget): - menu_apply_action = QtCore.Signal() - - def __init__(self, parent=None): - super(LookOutliner, self).__init__(parent) - - # Looks from database - title = QtWidgets.QLabel("Looks", self) - title.setAlignment(QtCore.Qt.AlignCenter) - title.setStyleSheet("font-weight: bold; font-size: 12px") - title.setAlignment(QtCore.Qt.AlignCenter) - - model = LookModel() - - # Proxy for dynamic sorting - proxy = QtCore.QSortFilterProxyModel() - proxy.setSourceModel(model) - - view = View(self) - view.setModel(proxy) - view.setMinimumHeight(180) - view.setToolTip("Use right mouse button menu for direct actions") - view.customContextMenuRequested.connect(self.right_mouse_menu) - view.sortByColumn(0, QtCore.Qt.AscendingOrder) - - # look manager layout - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(10) - layout.addWidget(title) - layout.addWidget(view) - - self.view = view - self.model = model - - def clear(self): - self.model.clear() - - def add_items(self, items): - self.model.add_items(items) - - def get_selected_items(self): - """Get current selected items from view - - Returns: - list: list of dictionaries - """ - - items = [i.data(TreeModel.ItemRole) for i in self.view.get_indices()] - return [item for item in items if item is not None] - - def right_mouse_menu(self, pos): - """Build RMB menu for look view""" - - active = self.view.currentIndex() # index under mouse - active = active.sibling(active.row(), 0) # get first column - globalpos = self.view.viewport().mapToGlobal(pos) - - if not active.isValid(): - return - - menu = QtWidgets.QMenu(self.view) - - # Direct assignment - apply_action = QtWidgets.QAction(menu, text="Assign looks..") - apply_action.triggered.connect(self.menu_apply_action) - - menu.addAction(apply_action) - - menu.exec_(globalpos) diff --git a/openpype/hosts/nuke/addon.py b/openpype/hosts/nuke/addon.py deleted file mode 100644 index 6a4b91a76d..0000000000 --- a/openpype/hosts/nuke/addon.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -import platform -from openpype.modules import OpenPypeModule, IHostAddon - -NUKE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class NukeAddon(OpenPypeModule, IHostAddon): - name = "nuke" - host_name = "nuke" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - # Add requirements to NUKE_PATH - new_nuke_paths = [ - os.path.join(NUKE_ROOT_DIR, "startup") - ] - old_nuke_path = env.get("NUKE_PATH") or "" - for path in 
old_nuke_path.split(os.pathsep): - if not path: - continue - - norm_path = os.path.normpath(path) - if norm_path not in new_nuke_paths: - new_nuke_paths.append(norm_path) - - env["NUKE_PATH"] = os.pathsep.join(new_nuke_paths) - # Remove auto screen scale factor for Qt - # - let Nuke decide it's value - env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) - # Remove tkinter library paths if are set - env.pop("TK_LIBRARY", None) - env.pop("TCL_LIBRARY", None) - - # Add vendor to PYTHONPATH - python_path = env["PYTHONPATH"] - python_path_parts = [] - if python_path: - python_path_parts = python_path.split(os.pathsep) - vendor_path = os.path.join(NUKE_ROOT_DIR, "vendor") - python_path_parts.insert(0, vendor_path) - env["PYTHONPATH"] = os.pathsep.join(python_path_parts) - - # Set default values if are not already set via settings - defaults = { - "LOGLEVEL": "DEBUG" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - # Try to add QuickTime to PATH - quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem" - if platform.system() == "windows" and os.path.exists(quick_time_path): - path_value = env.get("PATH") or "" - path_paths = [ - path - for path in path_value.split(os.pathsep) - if path - ] - path_paths.append(quick_time_path) - env["PATH"] = os.pathsep.join(path_paths) - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(NUKE_ROOT_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".nk"] diff --git a/openpype/hosts/nuke/api/actions.py b/openpype/hosts/nuke/api/actions.py deleted file mode 100644 index 995e6427af..0000000000 --- a/openpype/hosts/nuke/api/actions.py +++ /dev/null @@ -1,77 +0,0 @@ -import pyblish.api - -from openpype.pipeline.publish import get_errored_instances_from_context -from .lib import ( - reset_selection, - select_nodes -) - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid nodes in Nuke when plug-in failed. - - To retrieve the invalid nodes this assumes a static `get_invalid()` - method is available on the plugin. 
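The `NUKE_PATH` handling in `add_implementation_envs` above amounts to prepend-and-deduplicate; extracted as a standalone sketch (function name hypothetical):

```python
import os

def build_nuke_path(old_value, startup_dir):
    # Our startup folder goes first; existing entries are kept,
    # normalized, and de-duplicated.
    paths = [startup_dir]
    for path in (old_value or "").split(os.pathsep):
        if not path:
            continue
        norm = os.path.normpath(path)
        if norm not in paths:
            paths.append(norm)
    return os.pathsep.join(paths)

old = os.pathsep.join(["/plugins", "/plugins"])
print(build_nuke_path(old, "/addon/startup"))
```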
- - """ - label = "Select invalid nodes" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Awesome Icon - - def process(self, context, plugin): - - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid nodes..") - invalid = set() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.update(invalid_nodes) - else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") - - if invalid: - self.log.info("Selecting invalid nodes: {}".format(invalid)) - reset_selection() - select_nodes(invalid) - else: - self.log.info("No invalid nodes found.") - - -class SelectInstanceNodeAction(pyblish.api.Action): - """Select instance node for failed plugin.""" - label = "Select instance node" - on = "failed" # This action is only available on a failed plug-in - icon = "mdi.cursor-default-click" - - def process(self, context, plugin): - - # Get the errored instances for the plug-in - errored_instances = get_errored_instances_from_context( - context, plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding instance nodes..") - nodes = set() - for instance in errored_instances: - instance_node = instance.data.get("transientData", {}).get("node") - if not instance_node: - raise RuntimeError( - "No transientData['node'] found on instance: {}".format( - instance - ) - ) - nodes.add(instance_node) - - if nodes: - self.log.info("Selecting instance nodes: {}".format(nodes)) - reset_selection() - select_nodes(nodes) - else: - self.log.info("No instance nodes found.") diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py deleted file mode 100644 index cdefd05c11..0000000000 --- a/openpype/hosts/nuke/api/lib.py +++ /dev/null @@ -1,3525 +0,0 @@ -import os -from pprint import pformat -import re -import json -import six -import functools -import warnings -import platform -import tempfile -import contextlib -from collections import OrderedDict - -import nuke -from qtpy import QtCore, QtWidgets - -from openpype import AYON_SERVER_ENABLED -from openpype.client import ( - get_project, - get_asset_by_name, - get_versions, - get_last_versions, - get_representations, -) - -from openpype.host import HostDirmap -from openpype.tools.utils import host_tools -from openpype.pipeline.workfile.workfile_template_builder import ( - TemplateProfileNotFound -) -from openpype.lib import ( - env_value_to_bool, - Logger, - get_version_from_path, - StringTemplate, -) - -from openpype.settings import ( - get_project_settings, - get_current_project_settings, -) -from openpype.modules import ModulesManager -from openpype.pipeline.template_data import get_template_data_with_names -from openpype.pipeline import ( - discover_legacy_creator_plugins, - Anatomy, - get_current_host_name, - get_current_project_name, - get_current_asset_name, -) -from openpype.pipeline.context_tools import ( - get_custom_workfile_template_from_session -) -from openpype.pipeline.colorspace import get_imageio_config -from openpype.pipeline.workfile import BuildWorkfile -from . 
import gizmo_menu -from .constants import ASSIST - -from .workio import save_file -from .utils import get_node_outputs - -log = Logger.get_logger(__name__) - -_NODE_TAB_NAME = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") -AVALON_LABEL = os.getenv("AVALON_LABEL") or "Avalon" -AVALON_TAB = "{}".format(AVALON_LABEL) -AVALON_DATA_GROUP = "{}DataGroup".format(AVALON_LABEL.capitalize()) -EXCLUDED_KNOB_TYPE_ON_READ = ( - 20, # Tab Knob - 26, # Text Knob (But for backward compatibility, still be read - # if value is not an empty string.) -) -JSON_PREFIX = "JSON:::" -ROOT_DATA_KNOB = "publish_context" -INSTANCE_DATA_KNOB = "publish_instance" - - -class DeprecatedWarning(DeprecationWarning): - pass - - -def deprecated(new_destination): - """Mark functions as deprecated. - - It will result in a warning being emitted when the function is used. - """ - - func = None - if callable(new_destination): - func = new_destination - new_destination = None - - def _decorator(decorated_func): - if new_destination is None: - warning_message = ( - " Please check content of deprecated function to figure out" - " possible replacement." - ) - else: - warning_message = " Please replace your usage with '{}'.".format( - new_destination - ) - - @functools.wraps(decorated_func) - def wrapper(*args, **kwargs): - warnings.simplefilter("always", DeprecatedWarning) - warnings.warn( - ( - "Call to deprecated function '{}'" - "\nFunction was moved or removed.{}" - ).format(decorated_func.__name__, warning_message), - category=DeprecatedWarning, - stacklevel=4 - ) - return decorated_func(*args, **kwargs) - return wrapper - - if func is None: - return _decorator - return _decorator(func) - - -class Context: - main_window = None - context_action_item = None - project_name = os.getenv("AVALON_PROJECT") - # Workfile related code - workfiles_launched = False - workfiles_tool_timer = None - - # Seems unused - _project_doc = None - - -def get_main_window(): - """Acquire Nuke's main window""" - if Context.main_window is None: - - top_widgets = QtWidgets.QApplication.topLevelWidgets() - name = "Foundry::UI::DockMainWindow" - for widget in top_widgets: - if ( - widget.inherits("QMainWindow") - and widget.metaObject().className() == name - ): - Context.main_window = widget - break - return Context.main_window - - -def set_node_data(node, knobname, data): - """Write data to node invisible knob - - Will create new in case it doesn't exists - or update the one already created. - - Args: - node (nuke.Node): node object - knobname (str): knob name - data (dict): data to be stored in knob - """ - # if exists then update data - if knobname in node.knobs(): - log.debug("Updating knobname `{}` on node `{}`".format( - knobname, node.name() - )) - update_node_data(node, knobname, data) - return - - log.debug("Creating knobname `{}` on node `{}`".format( - knobname, node.name() - )) - # else create new - knob_value = JSON_PREFIX + json.dumps(data) - knob = nuke.String_Knob(knobname) - knob.setValue(knob_value) - knob.setFlag(nuke.INVISIBLE) - node.addKnob(knob) - - -def get_node_data(node, knobname): - """Read data from node. 
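`set_node_data` above and the `get_node_data` reader documented below round-trip JSON behind a sentinel prefix so that the payload can be told apart from ordinary string knob values. A minimal sketch of that scheme outside of Nuke:

```python
import json

JSON_PREFIX = "JSON:::"

def encode_knob_value(data):
    return JSON_PREFIX + json.dumps(data)

def decode_knob_value(raw):
    # Only strings carrying the sentinel prefix decode to data
    if isinstance(raw, str) and raw.startswith(JSON_PREFIX):
        try:
            return json.loads(raw[len(JSON_PREFIX):])
        except json.JSONDecodeError:
            return None
    return None

print(decode_knob_value(encode_knob_value({"family": "render"})))
```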
- - Args: - node (nuke.Node): node object - knobname (str): knob name - - Returns: - dict: data stored in knob - """ - if knobname not in node.knobs(): - return - - rawdata = node[knobname].getValue() - if ( - isinstance(rawdata, six.string_types) - and rawdata.startswith(JSON_PREFIX) - ): - try: - return json.loads(rawdata[len(JSON_PREFIX):]) - except json.JSONDecodeError: - return - - -def update_node_data(node, knobname, data): - """Update already present data. - - Args: - node (nuke.Node): node object - knobname (str): knob name - data (dict): data to update knob value - """ - knob = node[knobname] - node_data = get_node_data(node, knobname) or {} - node_data.update(data) - knob_value = JSON_PREFIX + json.dumps(node_data) - knob.setValue(knob_value) - - -class Knobby(object): - """[DEPRECATED] For creating knob which it's type isn't - mapped in `create_knobs` - - Args: - type (string): Nuke knob type name - value: Value to be set with `Knob.setValue`, put `None` if not required - flags (list, optional): Knob flags to be set with `Knob.setFlag` - *args: Args other than knob name for initializing knob class - - """ - - def __init__(self, type, value, flags=None, *args): - self.type = type - self.value = value - self.flags = flags or [] - self.args = args - - def create(self, name, nice=None): - knob_cls = getattr(nuke, self.type) - knob = knob_cls(name, nice, *self.args) - if self.value is not None: - knob.setValue(self.value) - for flag in self.flags: - knob.setFlag(flag) - return knob - - @staticmethod - def nice_naming(key): - """Convert camelCase name into UI Display Name""" - words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]) - return " ".join(words) - - -def create_knobs(data, tab=None): - """Create knobs by data - - Depending on the type of each dict value and creates the correct Knob. - - Mapped types: - bool: nuke.Boolean_Knob - int: nuke.Int_Knob - float: nuke.Double_Knob - list: nuke.Enumeration_Knob - six.string_types: nuke.String_Knob - - dict: If it's a nested dict (all values are dict), will turn into - A tabs group. Or just a knobs group. 
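The camelCase-to-label conversion in `Knobby.nice_naming` above (repeated inside `create_knobs` below) is easy to verify in isolation:

```python
import re

def nice_naming(key):
    # Split on capital letters after upper-casing the first character
    words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:])
    return " ".join(words)

print(nice_naming("myFilePath"))  # My File Path
```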
- - Args: - data (dict): collection of attributes and their value - tab (string, optional): Knobs' tab name - - Returns: - list: A list of `nuke.Knob` objects - - """ - def nice_naming(key): - """Convert camelCase name into UI Display Name""" - words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]) - return " ".join(words) - - # Turn key-value pairs into knobs - knobs = list() - - if tab: - knobs.append(nuke.Tab_Knob(tab)) - - for key, value in data.items(): - # Knob name - if isinstance(key, tuple): - name, nice = key - else: - name, nice = key, nice_naming(key) - - # Create knob by value type - if isinstance(value, Knobby): - knobby = value - knob = knobby.create(name, nice) - - elif isinstance(value, float): - knob = nuke.Double_Knob(name, nice) - knob.setValue(value) - - elif isinstance(value, bool): - knob = nuke.Boolean_Knob(name, nice) - knob.setValue(value) - knob.setFlag(nuke.STARTLINE) - - elif isinstance(value, int): - knob = nuke.Int_Knob(name, nice) - knob.setValue(value) - - elif isinstance(value, six.string_types): - knob = nuke.String_Knob(name, nice) - knob.setValue(value) - - elif isinstance(value, list): - knob = nuke.Enumeration_Knob(name, nice, value) - - elif isinstance(value, dict): - if all(isinstance(v, dict) for v in value.values()): - # Create a group of tabs - begain = nuke.BeginTabGroup_Knob() - end = nuke.EndTabGroup_Knob() - begain.setName(name) - end.setName(name + "_End") - knobs.append(begain) - for k, v in value.items(): - knobs += create_knobs(v, tab=k) - knobs.append(end) - else: - # Create a group of knobs - knobs.append(nuke.Tab_Knob( - name, nice, nuke.TABBEGINCLOSEDGROUP)) - knobs += create_knobs(value) - knobs.append( - nuke.Tab_Knob(name + "_End", nice, nuke.TABENDGROUP)) - continue - - else: - raise TypeError("Unsupported type: %r" % type(value)) - - knobs.append(knob) - - return knobs - - -def imprint(node, data, tab=None): - """Store attributes with value on node - - Parse user data into Node knobs. - Use `collections.OrderedDict` to ensure knob order. 
- - Args: - node(nuke.Node): node object from Nuke - data(dict): collection of attributes and their value - - Returns: - None - - Examples: - ``` - import nuke - from openpype.hosts.nuke.api import lib - - node = nuke.createNode("NoOp") - data = { - # Regular type of attributes - "myList": ["x", "y", "z"], - "myBool": True, - "myFloat": 0.1, - "myInt": 5, - - # Creating non-default imprint type of knob - "MyFilePath": lib.Knobby("File_Knob", "/file/path"), - "divider": lib.Knobby("Text_Knob", ""), - - # Manual nice knob naming - ("my_knob", "Nice Knob Name"): "some text", - - # dict type will be created as knob group - "KnobGroup": { - "knob1": 5, - "knob2": "hello", - "knob3": ["a", "b"], - }, - - # Nested dict will be created as tab group - "TabGroup": { - "tab1": {"count": 5}, - "tab2": {"isGood": True}, - "tab3": {"direction": ["Left", "Right"]}, - }, - } - lib.imprint(node, data, tab="Demo") - - ``` - - """ - for knob in create_knobs(data, tab): - node.addKnob(knob) - - -@deprecated -def add_publish_knob(node): - """[DEPRECATED] Add Publish knob to node - - Arguments: - node (nuke.Node): nuke node to be processed - - Returns: - node (nuke.Node): processed nuke node - - """ - if "publish" not in node.knobs(): - body = OrderedDict() - body[("divd", "Publishing")] = Knobby("Text_Knob", '') - body["publish"] = True - imprint(node, body) - return node - - -@deprecated("openpype.hosts.nuke.api.lib.set_node_data") -def set_avalon_knob_data(node, data=None, prefix="avalon:"): - """[DEPRECATED] Sets data into nodes's avalon knob - - This function is still used but soon will be deprecated. - Use `set_node_data` instead. - - Arguments: - node (nuke.Node): Nuke node to imprint with data, - data (dict, optional): Data to be imprinted into AvalonTab - prefix (str, optional): filtering prefix - - Returns: - node (nuke.Node) - - Examples: - data = { - 'asset': 'sq020sh0280', - 'family': 'render', - 'subset': 'subsetMain' - } - """ - data = data or dict() - create = OrderedDict() - - tab_name = AVALON_TAB - editable = ["asset", "subset", "name", "namespace"] - - existed_knobs = node.knobs() - - for key, value in data.items(): - knob_name = prefix + key - gui_name = key - - if knob_name in existed_knobs: - # Set value - try: - node[knob_name].setValue(value) - except TypeError: - node[knob_name].setValue(str(value)) - else: - # New knob - name = (knob_name, gui_name) # Hide prefix on GUI - if key in editable: - create[name] = value - else: - create[name] = Knobby("String_Knob", - str(value), - flags=[nuke.READ_ONLY]) - if tab_name in existed_knobs: - tab_name = None - else: - tab = OrderedDict() - warn = Knobby("Text_Knob", "Warning! Do not change following data!") - divd = Knobby("Text_Knob", "") - head = [ - (("warn", ""), warn), - (("divd", ""), divd), - ] - tab[AVALON_DATA_GROUP] = OrderedDict(head + list(create.items())) - create = tab - - imprint(node, create, tab=tab_name) - return node - - -@deprecated("openpype.hosts.nuke.api.lib.get_node_data") -def get_avalon_knob_data(node, prefix="avalon:", create=True): - """[DEPRECATED] Gets a data from nodes's avalon knob - - This function is still used but soon will be deprecated. - Use `get_node_data` instead. 
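The prefix filtering this deprecated reader performs can be pictured with a plain dict standing in for a node's knobs (tightened here to a `startswith` check):

```python
knobs = {
    "avalon:asset": "sq020sh0280",
    "avalon:family": "render",
    "label": "ignored",
}

prefix = "avalon:"
data = {
    key[len(prefix):]: value
    for key, value in knobs.items()
    if key.startswith(prefix)
}
print(data)  # {'asset': 'sq020sh0280', 'family': 'render'}
```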
- - Arguments: - node (obj): Nuke node to search for data, - prefix (str, optional): filtering prefix - - Returns: - data (dict) - """ - - data = {} - if AVALON_TAB not in node.knobs(): - return data - - # check if lists - if not isinstance(prefix, list): - prefix = [prefix] - - # loop prefix - for p in prefix: - # check if the node is avalon tracked - try: - # check if data available on the node - test = node[AVALON_DATA_GROUP].value() - log.debug("Only testing if data available: `{}`".format(test)) - except NameError as e: - # if it doesn't then create it - log.debug("Creating avalon knob: `{}`".format(e)) - if create: - node = set_avalon_knob_data(node) - return get_avalon_knob_data(node) - return {} - - # get data from filtered knobs - data.update({k.replace(p, ''): node[k].value() - for k in node.knobs().keys() - if p in k}) - - return data - - -@deprecated -def fix_data_for_node_create(data): - """[DEPRECATED] Fixing data to be used for nuke knobs - """ - for k, v in data.items(): - if isinstance(v, six.text_type): - data[k] = str(v) - if str(v).startswith("0x"): - data[k] = int(v, 16) - return data - - -@deprecated -def add_write_node_legacy(name, **kwarg): - """[DEPRECATED] Adding nuke write node - Arguments: - name (str): nuke node name - kwarg (attrs): data for nuke knobs - Returns: - node (obj): nuke write node - """ - use_range_limit = kwarg.get("use_range_limit", None) - - w = nuke.createNode( - "Write", - "name {}".format(name), - inpanel=False - ) - - w["file"].setValue(kwarg["file"]) - - for k, v in kwarg.items(): - if "frame_range" in k: - continue - log.info([k, v]) - try: - w[k].setValue(v) - except KeyError as e: - log.debug(e) - continue - - if use_range_limit: - w["use_limit"].setValue(True) - w["first"].setValue(kwarg["frame_range"][0]) - w["last"].setValue(kwarg["frame_range"][1]) - - return w - - -def add_write_node(name, file_path, knobs, **kwarg): - """Adding nuke write node - - Arguments: - name (str): nuke node name - kwarg (attrs): data for nuke knobs - - Returns: - node (obj): nuke write node - """ - use_range_limit = kwarg.get("use_range_limit", None) - - w = nuke.createNode( - "Write", - "name {}".format(name), - inpanel=False - ) - - w["file"].setValue(file_path) - - # finally add knob overrides - set_node_knobs_from_settings(w, knobs, **kwarg) - - if use_range_limit: - w["use_limit"].setValue(True) - w["first"].setValue(kwarg["frame_range"][0]) - w["last"].setValue(kwarg["frame_range"][1]) - - return w - - -def read_avalon_data(node): - """Return user-defined knobs from given `node` - - Args: - node (nuke.Node): Nuke node object - - Returns: - list: A list of nuke.Knob object - - """ - def compat_prefixed(knob_name): - if knob_name.startswith("avalon:"): - return knob_name[len("avalon:"):] - elif knob_name.startswith("ak:"): - return knob_name[len("ak:"):] - - data = dict() - - pattern = ("(?<=addUserKnob {)" - "([0-9]*) (\\S*)" # Matching knob type and knob name - "(?=[ |}])") - tcl_script = node.writeKnobs(nuke.WRITE_USER_KNOB_DEFS) - result = re.search(pattern, tcl_script) - - if result: - first_user_knob = result.group(2) - # Collect user knobs from the end of the knob list - for knob in reversed(node.allKnobs()): - knob_name = knob.name() - if not knob_name: - # Ignore unnamed knob - continue - - knob_type = nuke.knob(knob.fullyQualifiedName(), type=True) - value = knob.value() - - if ( - knob_type not in EXCLUDED_KNOB_TYPE_ON_READ or - # For compating read-only string data that imprinted - # by `nuke.Text_Knob`. 
-                (knob_type == 26 and value)
-            ):
-                key = compat_prefixed(knob_name)
-                if key is not None:
-                    data[key] = value
-
-            if knob_name == first_user_knob:
-                break
-
-    return data
-
-
-def get_node_path(path, padding=4):
-    """Get filename for the Nuke write with padded number as '#'
-
-    Arguments:
-        path (str): The path to render to.
-
-    Returns:
-        tuple: head, padding, tail (extension)
-
-    Examples:
-        >>> get_node_path("test.exr")
-        ('test', 4, '.exr')
-
-        >>> get_node_path("filename.#####.tif")
-        ('filename.', 5, '.tif')
-
-        >>> get_node_path("foobar##.tif")
-        ('foobar', 2, '.tif')
-
-        >>> get_node_path("foobar_%08d.tif")
-        ('foobar_', 8, '.tif')
-    """
-    filename, ext = os.path.splitext(path)
-
-    # Find a final number group
-    if '%' in filename:
-        match = re.match('.*?(%[0-9]+d)$', filename)
-        if match:
-            padding = int(match.group(1).replace('%', '').replace('d', ''))
-            # remove number from end since fusion
-            # will swap it with the frame number
-            filename = filename.replace(match.group(1), '')
-    elif '#' in filename:
-        match = re.match('.*?(#+)$', filename)
-
-        if match:
-            padding = len(match.group(1))
-            # remove number from end since fusion
-            # will swap it with the frame number
-            filename = filename.replace(match.group(1), '')
-
-    return filename, padding, ext
-
-
-def get_nuke_imageio_settings():
-    return get_project_settings(Context.project_name)["nuke"]["imageio"]
-
-
-@deprecated("openpype.hosts.nuke.api.lib.get_nuke_imageio_settings")
-def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
-    '''[DEPRECATED] Get preset data for dataflow (fileType, compression, bitDepth)
-    '''
-
-    assert any([creator, nodeclass]), nuke.message(
-        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
-
-    imageio_nodes = get_nuke_imageio_settings()["nodes"]
-    required_nodes = imageio_nodes["requiredNodes"]
-
-    # HACK: for backward compatibility this needs to be optional
-    override_nodes = imageio_nodes.get("overrideNodes", [])
-
-    imageio_node = None
-    for node in required_nodes:
-        log.info(node)
-        if (
-            nodeclass in node["nukeNodeClass"]
-            and creator in node["plugins"]
-        ):
-            imageio_node = node
-            break
-
-    log.debug("__ imageio_node: {}".format(imageio_node))
-
-    # find matching override node
-    override_imageio_node = None
-    for onode in override_nodes:
-        log.info(onode)
-        if nodeclass not in onode["nukeNodeClass"]:
-            continue
-
-        if creator not in onode["plugins"]:
-            continue
-
-        if (
-            onode["subsets"]
-            and not any(
-                re.search(s.lower(), subset.lower())
-                for s in onode["subsets"]
-            )
-        ):
-            continue
-
-        override_imageio_node = onode
-        break
-
-    log.debug("__ override_imageio_node: {}".format(override_imageio_node))
-    # add overrides to imageio_node
-    if override_imageio_node:
-        # get all knob names in imageio_node
-        knob_names = [k["name"] for k in imageio_node["knobs"]]
-
-        for oknob in override_imageio_node["knobs"]:
-            for knob in imageio_node["knobs"]:
-                # override matching knob name
-                if oknob["name"] == knob["name"]:
-                    log.debug(
-                        "_ overriding knob: `{}` > `{}`".format(
-                            knob, oknob
-                        ))
-                    if not oknob["value"]:
-                        # remove original knob if no value found in oknob
-                        imageio_node["knobs"].remove(knob)
-                    else:
-                        # override knob value with oknob's
-                        knob["value"] = oknob["value"]
-
-            # add missing knobs into imageio_node
-            if oknob["name"] not in knob_names:
-                log.debug(
-                    "_ adding knob: `{}`".format(oknob))
-                imageio_node["knobs"].append(oknob)
-                knob_names.append(oknob["name"])
-
-    log.info("ImageIO node: {}".format(imageio_node))
-    return imageio_node
-
-
-def 
get_imageio_node_setting(node_class, plugin_name, subset): - ''' Get preset data for dataflow (fileType, compression, bitDepth) - ''' - imageio_nodes = get_nuke_imageio_settings()["nodes"] - required_nodes = imageio_nodes["requiredNodes"] - - imageio_node = None - for node in required_nodes: - log.info(node) - if ( - node_class in node["nukeNodeClass"] - and plugin_name in node["plugins"] - ): - imageio_node = node - break - - log.debug("__ imageio_node: {}".format(imageio_node)) - - if not imageio_node: - return - - # find overrides and update knobs with them - get_imageio_node_override_setting( - node_class, - plugin_name, - subset, - imageio_node["knobs"] - ) - - log.info("ImageIO node: {}".format(imageio_node)) - return imageio_node - - -def get_imageio_node_override_setting( - node_class, plugin_name, subset, knobs_settings -): - ''' Get imageio node overrides from settings - ''' - imageio_nodes = get_nuke_imageio_settings()["nodes"] - override_nodes = imageio_nodes["overrideNodes"] - - # find matching override node - override_imageio_node = None - for onode in override_nodes: - log.debug("__ onode: {}".format(onode)) - log.debug("__ subset: {}".format(subset)) - if node_class not in onode["nukeNodeClass"]: - continue - - if plugin_name not in onode["plugins"]: - continue - - if ( - onode["subsets"] - and not any( - re.search(s.lower(), subset.lower()) - for s in onode["subsets"] - ) - ): - continue - - override_imageio_node = onode - break - - log.debug("__ override_imageio_node: {}".format(override_imageio_node)) - # add overrides to imageio_node - if override_imageio_node: - # get all knob names in imageio_node - knob_names = [k["name"] for k in knobs_settings] - - for oknob in override_imageio_node["knobs"]: - for knob in knobs_settings: - # override matching knob name - if oknob["name"] == knob["name"]: - log.debug( - "_ overriding knob: `{}` > `{}`".format( - knob, oknob - )) - if not oknob["value"]: - # remove original knob if no value found in oknob - knobs_settings.remove(knob) - else: - # override knob value with oknob's - knob["value"] = oknob["value"] - - # add missing knobs into imageio_node - if oknob["name"] not in knob_names: - log.debug( - "_ adding knob: `{}`".format(oknob)) - knobs_settings.append(oknob) - knob_names.append(oknob["name"]) - - return knobs_settings - - -def get_imageio_input_colorspace(filename): - ''' Get input file colorspace based on regex in settings. 
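-
-    A minimal sketch of the expected behaviour; the path and the returned
-    colorspace below are hypothetical, the real regex/colorspace pairs come
-    from the project imageio settings:
-
-        >>> get_imageio_input_colorspace("/plates/sh010_plate_v001.exr")
-        'ACEScg'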
- ''' - imageio_regex_inputs = ( - get_nuke_imageio_settings()["regexInputs"]["inputs"]) - - preset_clrsp = None - for regexInput in imageio_regex_inputs: - if bool(re.search(regexInput["regex"], filename)): - preset_clrsp = str(regexInput["colorspace"]) - - return preset_clrsp - - -def get_view_process_node(): - reset_selection() - - ipn_node = None - for v_ in nuke.allNodes(filter="Viewer"): - ipn = v_['input_process_node'].getValue() - ipn_node = nuke.toNode(ipn) - - # skip if no input node is set - if not ipn: - continue - - if ipn == "VIEWER_INPUT" and not ipn_node: - # since it is set by default we can ignore it - # nobody usually use this but use it if - # it exists in nodes - continue - - if not ipn_node: - # in case a Viewer node is transferred from - # different workfile with old values - raise NameError(( - "Input process node name '{}' set in " - "Viewer '{}' is doesn't exists in nodes" - ).format(ipn, v_.name())) - - ipn_node.setSelected(True) - - if ipn_node: - return duplicate_node(ipn_node) - - -def on_script_load(): - ''' Callback for ffmpeg support - ''' - if nuke.env["LINUX"]: - nuke.tcl('load ffmpegReader') - nuke.tcl('load ffmpegWriter') - else: - nuke.tcl('load movReader') - nuke.tcl('load movWriter') - - -def check_inventory_versions(): - """ - Actual version idetifier of Loaded containers - - Any time this function is run it will check all nodes and filter only - Loader nodes for its version. It will get all versions from database - and check if the node is having actual version. If not then it will color - it to red. - """ - from .pipeline import parse_container - - # get all Loader nodes by avalon attribute metadata - node_with_repre_id = [] - repre_ids = set() - # Find all containers and collect it's node and representation ids - for node in nuke.allNodes(): - container = parse_container(node) - - if container: - node = nuke.toNode(container["objectName"]) - avalon_knob_data = read_avalon_data(node) - repre_id = avalon_knob_data["representation"] - - repre_ids.add(repre_id) - node_with_repre_id.append((node, repre_id)) - - # Skip if nothing was found - if not repre_ids: - return - - project_name = get_current_project_name() - # Find representations based on found containers - repre_docs = get_representations( - project_name, - representation_ids=repre_ids, - fields=["_id", "parent"] - ) - # Store representations by id and collect version ids - repre_docs_by_id = {} - version_ids = set() - for repre_doc in repre_docs: - # Use stringed representation id to match value in containers - repre_id = str(repre_doc["_id"]) - repre_docs_by_id[repre_id] = repre_doc - version_ids.add(repre_doc["parent"]) - - version_docs = get_versions( - project_name, version_ids, fields=["_id", "name", "parent"] - ) - # Store versions by id and collect subset ids - version_docs_by_id = {} - subset_ids = set() - for version_doc in version_docs: - version_docs_by_id[version_doc["_id"]] = version_doc - subset_ids.add(version_doc["parent"]) - - # Query last versions based on subset ids - last_versions_by_subset_id = get_last_versions( - project_name, subset_ids=subset_ids, fields=["_id", "parent"] - ) - - # Loop through collected container nodes and their representation ids - for item in node_with_repre_id: - # Some python versions of nuke can't unfold tuple in for loop - node, repre_id = item - repre_doc = repre_docs_by_id.get(repre_id) - # Failsafe for not finding the representation. 
-        if not repre_doc:
-            log.warning((
-                "Could not find the representation on node \"{}\""
-            ).format(node.name()))
-            continue
-
-        version_id = repre_doc["parent"]
-        version_doc = version_docs_by_id.get(version_id)
-        if not version_doc:
-            log.warning((
-                "Could not find the version on node \"{}\""
-            ).format(node.name()))
-            continue
-
-        # Get last version based on subset id
-        subset_id = version_doc["parent"]
-        last_version = last_versions_by_subset_id[subset_id]
-        # Check if last version is same as current version
-        if last_version["_id"] == version_doc["_id"]:
-            color_value = "0x4ecd25ff"
-        else:
-            color_value = "0xd84f20ff"
-        node["tile_color"].setValue(int(color_value, 16))
-
-
-def writes_version_sync():
-    ''' Callback synchronizing version of publishable write nodes
-    '''
-    try:
-        rootVersion = get_version_from_path(nuke.root().name())
-        padding = len(rootVersion)
-        new_version = "v" + str("{" + ":0>{}".format(padding) + "}").format(
-            int(rootVersion)
-        )
-        log.debug("new_version: {}".format(new_version))
-    except Exception:
-        return
-
-    for each in nuke.allNodes(filter="Write"):
-        # check if the node is avalon tracked
-        if _NODE_TAB_NAME not in each.knobs():
-            continue
-
-        avalon_knob_data = read_avalon_data(each)
-
-        try:
-            if avalon_knob_data["families"] not in ["render"]:
-                log.debug(avalon_knob_data["families"])
-                continue
-
-            node_file = each["file"].value()
-
-            node_version = "v" + get_version_from_path(node_file)
-            log.debug("node_version: {}".format(node_version))
-
-            node_new_file = node_file.replace(node_version, new_version)
-            each["file"].setValue(node_new_file)
-            if not os.path.isdir(os.path.dirname(node_new_file)):
-                log.warning("Path does not exist! I am creating it.")
-                os.makedirs(os.path.dirname(node_new_file))
-        except Exception as e:
-            log.warning(
-                "Write node: `{}` has no version in path: {}".format(
-                    each.name(), e))
-
-
-def version_up_script():
-    ''' Raise the working script's version
-    '''
-    import nukescripts
-    nukescripts.script_and_write_nodes_version_up()
-
-
-def check_subsetname_exists(nodes, subset_name):
-    """
-    Check if a node with the given subset name was already created,
-    to prevent duplicates.
-
-    Arguments:
-        nodes (list): list of nuke.Node objects
-        subset_name (str): name we try to find
-
-    Returns:
-        bool: True or False
-    """
-    return next((True for n in nodes
-                 if subset_name in read_avalon_data(n).get("subset", "")),
-                False)
-
-
-def format_anatomy(data):
-    ''' Helper function for formatting anatomy paths
-
-    Arguments:
-        data (dict): dictionary with attributes used for formatting
-
-    Return:
-        path (str)
-    '''
-
-    project_name = get_current_project_name()
-    anatomy = Anatomy(project_name)
-    log.debug("__ anatomy.templates: {}".format(anatomy.templates))
-
-    padding = None
-    if "frame_padding" in anatomy.templates.keys():
-        padding = int(anatomy.templates["frame_padding"])
-    elif "render" in anatomy.templates.keys():
-        padding = int(
-            anatomy.templates["render"].get(
-                "frame_padding"
-            )
-        )
-
-    version = data.get("version", None)
-    if not version:
-        file = script_name()
-        data["version"] = get_version_from_path(file)
-
-    if AYON_SERVER_ENABLED:
-        asset_name = data["folderPath"]
-    else:
-        asset_name = data["asset"]
-    task_name = data["task"]
-    host_name = get_current_host_name()
-    context_data = get_template_data_with_names(
-        project_name, asset_name, task_name, host_name
-    )
-    data.update(context_data)
-    data.update({
-        "subset": data["subset"],
-        "family": data["family"],
-        "frame": "#" * padding,
-    })
-    return anatomy.format(data)
-
-
-def 
script_name(): - ''' Returns nuke script path - ''' - return nuke.root().knob("name").value() - - -def add_button_write_to_read(node): - name = "createReadNode" - label = "Read From Rendered" - value = "import write_to_read;\ - write_to_read.write_to_read(nuke.thisNode(), allow_relative=False)" - knob = nuke.PyScript_Knob(name, label, value) - knob.clearFlag(nuke.STARTLINE) - node.addKnob(knob) - - -def add_button_clear_rendered(node, path): - name = "clearRendered" - label = "Clear Rendered" - value = "import clear_rendered;\ - clear_rendered.clear_rendered(\"{}\")".format(path) - knob = nuke.PyScript_Knob(name, label, value) - node.addKnob(knob) - - -def create_prenodes( - prev_node, - nodes_setting, - plugin_name=None, - subset=None, - **kwargs -): - last_node = None - for_dependency = {} - for name, node in nodes_setting.items(): - # get attributes - nodeclass = node["nodeclass"] - knobs = node["knobs"] - - # create node - now_node = nuke.createNode( - nodeclass, - "name {}".format(name), - inpanel=False - ) - - # add for dependency linking - for_dependency[name] = { - "node": now_node, - "dependent": node["dependent"] - } - - if all([plugin_name, subset]): - # find imageio overrides - get_imageio_node_override_setting( - now_node.Class(), - plugin_name, - subset, - knobs - ) - - # add data to knob - set_node_knobs_from_settings(now_node, knobs, **kwargs) - - # switch actual node to previous - last_node = now_node - - for _node_name, node_prop in for_dependency.items(): - if not node_prop["dependent"]: - node_prop["node"].setInput( - 0, prev_node) - elif node_prop["dependent"] in for_dependency: - _prev_node = for_dependency[node_prop["dependent"]]["node"] - node_prop["node"].setInput( - 0, _prev_node) - else: - log.warning("Dependency has wrong name of node: {}".format( - node_prop - )) - - return last_node - - -def create_write_node( - name, - data, - input=None, - prenodes=None, - linked_knobs=None, - **kwargs -): - ''' Creating write node which is group node - - Arguments: - name (str): name of node - data (dict): creator write instance data - input (node)[optional]: selected node to connect to - prenodes (dict)[optional]: - nodes to be created before write with dependency - review (bool)[optional]: adding review knob - farm (bool)[optional]: rendering workflow target - kwargs (dict)[optional]: additional key arguments for formatting - - Example: - prenodes = { - "nodeName": { - "nodeclass": "Reformat", - "dependent": [ - following_node_01, - ... - ], - "knobs": [ - { - "type": "text", - "name": "knobname", - "value": "knob value" - }, - ... - ] - }, - ... - } - - - Return: - node (obj): group node with avalon data as Knobs - ''' - prenodes = prenodes or {} - - # filtering variables - plugin_name = data["creator"] - subset = data["subset"] - - # get knob settings for write node - imageio_writes = get_imageio_node_setting( - node_class="Write", - plugin_name=plugin_name, - subset=subset - ) - - for knob in imageio_writes["knobs"]: - if knob["name"] == "file_type": - ext = knob["value"] - - data.update({ - "imageio_writes": imageio_writes, - "ext": ext - }) - anatomy_filled = format_anatomy(data) - - # build file path to workfiles - fdir = str(anatomy_filled["work"]["folder"]).replace("\\", "/") - data["work"] = fdir - fpath = StringTemplate(data["fpath_template"]).format_strict(data) - - # create directory - if not os.path.isdir(os.path.dirname(fpath)): - log.warning("Path does not exist! 
I am creating it.") - os.makedirs(os.path.dirname(fpath)) - - GN = nuke.createNode("Group", "name {}".format(name)) - - prev_node = None - with GN: - if input: - input_name = str(input.name()).replace(" ", "") - # if connected input node was defined - prev_node = nuke.createNode( - "Input", - "name {}".format(input_name), - inpanel=False - ) - else: - # generic input node connected to nothing - prev_node = nuke.createNode( - "Input", - "name {}".format("rgba"), - inpanel=False - ) - - # creating pre-write nodes `prenodes` - last_prenode = create_prenodes( - prev_node, - prenodes, - plugin_name, - subset, - **kwargs - ) - if last_prenode: - prev_node = last_prenode - - # creating write node - write_node = now_node = add_write_node( - "inside_{}".format(name), - fpath, - imageio_writes["knobs"], - **data - ) - # connect to previous node - now_node.setInput(0, prev_node) - - # switch actual node to previous - prev_node = now_node - - now_node = nuke.createNode("Output", "name Output1", inpanel=False) - - # connect to previous node - now_node.setInput(0, prev_node) - - # add divider - GN.addKnob(nuke.Text_Knob('', 'Rendering')) - - # Add linked knobs. - linked_knob_names = [] - - # add input linked knobs and create group only if any input - if linked_knobs: - linked_knob_names.append("_grp-start_") - linked_knob_names.extend(linked_knobs) - linked_knob_names.append("_grp-end_") - - linked_knob_names.append("Render") - - for _k_name in linked_knob_names: - if "_grp-start_" in _k_name: - knob = nuke.Tab_Knob( - "rnd_attr", "Rendering attributes", nuke.TABBEGINCLOSEDGROUP) - GN.addKnob(knob) - elif "_grp-end_" in _k_name: - knob = nuke.Tab_Knob( - "rnd_attr_end", "Rendering attributes", nuke.TABENDGROUP) - GN.addKnob(knob) - else: - if "___" in _k_name: - # add divider - GN.addKnob(nuke.Text_Knob("")) - else: - # add linked knob by _k_name - link = nuke.Link_Knob("") - link.makeLink(write_node.name(), _k_name) - link.setName(_k_name) - - # make render - if "Render" in _k_name: - link.setLabel("Render Local") - link.setFlag(0x1000) - GN.addKnob(link) - - # adding write to read button - add_button_write_to_read(GN) - - # adding write to read button - add_button_clear_rendered(GN, os.path.dirname(fpath)) - - # set tile color - tile_color = next( - iter( - k["value"] for k in imageio_writes["knobs"] - if "tile_color" in k["name"] - ), [255, 0, 0, 255] - ) - GN["tile_color"].setValue( - color_gui_to_int(tile_color)) - - return GN - - -@deprecated("openpype.hosts.nuke.api.lib.create_write_node") -def create_write_node_legacy( - name, data, input=None, prenodes=None, - review=True, linked_knobs=None, farm=True -): - ''' Creating write node which is group node - - Arguments: - name (str): name of node - data (dict): data to be imprinted - input (node): selected node to connect to - prenodes (list, optional): list of lists, definitions for nodes - to be created before write - review (bool): adding review knob - - Example: - prenodes = [ - { - "nodeName": { - "class": "" # string - "knobs": [ - ("knobName": value), - ... - ], - "dependent": [ - following_node_01, - ... - ] - } - }, - ... 
- ] - - Return: - node (obj): group node with avalon data as Knobs - ''' - knob_overrides = data.get("knobs", []) - nodeclass = data["nodeclass"] - creator = data["creator"] - subset = data["subset"] - - imageio_writes = get_created_node_imageio_setting_legacy( - nodeclass, creator, subset - ) - for knob in imageio_writes["knobs"]: - if knob["name"] == "file_type": - representation = knob["value"] - - host_name = get_current_host_name() - try: - data.update({ - "app": host_name, - "imageio_writes": imageio_writes, - "representation": representation, - }) - anatomy_filled = format_anatomy(data) - - except Exception as e: - msg = "problem with resolving anatomy template: {}".format(e) - log.error(msg) - nuke.message(msg) - - # build file path to workfiles - fdir = str(anatomy_filled["work"]["folder"]).replace("\\", "/") - fpath = data["fpath_template"].format( - work=fdir, version=data["version"], subset=data["subset"], - frame=data["frame"], - ext=representation - ) - - # create directory - if not os.path.isdir(os.path.dirname(fpath)): - log.warning("Path does not exist! I am creating it.") - os.makedirs(os.path.dirname(fpath)) - - _data = OrderedDict({ - "file": fpath - }) - - # adding dataflow template - log.debug("imageio_writes: `{}`".format(imageio_writes)) - for knob in imageio_writes["knobs"]: - _data[knob["name"]] = knob["value"] - - _data = fix_data_for_node_create(_data) - - log.debug("_data: `{}`".format(_data)) - - if "frame_range" in data.keys(): - _data["frame_range"] = data.get("frame_range", None) - log.debug("_data[frame_range]: `{}`".format(_data["frame_range"])) - - GN = nuke.createNode("Group", "name {}".format(name)) - - prev_node = None - with GN: - if input: - input_name = str(input.name()).replace(" ", "") - # if connected input node was defined - prev_node = nuke.createNode( - "Input", "name {}".format(input_name)) - else: - # generic input node connected to nothing - prev_node = nuke.createNode( - "Input", - "name {}".format("rgba"), - inpanel=False - ) - # creating pre-write nodes `prenodes` - if prenodes: - for node in prenodes: - # get attributes - pre_node_name = node["name"] - klass = node["class"] - knobs = node["knobs"] - dependent = node["dependent"] - - # create node - now_node = nuke.createNode( - klass, - "name {}".format(pre_node_name), - inpanel=False - ) - - # add data to knob - for _knob in knobs: - knob, value = _knob - try: - now_node[knob].value() - except NameError: - log.warning( - "knob `{}` does not exist on node `{}`".format( - knob, now_node["name"].value() - )) - continue - - if not knob and not value: - continue - - log.info((knob, value)) - - if isinstance(value, str): - if "[" in value: - now_node[knob].setExpression(value) - else: - now_node[knob].setValue(value) - - # connect to previous node - if dependent: - if isinstance(dependent, (tuple or list)): - for i, node_name in enumerate(dependent): - input_node = nuke.createNode( - "Input", - "name {}".format(node_name), - inpanel=False - ) - now_node.setInput(1, input_node) - - elif isinstance(dependent, str): - input_node = nuke.createNode( - "Input", - "name {}".format(node_name), - inpanel=False - ) - now_node.setInput(0, input_node) - - else: - now_node.setInput(0, prev_node) - - # switch actual node to previous - prev_node = now_node - - # creating write node - - write_node = now_node = add_write_node_legacy( - "inside_{}".format(name), - **_data - ) - # connect to previous node - now_node.setInput(0, prev_node) - - # switch actual node to previous - prev_node = now_node - - 
now_node = nuke.createNode("Output", "name Output1", inpanel=False) - - # connect to previous node - now_node.setInput(0, prev_node) - - # imprinting group node - set_avalon_knob_data(GN, data["avalon"]) - add_publish_knob(GN) - add_rendering_knobs(GN, farm) - - if review: - add_review_knob(GN) - - # add divider - GN.addKnob(nuke.Text_Knob('', 'Rendering')) - - # Add linked knobs. - linked_knob_names = [] - - # add input linked knobs and create group only if any input - if linked_knobs: - linked_knob_names.append("_grp-start_") - linked_knob_names.extend(linked_knobs) - linked_knob_names.append("_grp-end_") - - linked_knob_names.append("Render") - - for _k_name in linked_knob_names: - if "_grp-start_" in _k_name: - knob = nuke.Tab_Knob( - "rnd_attr", "Rendering attributes", nuke.TABBEGINCLOSEDGROUP) - GN.addKnob(knob) - elif "_grp-end_" in _k_name: - knob = nuke.Tab_Knob( - "rnd_attr_end", "Rendering attributes", nuke.TABENDGROUP) - GN.addKnob(knob) - else: - if "___" in _k_name: - # add divider - GN.addKnob(nuke.Text_Knob("")) - else: - # add linked knob by _k_name - link = nuke.Link_Knob("") - link.makeLink(write_node.name(), _k_name) - link.setName(_k_name) - - # make render - if "Render" in _k_name: - link.setLabel("Render Local") - link.setFlag(0x1000) - GN.addKnob(link) - - # adding write to read button - add_button_write_to_read(GN) - - # adding write to read button - add_button_clear_rendered(GN, os.path.dirname(fpath)) - - # Deadline tab. - add_deadline_tab(GN) - - # open the our Tab as default - GN[_NODE_TAB_NAME].setFlag(0) - - # set tile color - tile_color = _data.get("tile_color", "0xff0000ff") - GN["tile_color"].setValue(tile_color) - - # override knob values from settings - for knob in knob_overrides: - knob_type = knob["type"] - knob_name = knob["name"] - knob_value = knob["value"] - if knob_name not in GN.knobs(): - continue - if not knob_value: - continue - - # set correctly knob types - if knob_type == "string": - knob_value = str(knob_value) - if knob_type == "number": - knob_value = int(knob_value) - if knob_type == "decimal_number": - knob_value = float(knob_value) - if knob_type == "bool": - knob_value = bool(knob_value) - if knob_type in ["2d_vector", "3d_vector", "color", "box"]: - knob_value = list(knob_value) - - GN[knob_name].setValue(knob_value) - - return GN - - -def set_node_knobs_from_settings(node, knob_settings, **kwargs): - """ Overriding knob values from settings - - Using `schema_nuke_knob_inputs` for knob type definitions. - - Args: - node (nuke.Node): nuke node - knob_settings (list): list of dict. 
Keys are `type`, `name`, `value` - kwargs (dict)[optional]: keys for formattable knob settings - """ - for knob in knob_settings: - log.debug("__ knob: {}".format(pformat(knob))) - knob_type = knob["type"] - knob_name = knob["name"] - - if knob_name not in node.knobs(): - continue - - if knob_type == "expression": - knob_expression = knob["expression"] - node[knob_name].setExpression( - knob_expression - ) - continue - - # first deal with formattable knob settings - if knob_type == "formatable": - template = knob["template"] - to_type = knob["to_type"] - try: - _knob_value = template.format( - **kwargs - ) - except KeyError as msg: - raise KeyError( - "Not able to format expression: {}".format(msg)) - - # convert value to correct type - if to_type == "2d_vector": - knob_value = _knob_value.split(";").split(",") - else: - knob_value = _knob_value - - knob_type = to_type - - else: - knob_value = knob["value"] - - if not knob_value: - continue - - knob_value = convert_knob_value_to_correct_type( - knob_type, knob_value) - - node[knob_name].setValue(knob_value) - - -def convert_knob_value_to_correct_type(knob_type, knob_value): - # first convert string types to string - # just to ditch unicode - if isinstance(knob_value, six.text_type): - knob_value = str(knob_value) - - # set correctly knob types - if knob_type == "bool": - knob_value = bool(knob_value) - elif knob_type == "decimal_number": - knob_value = float(knob_value) - elif knob_type == "number": - knob_value = int(knob_value) - elif knob_type == "text": - knob_value = knob_value - elif knob_type == "color_gui": - knob_value = color_gui_to_int(knob_value) - elif knob_type in ["2d_vector", "3d_vector", "color", "box"]: - knob_value = [float(val_) for val_ in knob_value] - - return knob_value - - -def color_gui_to_int(color_gui): - hex_value = ( - "0x{0:0>2x}{1:0>2x}{2:0>2x}{3:0>2x}").format(*color_gui) - return int(hex_value, 16) - - -@deprecated -def add_rendering_knobs(node, farm=True): - ''' Adds additional rendering knobs to given node - - Arguments: - node (obj): nuke node object to be fixed - - Return: - node (obj): with added knobs - ''' - knob_options = ["Use existing frames", "Local"] - if farm: - knob_options.append("On farm") - - if "render" not in node.knobs(): - knob = nuke.Enumeration_Knob("render", "", knob_options) - knob.clearFlag(nuke.STARTLINE) - node.addKnob(knob) - return node - - -@deprecated -def add_review_knob(node): - ''' Adds additional review knob to given node - - Arguments: - node (obj): nuke node object to be fixed - - Return: - node (obj): with added knob - ''' - if "review" not in node.knobs(): - knob = nuke.Boolean_Knob("review", "Review") - knob.setValue(True) - node.addKnob(knob) - return node - - -@deprecated -def add_deadline_tab(node): - # TODO: remove this as it is only linked to legacy create - node.addKnob(nuke.Tab_Knob("Deadline")) - - knob = nuke.Int_Knob("deadlinePriority", "Priority") - knob.setValue(50) - node.addKnob(knob) - - knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size") - knob.setValue(0) - node.addKnob(knob) - - knob = nuke.Int_Knob("deadlineConcurrentTasks", "Concurrent tasks") - # zero as default will get value from Settings during collection - # instead of being an explicit user override, see precollect_write.py - knob.setValue(0) - node.addKnob(knob) - - knob = nuke.Text_Knob("divd", '') - knob.setValue('') - node.addKnob(knob) - - knob = nuke.Boolean_Knob("suspend_publish", "Suspend publish") - knob.setValue(False) - node.addKnob(knob) - - -@deprecated -def 
get_deadline_knob_names(): - # TODO: remove this as it is only linked to legacy - # validate_write_deadline_tab - return [ - "Deadline", - "deadlineChunkSize", - "deadlinePriority", - "deadlineConcurrentTasks" - ] - - -def create_backdrop(label="", color=None, layer=0, - nodes=None): - """ - Create Backdrop node - - Arguments: - color (str): nuke compatible string with color code - layer (int): layer of node usually used (self.pos_layer - 1) - label (str): the message - nodes (list): list of nodes to be wrapped into backdrop - - """ - assert isinstance(nodes, list), "`nodes` should be a list of nodes" - - # Calculate bounds for the backdrop node. - bdX = min([node.xpos() for node in nodes]) - bdY = min([node.ypos() for node in nodes]) - bdW = max([node.xpos() + node.screenWidth() for node in nodes]) - bdX - bdH = max([node.ypos() + node.screenHeight() for node in nodes]) - bdY - - # Expand the bounds to leave a little border. Elements are offsets - # for left, top, right and bottom edges respectively - left, top, right, bottom = (-20, -65, 20, 60) - bdX += left - bdY += top - bdW += (right - left) - bdH += (bottom - top) - - bdn = nuke.createNode("BackdropNode") - bdn["z_order"].setValue(layer) - - if color: - bdn["tile_color"].setValue(int(color, 16)) - - bdn["xpos"].setValue(bdX) - bdn["ypos"].setValue(bdY) - bdn["bdwidth"].setValue(bdW) - bdn["bdheight"].setValue(bdH) - - if label: - bdn["label"].setValue(label) - - bdn["note_font_size"].setValue(20) - return bdn - - -class WorkfileSettings(object): - """ - All settings for workfile will be set - - This object is setting all possible root settings to the workfile. - Including Colorspace, Frame ranges, Resolution format. It can set it - to Root node or to any given node. - - Arguments: - root (node): nuke's root node - nodes (list): list of nuke's nodes - nodes_filter (list): filtering classes for nodes - - """ - - def __init__(self, root_node=None, nodes=None, **kwargs): - project_doc = kwargs.get("project") - if project_doc is None: - project_name = get_current_project_name() - project_doc = get_project(project_name) - else: - project_name = project_doc["name"] - - Context._project_doc = project_doc - self._project_name = project_name - self._asset = ( - kwargs.get("asset_name") - or get_current_asset_name() - ) - self._asset_entity = get_asset_by_name(project_name, self._asset) - self._root_node = root_node or nuke.root() - self._nodes = self.get_nodes(nodes=nodes) - - self.data = kwargs - - def get_nodes(self, nodes=None, nodes_filter=None): - - if not isinstance(nodes, list) and not isinstance(nodes_filter, list): - return [n for n in nuke.allNodes()] - elif not isinstance(nodes, list) and isinstance(nodes_filter, list): - nodes = list() - for filter in nodes_filter: - [nodes.append(n) for n in nuke.allNodes(filter=filter)] - return nodes - elif isinstance(nodes, list) and not isinstance(nodes_filter, list): - return [n for n in self._nodes] - elif isinstance(nodes, list) and isinstance(nodes_filter, list): - for filter in nodes_filter: - return [n for n in self._nodes if filter in n.Class()] - - def set_viewers_colorspace(self, viewer_dict): - ''' Adds correct colorspace to viewer - - Arguments: - viewer_dict (dict): adjustments from presets - - ''' - if not isinstance(viewer_dict, dict): - msg = "set_viewers_colorspace(): argument should be dictionary" - log.error(msg) - nuke.message(msg) - return - - filter_knobs = [ - "viewerProcess", - "wipe_position" - ] - - erased_viewers = [] - for v in nuke.allNodes(filter="Viewer"): - # 
set viewProcess to preset from settings - v["viewerProcess"].setValue( - str(viewer_dict["viewerProcess"]) - ) - - if str(viewer_dict["viewerProcess"]) \ - not in v["viewerProcess"].value(): - copy_inputs = v.dependencies() - copy_knobs = {k: v[k].value() for k in v.knobs() - if k not in filter_knobs} - - # delete viewer with wrong settings - erased_viewers.append(v["name"].value()) - nuke.delete(v) - - # create new viewer - nv = nuke.createNode("Viewer") - - # connect to original inputs - for i, n in enumerate(copy_inputs): - nv.setInput(i, n) - - # set copied knobs - for k, v in copy_knobs.items(): - print(k, v) - nv[k].setValue(v) - - # set viewerProcess - nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"])) - - if erased_viewers: - log.warning( - "Attention! Viewer nodes {} were erased." - "It had wrong color profile".format(erased_viewers)) - - def set_root_colorspace(self, imageio_host): - ''' Adds correct colorspace to root - - Arguments: - imageio_host (dict): host colorspace configurations - - ''' - config_data = get_imageio_config( - project_name=get_current_project_name(), - host_name="nuke" - ) - - workfile_settings = imageio_host["workfile"] - viewer_process_settings = imageio_host["viewer"]["viewerProcess"] - - if not config_data: - # TODO: backward compatibility for old projects - remove later - # perhaps old project overrides is having it set to older version - # with use of `customOCIOConfigPath` - resolved_path = None - if workfile_settings.get("customOCIOConfigPath"): - unresolved_path = workfile_settings["customOCIOConfigPath"] - ocio_paths = unresolved_path[platform.system().lower()] - - for ocio_p in ocio_paths: - resolved_path = str(ocio_p).format(**os.environ) - if not os.path.exists(resolved_path): - continue - - if resolved_path: - # set values to root - self._root_node["colorManagement"].setValue("OCIO") - self._root_node["OCIO_config"].setValue("custom") - self._root_node["customOCIOConfigPath"].setValue( - resolved_path) - else: - # no ocio config found and no custom path used - if self._root_node["colorManagement"].value() \ - not in str(workfile_settings["colorManagement"]): - self._root_node["colorManagement"].setValue( - str(workfile_settings["colorManagement"])) - - # second set ocio version - if self._root_node["OCIO_config"].value() \ - not in str(workfile_settings["OCIO_config"]): - self._root_node["OCIO_config"].setValue( - str(workfile_settings["OCIO_config"])) - - else: - # OCIO config path is defined from prelaunch hook - self._root_node["colorManagement"].setValue("OCIO") - - # print previous settings in case some were found in workfile - residual_path = self._root_node["customOCIOConfigPath"].value() - if residual_path: - log.info("Residual OCIO config path found: `{}`".format( - residual_path - )) - - # we dont need the key anymore - workfile_settings.pop("customOCIOConfigPath", None) - workfile_settings.pop("colorManagement", None) - workfile_settings.pop("OCIO_config", None) - - # get monitor lut from settings respecting Nuke version differences - monitor_lut = workfile_settings.pop("monitorLut", None) - monitor_lut_data = self._get_monitor_settings( - viewer_process_settings, monitor_lut) - - # set monitor related knobs luts (MonitorOut, Thumbnails) - for knob, value_ in monitor_lut_data.items(): - workfile_settings[knob] = value_ - - # then set the rest - for knob, value_ in workfile_settings.items(): - # skip unfilled ocio config path - # it will be dict in value - if isinstance(value_, dict): - continue - # skip empty values - if 
not value_: - continue - if self._root_node[knob].value() not in value_: - self._root_node[knob].setValue(str(value_)) - log.debug("nuke.root()['{}'] changed to: {}".format( - knob, value_)) - - # set ocio config path - if config_data: - config_path = config_data["path"].replace("\\", "/") - log.info("OCIO config path found: `{}`".format( - config_path)) - - # check if there's a mismatch between environment and settings - correct_settings = self._is_settings_matching_environment( - config_data) - - # if there's no mismatch between environment and settings - if correct_settings: - self._set_ocio_config_path_to_workfile(config_data) - - def _get_monitor_settings(self, viewer_lut, monitor_lut): - """ Get monitor settings from viewer and monitor lut - - Args: - viewer_lut (str): viewer lut string - monitor_lut (str): monitor lut string - - Returns: - dict: monitor settings - """ - output_data = {} - m_display, m_viewer = get_viewer_config_from_string(monitor_lut) - v_display, v_viewer = get_viewer_config_from_string(viewer_lut) - - # set monitor lut differently for nuke version 14 - if nuke.NUKE_VERSION_MAJOR >= 14: - output_data["monitorOutLUT"] = create_viewer_profile_string( - m_viewer, m_display, path_like=False) - # monitorLut=thumbnails - viewerProcess makes more sense - output_data["monitorLut"] = create_viewer_profile_string( - v_viewer, v_display, path_like=False) - - if nuke.NUKE_VERSION_MAJOR == 13: - output_data["monitorOutLUT"] = create_viewer_profile_string( - m_viewer, m_display, path_like=False) - # monitorLut=thumbnails - viewerProcess makes more sense - output_data["monitorLut"] = create_viewer_profile_string( - v_viewer, v_display, path_like=True) - if nuke.NUKE_VERSION_MAJOR <= 12: - output_data["monitorLut"] = create_viewer_profile_string( - m_viewer, m_display, path_like=True) - - return output_data - - def _is_settings_matching_environment(self, config_data): - """ Check if OCIO config path is different from environment - - Args: - config_data (dict): OCIO config data from settings - - Returns: - bool: True if settings are matching environment, False otherwise - """ - current_ocio_path = os.environ["OCIO"] - settings_ocio_path = config_data["path"] - - # normalize all paths to forward slashes - current_ocio_path = current_ocio_path.replace("\\", "/") - settings_ocio_path = settings_ocio_path.replace("\\", "/") - - if current_ocio_path != settings_ocio_path: - message = """ -It seems like there's a mismatch between the OCIO config path set in your Nuke -settings and the actual path set in your OCIO environment. - -To resolve this, please follow these steps: -1. Close Nuke if it's currently open. -2. Reopen Nuke. - -Please note the paths for your reference: - -- The OCIO environment path currently set: - `{env_path}` - -- The path in your current Nuke settings: - `{settings_path}` - -Reopening Nuke should synchronize these paths and resolve any discrepancies. -""" - nuke.message( - message.format( - env_path=current_ocio_path, - settings_path=settings_ocio_path - ) - ) - return False - - return True - - def _set_ocio_config_path_to_workfile(self, config_data): - """ Set OCIO config path to workfile - - Path set into nuke workfile. It is trying to replace path with - environment variable if possible. If not, it will set it as it is. - It also saves the script to apply the change, but only if it's not - empty Untitled script. 
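-
-    For example, a resolved path living under an environment variable
-    root, such as `C:/ocio/aces_1.2/config.ocio` with
-    `BUILTIN_OCIO_ROOT=C:/ocio` (hypothetical values), is written to the
-    knob as a TCL expression; see `_replace_ocio_path_with_env_var` below.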
- - Args: - config_data (dict): OCIO config data from settings - - """ - # replace path with env var if possible - ocio_path = self._replace_ocio_path_with_env_var(config_data) - - log.info("Setting OCIO config path to: `{}`".format( - ocio_path)) - - self._root_node["customOCIOConfigPath"].setValue( - ocio_path - ) - self._root_node["OCIO_config"].setValue("custom") - - # only save script if it's not empty - if self._root_node["name"].value() != "": - log.info("Saving script to apply OCIO config path change.") - nuke.scriptSave() - - def _get_included_vars(self, config_template): - """ Get all environment variables included in template - - Args: - config_template (str): OCIO config template from settings - - Returns: - list: list of environment variables included in template - """ - # resolve all environments for whitelist variables - included_vars = [ - "BUILTIN_OCIO_ROOT", - ] - - # include all project root related env vars - for env_var in os.environ: - if env_var.startswith("OPENPYPE_PROJECT_ROOT_"): - included_vars.append(env_var) - - # use regex to find env var in template with format {ENV_VAR} - # this way we make sure only template used env vars are included - env_var_regex = r"\{([A-Z0-9_]+)\}" - env_var = re.findall(env_var_regex, config_template) - if env_var: - included_vars.append(env_var[0]) - - return included_vars - - def _replace_ocio_path_with_env_var(self, config_data): - """ Replace OCIO config path with environment variable - - Environment variable is added as TCL expression to path. TCL expression - is also replacing backward slashes found in path for windows - formatted values. - - Args: - config_data (str): OCIO config dict from settings - - Returns: - str: OCIO config path with environment variable TCL expression - """ - config_path = config_data["path"].replace("\\", "/") - config_template = config_data["template"] - - included_vars = self._get_included_vars(config_template) - - # make sure we return original path if no env var is included - new_path = config_path - - for env_var in included_vars: - env_path = os.getenv(env_var) - if not env_path: - continue - - # it has to be directory current process can see - if not os.path.isdir(env_path): - continue - - # make sure paths are in same format - env_path = env_path.replace("\\", "/") - path = config_path.replace("\\", "/") - - # check if env_path is in path and replace to first found positive - if env_path in path: - # with regsub we make sure path format of slashes is correct - resub_expr = ( - "[regsub -all {{\\\\}} [getenv {}] \"/\"]").format(env_var) - - new_path = path.replace( - env_path, resub_expr - ) - break - - return new_path - - def set_writes_colorspace(self): - ''' Adds correct colorspace to write node dict - - ''' - for node in nuke.allNodes(filter="Group", group=self._root_node): - log.info("Setting colorspace to `{}`".format(node.name())) - - # get data from avalon knob - avalon_knob_data = read_avalon_data(node) - node_data = get_node_data(node, INSTANCE_DATA_KNOB) - - if ( - # backward compatibility - # TODO: remove this once old avalon data api will be removed - avalon_knob_data - and avalon_knob_data.get("id") != "pyblish.avalon.instance" - ): - continue - elif ( - node_data - and node_data.get("id") != "pyblish.avalon.instance" - ): - continue - - if ( - # backward compatibility - # TODO: remove this once old avalon data api will be removed - avalon_knob_data - and "creator" not in avalon_knob_data - ): - continue - elif ( - node_data - and "creator_identifier" not in node_data - ): - 
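-                # instance data without a creator identifier cannot be
-                # matched to write settings, so the node is skipped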
continue - - nuke_imageio_writes = None - if avalon_knob_data: - # establish families - families = [avalon_knob_data["family"]] - if avalon_knob_data.get("families"): - families.append(avalon_knob_data.get("families")) - - nuke_imageio_writes = get_imageio_node_setting( - node_class=avalon_knob_data["families"], - plugin_name=avalon_knob_data["creator"], - subset=avalon_knob_data["subset"] - ) - elif node_data: - nuke_imageio_writes = get_write_node_template_attr(node) - - log.debug("nuke_imageio_writes: `{}`".format(nuke_imageio_writes)) - - if not nuke_imageio_writes: - return - - write_node = None - - # get into the group node - node.begin() - for x in nuke.allNodes(): - if x.Class() == "Write": - write_node = x - node.end() - - if not write_node: - return - - try: - # write all knobs to node - for knob in nuke_imageio_writes["knobs"]: - value = knob["value"] - if isinstance(value, six.text_type): - value = str(value) - if str(value).startswith("0x"): - value = int(value, 16) - - log.debug("knob: {}| value: {}".format( - knob["name"], value - )) - write_node[knob["name"]].setValue(value) - except TypeError: - log.warning( - "Legacy workflow didn't work, switching to current") - - set_node_knobs_from_settings( - write_node, nuke_imageio_writes["knobs"]) - - def set_reads_colorspace(self, read_clrs_inputs): - """ Setting colorspace to Read nodes - - Looping through all read nodes and tries to set colorspace based - on regex rules in presets - """ - changes = {} - for n in nuke.allNodes(): - file = nuke.filename(n) - if n.Class() != "Read": - continue - - # check if any colorspace presets for read is matching - preset_clrsp = None - - for input in read_clrs_inputs: - if not bool(re.search(input["regex"], file)): - continue - preset_clrsp = input["colorspace"] - - if preset_clrsp is not None: - current = n["colorspace"].value() - future = str(preset_clrsp) - if current != future: - changes[n.name()] = { - "from": current, - "to": future - } - - log.debug(changes) - if changes: - msg = "Read nodes are not set to correct colorspace:\n\n" - for nname, knobs in changes.items(): - msg += ( - " - node: '{0}' is now '{1}' but should be '{2}'\n" - ).format(nname, knobs["from"], knobs["to"]) - - msg += "\nWould you like to change it?" 
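-        # nuke.ask() shows a blocking Yes/No dialog; the colorspace
-        # changes below are applied only when the user confirms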
-
-        if nuke.ask(msg):
-            for nname, knobs in changes.items():
-                n = nuke.toNode(nname)
-                n["colorspace"].setValue(knobs["to"])
-                log.info(
-                    "Setting `{0}` to `{1}`".format(
-                        nname,
-                        knobs["to"]))
-
-    def set_colorspace(self):
-        ''' Setting colorspace following presets
-        '''
-        # get imageio
-        nuke_colorspace = get_nuke_imageio_settings()
-
-        log.info("Setting colorspace to workfile...")
-        try:
-            self.set_root_colorspace(nuke_colorspace)
-        except AttributeError as _error:
-            msg = "Set Colorspace to workfile error: {}".format(_error)
-            nuke.message(msg)
-
-        log.info("Setting colorspace to viewers...")
-        try:
-            self.set_viewers_colorspace(nuke_colorspace["viewer"])
-        except AttributeError as _error:
-            msg = "Set Colorspace to viewer error: {}".format(_error)
-            nuke.message(msg)
-
-        log.info("Setting colorspace to write nodes...")
-        try:
-            self.set_writes_colorspace()
-        except AttributeError as _error:
-            nuke.message(_error)
-            log.error(_error)
-
-        log.info("Setting colorspace to read nodes...")
-        read_clrs_inputs = nuke_colorspace["regexInputs"].get("inputs", [])
-        if read_clrs_inputs:
-            self.set_reads_colorspace(read_clrs_inputs)
-
-    def reset_frame_range_handles(self):
-        """Set frame range to current asset"""
-
-        if "data" not in self._asset_entity:
-            msg = "Asset '{}' has no 'data' set".format(self._asset)
-            log.warning(msg)
-            nuke.message(msg)
-            return
-
-        asset_data = self._asset_entity["data"]
-
-        missing_cols = []
-        check_cols = ["fps", "frameStart", "frameEnd",
-                      "handleStart", "handleEnd"]
-
-        for col in check_cols:
-            if col not in asset_data:
-                missing_cols.append(col)
-
-        if len(missing_cols) > 0:
-            missing = ", ".join(missing_cols)
-            msg = "'{}' are not set for asset '{}'!".format(
-                missing, self._asset)
-            log.warning(msg)
-            nuke.message(msg)
-            return
-
-        # get handles values
-        handle_start = asset_data["handleStart"]
-        handle_end = asset_data["handleEnd"]
-
-        fps = float(asset_data["fps"])
-        frame_start_handle = int(asset_data["frameStart"]) - handle_start
-        frame_end_handle = int(asset_data["frameEnd"]) + handle_end
-
-        self._root_node["lock_range"].setValue(False)
-        self._root_node["fps"].setValue(fps)
-        self._root_node["first_frame"].setValue(frame_start_handle)
-        self._root_node["last_frame"].setValue(frame_end_handle)
-        self._root_node["lock_range"].setValue(True)
-
-        # update node graph so knobs are updated
-        update_node_graph()
-
-        frame_range = '{0}-{1}'.format(
-            int(asset_data["frameStart"]),
-            int(asset_data["frameEnd"])
-        )
-
-        for node in nuke.allNodes(filter="Viewer"):
-            node['frame_range'].setValue(frame_range)
-            node['frame_range_lock'].setValue(True)
-
-        if not ASSIST:
-            set_node_data(
-                self._root_node,
-                INSTANCE_DATA_KNOB,
-                {
-                    "handleStart": int(handle_start),
-                    "handleEnd": int(handle_end)
-                }
-            )
-        else:
-            log.warning(
-                "NukeAssist mode is not allowing "
-                "updating custom knobs..."
-            )
-
-    def reset_resolution(self):
-        """Set resolution to project resolution."""
-        log.info("Resetting resolution")
-        project_name = get_current_project_name()
-        asset_data = self._asset_entity["data"]
-
-        format_data = {
-            "width": int(asset_data.get(
-                'resolutionWidth',
-                asset_data.get('resolution_width'))),
-            "height": int(asset_data.get(
-                'resolutionHeight',
-                asset_data.get('resolution_height'))),
-            "pixel_aspect": asset_data.get(
-                'pixelAspect',
-                asset_data.get('pixel_aspect', 1)),
-            "name": project_name
-        }
-
-        if any(value is None for value in format_data.values()):
-            msg = ("Missing shot attributes in DB."
-                   "\nContact your supervisor!"
-                   "\n\nWidth: `{width}`"
-                   "\nHeight: `{height}`"
-                   "\nPixel Aspect: `{pixel_aspect}`").format(**format_data)
-            log.error(msg)
-            nuke.message(msg)
-
-        existing_format = None
-        for format in nuke.formats():
-            if format_data["name"] == format.name():
-                existing_format = format
-                break
-
-        if existing_format:
-            # Enforce existing format to be correct.
-            existing_format.setWidth(format_data["width"])
-            existing_format.setHeight(format_data["height"])
-            existing_format.setPixelAspect(format_data["pixel_aspect"])
-        else:
-            format_string = self.make_format_string(**format_data)
-            log.info("Creating new format: {}".format(format_string))
-            nuke.addFormat(format_string)
-
-        nuke.root()["format"].setValue(format_data["name"])
-        log.info("Format is set.")
-
-        # update node graph so knobs are updated
-        update_node_graph()
-
-    def make_format_string(self, **kwargs):
-        if kwargs.get("r"):
-            return (
-                "{width} "
-                "{height} "
-                "{x} "
-                "{y} "
-                "{r} "
-                "{t} "
-                "{pixel_aspect:.2f} "
-                "{name}".format(**kwargs)
-            )
-        else:
-            return (
-                "{width} "
-                "{height} "
-                "{pixel_aspect:.2f} "
-                "{name}".format(**kwargs)
-            )
-
-    def set_context_settings(self):
-        # replace reset resolution from avalon core to pype's
-        self.reset_resolution()
-        # replace reset frame range from avalon core to pype's
-        self.reset_frame_range_handles()
-        # add colorspace menu item
-        self.set_colorspace()
-
-    def set_favorites(self):
-        from .utils import set_context_favorites
-
-        work_dir = os.getenv("AVALON_WORKDIR")
-        asset = get_current_asset_name()
-        favorite_items = OrderedDict()
-
-        # project
-        # get project's root and split to parts
-        projects_root = os.path.normpath(work_dir.split(
-            Context.project_name)[0])
-        # add project name
-        project_dir = os.path.join(projects_root, Context.project_name) + "/"
-        # add to favorites
-        favorite_items.update({"Project dir": project_dir.replace("\\", "/")})
-
-        # asset
-        asset_root = os.path.normpath(work_dir.split(
-            asset)[0])
-        # add asset name
-        asset_dir = os.path.join(asset_root, asset) + "/"
-        # add to favorites
-        favorite_items.update({"Shot dir": asset_dir.replace("\\", "/")})
-
-        # workdir
-        favorite_items.update({"Work dir": work_dir.replace("\\", "/")})
-
-        set_context_favorites(favorite_items)
-
-
-def get_write_node_template_attr(node):
-    ''' Gets all defined data from presets
-
-    '''
-
-    # TODO: add identifiers to settings and rename settings key
-    plugin_names_mapping = {
-        "create_write_image": "CreateWriteImage",
-        "create_write_prerender": "CreateWritePrerender",
-        "create_write_render": "CreateWriteRender"
-    }
-    # get avalon data from node
-    node_data = get_node_data(node, INSTANCE_DATA_KNOB)
-    identifier = node_data["creator_identifier"]
-
-    # return template data
-    return get_imageio_node_setting(
-        node_class="Write",
-        plugin_name=plugin_names_mapping[identifier],
-        subset=node_data["subset"]
-    )
-
-
-def get_dependent_nodes(nodes):
-    """Get all dependent nodes connected to the list of nodes.
-
-    Looking for connections outside of the nodes in incoming argument.
-
-    Arguments:
-        nodes (list): list of nuke.Node objects
-
-    Returns:
-        connections_in: dictionary of nodes and their dependencies
-        connections_out: dictionary of nodes and their dependency
-    """
-
-    connections_in = dict()
-    connections_out = dict()
-    node_names = [n.name() for n in nodes]
-    for node in nodes:
-        inputs = node.dependencies()
-        outputs = node.dependent()
-        # collect all inputs outside
-        test_in = [(i, n) for i, n in enumerate(inputs)
-                   if n.name() not in node_names]
-        if test_in:
-            connections_in.update({
-                node: test_in
-            })
-        # collect all outputs outside
-        test_out = [i for i in outputs if i.name() not in node_names]
-        if test_out:
-            # only one dependent node is allowed
-            connections_out.update({
-                node: test_out[-1]
-            })
-
-    return connections_in, connections_out
-
-
-def update_node_graph():
-    # Resetting frame will update knob values
-    try:
-        root_node_lock = nuke.root()["lock_range"].value()
-        nuke.root()["lock_range"].setValue(not root_node_lock)
-        nuke.root()["lock_range"].setValue(root_node_lock)
-
-        current_frame = nuke.frame()
-        nuke.frame(1)
-        nuke.frame(int(current_frame))
-    except Exception as error:
-        log.warning(error)
-
-
-def find_free_space_to_paste_nodes(
-    nodes,
-    group=nuke.root(),
-    direction="right",
-    offset=300
-):
-    """
-    For getting coordinates in DAG (node graph) for placing new nodes
-
-    Arguments:
-        nodes (list): list of nuke.Node objects
-        group (nuke.Node) [optional]: object in which context it is
-        direction (str) [optional]: where we want it to be placed
-                                    [left, right, top, bottom]
-        offset (int) [optional]: offset from the rest of the nodes
-
-    Returns:
-        xpos (int): x coordinate in DAG
-        ypos (int): y coordinate in DAG
-    """
-    if len(nodes) == 0:
-        return 0, 0
-
-    group_xpos = list()
-    group_ypos = list()
-
-    # get local coordinates of all nodes
-    nodes_xpos = [n.xpos() for n in nodes] + \
-                 [n.xpos() + n.screenWidth() for n in nodes]
-
-    nodes_ypos = [n.ypos() for n in nodes] + \
-                 [n.ypos() + n.screenHeight() for n in nodes]
-
-    # get complete screen size of all nodes to be placed in
-    nodes_screen_width = max(nodes_xpos) - min(nodes_xpos)
-    nodes_screen_height = max(nodes_ypos) - min(nodes_ypos)
-
-    # get screen size (r,l,t,b) of all nodes in `group`
-    with group:
-        group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \
-                     [n.xpos() + n.screenWidth() for n in nuke.allNodes()
-                      if n not in nodes]
-        group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \
-                     [n.ypos() + n.screenHeight() for n in nuke.allNodes()
-                      if n not in nodes]
-
-    # calc output left
-    if direction == "left":
-        xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset)
-        ypos = min(group_ypos)
-        return xpos, ypos
-    # calc output right
-    if direction == "right":
-        xpos = max(group_xpos) + abs(offset)
-        ypos = min(group_ypos)
-        return xpos, ypos
-    # calc output top
-    if direction == "top":
-        xpos = min(group_xpos)
-        ypos = min(group_ypos) - abs(nodes_screen_height) - abs(offset)
-        return xpos, ypos
-    # calc output bottom
-    if direction == "bottom":
-        xpos = min(group_xpos)
-        ypos = max(group_ypos) + abs(offset)
-        return xpos, ypos
-
-
-@contextlib.contextmanager
-def maintained_selection(exclude_nodes=None):
-    """Maintain selection during context
-
-    Maintain selection during context and unselect
-    all nodes after context is done.
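-    On exit the previous selection is restored; nodes passed in
-    `exclude_nodes` are deselected before the context body runs.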
-
-    Arguments:
-        exclude_nodes (list[nuke.Node]): list of nodes to be unselected
-            before context is done
-
-    Example:
-        >>> with maintained_selection():
-        ...     node["selected"].setValue(True)
-        >>> print(node["selected"].value())
-        False
-    """
-    if exclude_nodes:
-        for node in exclude_nodes:
-            node["selected"].setValue(False)
-
-    previous_selection = nuke.selectedNodes()
-
-    try:
-        yield
-    finally:
-        # unselect all selection in case there is some
-        reset_selection()
-
-        # and select all previously selected nodes
-        if previous_selection:
-            select_nodes(previous_selection)
-
-
-@contextlib.contextmanager
-def swap_node_with_dependency(old_node, new_node):
-    """ Swap node with dependency
-
-    Swap node with dependency and reconnect all inputs and outputs.
-    It removes the old node.
-
-    Arguments:
-        old_node (nuke.Node): node to be replaced
-        new_node (nuke.Node): node to replace with
-
-    Example:
-        >>> old_node_name = old_node["name"].value()
-        >>> print(old_node_name)
-        old_node_name_01
-        >>> with swap_node_with_dependency(old_node, new_node) as node_name:
-        ...     new_node["name"].setValue(node_name)
-        >>> print(new_node["name"].value())
-        old_node_name_01
-    """
-    # preserve position
-    xpos, ypos = old_node.xpos(), old_node.ypos()
-    # preserve selection after all is done
-    outputs = get_node_outputs(old_node)
-    inputs = old_node.dependencies()
-    node_name = old_node["name"].value()
-
-    try:
-        nuke.delete(old_node)
-
-        yield node_name
-    finally:
-
-        # Reconnect inputs
-        for i, node in enumerate(inputs):
-            new_node.setInput(i, node)
-        # Reconnect outputs
-        if outputs:
-            for n, pipes in outputs.items():
-                for i in pipes:
-                    n.setInput(i, new_node)
-        # return to original position
-        new_node.setXYpos(xpos, ypos)
-
-
-def reset_selection():
-    """Deselect all selected nodes"""
-    for node in nuke.selectedNodes():
-        node["selected"].setValue(False)
-
-
-def select_nodes(nodes):
-    """Selects all inputted nodes
-
-    Arguments:
-        nodes (Union[list, tuple, set]): nuke nodes to be selected
-    """
-    assert isinstance(nodes, (list, tuple, set)), \
-        "nodes has to be list, tuple or set"
-
-    for node in nodes:
-        node["selected"].setValue(True)
-
-
-def launch_workfiles_app():
-    """Show workfiles tool on nuke launch.
-
-    Trigger to show workfiles tool on application launch. Can be executed
-    only once; all other calls are ignored.
-
-    Workfiles tool show is deferred after application initialization using
-    QTimer.
-    """
-
-    if Context.workfiles_launched:
-        return
-
-    Context.workfiles_launched = True
-
-    # get all important settings
-    open_at_start = env_value_to_bool(
-        env_key="OPENPYPE_WORKFILE_TOOL_ON_START",
-        default=None)
-
-    # return if none is defined
-    if not open_at_start:
-        return
-
-    # Show workfiles tool using timer
-    # - this will probably be triggered during initialization; in that case
-    #   the application is not able to show UIs, so the show must be
-    #   deferred using a timer
-    # - the timer should be processed when initialization ends and the
-    #   application starts to process events
-    timer = QtCore.QTimer()
-    timer.timeout.connect(_launch_workfile_app)
-    timer.setInterval(100)
-    Context.workfiles_tool_timer = timer
-    timer.start()
-
-
-def _launch_workfile_app():
-    # Safeguard to not show window when application is still starting up
-    # or is already closing down.
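-    # QApplication.startingUp() is True only before the application
-    # instance exists; closingDown() becomes True once application
-    # teardown has begun (both are static Qt methods)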
-    closing_down = QtWidgets.QApplication.closingDown()
-    starting_up = QtWidgets.QApplication.startingUp()
-
-    # Stop the timer if application finished start up or is closing down
-    if closing_down or not starting_up:
-        Context.workfiles_tool_timer.stop()
-        Context.workfiles_tool_timer = None
-
-    # Skip if application is starting up or closing down
-    if starting_up or closing_down:
-        return
-
-    # Make sure on top is enabled on first show so the window is not hidden
-    # under main nuke window
-    # - this happened on Centos 7 and it is because the focus of nuke
-    #   changes to the main window after showing because of initialization
-    #   which moves workfiles tool under it
-    host_tools.show_workfiles(parent=None, on_top=True)
-
-
-@deprecated("openpype.hosts.nuke.api.lib.start_workfile_template_builder")
-def process_workfile_builder():
-    """[DEPRECATED] Process workfile builder on nuke start
-
-    This function is deprecated and will be removed in future versions.
-    Use settings for `project_settings/nuke/templated_workfile_build` which
-    are supported by api `start_workfile_template_builder()`.
-    """
-
-    # to avoid looping of the callback, remove it!
-    nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")
-
-    # get state from settings
-    project_settings = get_current_project_settings()
-    workfile_builder = project_settings["nuke"].get(
-        "workfile_builder", {})
-
-    # get settings
-    create_fv_on = workfile_builder.get("create_first_version") or None
-    builder_on = workfile_builder.get("builder_on_start") or None
-
-    last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")
-
-    # generate first version if file does not exist and feature is enabled
-    if create_fv_on and not os.path.exists(last_workfile_path):
-        # get custom template path if any
-        custom_template_path = get_custom_workfile_template_from_session(
-            project_settings=project_settings
-        )
-
-        # if custom template is defined
-        if custom_template_path:
-            log.info("Adding nodes from `{}`...".format(
-                custom_template_path
-            ))
-            try:
-                # import nodes into current script
-                nuke.nodePaste(custom_template_path)
-            except RuntimeError:
-                raise RuntimeError((
-                    "Template defined for project: {} is not working. "
-                    "Talk to your manager for advice").format(
-                        custom_template_path))
-
-        # if builder at start is defined
-        if builder_on:
-            log.info("Building nodes from presets...")
-            # build nodes by defined presets
-            BuildWorkfile().process()
-
-        log.info("Saving script as version `{}`...".format(
-            last_workfile_path
-        ))
-        # save file as version
-        save_file(last_workfile_path)
-        return
-
-
-def start_workfile_template_builder():
-    from .workfile_template_builder import (
-        build_workfile_template
-    )
-
-    # remove callback to avoid looping and duplicating the workfile
-    nuke.removeOnCreate(start_workfile_template_builder, nodeClass="Root")
-
-    log.info("Starting workfile template builder...")
-    try:
-        build_workfile_template(workfile_creation_enabled=True)
-    except TemplateProfileNotFound:
-        log.warning("Template profile not found. Skipping...")
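Both builder entry points above rely on the same guard: they are registered as Root-node onCreate callbacks and deregister themselves first thing, so creating or opening further scripts in the session cannot re-trigger the build. A minimal sketch of the pattern (function name illustrative):

import nuke

def _run_once_on_root():
    # Deregister first, before doing any work, so the callback can
    # never fire twice even if the body below creates nodes itself.
    nuke.removeOnCreate(_run_once_on_root, nodeClass="Root")
    # ... one-time setup work goes here ...

# Nuke calls this when a Root node is created, i.e. when a script
# is opened or a new one is started.
nuke.addOnCreate(_run_once_on_root, nodeClass="Root")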
-
-
-@deprecated
-def recreate_instance(origin_node, avalon_data=None):
-    """Recreate input instance with different data
-
-    Args:
-        origin_node (nuke.Node): Nuke node to recreate from
-        avalon_data (dict, optional): data to be used in new node avalon_data
-
-    Returns:
-        nuke.Node: newly created node
-    """
-    knobs_wl = ["render", "publish", "review", "ypos",
-                "use_limit", "first", "last"]
-    # get data from avalon knobs
-    data = get_avalon_knob_data(
-        origin_node)
-
-    # add input data to avalon data
-    if avalon_data:
-        data.update(avalon_data)
-
-    # capture all node knobs allowed by the white list
-    knobs_data = {k: origin_node[k].value()
-                  for k in origin_node.knobs()
-                  for key in knobs_wl
-                  if key in k}
-
-    # get node dependencies
-    inputs = origin_node.dependencies()
-    outputs = origin_node.dependent()
-
-    # remove the node
-    nuke.delete(origin_node)
-
-    # create new node
-    # get appropriate plugin class
-    creator_plugin = None
-    for Creator in discover_legacy_creator_plugins():
-        if Creator.__name__ == data["creator"]:
-            creator_plugin = Creator
-            break
-
-    # create write node with creator
-    new_node_name = data["subset"]
-    new_node = creator_plugin(new_node_name, data["asset"]).process()
-
-    # copy white-listed knobs to the new node
-    for _k, _v in knobs_data.items():
-        try:
-            print(_k, _v)
-            new_node[_k].setValue(_v)
-        except Exception as e:
-            print(e)
-
-    # connect to original inputs
-    for i, n in enumerate(inputs):
-        new_node.setInput(i, n)
-
-    # connect to outputs
-    if len(outputs) > 0:
-        for dn in outputs:
-            dn.setInput(0, new_node)
-
-    return new_node
-
-
-def add_scripts_menu():
-    try:
-        from scriptsmenu import launchfornuke
-    except ImportError:
-        log.warning(
-            "Skipping studio.menu install, because "
-            "'scriptsmenu' module seems unavailable."
-        )
-        return
-
-    # load configuration of custom menu
-    project_name = get_current_project_name()
-    project_settings = get_project_settings(project_name)
-    config = project_settings["nuke"]["scriptsmenu"]["definition"]
-    _menu = project_settings["nuke"]["scriptsmenu"]["name"]
-
-    if not config:
-        log.warning("Skipping studio menu, no definition found.")
-        return
-
-    # run the launcher for the Nuke menu
-    studio_menu = launchfornuke.main(title=_menu.title())
-
-    # apply configuration
-    studio_menu.build_from_configuration(studio_menu, config)
-
-
-def add_scripts_gizmo():
-
-    # load configuration of custom menu
-    project_name = get_current_project_name()
-    project_settings = get_project_settings(project_name)
-    platform_name = platform.system().lower()
-
-    for gizmo_settings in project_settings["nuke"]["gizmo"]:
-        gizmo_list_definition = gizmo_settings["gizmo_definition"]
-        toolbar_name = gizmo_settings["toolbar_menu_name"]
-        # gizmo_toolbar_path = gizmo_settings["gizmo_toolbar_path"]
-        gizmo_source_dir = gizmo_settings.get(
-            "gizmo_source_dir", {}).get(platform_name)
-        toolbar_icon_path = gizmo_settings.get(
-            "toolbar_icon_path", {}).get(platform_name)
-
-        if not gizmo_source_dir:
-            log.debug("Skipping studio gizmo `{}`, "
-                      "no gizmo path found.".format(toolbar_name)
-                      )
-            continue
-
-        if not gizmo_list_definition:
-            log.debug("Skipping studio gizmo `{}`, "
-                      "no definition found.".format(toolbar_name)
-                      )
-            continue
-
-        if toolbar_icon_path:
-            try:
-                toolbar_icon_path = toolbar_icon_path.format(**os.environ)
-            except KeyError as e:
-                log.error(
-                    "This environment variable doesn't exist: {}".format(e)
-                )
-
-        existing_gizmo_path = []
-        for source_dir in gizmo_source_dir:
-            try:
-                resolve_source_dir = source_dir.format(**os.environ)
-            except KeyError as e:
-                log.error(
-                    "This environment variable doesn't exist: {}".format(e)
-                )
-                continue
-            if not os.path.exists(resolve_source_dir):
-                log.warning(
-                    "The source of gizmo `{}` does not exist".format(
-                        resolve_source_dir
-                    )
-                )
-                continue
-            existing_gizmo_path.append(resolve_source_dir)
-
-        # run the launcher for Nuke toolbar
-        toolbar_menu = gizmo_menu.GizmoMenu(
-            title=toolbar_name,
-            icon=toolbar_icon_path
-        )
-
-        # apply configuration
-        toolbar_menu.add_gizmo_path(existing_gizmo_path)
-        toolbar_menu.build_from_configuration(gizmo_list_definition)
-
-
-class NukeDirmap(HostDirmap):
-    def __init__(self, file_name, *args, **kwargs):
-        """
-        Args:
-            file_name (str): full path of referenced file from workfiles
-            *args (tuple): Positional arguments for 'HostDirmap' class
-            **kwargs (dict): Keyword arguments for 'HostDirmap' class
-        """
-
-        self.file_name = file_name
-        super(NukeDirmap, self).__init__(*args, **kwargs)
-
-    def on_enable_dirmap(self):
-        pass
-
-    def dirmap_routine(self, source_path, destination_path):
-        source_path = source_path.lower().replace(os.sep, '/')
-        destination_path = destination_path.lower().replace(os.sep, '/')
-        log.debug("Map: {} with: {}->{}".format(self.file_name,
-                  source_path, destination_path))
-        if platform.system().lower() == "windows":
-            self.file_name = self.file_name.lower().replace(
-                source_path, destination_path)
-        else:
-            self.file_name = self.file_name.replace(
-                source_path, destination_path)
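The dirmap routine above is a plain string substitution: both mapping sides are lower-cased and separator-normalized first, and on Windows the file path itself is lower-cased too because the filesystem is case-insensitive there. The same logic as a standalone sketch (paths illustrative):

import os
import platform

def map_path(file_name, source_path, destination_path):
    # Normalize mapping roots the same way NukeDirmap.dirmap_routine does.
    source_path = source_path.lower().replace(os.sep, "/")
    destination_path = destination_path.lower().replace(os.sep, "/")
    if platform.system().lower() == "windows":
        # Case-insensitive filesystem: lower-case before substituting.
        return file_name.lower().replace(source_path, destination_path)
    return file_name.replace(source_path, destination_path)

# map_path("/mnt/proj_old/shot/plate.exr", "/mnt/proj_old", "/mnt/proj_new")
# -> "/mnt/proj_new/shot/plate.exr"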
-
-
-class DirmapCache:
-    """Caching class to get settings and sync_module easily and only once."""
-    _project_name = None
-    _project_settings = None
-    _sync_module_discovered = False
-    _sync_module = None
-    _mapping = None
-
-    @classmethod
-    def project_name(cls):
-        if cls._project_name is None:
-            cls._project_name = os.getenv("AVALON_PROJECT")
-        return cls._project_name
-
-    @classmethod
-    def project_settings(cls):
-        if cls._project_settings is None:
-            cls._project_settings = get_project_settings(cls.project_name())
-        return cls._project_settings
-
-    @classmethod
-    def sync_module(cls):
-        if not cls._sync_module_discovered:
-            cls._sync_module_discovered = True
-            cls._sync_module = ModulesManager().modules_by_name.get(
-                "sync_server")
-        return cls._sync_module
-
-    @classmethod
-    def mapping(cls):
-        return cls._mapping
-
-    @classmethod
-    def set_mapping(cls, mapping):
-        cls._mapping = mapping
-
-
-def dirmap_file_name_filter(file_name):
-    """Nuke callback function with single full path argument.
-
-    Checks project settings for potential mapping from source to dest.
-    """
-
-    dirmap_processor = NukeDirmap(
-        file_name,
-        "nuke",
-        DirmapCache.project_name(),
-        DirmapCache.project_settings(),
-        DirmapCache.sync_module(),
-    )
-    if not DirmapCache.mapping():
-        DirmapCache.set_mapping(dirmap_processor.get_mappings())
-
-    dirmap_processor.process_dirmap(DirmapCache.mapping())
-    if os.path.exists(dirmap_processor.file_name):
-        return dirmap_processor.file_name
-    return file_name
-
-
-@contextlib.contextmanager
-def node_tempfile():
-    """Create a temp file where node is pasted during duplication.
-
-    This is to avoid using clipboard for node duplication.
-    """
-
-    tmp_file = tempfile.NamedTemporaryFile(
-        mode="w", prefix="openpype_nuke_temp_", suffix=".nk", delete=False
-    )
-    tmp_file.close()
-    node_tempfile_path = tmp_file.name
-
-    try:
-        # Yield the path where node can be copied
-        yield node_tempfile_path
-
-    finally:
-        # Remove the file at the end
-        os.remove(node_tempfile_path)
-
-
-def duplicate_node(node):
-    reset_selection()
-
-    # select required node for duplication
-    node.setSelected(True)
-
-    with node_tempfile() as filepath:
-        # copy selected to temp filepath
-        nuke.nodeCopy(filepath)
-
-        # reset selection
-        reset_selection()
-
-        # paste node and selection is on it only
-        dupli_node = nuke.nodePaste(filepath)
-
-        # reset selection
-        reset_selection()
-
-    return dupli_node
-
-
-def get_group_io_nodes(nodes):
-    """Get the input and the output of a group of nodes."""
-
-    if not nodes:
-        raise ValueError("there are no nodes in the list")
-
-    input_node = None
-    output_node = None
-
-    if len(nodes) == 1:
-        input_node = output_node = nodes[0]
-
-    else:
-        for node in nodes:
-            if "Input" in node.name():
-                input_node = node
-
-            if "Output" in node.name():
-                output_node = node
-
-            if input_node is not None and output_node is not None:
-                break
-
-    if input_node is None:
-        log.warning("No Input found")
-
-    if output_node is None:
-        log.warning("No Output found")
-
-    return input_node, output_node
-
-
-def get_extreme_positions(nodes):
-    """Get the 4 numbers that represent the bounding box of a group of nodes."""
-
-    if not nodes:
-        raise ValueError("there are no nodes in the list")
-
-    nodes_xpos = [n.xpos() for n in nodes] + \
-        [n.xpos() + n.screenWidth() for n in nodes]
-
-    nodes_ypos = [n.ypos() for n in nodes] + \
-        [n.ypos() + n.screenHeight() for n in nodes]
-
-    min_x, min_y = (min(nodes_xpos), min(nodes_ypos))
-    max_x, max_y = (max(nodes_xpos), max(nodes_ypos))
-    return min_x, min_y, max_x, max_y
-
-
-def refresh_node(node):
-    """Correct a bug caused by the multi-threading of Nuke.
-
-    Refresh the node to make sure that it takes the desired attributes.
-    """
-
-    x = node.xpos()
-    y = node.ypos()
-    nuke.autoplaceSnap(node)
-    node.setXYpos(x, y)
-
-
-def refresh_nodes(nodes):
-    for node in nodes:
-        refresh_node(node)
-
-
-def get_names_from_nodes(nodes):
-    """Get list of node names.
-
-    Args:
-        nodes(List[nuke.Node]): List of nodes to convert into names.
-
-    Returns:
-        List[str]: Names of passed nodes.
-    """
-
-    return [
-        node.name()
-        for node in nodes
-    ]
-
-
-def get_nodes_by_names(names):
-    """Get list of nuke nodes based on their names.
-
-    Args:
-        names (List[str]): List of node names to be found.
-
-    Returns:
-        List[nuke.Node]: List of nodes found by name.
-    """
-
-    return [
-        nuke.toNode(name)
-        for name in names
-    ]
-
-
-def get_viewer_config_from_string(input_string):
-    """Convert string to display and viewer string
-
-    Args:
-        input_string (str): string with viewer
-
-    Raises:
-        IndexError: if more than one slash in input string
-        IndexError: if missing closing bracket
-
-    Returns:
-        tuple[str]: display, viewer
-    """
-    display = None
-    viewer = input_string
-    # check if `/` or `()` is in name
-    if "/" in viewer:
-        split = viewer.split("/")
-
-        # raise if there is more than one slash
-        if len(split) > 2:
-            raise IndexError((
-                "Viewer Input string is not correct. "
-                "More than one `/` slash! {}"
-            ).format(input_string))
-
-        viewer = split[1]
-        display = split[0]
-    elif "(" in viewer:
-        pattern = r"([\w\d\s\.\-]+).*[(](.*)[)]"
-        result_ = re.findall(pattern, viewer)
-        try:
-            result_ = result_.pop()
-            display = str(result_[1]).rstrip()
-            viewer = str(result_[0]).rstrip()
-        except IndexError:
-            raise IndexError((
-                "Viewer Input string is not correct. "
-                "Missing bracket! {}"
-            ).format(input_string))
-
-    return (display, viewer)
-
-
-def create_viewer_profile_string(viewer, display=None, path_like=False):
-    """Convert viewer and display to string
-
-    Args:
-        viewer (str): viewer name
-        display (Optional[str]): display name
-        path_like (Optional[bool]): if True, return path like string
-
-    Returns:
-        str: viewer config string
-    """
-    if not display:
-        return viewer
-
-    if path_like:
-        return "{}/{}".format(display, viewer)
-    return "{} ({})".format(viewer, display)
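get_viewer_config_from_string() and create_viewer_profile_string() are inverses of each other for both supported spellings, which a quick doctest-style illustration makes clear (values arbitrary):

>>> get_viewer_config_from_string("ACES/sRGB")
('ACES', 'sRGB')
>>> get_viewer_config_from_string("sRGB (ACES)")
('ACES', 'sRGB')
>>> create_viewer_profile_string("sRGB", "ACES")
'sRGB (ACES)'
>>> create_viewer_profile_string("sRGB", "ACES", path_like=True)
'ACES/sRGB'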
"renderCompositingMain.baking.0001.exr" - - Args: - filename (str): filename with frame hash - frame_start (str): start of the frame - frame_end (str): end of the frame - - Returns: - list: filename per frame of the sequence - """ - filenames = [] - for frame in range(int(frame_start), (int(frame_end) + 1)): - if "#" in filename: - # use regex to convert #### to {:0>4} - def replace(match): - return "{{:0>{}}}".format(len(match.group())) - filename_without_hashes = re.sub("#+", replace, filename) - new_filename = filename_without_hashes.format(frame) - filenames.append(new_filename) - return filenames - - -def create_camera_node_by_version(): - """Function to create the camera with the latest node class - For Nuke version 14.0 or later, the Camera4 camera node class - would be used - For the version before, the Camera2 camera node class - would be used - Returns: - Node: camera node - """ - nuke_number_version = nuke.NUKE_VERSION_MAJOR - if nuke_number_version >= 14: - return nuke.createNode("Camera4") - else: - return nuke.createNode("Camera2") - - -def link_knobs(knobs, node, group_node): - """Link knobs from inside `group_node`""" - - missing_knobs = [] - for knob in knobs: - if knob in group_node.knobs(): - continue - - if knob not in node.knobs().keys(): - missing_knobs.append(knob) - - link = nuke.Link_Knob("") - link.makeLink(node.name(), knob) - link.setName(knob) - link.setFlag(0x1000) - group_node.addKnob(link) - - if missing_knobs: - raise ValueError( - "Write node exposed knobs missing:\n\n{}\n\nPlease review" - " project settings.".format("\n".join(missing_knobs)) - ) diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py deleted file mode 100644 index c2fc684c21..0000000000 --- a/openpype/hosts/nuke/api/pipeline.py +++ /dev/null @@ -1,633 +0,0 @@ -import nuke - -import os -import importlib -from collections import OrderedDict, defaultdict - -import pyblish.api - -import openpype -from openpype.host import ( - HostBase, - IWorkfileHost, - ILoadHost, - IPublishHost -) -from openpype.settings import get_current_project_settings -from openpype.lib import register_event_callback, Logger -from openpype.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - register_inventory_action_path, - AVALON_CONTAINER_ID, - get_current_asset_name, - get_current_task_name, -) -from openpype.pipeline.workfile import BuildWorkfile -from openpype.tools.utils import host_tools - -from .command import viewer_update_and_undo_stop -from .lib import ( - Context, - ROOT_DATA_KNOB, - INSTANCE_DATA_KNOB, - get_main_window, - add_publish_knob, - WorkfileSettings, - # TODO: remove this once workfile builder will be removed - process_workfile_builder, - start_workfile_template_builder, - launch_workfiles_app, - check_inventory_versions, - set_avalon_knob_data, - read_avalon_data, - on_script_load, - dirmap_file_name_filter, - add_scripts_menu, - add_scripts_gizmo, - get_node_data, - set_node_data -) -from .workfile_template_builder import ( - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin, - build_workfile_template, - create_placeholder, - update_placeholder, -) -from .workio import ( - open_file, - save_file, - file_extensions, - has_unsaved_changes, - work_root, - current_file -) -from .constants import ASSIST - -log = Logger.get_logger(__name__) - -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.nuke.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = 
os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - -MENU_LABEL = os.environ["AVALON_LABEL"] - -# registering pyblish gui regarding settings in presets -if os.getenv("PYBLISH_GUI", None): - pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None)) - - -class NukeHost( - HostBase, IWorkfileHost, ILoadHost, IPublishHost -): - name = "nuke" - - def open_workfile(self, filepath): - return open_file(filepath) - - def save_workfile(self, filepath=None): - return save_file(filepath) - - def work_root(self, session): - return work_root(session) - - def get_current_workfile(self): - return current_file() - - def workfile_has_unsaved_changes(self): - return has_unsaved_changes() - - def get_workfile_extensions(self): - return file_extensions() - - def get_workfile_build_placeholder_plugins(self): - return [ - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin - ] - - def get_containers(self): - return ls() - - def install(self): - ''' Installing all requarements for Nuke host - ''' - - pyblish.api.register_host("nuke") - - self.log.info("Registering Nuke plug-ins..") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) - - # Register Avalon event for workfiles loading. - register_event_callback("workio.open_file", check_inventory_versions) - register_event_callback("taskChanged", change_context_label) - - _install_menu() - - # add script menu - add_scripts_menu() - add_scripts_gizmo() - - add_nuke_callbacks() - - launch_workfiles_app() - - def get_context_data(self): - root_node = nuke.root() - return get_node_data(root_node, ROOT_DATA_KNOB) - - def update_context_data(self, data, changes): - root_node = nuke.root() - set_node_data(root_node, ROOT_DATA_KNOB, data) - - -def add_nuke_callbacks(): - """ Adding all available nuke callbacks - """ - nuke_settings = get_current_project_settings()["nuke"] - workfile_settings = WorkfileSettings() - - # Set context settings. - nuke.addOnCreate( - workfile_settings.set_context_settings, nodeClass="Root") - - # adding favorites to file browser - nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") - - # template builder callbacks - nuke.addOnCreate(start_workfile_template_builder, nodeClass="Root") - - # TODO: remove this callback once workfile builder will be removed - nuke.addOnCreate(process_workfile_builder, nodeClass="Root") - - # fix ffmpeg settings on script - nuke.addOnScriptLoad(on_script_load) - - # set checker for last versions on loaded containers - nuke.addOnScriptLoad(check_inventory_versions) - nuke.addOnScriptSave(check_inventory_versions) - - # set apply all workfile settings on script load and save - nuke.addOnScriptLoad(WorkfileSettings().set_context_settings) - - - if nuke_settings["nuke-dirmap"]["enabled"]: - log.info("Added Nuke's dir-mapping callback ...") - # Add dirmap for file paths. - nuke.addFilenameFilter(dirmap_file_name_filter) - - log.info("Added Nuke callbacks ...") - - -def reload_config(): - """Attempt to reload pipeline at run-time. - - CAUTION: This is primarily for development and debugging purposes. 
- - """ - - for module in ( - "openpype.hosts.nuke.api.actions", - "openpype.hosts.nuke.api.menu", - "openpype.hosts.nuke.api.plugin", - "openpype.hosts.nuke.api.lib", - ): - log.info("Reloading module: {}...".format(module)) - - module = importlib.import_module(module) - - try: - importlib.reload(module) - except AttributeError as e: - from importlib import reload - log.warning("Cannot reload module: {}".format(e)) - reload(module) - - -def _show_workfiles(): - # Make sure parent is not set - # - this makes Workfiles tool as separated window which - # avoid issues with reopening - # - it is possible to explicitly change on top flag of the tool - host_tools.show_workfiles(parent=None, on_top=False) - - -def get_context_label(): - return "{0}, {1}".format( - get_current_asset_name(), - get_current_task_name() - ) - - -def _install_menu(): - """Install Avalon menu into Nuke's main menu bar.""" - - # uninstall original avalon menu - main_window = get_main_window() - menubar = nuke.menu("Nuke") - menu = menubar.addMenu(MENU_LABEL) - - if not ASSIST: - label = get_context_label() - context_action_item = menu.addCommand("Context") - context_action_item.setEnabled(False) - - Context.context_action_item = context_action_item - - context_action = context_action_item.action() - context_action.setText(label) - - # add separator after context label - menu.addSeparator() - - menu.addCommand( - "Work Files...", - _show_workfiles - ) - - menu.addSeparator() - if not ASSIST: - # only add parent if nuke version is 14 or higher - # known issue with no solution yet - menu.addCommand( - "Create...", - lambda: host_tools.show_publisher( - parent=main_window, - tab="create" - ) - ) - # only add parent if nuke version is 14 or higher - # known issue with no solution yet - menu.addCommand( - "Publish...", - lambda: host_tools.show_publisher( - parent=main_window, - tab="publish" - ) - ) - - menu.addCommand( - "Load...", - lambda: host_tools.show_loader( - parent=main_window, - use_context=True - ) - ) - menu.addCommand( - "Manage...", - lambda: host_tools.show_scene_inventory(parent=main_window) - ) - menu.addSeparator() - menu.addCommand( - "Library...", - lambda: host_tools.show_library_loader( - parent=main_window - ) - ) - menu.addSeparator() - menu.addCommand( - "Set Resolution", - lambda: WorkfileSettings().reset_resolution() - ) - menu.addCommand( - "Set Frame Range", - lambda: WorkfileSettings().reset_frame_range_handles() - ) - menu.addCommand( - "Set Colorspace", - lambda: WorkfileSettings().set_colorspace() - ) - menu.addCommand( - "Apply All Settings", - lambda: WorkfileSettings().set_context_settings() - ) - - menu.addSeparator() - menu.addCommand( - "Build Workfile", - lambda: BuildWorkfile().process() - ) - - menu_template = menu.addMenu("Template Builder") # creating template menu - menu_template.addCommand( - "Build Workfile from template", - lambda: build_workfile_template() - ) - - if not ASSIST: - menu_template.addSeparator() - menu_template.addCommand( - "Create Place Holder", - lambda: create_placeholder() - ) - menu_template.addCommand( - "Update Place Holder", - lambda: update_placeholder() - ) - - menu.addSeparator() - menu.addCommand( - "Experimental tools...", - lambda: host_tools.show_experimental_tools_dialog(parent=main_window) - ) - menu.addSeparator() - # add reload pipeline only in debug mode - if bool(os.getenv("NUKE_DEBUG")): - menu.addSeparator() - menu.addCommand("Reload Pipeline", reload_config) - - # adding shortcuts - add_shortcuts_from_presets() - - -def 
change_context_label(): - if ASSIST: - return - - context_action_item = Context.context_action_item - if context_action_item is None: - return - context_action = context_action_item.action() - - old_label = context_action.text() - new_label = get_context_label() - - context_action.setText(new_label) - - log.info("Task label changed from `{}` to `{}`".format( - old_label, new_label)) - - -def add_shortcuts_from_presets(): - menubar = nuke.menu("Nuke") - nuke_presets = get_current_project_settings()["nuke"]["general"] - - if nuke_presets.get("menu"): - menu_label_mapping = { - "create": "Create...", - "manage": "Manage...", - "load": "Load...", - "build_workfile": "Build Workfile", - "publish": "Publish..." - } - - for command_name, shortcut_str in nuke_presets.get("menu").items(): - log.info("menu_name `{}` | menu_label `{}`".format( - command_name, MENU_LABEL - )) - log.info("Adding Shortcut `{}` to `{}`".format( - shortcut_str, command_name - )) - try: - menu = menubar.findItem(MENU_LABEL) - item_label = menu_label_mapping[command_name] - menuitem = menu.findItem(item_label) - menuitem.setShortcut(shortcut_str) - except (AttributeError, KeyError) as e: - log.error(e) - - -def containerise(node, - name, - namespace, - context, - loader=None, - data=None): - """Bundle `node` into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - node (nuke.Node): Nuke's node object to imprint as container - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - context (dict): Asset information - loader (str, optional): Name of node used to produce this container. - - Returns: - node (nuke.Node): containerised nuke's node object - - """ - data = OrderedDict( - [ - ("schema", "openpype:container-2.0"), - ("id", AVALON_CONTAINER_ID), - ("name", name), - ("namespace", namespace), - ("loader", str(loader)), - ("representation", context["representation"]["_id"]), - ], - - **data or dict() - ) - - set_avalon_knob_data(node, data) - - # set tab to first native - node.setTab(0) - - return node - - -def parse_container(node): - """Returns containerised data of a node - - Reads the imprinted data from `containerise`. - - Arguments: - node (nuke.Node): Nuke's node object to read imprinted data - - Returns: - dict: The container schema data for this container node. - - """ - data = read_avalon_data(node) - - # If not all required data return the empty container - required = ["schema", "id", "name", - "namespace", "loader", "representation"] - if not all(key in data for key in required): - return - - # Store the node's name - data.update({ - "objectName": node.fullName(), - "node": node, - }) - - return data - - -def update_container(node, keys=None): - """Returns node with updateted containder data - - Arguments: - node (nuke.Node): The node in Nuke to imprint as container, - keys (dict, optional): data which should be updated - - Returns: - node (nuke.Node): nuke node with updated container data - - Raises: - TypeError on given an invalid container node - - """ - keys = keys or dict() - - container = parse_container(node) - if not container: - raise TypeError("Not a valid container node.") - - container.update(keys) - node = set_avalon_knob_data(node, container) - - return node - - -def ls(): - """List available containers. - - This function is used by the Container Manager in Nuke. You'll - need to implement a for-loop that then *yields* one Container at - a time. 
- - See the `container.json` schema for details on how it should look, - and the Maya equivalent, which is in `avalon.maya.pipeline` - """ - all_nodes = nuke.allNodes(recurseGroups=False) - - nodes = [n for n in all_nodes] - - for n in nodes: - container = parse_container(n) - if container: - yield container - - -def list_instances(creator_id=None): - """List all created instances to publish from current workfile. - - For SubsetManager - - Args: - creator_id (Optional[str]): creator identifier - - Returns: - (list) of dictionaries matching instances format - """ - instances_by_order = defaultdict(list) - subset_instances = [] - instance_ids = set() - - for node in nuke.allNodes(recurseGroups=True): - - if node.Class() in ["Viewer", "Dot"]: - continue - - try: - if node["disable"].value(): - continue - except NameError: - # pass if disable knob doesn't exist - pass - - # get data from avalon knob - instance_data = get_node_data( - node, INSTANCE_DATA_KNOB) - - if not instance_data: - continue - - if instance_data["id"] != "pyblish.avalon.instance": - continue - - if creator_id and instance_data["creator_identifier"] != creator_id: - continue - - instance_id = instance_data.get("instance_id") - if not instance_id: - pass - elif instance_id in instance_ids: - instance_data.pop("instance_id") - else: - instance_ids.add(instance_id) - - # node name could change, so update subset name data - _update_subset_name_data(instance_data, node) - - if "render_order" not in node.knobs(): - subset_instances.append((node, instance_data)) - continue - - order = int(node["render_order"].value()) - instances_by_order[order].append((node, instance_data)) - - # Sort instances based on order attribute or subset name. - # TODO: remove in future Publisher enhanced with sorting - ordered_instances = [] - for key in sorted(instances_by_order.keys()): - instances_by_subset = defaultdict(list) - for node, data_ in instances_by_order[key]: - instances_by_subset[data_["subset"]].append((node, data_)) - for subkey in sorted(instances_by_subset.keys()): - ordered_instances.extend(instances_by_subset[subkey]) - - instances_by_subset = defaultdict(list) - for node, data_ in subset_instances: - instances_by_subset[data_["subset"]].append((node, data_)) - for key in sorted(instances_by_subset.keys()): - ordered_instances.extend(instances_by_subset[key]) - - return ordered_instances - - -def _update_subset_name_data(instance_data, node): - """Update subset name data in instance data. - - Args: - instance_data (dict): instance creator data - node (nuke.Node): nuke node - """ - # make sure node name is subset name - old_subset_name = instance_data["subset"] - old_variant = instance_data["variant"] - subset_name_root = old_subset_name.replace(old_variant, "") - - new_subset_name = node.name() - new_variant = new_subset_name.replace(subset_name_root, "") - - instance_data["subset"] = new_subset_name - instance_data["variant"] = new_variant - - -def remove_instance(instance): - """Remove instance from current workfile metadata. 
- - For SubsetManager - - Args: - instance (dict): instance representation from subsetmanager model - """ - instance_node = instance.transient_data["node"] - instance_knob = instance_node.knobs()[INSTANCE_DATA_KNOB] - instance_node.removeKnob(instance_knob) - nuke.delete(instance_node) - - -def select_instance(instance): - """ - Select instance in Node View - - Args: - instance (dict): instance representation from subsetmanager model - """ - instance_node = instance.transient_data["node"] - instance_node["selected"].setValue(True) diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py deleted file mode 100644 index c8301b81fd..0000000000 --- a/openpype/hosts/nuke/api/plugin.py +++ /dev/null @@ -1,1355 +0,0 @@ -import nuke -import re -import os -import sys -import six -import random -import string -from collections import OrderedDict, defaultdict -from abc import abstractmethod - -from openpype.settings import get_current_project_settings -from openpype.lib import ( - BoolDef, - EnumDef -) -from openpype.pipeline import ( - LegacyCreator, - LoaderPlugin, - CreatorError, - Creator as NewCreator, - CreatedInstance, - get_current_task_name -) -from openpype.pipeline.colorspace import ( - get_display_view_colorspace_name, - get_colorspace_settings_from_publish_context, - set_colorspace_data_to_representation -) -from openpype.lib.transcoding import ( - VIDEO_EXTENSIONS -) -from .lib import ( - INSTANCE_DATA_KNOB, - Knobby, - check_subsetname_exists, - maintained_selection, - get_avalon_knob_data, - set_avalon_knob_data, - add_publish_knob, - get_nuke_imageio_settings, - set_node_knobs_from_settings, - set_node_data, - get_node_data, - get_view_process_node, - get_viewer_config_from_string, - deprecated, - get_filenames_without_hash, - link_knobs -) -from .pipeline import ( - list_instances, - remove_instance -) - - -def _collect_and_cache_nodes(creator): - key = "openpype.nuke.nodes" - if key not in creator.collection_shared_data: - instances_by_identifier = defaultdict(list) - for item in list_instances(): - _, instance_data = item - identifier = instance_data["creator_identifier"] - instances_by_identifier[identifier].append(item) - creator.collection_shared_data[key] = instances_by_identifier - return creator.collection_shared_data[key] - - -class NukeCreatorError(CreatorError): - pass - - -class NukeCreator(NewCreator): - selected_nodes = [] - - def pass_pre_attributes_to_instance( - self, - instance_data, - pre_create_data, - keys=None - ): - if not keys: - keys = pre_create_data.keys() - - creator_attrs = instance_data["creator_attributes"] = {} - for pass_key in keys: - creator_attrs[pass_key] = pre_create_data[pass_key] - - def check_existing_subset(self, subset_name): - """Make sure subset name is unique. - - It search within all nodes recursively - and checks if subset name is found in - any node having instance data knob. - - Arguments: - subset_name (str): Subset name - """ - - for node in nuke.allNodes(recurseGroups=True): - # make sure testing node is having instance knob - if INSTANCE_DATA_KNOB not in node.knobs().keys(): - continue - node_data = get_node_data(node, INSTANCE_DATA_KNOB) - - if not node_data: - # a node has no instance data - continue - - # test if subset name is matching - if node_data.get("subset") == subset_name: - raise NukeCreatorError( - ( - "A publish instance for '{}' already exists " - "in nodes! Please change the variant " - "name to ensure unique output." 
).format(subset_name)
-                )
-
-    def create_instance_node(
-        self,
-        node_name,
-        knobs=None,
-        parent=None,
-        node_type=None
-    ):
-        """Create node representing instance.
-
-        Arguments:
-            node_name (str): Name of the new node.
-            knobs (OrderedDict): node knobs name and values
-            parent (str): Name of the parent node.
-            node_type (str, optional): Nuke node Class.
-
-        Returns:
-            nuke.Node: Newly created instance node.
-
-        """
-        node_type = node_type or "NoOp"
-
-        node_knobs = knobs or {}
-
-        # set parent node
-        parent_node = nuke.root()
-        if parent:
-            parent_node = nuke.toNode(parent)
-
-        try:
-            with parent_node:
-                created_node = nuke.createNode(node_type)
-                created_node["name"].setValue(node_name)
-
-                for key, values in node_knobs.items():
-                    if key in created_node.knobs():
-                        created_node[key].setValue(values)
-        except Exception as _err:
-            raise NukeCreatorError("Creating has failed: {}".format(_err))
-
-        return created_node
-
-    def set_selected_nodes(self, pre_create_data):
-        if pre_create_data.get("use_selection"):
-            self.selected_nodes = nuke.selectedNodes()
-            if self.selected_nodes == []:
-                raise NukeCreatorError("Creator error: No active selection")
-        else:
-            self.selected_nodes = []
-
-    def create(self, subset_name, instance_data, pre_create_data):
-
-        # make sure selected nodes are added
-        self.set_selected_nodes(pre_create_data)
-
-        # make sure subset name is unique
-        self.check_existing_subset(subset_name)
-
-        try:
-            instance_node = self.create_instance_node(
-                subset_name,
-                node_type=instance_data.pop("node_type", None)
-            )
-            instance = CreatedInstance(
-                self.family,
-                subset_name,
-                instance_data,
-                self
-            )
-
-            instance.transient_data["node"] = instance_node
-
-            self._add_instance_to_context(instance)
-
-            set_node_data(
-                instance_node, INSTANCE_DATA_KNOB, instance.data_to_store())
-
-            return instance
-
-        except Exception as er:
-            six.reraise(
-                NukeCreatorError,
-                NukeCreatorError("Creator error: {}".format(er)),
-                sys.exc_info()[2])
-
-    def collect_instances(self):
-        cached_instances = _collect_and_cache_nodes(self)
-        attr_def_keys = {
-            attr_def.key
-            for attr_def in self.get_instance_attr_defs()
-        }
-        attr_def_keys.discard(None)
-
-        for (node, data) in cached_instances[self.identifier]:
-            created_instance = CreatedInstance.from_existing(
-                data, self
-            )
-            created_instance.transient_data["node"] = node
-            self._add_instance_to_context(created_instance)
-
-            for key in (
-                set(created_instance["creator_attributes"].keys())
-                - attr_def_keys
-            ):
-                created_instance["creator_attributes"].pop(key)
-
-    def update_instances(self, update_list):
-        for created_inst, changes in update_list:
-            instance_node = created_inst.transient_data["node"]
-
-            # update instance node name if subset name changed
-            if "subset" in changes.changed_keys:
-                instance_node["name"].setValue(
-                    changes["subset"].new_value
-                )
-
-            # in case node is not existing anymore (user erased it manually)
-            try:
-                instance_node.fullName()
-            except ValueError:
-                self.remove_instances([created_inst])
-                continue
-
-            set_node_data(
-                instance_node,
-                INSTANCE_DATA_KNOB,
-                created_inst.data_to_store()
-            )
-
-    def remove_instances(self, instances):
-        for instance in instances:
-            remove_instance(instance)
-            self._remove_instance_from_context(instance)
-
-    def get_pre_create_attr_defs(self):
-        return [
-            BoolDef(
-                "use_selection",
-                default=not self.create_context.headless,
-                label="Use selection"
-            )
-        ]
-
-    def get_creator_settings(self, project_settings, settings_key=None):
-        if not settings_key:
-            settings_key = self.__class__.__name__
-        return project_settings["nuke"]["create"][settings_key]
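For orientation, a hedged sketch of how the factory method above is meant to be driven; the creator instance, node name and knob values are purely illustrative:

instance_node = creator.create_instance_node(
    "renderCompositingMain",           # becomes the node's name
    knobs={"tile_color": 0xff0000ff},  # only keys existing on the node are set
    parent=None,                       # None -> created under nuke.root()
    node_type="NoOp",                  # also the default when omitted
)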
-
-
-class NukeWriteCreator(NukeCreator):
-    """Add Publishable Write node"""
-
-    identifier = "create_write"
-    label = "Create Write"
-    family = "write"
-    icon = "sign-out"
-
-    def get_linked_knobs(self):
-        linked_knobs = []
-        if "channels" in self.instance_attributes:
-            linked_knobs.append("channels")
-        if "ordered" in self.instance_attributes:
-            linked_knobs.append("render_order")
-        if "use_range_limit" in self.instance_attributes:
-            linked_knobs.extend(["___", "first", "last", "use_limit"])
-
-        return linked_knobs
-
-    def integrate_links(self, node, outputs=True):
-        # skip if no selection
-        if not self.selected_node:
-            return
-
-        # collect dependencies
-        input_nodes = [self.selected_node]
-        dependent_nodes = self.selected_node.dependent() if outputs else []
-
-        # relinking to collected connections
-        for i, input in enumerate(input_nodes):
-            node.setInput(i, input)
-
-        # make it nicer in graph
-        node.autoplace()
-
-        # relink also dependent nodes
-        for dep_nodes in dependent_nodes:
-            dep_nodes.setInput(0, node)
-
-    def set_selected_nodes(self, pre_create_data):
-        if pre_create_data.get("use_selection"):
-            selected_nodes = nuke.selectedNodes()
-            if selected_nodes == []:
-                raise NukeCreatorError("Creator error: No active selection")
-            elif len(selected_nodes) > 1:
-                raise NukeCreatorError(
-                    "Creator error: Select only one node")
-            self.selected_node = selected_nodes[0]
-        else:
-            self.selected_node = None
-
-    def get_pre_create_attr_defs(self):
-        attr_defs = [
-            BoolDef("use_selection", label="Use selection"),
-            self._get_render_target_enum()
-        ]
-        return attr_defs
-
-    def get_instance_attr_defs(self):
-        attr_defs = [
-            self._get_render_target_enum(),
-        ]
-        # add reviewable attribute
-        if "reviewable" in self.instance_attributes:
-            attr_defs.append(self._get_reviewable_bool())
-
-        return attr_defs
-
-    def _get_render_target_enum(self):
-        rendering_targets = {
-            "local": "Local machine rendering",
-            "frames": "Use existing frames"
-        }
-        if "farm_rendering" in self.instance_attributes:
-            rendering_targets["frames_farm"] = "Use existing frames - farm"
-            rendering_targets["farm"] = "Farm rendering"
-
-        return EnumDef(
-            "render_target",
-            items=rendering_targets,
-            label="Render target"
-        )
-
-    def _get_reviewable_bool(self):
-        return BoolDef(
-            "review",
-            default=True,
-            label="Review"
-        )
-
-    def create(self, subset_name, instance_data, pre_create_data):
-        # make sure selected nodes are added
-        self.set_selected_nodes(pre_create_data)
-
-        # make sure subset name is unique
-        self.check_existing_subset(subset_name)
-
-        instance_node = self.create_instance_node(
-            subset_name,
-            instance_data
-        )
-
-        try:
-            instance = CreatedInstance(
-                self.family,
-                subset_name,
-                instance_data,
-                self
-            )
-
-            instance.transient_data["node"] = instance_node
-
-            self._add_instance_to_context(instance)
-
-            set_node_data(
-                instance_node, INSTANCE_DATA_KNOB, instance.data_to_store())
-
-            return instance
-
-        except Exception as er:
-            six.reraise(
-                NukeCreatorError,
-                NukeCreatorError("Creator error: {}".format(er)),
-                sys.exc_info()[2]
-            )
-
-    def apply_settings(self, project_settings):
-        """Method called on initialization of plugin to apply settings."""
-
-        # plugin settings
-        plugin_settings = self.get_creator_settings(project_settings)
-
-        # individual attributes
-        self.instance_attributes = plugin_settings.get(
-            "instance_attributes") or self.instance_attributes
-        self.prenodes = plugin_settings["prenodes"]
-        self.default_variants = plugin_settings.get(
-            "default_variants") or self.default_variants
-        self.temp_rendering_path_template = (
-            plugin_settings.get("temp_rendering_path_template")
-            or self.temp_rendering_path_template
-        )
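get_linked_knobs() above feeds lib.link_knobs(), which exposes knobs of the inner Write node on the publishable group via Nuke's Link_Knob. A minimal sketch of that mechanism (node and knob names illustrative):

import nuke

group_node = nuke.toNode("renderMain")  # publishable group (illustrative)
inner_node_name = "inside_write"        # node inside the group (illustrative)

# Expose the inner node's "first" knob on the group, the same way
# lib.link_knobs() does for every requested knob name.
link = nuke.Link_Knob("")
link.makeLink(inner_node_name, "first")
link.setName("first")
link.setFlag(0x1000)  # nuke.STARTLINE: put the knob on its own line
group_node.addKnob(link)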
-
-
-class OpenPypeCreator(LegacyCreator):
-    """Pype Nuke Creator class wrapper"""
-    node_color = "0xdfea5dff"
-
-    def __init__(self, *args, **kwargs):
-        super(OpenPypeCreator, self).__init__(*args, **kwargs)
-        if check_subsetname_exists(
-                nuke.allNodes(),
-                self.data["subset"]):
-            msg = ("The subset name `{0}` is already used on a node in "
-                   "this workfile.".format(self.data["subset"]))
-            self.log.error(msg + "\n\nPlease use another subset name!")
-            raise NameError("{0}: {1}".format(__name__, msg))
-        return
-
-    def process(self):
-        from nukescripts import autoBackdrop
-
-        instance = None
-
-        if (self.options or {}).get("useSelection"):
-
-            nodes = nuke.selectedNodes()
-            if not nodes:
-                nuke.message("Please select nodes that you "
-                             "wish to add to a container")
-                return
-
-            elif len(nodes) == 1:
-                # only one node is selected
-                instance = nodes[0]
-
-        if not instance:
-            # Not using selection or multiple nodes selected
-            bckd_node = autoBackdrop()
-            bckd_node["tile_color"].setValue(int(self.node_color, 16))
-            bckd_node["note_font_size"].setValue(24)
-            bckd_node["label"].setValue("[{}]".format(self.name))
-
-            instance = bckd_node
-
-        # add avalon knobs
-        set_avalon_knob_data(instance, self.data)
-        add_publish_knob(instance)
-
-        return instance
-
-
-def get_instance_group_node_childs(instance):
-    """Return list of instance group node children
-
-    Args:
-        instance (pyblish.Instance): pyblish instance
-
-    Returns:
-        list: [nuke.Node]
-    """
-    node = instance.data["transientData"]["node"]
-
-    if node.Class() != "Group":
-        return
-
-    # collect child nodes
-    child_nodes = []
-    # iterate all nodes
-    for node in nuke.allNodes(group=node):
-        # add contained nodes to instance's node list
-        child_nodes.append(node)
-
-    return child_nodes
-
-
-def get_colorspace_from_node(node):
-    # Add version data to instance
-    colorspace = node["colorspace"].value()
-
-    # remove default part of the string
-    if "default (" in colorspace:
-        colorspace = re.sub(r"default.\(|\)", "", colorspace)
-
-    return colorspace
-
-
-def get_review_presets_config():
-    settings = get_current_project_settings()
-    review_profiles = (
-        settings["global"]
-        ["publish"]
-        ["ExtractReview"]
-        ["profiles"]
-    )
-
-    outputs = {}
-    for profile in review_profiles:
-        outputs.update(profile.get("outputs", {}))
-
-    return [str(name) for name, _prop in outputs.items()]
-
-
-class NukeLoader(LoaderPlugin):
-    container_id_knob = "containerId"
-    container_id = None
-
-    def reset_container_id(self):
-        self.container_id = "".join(random.choice(
-            string.ascii_uppercase + string.digits) for _ in range(10))
-
-    def get_container_id(self, node):
-        id_knob = node.knobs().get(self.container_id_knob)
-        return id_knob.value() if id_knob else None
-
-    def get_members(self, source):
-        """Return nodes that have the same "containerId" as `source`"""
-        source_id = self.get_container_id(source)
-        return [node for node in nuke.allNodes(recurseGroups=True)
-                if self.get_container_id(node) == source_id
-                and node is not source] if source_id else []
-
-    def set_as_member(self, node):
-        source_id = self.get_container_id(node)
-
-        if source_id:
-            node[self.container_id_knob].setValue(source_id)
-        else:
-            HIDEN_FLAG = 0x00040000
-            _knob = Knobby(
-                "String_Knob",
-                self.container_id,
-                flags=[
-                    nuke.READ_ONLY,
-                    HIDEN_FLAG
-                ])
-            knob = 
_knob.create(self.container_id_knob) - node.addKnob(knob) - - def clear_members(self, parent_node): - parent_class = parent_node.Class() - members = self.get_members(parent_node) - - dependent_nodes = None - for node in members: - _depndc = [n for n in node.dependent() if n not in members] - if not _depndc: - continue - - dependent_nodes = _depndc - break - - for member in members: - if member.Class() == parent_class: - continue - self.log.info("removing node: `{}".format(member.name())) - nuke.delete(member) - - return dependent_nodes - - -class ExporterReview(object): - """ - Base class object for generating review data from Nuke - - Args: - klass (pyblish.plugin): pyblish plugin parent - instance (pyblish.instance): instance of pyblish context - - """ - data = None - publish_on_farm = False - - def __init__(self, - klass, - instance, - multiple_presets=True - ): - - self.log = klass.log - self.instance = instance - self.multiple_presets = multiple_presets - self.path_in = self.instance.data.get("path", None) - self.staging_dir = self.instance.data["stagingDir"] - self.collection = self.instance.data.get("collection", None) - self.data = {"representations": []} - - def get_file_info(self): - if self.collection: - # get path - self.fname = os.path.basename( - self.collection.format("{head}{padding}{tail}") - ) - self.fhead = self.collection.format("{head}") - - # get first and last frame - self.first_frame = min(self.collection.indexes) - self.last_frame = max(self.collection.indexes) - - # make sure slate frame is not included - frame_start_handle = self.instance.data["frameStartHandle"] - if frame_start_handle > self.first_frame: - self.first_frame = frame_start_handle - - else: - self.fname = os.path.basename(self.path_in) - self.fhead = os.path.splitext(self.fname)[0] + "." - self.first_frame = self.instance.data["frameStartHandle"] - self.last_frame = self.instance.data["frameEndHandle"] - - if "#" in self.fhead: - self.fhead = self.fhead.replace("#", "")[:-1] - - def get_representation_data( - self, tags=None, range=False, - custom_tags=None, colorspace=None - ): - """ Add representation data to self.data - - Args: - tags (list[str], optional): list of defined tags. - Defaults to None. - range (bool, optional): flag for adding ranges. - Defaults to False. - custom_tags (list[str], optional): user inputted custom tags. - Defaults to None. - """ - add_tags = tags or [] - repre = { - "name": self.name, - "ext": self.ext, - "files": self.file, - "stagingDir": self.staging_dir, - "tags": [self.name.replace("_", "-")] + add_tags - } - - if custom_tags: - repre["custom_tags"] = custom_tags - - if range: - repre.update({ - "frameStart": self.first_frame, - "frameEnd": self.last_frame, - }) - if ".{}".format(self.ext) not in VIDEO_EXTENSIONS: - filenames = get_filenames_without_hash( - self.file, self.first_frame, self.last_frame) - repre["files"] = filenames - - if self.multiple_presets: - repre["outputName"] = self.name - - if self.publish_on_farm: - repre["tags"].append("publish_on_farm") - - # add colorspace data to representation - if colorspace: - set_colorspace_data_to_representation( - repre, - self.instance.context.data, - colorspace=colorspace, - log=self.log - ) - self.data["representations"].append(repre) - - def get_imageio_baking_profile(self): - from . 
import lib as opnlib - nuke_imageio = opnlib.get_nuke_imageio_settings() - - # TODO: this is only securing backward compatibility lets remove - # this once all projects's anatomy are updated to newer config - if "baking" in nuke_imageio.keys(): - return nuke_imageio["baking"]["viewerProcess"] - else: - return nuke_imageio["viewer"]["viewerProcess"] - - -class ExporterReviewLut(ExporterReview): - """ - Generator object for review lut from Nuke - - Args: - klass (pyblish.plugin): pyblish plugin parent - instance (pyblish.instance): instance of pyblish context - - - """ - _temp_nodes = [] - - def __init__(self, - klass, - instance, - name=None, - ext=None, - cube_size=None, - lut_size=None, - lut_style=None, - multiple_presets=True): - # initialize parent class - super(ExporterReviewLut, self).__init__( - klass, instance, multiple_presets) - - # deal with now lut defined in viewer lut - if hasattr(klass, "viewer_lut_raw"): - self.viewer_lut_raw = klass.viewer_lut_raw - else: - self.viewer_lut_raw = False - - self.name = name or "baked_lut" - self.ext = ext or "cube" - self.cube_size = cube_size or 32 - self.lut_size = lut_size or 1024 - self.lut_style = lut_style or "linear" - - # set frame start / end and file name to self - self.get_file_info() - - self.log.info("File info was set...") - - self.file = self.fhead + self.name + ".{}".format(self.ext) - self.path = os.path.join( - self.staging_dir, self.file).replace("\\", "/") - - def clean_nodes(self): - for node in self._temp_nodes: - nuke.delete(node) - self._temp_nodes = [] - self.log.info("Deleted nodes...") - - def generate_lut(self, **kwargs): - bake_viewer_process = kwargs["bake_viewer_process"] - bake_viewer_input_process_node = kwargs[ - "bake_viewer_input_process"] - - # ---------- start nodes creation - - # CMSTestPattern - cms_node = nuke.createNode("CMSTestPattern") - cms_node["cube_size"].setValue(self.cube_size) - # connect - self._temp_nodes.append(cms_node) - self.previous_node = cms_node - - if bake_viewer_process: - # Node View Process - if bake_viewer_input_process_node: - ipn = get_view_process_node() - if ipn is not None: - # connect - ipn.setInput(0, self.previous_node) - self._temp_nodes.append(ipn) - self.previous_node = ipn - self.log.debug( - "ViewProcess... `{}`".format(self._temp_nodes)) - - if not self.viewer_lut_raw: - # OCIODisplay - dag_node = nuke.createNode("OCIODisplay") - # connect - dag_node.setInput(0, self.previous_node) - self._temp_nodes.append(dag_node) - self.previous_node = dag_node - self.log.debug( - "OCIODisplay... 
`{}`".format(self._temp_nodes)) - - # GenerateLUT - gen_lut_node = nuke.createNode("GenerateLUT") - gen_lut_node["file"].setValue(self.path) - gen_lut_node["file_type"].setValue(".{}".format(self.ext)) - gen_lut_node["lut1d"].setValue(self.lut_size) - gen_lut_node["style1d"].setValue(self.lut_style) - # connect - gen_lut_node.setInput(0, self.previous_node) - self._temp_nodes.append(gen_lut_node) - # ---------- end nodes creation - - # Export lut file - nuke.execute( - gen_lut_node.name(), - int(self.first_frame), - int(self.first_frame)) - - self.log.info("Exported...") - - # ---------- generate representation data - self.get_representation_data() - - # ---------- Clean up - self.clean_nodes() - - return self.data - - -class ExporterReviewMov(ExporterReview): - """ - Metaclass for generating review mov files - - Args: - klass (pyblish.plugin): pyblish plugin parent - instance (pyblish.instance): instance of pyblish context - - """ - _temp_nodes = {} - - def __init__(self, - klass, - instance, - name=None, - ext=None, - multiple_presets=True - ): - # initialize parent class - super(ExporterReviewMov, self).__init__( - klass, instance, multiple_presets) - # passing presets for nodes to self - self.nodes = klass.nodes if hasattr(klass, "nodes") else {} - - # deal with now lut defined in viewer lut - self.viewer_lut_raw = klass.viewer_lut_raw - self.write_colorspace = instance.data["colorspace"] - - self.name = name or "baked" - self.ext = ext or "mov" - - # set frame start / end and file name to self - self.get_file_info() - - self.log.info("File info was set...") - - if ".{}".format(self.ext) in VIDEO_EXTENSIONS: - self.file = "{}{}.{}".format( - self.fhead, self.name, self.ext) - else: - # Output is image (or image sequence) - # When the file is an image it's possible it - # has extra information after the `fhead` that - # we want to preserve, e.g. like frame numbers - # or frames hashes like `####` - filename_no_ext = os.path.splitext( - os.path.basename(self.path_in))[0] - after_head = filename_no_ext[len(self.fhead):] - self.file = "{}{}.{}.{}".format( - self.fhead, self.name, after_head, self.ext) - self.path = os.path.join( - self.staging_dir, self.file).replace("\\", "/") - - def clean_nodes(self, node_name): - for node in self._temp_nodes[node_name]: - nuke.delete(node) - self._temp_nodes[node_name] = [] - self.log.info("Deleted nodes...") - - def render(self, render_node_name): - self.log.info("Rendering... ") - # Render Write node - nuke.execute( - render_node_name, - int(self.first_frame), - int(self.last_frame)) - - self.log.info("Rendered...") - - def save_file(self): - import shutil - with maintained_selection(): - self.log.info("Saving nodes as file... 
") - # create nk path - path = os.path.splitext(self.path)[0] + ".nk" - # save file to the path - if not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) - shutil.copyfile(self.instance.context.data["currentFile"], path) - - self.log.info("Nodes exported...") - return path - - def generate_mov(self, farm=False, **kwargs): - # colorspace data - colorspace = None - # get colorspace settings - # get colorspace data from context - config_data, _ = get_colorspace_settings_from_publish_context( - self.instance.context.data) - - add_tags = [] - self.publish_on_farm = farm - read_raw = kwargs["read_raw"] - bake_viewer_process = kwargs["bake_viewer_process"] - bake_viewer_input_process_node = kwargs[ - "bake_viewer_input_process"] - viewer_process_override = kwargs[ - "viewer_process_override"] - - baking_view_profile = ( - viewer_process_override or self.get_imageio_baking_profile()) - - fps = self.instance.context.data["fps"] - - self.log.debug(">> baking_view_profile `{}`".format( - baking_view_profile)) - - add_custom_tags = kwargs.get("add_custom_tags", []) - - self.log.info( - "__ add_custom_tags: `{0}`".format(add_custom_tags)) - - subset = self.instance.data["subset"] - self._temp_nodes[subset] = [] - - # Read node - r_node = nuke.createNode("Read") - r_node["file"].setValue(self.path_in) - r_node["first"].setValue(self.first_frame) - r_node["origfirst"].setValue(self.first_frame) - r_node["last"].setValue(self.last_frame) - r_node["origlast"].setValue(self.last_frame) - r_node["colorspace"].setValue(self.write_colorspace) - - # do not rely on defaults, set explicitly - # to be sure it is set correctly - r_node["frame_mode"].setValue("expression") - r_node["frame"].setValue("") - - if read_raw: - r_node["raw"].setValue(1) - - # connect to Read node - self._shift_to_previous_node_and_temp(subset, r_node, "Read... `{}`") - - # add reformat node - reformat_nodes_config = kwargs["reformat_nodes_config"] - if reformat_nodes_config["enabled"]: - reposition_nodes = reformat_nodes_config["reposition_nodes"] - for reposition_node in reposition_nodes: - node_class = reposition_node["node_class"] - knobs = reposition_node["knobs"] - node = nuke.createNode(node_class) - set_node_knobs_from_settings(node, knobs) - - # connect in order - self._connect_to_above_nodes( - node, subset, "Reposition node... `{}`" - ) - # append reformated tag - add_tags.append("reformated") - - # only create colorspace baking if toggled on - if bake_viewer_process: - if bake_viewer_input_process_node: - # View Process node - ipn = get_view_process_node() - if ipn is not None: - # connect to ViewProcess node - self._connect_to_above_nodes(ipn, subset, "ViewProcess... `{}`") - - if not self.viewer_lut_raw: - # OCIODisplay - dag_node = nuke.createNode("OCIODisplay") - - # assign display - display, viewer = get_viewer_config_from_string( - str(baking_view_profile) - ) - if display: - dag_node["display"].setValue(display) - - # assign viewer - dag_node["view"].setValue(viewer) - - if config_data: - # convert display and view to colorspace - colorspace = get_display_view_colorspace_name( - config_path=config_data["path"], - display=display, - view=viewer - ) - - self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`") - # Write node - write_node = nuke.createNode("Write") - self.log.debug("Path: {}".format(self.path)) - write_node["file"].setValue(str(self.path)) - write_node["file_type"].setValue(str(self.ext)) - # Knobs `meta_codec` and `mov64_codec` are not available on centos. 
- # TODO shouldn't this come from settings on outputs? - try: - write_node["meta_codec"].setValue("ap4h") - except Exception: - self.log.info("`meta_codec` knob was not found") - - try: - write_node["mov64_codec"].setValue("ap4h") - write_node["mov64_fps"].setValue(float(fps)) - except Exception: - self.log.info("`mov64_codec` knob was not found") - - try: - write_node["mov64_write_timecode"].setValue(1) - except Exception: - self.log.info("`mov64_write_timecode` knob was not found") - - write_node["raw"].setValue(1) - # connect - write_node.setInput(0, self.previous_node) - self._temp_nodes[subset].append(write_node) - self.log.debug("Write... `{}`".format(self._temp_nodes[subset])) - # ---------- end nodes creation - - # ---------- render or save to nk - if self.publish_on_farm: - nuke.scriptSave() - path_nk = self.save_file() - self.data.update({ - "bakeScriptPath": path_nk, - "bakeWriteNodeName": write_node.name(), - "bakeRenderPath": self.path - }) - else: - self.render(write_node.name()) - - # ---------- generate representation data - self.get_representation_data( - tags=["review", "need_thumbnail", "delete"] + add_tags, - custom_tags=add_custom_tags, - range=True, - colorspace=colorspace - ) - - self.log.debug("Representation... `{}`".format(self.data)) - - self.clean_nodes(subset) - nuke.scriptSave() - - return self.data - - def _shift_to_previous_node_and_temp(self, subset, node, message): - self._temp_nodes[subset].append(node) - self.previous_node = node - self.log.debug(message.format(self._temp_nodes[subset])) - - def _connect_to_above_nodes(self, node, subset, message): - node.setInput(0, self.previous_node) - self._shift_to_previous_node_and_temp(subset, node, message) - - -@deprecated("openpype.hosts.nuke.api.plugin.NukeWriteCreator") -class AbstractWriteRender(OpenPypeCreator): - """Abstract creator to gather similar implementation for Write creators""" - name = "" - label = "" - hosts = ["nuke"] - n_class = "Write" - family = "render" - icon = "sign-out" - defaults = ["Main", "Mask"] - knobs = [] - prenodes = {} - - def __init__(self, *args, **kwargs): - super(AbstractWriteRender, self).__init__(*args, **kwargs) - - data = OrderedDict() - - data["family"] = self.family - data["families"] = self.n_class - - for k, v in self.data.items(): - if k not in data.keys(): - data.update({k: v}) - - self.data = data - self.nodes = nuke.selectedNodes() - - def process(self): - - inputs = [] - outputs = [] - instance = nuke.toNode(self.data["subset"]) - selected_node = None - - # use selection - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if not (len(nodes) < 2): - msg = ("Select only one node. " - "The node you want to connect to, " - "or tick off `Use selection`") - self.log.error(msg) - nuke.message(msg) - return - - if len(nodes) == 0: - msg = ( - "No nodes selected. 
Please select a single node to connect" - " to or tick off `Use selection`" - ) - self.log.error(msg) - nuke.message(msg) - return - - selected_node = nodes[0] - inputs = [selected_node] - outputs = selected_node.dependent() - - if instance: - if (instance.name() in selected_node.name()): - selected_node = instance.dependencies()[0] - - # if node already exist - if instance: - # collect input / outputs - inputs = instance.dependencies() - outputs = instance.dependent() - selected_node = inputs[0] - # remove old one - nuke.delete(instance) - - # recreate new - write_data = { - "nodeclass": self.n_class, - "families": [self.family], - "avalon": self.data, - "subset": self.data["subset"], - "knobs": self.knobs - } - - # add creator data - creator_data = {"creator": self.__class__.__name__} - self.data.update(creator_data) - write_data.update(creator_data) - - write_node = self._create_write_node( - selected_node, - inputs, - outputs, - write_data - ) - - # relinking to collected connections - for i, input in enumerate(inputs): - write_node.setInput(i, input) - - write_node.autoplace() - - for output in outputs: - output.setInput(0, write_node) - - write_node = self._modify_write_node(write_node) - - return write_node - - def is_legacy(self): - """Check if it needs to run legacy code - - In case where `type` key is missing in single - knob it is legacy project anatomy. - - Returns: - bool: True if legacy - """ - imageio_nodes = get_nuke_imageio_settings()["nodes"] - node = imageio_nodes["requiredNodes"][0] - if "type" not in node["knobs"][0]: - # if type is not yet in project anatomy - return True - elif next(iter( - _k for _k in node["knobs"] - if _k.get("type") == "__legacy__" - ), None): - # in case someone re-saved anatomy - # with old configuration - return True - - @abstractmethod - def _create_write_node(self, selected_node, inputs, outputs, write_data): - """Family dependent implementation of Write node creation - - Args: - selected_node (nuke.Node) - inputs (list of nuke.Node) - input dependencies (what is connected) - outputs (list of nuke.Node) - output dependencies - write_data (dict) - values used to fill Knobs - Returns: - node (nuke.Node): group node with data as Knobs - """ - pass - - @abstractmethod - def _modify_write_node(self, write_node): - """Family dependent modification of created 'write_node' - - Returns: - node (nuke.Node): group node with data as Knobs - """ - pass - - -def convert_to_valid_instaces(): - """ Check and convert to latest publisher instances - - Also save as new minor version of workfile. 
- """ - def family_to_identifier(family): - mapping = { - "render": "create_write_render", - "prerender": "create_write_prerender", - "still": "create_write_image", - "model": "create_model", - "camera": "create_camera", - "nukenodes": "create_backdrop", - "gizmo": "create_gizmo", - "source": "create_source" - - } - return mapping[family] - - from openpype.hosts.nuke.api import workio - - task_name = get_current_task_name() - - # save into new workfile - current_file = workio.current_file() - - # add file suffex if not - if "_publisherConvert" not in current_file: - new_workfile = ( - current_file[:-3] - + "_publisherConvert" - + current_file[-3:] - ) - else: - new_workfile = current_file - - path = new_workfile.replace("\\", "/") - nuke.scriptSaveAs(new_workfile, overwrite=1) - nuke.Root()["name"].setValue(path) - nuke.Root()["project_directory"].setValue(os.path.dirname(path)) - nuke.Root().setModified(False) - - _remove_old_knobs(nuke.Root()) - - # loop all nodes and convert - for node in nuke.allNodes(recurseGroups=True): - transfer_data = { - "creator_attributes": {} - } - creator_attr = transfer_data["creator_attributes"] - - if node.Class() in ["Viewer", "Dot"]: - continue - - if get_node_data(node, INSTANCE_DATA_KNOB): - continue - - # get data from avalon knob - avalon_knob_data = get_avalon_knob_data( - node, ["avalon:", "ak:"]) - - if not avalon_knob_data: - continue - - if avalon_knob_data["id"] != "pyblish.avalon.instance": - continue - - transfer_data.update({ - k: v for k, v in avalon_knob_data.items() - if k not in ["families", "creator"] - }) - - transfer_data["task"] = task_name - - family = avalon_knob_data["family"] - # establish families - families_ak = avalon_knob_data.get("families", []) - - if "suspend_publish" in node.knobs(): - creator_attr["suspended_publish"] = ( - node["suspend_publish"].value()) - - # get review knob value - if "review" in node.knobs(): - creator_attr["review"] = ( - node["review"].value()) - - if "publish" in node.knobs(): - transfer_data["active"] = ( - node["publish"].value()) - - # add idetifier - transfer_data["creator_identifier"] = family_to_identifier(family) - - # Add all nodes in group instances. 
- if node.Class() == "Group": - # only alter families for render family - if families_ak and "write" in families_ak.lower(): - target = node["render"].value() - if target == "Use existing frames": - creator_attr["render_target"] = "frames" - elif target == "Local": - # Local rendering - creator_attr["render_target"] = "local" - elif target == "On farm": - # Farm rendering - creator_attr["render_target"] = "farm" - - if "deadlinePriority" in node.knobs(): - transfer_data["farm_priority"] = ( - node["deadlinePriority"].value()) - if "deadlineChunkSize" in node.knobs(): - creator_attr["farm_chunk"] = ( - node["deadlineChunkSize"].value()) - if "deadlineConcurrentTasks" in node.knobs(): - creator_attr["farm_concurrency"] = ( - node["deadlineConcurrentTasks"].value()) - - _remove_old_knobs(node) - - # add new instance knob with transfer data - set_node_data( - node, INSTANCE_DATA_KNOB, transfer_data) - - nuke.scriptSave() - - -def _remove_old_knobs(node): - remove_knobs = [ - "review", "publish", "render", "suspend_publish", "warn", "divd", - "OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority", - "deadlineChunkSize", "deadlineConcurrentTasks", "Deadline" - ] - print(node.name()) - - # remove all old knobs - for knob in node.allKnobs(): - try: - if knob.name() in remove_knobs: - node.removeKnob(knob) - elif "avalon" in knob.name(): - node.removeKnob(knob) - except ValueError: - pass - - -def exposed_write_knobs(settings, plugin_name, instance_node): - exposed_knobs = settings["nuke"]["create"][plugin_name]["exposed_knobs"] - if exposed_knobs: - instance_node.addKnob(nuke.Text_Knob('', 'Write Knobs')) - write_node = nuke.allNodes(group=instance_node, filter="Write")[0] - link_knobs(exposed_knobs, write_node, instance_node) diff --git a/openpype/hosts/nuke/api/utils.py b/openpype/hosts/nuke/api/utils.py deleted file mode 100644 index a7df1dee71..0000000000 --- a/openpype/hosts/nuke/api/utils.py +++ /dev/null @@ -1,140 +0,0 @@ -import os -import nuke - -from openpype import resources -from qtpy import QtWidgets - - -def set_context_favorites(favorites=None): - """ Adding favorite folders to nuke's browser - - Arguments: - favorites (dict): couples of {name:path} - """ - favorites = favorites or {} - icon_path = resources.get_resource("icons", "folder-favorite.png") - for name, path in favorites.items(): - nuke.addFavoriteDir( - name, - path, - nuke.IMAGE | nuke.SCRIPT | nuke.GEO, - icon=icon_path) - - -def get_node_outputs(node): - ''' - Return a dictionary of the nodes and pipes that are connected to node - ''' - dep_dict = {} - dependencies = node.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS) - for d in dependencies: - dep_dict[d] = [] - for i in range(d.inputs()): - if d.input(i) == node: - dep_dict[d].append(i) - return dep_dict - - -def is_node_gizmo(node): - ''' - return True if node is gizmo - ''' - return 'gizmo_file' in node.knobs() - - -def gizmo_is_nuke_default(gizmo): - '''Check if gizmo is in default install path''' - plug_dir = os.path.join(os.path.dirname( - nuke.env['ExecutablePath']), 'plugins') - return gizmo.filename().startswith(plug_dir) - - -def bake_gizmos_recursively(in_group=None): - """Converting a gizmo to group - - Arguments: - is_group (nuke.Node)[optonal]: group node or all nodes - """ - from .lib import maintained_selection - if in_group is None: - in_group = nuke.Root() - # preserve selection after all is done - with maintained_selection(): - # jump to the group - with in_group: - for node in nuke.allNodes(): - if is_node_gizmo(node) and not 
gizmo_is_nuke_default(node): - with node: - outputs = get_node_outputs(node) - group = node.makeGroup() - # Reconnect inputs and outputs if any - if outputs: - for n, pipes in outputs.items(): - for i in pipes: - n.setInput(i, group) - for i in range(node.inputs()): - group.setInput(i, node.input(i)) - # set node position and name - group.setXYpos(node.xpos(), node.ypos()) - name = node.name() - nuke.delete(node) - group.setName(name) - node = group - - if node.Class() == "Group": - bake_gizmos_recursively(node) - - -def colorspace_exists_on_node(node, colorspace_name): - """ Check if colorspace exists on node - - Look through all options in the colorspace knob, and see if we have an - exact match to one of the items. - - Args: - node (nuke.Node): nuke node object - colorspace_name (str): color profile name - - Returns: - bool: True if exists - """ - try: - colorspace_knob = node['colorspace'] - except ValueError: - # knob is not available on input node - return False - all_clrs = get_colorspace_list(colorspace_knob) - - return colorspace_name in all_clrs - - -def get_colorspace_list(colorspace_knob): - """Get available colorspace profile names - - Args: - colorspace_knob (nuke.Knob): nuke knob object - - Returns: - list: list of strings names of profiles - """ - - all_clrs = list(colorspace_knob.values()) - reduced_clrs = [] - - if not colorspace_knob.getFlag(nuke.STRIP_CASCADE_PREFIX): - return all_clrs - - # strip colorspace with nested path - for clrs in all_clrs: - clrs = clrs.split('/')[-1] - reduced_clrs.append(clrs) - - return reduced_clrs - - -def is_headless(): - """ - Returns: - bool: headless - """ - return QtWidgets.QApplication.instance() is None diff --git a/openpype/hosts/nuke/api/workfile_template_builder.py b/openpype/hosts/nuke/api/workfile_template_builder.py deleted file mode 100644 index ee9f75d10d..0000000000 --- a/openpype/hosts/nuke/api/workfile_template_builder.py +++ /dev/null @@ -1,1005 +0,0 @@ -import collections -import nuke -from openpype.pipeline import registered_host -from openpype.pipeline.workfile.workfile_template_builder import ( - AbstractTemplateBuilder, - PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin -) -from openpype.tools.workfile_template_build import ( - WorkfileBuildPlaceholderDialog, -) -from .lib import ( - find_free_space_to_paste_nodes, - get_extreme_positions, - get_group_io_nodes, - imprint, - refresh_node, - refresh_nodes, - reset_selection, - get_names_from_nodes, - get_nodes_by_names, - select_nodes, - duplicate_node, - node_tempfile, - get_main_window, - WorkfileSettings, -) - -PLACEHOLDER_SET = "PLACEHOLDERS_SET" - - -class NukeTemplateBuilder(AbstractTemplateBuilder): - """Concrete implementation of AbstractTemplateBuilder for nuke""" - - def import_template(self, path): - """Import template into current scene. - Block if a template is already loaded. 
- - Args: - path (str): A path to current template (usually given by - get_template_preset implementation) - - Returns: - bool: Whether the template was successfully imported or not - """ - - # TODO check if the template is already imported - - nuke.nodePaste(path) - reset_selection() - - return True - -class NukePlaceholderPlugin(PlaceholderPlugin): - node_color = 4278190335 - - def _collect_scene_placeholders(self): - # Cache placeholder data to shared data - placeholder_nodes = self.builder.get_shared_populate_data( - "placeholder_nodes" - ) - if placeholder_nodes is None: - placeholder_nodes = {} - all_groups = collections.deque() - all_groups.append(nuke.thisGroup()) - while all_groups: - group = all_groups.popleft() - for node in group.nodes(): - if isinstance(node, nuke.Group): - all_groups.append(node) - - node_knobs = node.knobs() - if ( - "is_placeholder" not in node_knobs - or not node.knob("is_placeholder").value() - ): - continue - - if "empty" in node_knobs and node.knob("empty").value(): - continue - - placeholder_nodes[node.fullName()] = node - - self.builder.set_shared_populate_data( - "placeholder_nodes", placeholder_nodes - ) - return placeholder_nodes - - def create_placeholder(self, placeholder_data): - placeholder_data["plugin_identifier"] = self.identifier - - placeholder = nuke.nodes.NoOp() - placeholder.setName("PLACEHOLDER") - placeholder.knob("tile_color").setValue(self.node_color) - - imprint(placeholder, placeholder_data) - imprint(placeholder, {"is_placeholder": True}) - placeholder.knob("is_placeholder").setVisible(False) - - def update_placeholder(self, placeholder_item, placeholder_data): - node = nuke.toNode(placeholder_item.scene_identifier) - imprint(node, placeholder_data) - - def _parse_placeholder_node_data(self, node): - placeholder_data = {} - for key in self.get_placeholder_keys(): - knob = node.knob(key) - value = None - if knob is not None: - value = knob.getValue() - placeholder_data[key] = value - return placeholder_data - - def delete_placeholder(self, placeholder): - """Remove placeholder if building was successful""" - placeholder_node = nuke.toNode(placeholder.scene_identifier) - nuke.delete(placeholder_node) - - -class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): - identifier = "nuke.load" - label = "Nuke load" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderLoadPlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" - ) - if loaded_representation_ids is None: - loaded_representation_ids = set() - for node in nuke.allNodes(): - if "repre_id" in node.knobs(): - loaded_representation_ids.add( - node.knob("repre_id").getValue() - ) - - self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids - ) - return loaded_representation_ids - - def _before_placeholder_load(self, 
placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def _before_repre_load(self, placeholder, representation): - placeholder.data["last_repre_id"] = str(representation["_id"]) - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - # TODO do data validations and maybe updgrades if are invalid - output.append( - LoadPlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_load_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - repre_ids = self._get_loaded_repre_ids() - self.populate_load_placeholder(placeholder, repre_ids) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. - """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - # TODO get from shared populate data! - nodes_init = placeholder.data["nodes_init"] - nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Loaded nodes: {}".format(nodes_loaded)) - if not nodes_loaded: - return - - placeholder.data["delete"] = True - - nodes_loaded = self._move_to_placeholder_group( - placeholder, nodes_loaded - ) - placeholder.data["last_loaded"] = nodes_loaded - refresh_nodes(nodes_loaded) - - # positioning of the loaded nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_loaded) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_loaded) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of loaded nodes - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded) - - self._set_loaded_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new loaded nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_loaded) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_loaded, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - 
placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the loaded - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_loaded, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_loaded): - """ - opening the placeholder's group and copying loaded nodes in it. - - Returns : - nodes_loaded (list): the new list of pasted nodes - """ - - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_loaded) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_loaded = nuke.selectedNodes() - return nodes_loaded - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is loaded.""" - - nodes_loaded = placeholder.data["last_loaded"] - loaded_backdrops = [] - bd_orders = set() - for node in nodes_loaded: - if isinstance(node, nuke.BackdropNode): - loaded_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in loaded_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes loaded with it) - - add Id to the attributes of all the other nodes - """ - - loaded_nodes = placeholder.data["last_loaded"] - loaded_nodes_set = set(loaded_nodes) - data = {"repre_id": str(placeholder.data["last_repre_id"])} - - for node in loaded_nodes: - node_knobs = node.knobs() - if "builder_type" not in node_knobs: - # save the id of representation for all imported nodes - imprint(node, data) - node.knob("repre_id").setVisible(False) - refresh_node(node) - continue - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(loaded_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes 
dimensions and positions.
-
-        Considering some nodes' sizes.
-
-        Args:
-            nodes (list): list of nodes to update
-            considered_nodes (list): list of nodes to consider while updating
-                positions and dimensions
-            offset_y (int): vertical distance between copies
-        """
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
-        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
-
-        diff_x = diff_y = 0
-        contained_nodes = []  # for backdrops
-
-        if offset_y is None:
-            width_ph = placeholder_node.screenWidth()
-            height_ph = placeholder_node.screenHeight()
-            diff_y = max_y - min_y - height_ph
-            diff_x = max_x - min_x - width_ph
-            contained_nodes = [placeholder_node]
-            min_x = placeholder_node.xpos()
-            min_y = placeholder_node.ypos()
-        else:
-            siblings = get_nodes_by_names(placeholder.data["siblings"])
-            minX, _, maxX, _ = get_extreme_positions(siblings)
-            diff_y = max_y - min_y + 20
-            diff_x = abs(max_x - min_x - maxX + minX)
-            contained_nodes = considered_nodes
-
-        if diff_y <= 0 and diff_x <= 0:
-            return
-
-        for node in nodes:
-            refresh_node(node)
-
-            if (
-                node == placeholder_node
-                or node in considered_nodes
-            ):
-                continue
-
-            if (
-                not isinstance(node, nuke.BackdropNode)
-                or (
-                    isinstance(node, nuke.BackdropNode)
-                    and not set(contained_nodes) <= set(node.getNodes())
-                )
-            ):
-                if offset_y is None and node.xpos() >= min_x:
-                    node.setXpos(node.xpos() + diff_x)
-
-                if node.ypos() >= min_y:
-                    node.setYpos(node.ypos() + diff_y)
-
-            else:
-                width = node.screenWidth()
-                height = node.screenHeight()
-                node.knob("bdwidth").setValue(width + diff_x)
-                node.knob("bdheight").setValue(height + diff_y)
-
-            refresh_node(node)
-
-    def _set_loaded_connections(self, placeholder):
-        """Set inputs and outputs of loaded nodes."""
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-        input_node, output_node = get_group_io_nodes(
-            placeholder.data["last_loaded"]
-        )
-        for node in placeholder_node.dependent():
-            for idx in range(node.inputs()):
-                if node.input(idx) == placeholder_node and output_node:
-                    node.setInput(idx, output_node)
-
-        for node in placeholder_node.dependencies():
-            for idx in range(placeholder_node.inputs()):
-                if placeholder_node.input(idx) == node and input_node:
-                    input_node.setInput(0, node)
-
-    def _create_sib_copies(self, placeholder):
-        """Create copies of the placeholder siblings (the ones that were
-        loaded with it) for the new nodes added.
-
-        Returns:
-            copies (dict): copied node names mapped to their copies
-        """
-
-        copies = {}
-        siblings = get_nodes_by_names(placeholder.data["siblings"])
-        for node in siblings:
-            new_node = duplicate_node(node)
-
-            x_init = int(new_node.knob("x_init").getValue())
-            y_init = int(new_node.knob("y_init").getValue())
-            new_node.setXYpos(x_init, y_init)
-            if isinstance(new_node, nuke.BackdropNode):
-                w_init = new_node.knob("w_init").getValue()
-                h_init = new_node.knob("h_init").getValue()
-                new_node.knob("bdwidth").setValue(w_init)
-                new_node.knob("bdheight").setValue(h_init)
-            refresh_node(node)
-
-            if "repre_id" in node.knobs().keys():
-                node.removeKnob(node.knob("repre_id"))
-            copies[node.name()] = new_node
-        return copies
-
-    def _set_copies_connections(self, placeholder, copies):
-        """Set inputs and outputs of the copies.
-
-        Args:
-            copies (dict): Copied nodes by their names.
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_loaded"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - -class NukePlaceholderCreatePlugin( - NukePlaceholderPlugin, PlaceholderCreateMixin -): - identifier = "nuke.create" - label = "Nuke create" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderCreatePlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _before_instance_create(self, placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - - output.append( - CreatePlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. 
- """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - nodes_init = placeholder.data["nodes_init"] - nodes_created = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Created nodes: {}".format(nodes_created)) - if not nodes_created: - return - - placeholder.data["delete"] = True - - nodes_created = self._move_to_placeholder_group( - placeholder, nodes_created - ) - placeholder.data["last_created"] = nodes_created - refresh_nodes(nodes_created) - - # positioning of the created nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_created) - for node in nodes_created: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_created) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of created nodes - - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_created) - - self._set_created_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new created nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_created) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_created, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the created - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_created, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_created: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_created): - """ - opening the placeholder's group and copying created nodes in it. 
- - Returns : - nodes_created (list): the new list of pasted nodes - """ - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_created) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_created = nuke.selectedNodes() - return nodes_created - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is create.""" - - nodes_created = placeholder.data["last_created"] - created_backdrops = [] - bd_orders = set() - for node in nodes_created: - if isinstance(node, nuke.BackdropNode): - created_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in created_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes created with it) - - add Id to the attributes of all the other nodes - """ - - created_nodes = placeholder.data["last_created"] - created_nodes_set = set(created_nodes) - - for node in created_nodes: - node_knobs = node.knobs() - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(created_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes dimensions and positions. - - Considering some nodes sizes. 
-
-        Args:
-            nodes (list): list of nodes to update
-            considered_nodes (list): list of nodes to consider while updating
-                positions and dimensions
-            offset_y (int): vertical distance between copies
-        """
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
-        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
-
-        diff_x = diff_y = 0
-        contained_nodes = []  # for backdrops
-
-        if offset_y is None:
-            width_ph = placeholder_node.screenWidth()
-            height_ph = placeholder_node.screenHeight()
-            diff_y = max_y - min_y - height_ph
-            diff_x = max_x - min_x - width_ph
-            contained_nodes = [placeholder_node]
-            min_x = placeholder_node.xpos()
-            min_y = placeholder_node.ypos()
-        else:
-            siblings = get_nodes_by_names(placeholder.data["siblings"])
-            minX, _, maxX, _ = get_extreme_positions(siblings)
-            diff_y = max_y - min_y + 20
-            diff_x = abs(max_x - min_x - maxX + minX)
-            contained_nodes = considered_nodes
-
-        if diff_y <= 0 and diff_x <= 0:
-            return
-
-        for node in nodes:
-            refresh_node(node)
-
-            if (
-                node == placeholder_node
-                or node in considered_nodes
-            ):
-                continue
-
-            if (
-                not isinstance(node, nuke.BackdropNode)
-                or (
-                    isinstance(node, nuke.BackdropNode)
-                    and not set(contained_nodes) <= set(node.getNodes())
-                )
-            ):
-                if offset_y is None and node.xpos() >= min_x:
-                    node.setXpos(node.xpos() + diff_x)
-
-                if node.ypos() >= min_y:
-                    node.setYpos(node.ypos() + diff_y)
-
-            else:
-                width = node.screenWidth()
-                height = node.screenHeight()
-                node.knob("bdwidth").setValue(width + diff_x)
-                node.knob("bdheight").setValue(height + diff_y)
-
-            refresh_node(node)
-
-    def _set_created_connections(self, placeholder):
-        """Set inputs and outputs of created nodes."""
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-        input_node, output_node = get_group_io_nodes(
-            placeholder.data["last_created"]
-        )
-        for node in placeholder_node.dependent():
-            for idx in range(node.inputs()):
-                if node.input(idx) == placeholder_node and output_node:
-                    node.setInput(idx, output_node)
-
-        for node in placeholder_node.dependencies():
-            for idx in range(placeholder_node.inputs()):
-                if placeholder_node.input(idx) == node and input_node:
-                    input_node.setInput(0, node)
-
-    def _create_sib_copies(self, placeholder):
-        """Create copies of the placeholder siblings (the ones that were
-        created with it) for the new nodes added.
-
-        Returns:
-            copies (dict): copied node names mapped to their copies
-        """
-
-        copies = {}
-        siblings = get_nodes_by_names(placeholder.data["siblings"])
-        for node in siblings:
-            new_node = duplicate_node(node)
-
-            x_init = int(new_node.knob("x_init").getValue())
-            y_init = int(new_node.knob("y_init").getValue())
-            new_node.setXYpos(x_init, y_init)
-            if isinstance(new_node, nuke.BackdropNode):
-                w_init = new_node.knob("w_init").getValue()
-                h_init = new_node.knob("h_init").getValue()
-                new_node.knob("bdwidth").setValue(w_init)
-                new_node.knob("bdheight").setValue(h_init)
-            refresh_node(node)
-
-            if "repre_id" in node.knobs().keys():
-                node.removeKnob(node.knob("repre_id"))
-            copies[node.name()] = new_node
-        return copies
-
-    def _set_copies_connections(self, placeholder, copies):
-        """Set inputs and outputs of the copies.
-
-        Args:
-            copies (dict): Copied nodes by their names.
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_created"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - -def build_workfile_template(*args, **kwargs): - builder = NukeTemplateBuilder(registered_host()) - builder.build_template(*args, **kwargs) - - # set all settings to shot context default - WorkfileSettings().set_context_settings() - - -def update_workfile_template(*args): - builder = NukeTemplateBuilder(registered_host()) - builder.rebuild_template() - - -def create_placeholder(*args): - host = registered_host() - builder = NukeTemplateBuilder(host) - window = WorkfileBuildPlaceholderDialog(host, builder, - parent=get_main_window()) - window.show() - - -def update_placeholder(*args): - host = registered_host() - builder = NukeTemplateBuilder(host) - placeholder_items_by_id = { - placeholder_item.scene_identifier: placeholder_item - for placeholder_item in builder.get_placeholders() - } - placeholder_items = [] - for node in nuke.selectedNodes(): - node_name = node.fullName() - if node_name in placeholder_items_by_id: - placeholder_items.append(placeholder_items_by_id[node_name]) - - # TODO show UI at least - if len(placeholder_items) == 0: - raise ValueError("No node selected") - - if len(placeholder_items) > 1: - raise ValueError("Too many selected nodes") - - placeholder_item = placeholder_items[0] - window = WorkfileBuildPlaceholderDialog(host, builder, - parent=get_main_window()) - window.set_update_mode(placeholder_item) - window.exec_() diff --git a/openpype/hosts/nuke/plugins/create/convert_legacy.py b/openpype/hosts/nuke/plugins/create/convert_legacy.py deleted file mode 100644 index 377e9f78f6..0000000000 --- a/openpype/hosts/nuke/plugins/create/convert_legacy.py +++ /dev/null @@ -1,52 +0,0 @@ -from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin -from openpype.hosts.nuke.api.lib import ( - INSTANCE_DATA_KNOB, - get_node_data, - get_avalon_knob_data, - AVALON_TAB, -) -from openpype.hosts.nuke.api.plugin import convert_to_valid_instaces - -import nuke - - -class LegacyConverted(SubsetConvertorPlugin): - identifier = "legacy.converter" - - def find_instances(self): - - legacy_found = False - # search for first available legacy item - for node in nuke.allNodes(recurseGroups=True): - if node.Class() in ["Viewer", "Dot"]: - continue - - if get_node_data(node, INSTANCE_DATA_KNOB): - continue - - if AVALON_TAB not in node.knobs(): - continue - - # get data from avalon knob - avalon_knob_data = get_avalon_knob_data( - node, ["avalon:", "ak:"], create=False) - - if not avalon_knob_data: - continue - - if avalon_knob_data["id"] != "pyblish.avalon.instance": - 
continue - - # catch and break - legacy_found = True - break - - if legacy_found: - # if not item do not add legacy instance converter - self.add_convertor_item("Convert legacy instances") - - def convert(self): - # loop all instances and convert them - convert_to_valid_instaces() - # remove legacy item if all is fine - self.remove_convertor_item() diff --git a/openpype/hosts/nuke/plugins/create/create_camera.py b/openpype/hosts/nuke/plugins/create/create_camera.py deleted file mode 100644 index be9c69213e..0000000000 --- a/openpype/hosts/nuke/plugins/create/create_camera.py +++ /dev/null @@ -1,69 +0,0 @@ -import nuke -from openpype.hosts.nuke.api import ( - NukeCreator, - NukeCreatorError, - maintained_selection -) -from openpype.hosts.nuke.api.lib import ( - create_camera_node_by_version -) - - -class CreateCamera(NukeCreator): - """Add Publishable Camera""" - - identifier = "create_camera" - label = "Camera (3d)" - family = "camera" - icon = "camera" - - # plugin attributes - node_color = "0xff9100ff" - - def create_instance_node( - self, - node_name, - knobs=None, - parent=None, - node_type=None - ): - with maintained_selection(): - if self.selected_nodes: - node = self.selected_nodes[0] - if node.Class() != "Camera3": - raise NukeCreatorError( - "Creator error: Select only camera node type") - created_node = self.selected_nodes[0] - else: - created_node = create_camera_node_by_version() - - created_node["tile_color"].setValue( - int(self.node_color, 16)) - - created_node["name"].setValue(node_name) - - return created_node - - def create(self, subset_name, instance_data, pre_create_data): - # make sure subset name is unique - self.check_existing_subset(subset_name) - - instance = super(CreateCamera, self).create( - subset_name, - instance_data, - pre_create_data - ) - - return instance - - def set_selected_nodes(self, pre_create_data): - if pre_create_data.get("use_selection"): - self.selected_nodes = nuke.selectedNodes() - if self.selected_nodes == []: - raise NukeCreatorError( - "Creator error: No active selection") - elif len(self.selected_nodes) > 1: - raise NukeCreatorError( - "Creator error: Select only one camera node") - else: - self.selected_nodes = [] diff --git a/openpype/hosts/nuke/plugins/create/create_model.py b/openpype/hosts/nuke/plugins/create/create_model.py deleted file mode 100644 index a94c9f0313..0000000000 --- a/openpype/hosts/nuke/plugins/create/create_model.py +++ /dev/null @@ -1,65 +0,0 @@ -import nuke -from openpype.hosts.nuke.api import ( - NukeCreator, - NukeCreatorError, - maintained_selection -) - - -class CreateModel(NukeCreator): - """Add Publishable Camera""" - - identifier = "create_model" - label = "Model (3d)" - family = "model" - icon = "cube" - default_variants = ["Main"] - - # plugin attributes - node_color = "0xff3200ff" - - def create_instance_node( - self, - node_name, - knobs=None, - parent=None, - node_type=None - ): - with maintained_selection(): - if self.selected_nodes: - node = self.selected_nodes[0] - if node.Class() != "Scene": - raise NukeCreatorError( - "Creator error: Select only 'Scene' node type") - created_node = node - else: - created_node = nuke.createNode("Scene") - - created_node["tile_color"].setValue( - int(self.node_color, 16)) - - created_node["name"].setValue(node_name) - - return created_node - - def create(self, subset_name, instance_data, pre_create_data): - # make sure subset name is unique - self.check_existing_subset(subset_name) - - instance = super(CreateModel, self).create( - subset_name, - instance_data, - 
pre_create_data
        )

        return instance

    def set_selected_nodes(self, pre_create_data):
        if pre_create_data.get("use_selection"):
            self.selected_nodes = nuke.selectedNodes()
            if self.selected_nodes == []:
                raise NukeCreatorError("Creator error: No active selection")
            elif len(self.selected_nodes) > 1:
                raise NukeCreatorError(
                    "Creator error: Select only one 'Scene' node")
        else:
            self.selected_nodes = []
diff --git a/openpype/hosts/nuke/plugins/create/workfile_creator.py b/openpype/hosts/nuke/plugins/create/workfile_creator.py
deleted file mode 100644
index c4e0753abc..0000000000
--- a/openpype/hosts/nuke/plugins/create/workfile_creator.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import openpype.hosts.nuke.api as api
-from openpype.client import get_asset_by_name
-from openpype.pipeline import (
-    AutoCreator,
-    CreatedInstance,
-)
-from openpype.hosts.nuke.api import (
-    INSTANCE_DATA_KNOB,
-    set_node_data
-)
-import nuke
-
-
-class WorkfileCreator(AutoCreator):
-    identifier = "workfile"
-    family = "workfile"
-
-    default_variant = "Main"
-
-    def get_instance_attr_defs(self):
-        return []
-
-    def collect_instances(self):
-        root_node = nuke.root()
-        instance_data = api.get_node_data(
-            root_node, api.INSTANCE_DATA_KNOB
-        )
-
-        project_name = self.create_context.get_current_project_name()
-        asset_name = self.create_context.get_current_asset_name()
-        task_name = self.create_context.get_current_task_name()
-        host_name = self.create_context.host_name
-
-        asset_doc = get_asset_by_name(project_name, asset_name)
-        subset_name = self.get_subset_name(
-            self.default_variant, task_name, asset_doc,
-            project_name, host_name
-        )
-        instance_data.update({
-            "asset": asset_name,
-            "task": task_name,
-            "variant": self.default_variant
-        })
-        instance_data.update(self.get_dynamic_data(
-            self.default_variant, task_name, asset_doc,
-            project_name, host_name, instance_data
-        ))
-
-        instance = CreatedInstance(
-            self.family, subset_name, instance_data, self
-        )
-        instance.transient_data["node"] = root_node
-        self._add_instance_to_context(instance)
-
-    def update_instances(self, update_list):
-        for created_inst, _changes in update_list:
-            instance_node = created_inst.transient_data["node"]
-
-            set_node_data(
-                instance_node,
-                INSTANCE_DATA_KNOB,
-                created_inst.data_to_store()
-            )
-
-    def create(self, options=None):
-        # no need to create if it is created
-        # in `collect_instances`
-        pass
diff --git a/openpype/hosts/nuke/plugins/inventory/select_containers.py b/openpype/hosts/nuke/plugins/inventory/select_containers.py
deleted file mode 100644
index 4e7a20fb26..0000000000
--- a/openpype/hosts/nuke/plugins/inventory/select_containers.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from openpype.pipeline import InventoryAction
-from openpype.hosts.nuke.api.command import viewer_update_and_undo_stop
-
-
-class SelectContainers(InventoryAction):
-
-    label = "Select Containers"
-    icon = "mouse-pointer"
-    color = "#d8d8d8"
-
-    def process(self, containers):
-        import nuke
-
-        nodes = [nuke.toNode(i["objectName"]) for i in containers]
-
-        with viewer_update_and_undo_stop():
-            # clear previous selection
-            [n['selected'].setValue(False) for n in nodes]
-            # select the container nodes
-            for node in nodes:
-                node["selected"].setValue(True)
diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py
deleted file mode 100644
index 635318f53d..0000000000
--- a/openpype/hosts/nuke/plugins/load/actions.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""A module containing generic loader actions that will display in the Loader.
- -""" - -from openpype.lib import Logger -from openpype.pipeline import load - -log = Logger.get_logger(__name__) - - -class SetFrameRangeLoader(load.LoaderPlugin): - """Set frame range excluding pre- and post-handles""" - - families = ["animation", - "camera", - "write", - "yeticache", - "pointcache"] - representations = ["*"] - extensions = {"*"} - - label = "Set frame range" - order = 11 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - from openpype.hosts.nuke.api import lib - - version = context['version'] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - log.info("start: {}, end: {}".format(start, end)) - if start is None or end is None: - log.info("Skipping setting frame range because start or " - "end frame data is missing..") - return - - lib.update_frame_range(start, end) - - -class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): - """Set frame range including pre- and post-handles""" - - families = ["animation", - "camera", - "write", - "yeticache", - "pointcache"] - representations = ["*"] - - label = "Set frame range (with handles)" - order = 12 - icon = "clock-o" - color = "white" - - def load(self, context, name, namespace, data): - - from openpype.hosts.nuke.api import lib - - version = context['version'] - version_data = version.get("data", {}) - - start = version_data.get("frameStart", None) - end = version_data.get("frameEnd", None) - - if start is None or end is None: - print("Skipping setting frame range because start or " - "end frame data is missing..") - return - - # Include handles - start -= version_data.get("handleStart", 0) - end += version_data.get("handleEnd", 0) - - lib.update_frame_range(start, end) diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py deleted file mode 100644 index 898c5e4e7b..0000000000 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ /dev/null @@ -1,201 +0,0 @@ -import nuke - -from openpype.client import ( - get_version_by_id, - get_last_version_by_subset_id -) -from openpype.pipeline import ( - load, - get_current_project_name, - get_representation_path, -) -from openpype.hosts.nuke.api import ( - containerise, - update_container, - viewer_update_and_undo_stop -) -from openpype.hosts.nuke.api.lib import ( - maintained_selection -) - - -class AlembicCameraLoader(load.LoaderPlugin): - """ - This will load alembic camera into script. 
- """ - - families = ["camera"] - representations = ["*"] - extensions = {"abc"} - - label = "Load Alembic Camera" - icon = "camera" - color = "orange" - node_color = "0x3469ffff" - - def load(self, context, name, namespace, data): - # get main variables - version = context['version'] - version_data = version.get("data", {}) - vname = version.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - fps = version_data.get("fps") or nuke.root()["fps"].getValue() - namespace = namespace or context['asset']['name'] - object_name = "{}_{}".format(name, namespace) - - # prepare data for imprinting - # add additional metadata from the version to imprint to Avalon knob - add_keys = ["source", "author", "fps"] - - data_imprint = { - "frameStart": first, - "frameEnd": last, - "version": vname, - } - - for k in add_keys: - data_imprint.update({k: version_data[k]}) - - # getting file path - file = self.filepath_from_context(context).replace("\\", "/") - - with maintained_selection(): - camera_node = nuke.createNode( - "Camera2", - "name {} file {} read_from_file True".format( - object_name, file), - inpanel=False - ) - - camera_node.forceValidate() - camera_node["frame_rate"].setValue(float(fps)) - - # workaround because nuke's bug is not adding - # animation keys properly - xpos = camera_node.xpos() - ypos = camera_node.ypos() - nuke.nodeCopy("%clipboard%") - nuke.delete(camera_node) - nuke.nodePaste("%clipboard%") - camera_node = nuke.toNode(object_name) - camera_node.setXYpos(xpos, ypos) - - # color node by correct color by actual version - self.node_version_color(version, camera_node) - - return containerise( - node=camera_node, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__, - data=data_imprint) - - def update(self, container, representation): - """ - Called by Scene Inventory when look should be updated to current - version. - If any reference edits cannot be applied, eg. shader renamed and - material not present, reference is unloaded and cleaned. - All failed edits are highlighted to the user via message box. 
- - Args: - container: object that has look to be updated - representation: (dict): relationship data to get proper - representation from DB and persisted - data in .json - Returns: - None - """ - # Get version from io - project_name = get_current_project_name() - version_doc = get_version_by_id(project_name, representation["parent"]) - - object_name = container["node"] - - # get main variables - version_data = version_doc.get("data", {}) - vname = version_doc.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - fps = version_data.get("fps") or nuke.root()["fps"].getValue() - - # prepare data for imprinting - # add additional metadata from the version to imprint to Avalon knob - add_keys = ["source", "author", "fps"] - - data_imprint = { - "representation": str(representation["_id"]), - "frameStart": first, - "frameEnd": last, - "version": vname - } - - for k in add_keys: - data_imprint.update({k: version_data[k]}) - - # getting file path - file = get_representation_path(representation).replace("\\", "/") - - with maintained_selection(): - camera_node = nuke.toNode(object_name) - camera_node['selected'].setValue(True) - - # collect input output dependencies - dependencies = camera_node.dependencies() - dependent = camera_node.dependent() - - camera_node["frame_rate"].setValue(float(fps)) - camera_node["file"].setValue(file) - - # workaround because nuke's bug is - # not adding animation keys properly - xpos = camera_node.xpos() - ypos = camera_node.ypos() - nuke.nodeCopy("%clipboard%") - nuke.delete(camera_node) - nuke.nodePaste("%clipboard%") - camera_node = nuke.toNode(object_name) - camera_node.setXYpos(xpos, ypos) - - # link to original input nodes - for i, input in enumerate(dependencies): - camera_node.setInput(i, input) - # link to original output nodes - for d in dependent: - index = next((i for i, dpcy in enumerate( - d.dependencies()) - if camera_node is dpcy), 0) - d.setInput(index, camera_node) - - # color node by correct color by actual version - self.node_version_color(version_doc, camera_node) - - self.log.info("updated to version: {}".format(version_doc.get("name"))) - - return update_container(camera_node, data_imprint) - - def node_version_color(self, version_doc, node): - """ Coloring a node by correct color by actual version - """ - # get all versions in list - project_name = get_current_project_name() - last_version_doc = get_last_version_by_subset_id( - project_name, version_doc["parent"], fields=["_id"] - ) - - # change color of node - if version_doc["_id"] == last_version_doc["_id"]: - color_value = self.node_color - else: - color_value = "0xd88467ff" - node["tile_color"].setValue(int(color_value, 16)) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - node = container["node"] - with viewer_update_and_undo_stop(): - nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py deleted file mode 100644 index 3a2ec3dbee..0000000000 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ /dev/null @@ -1,474 +0,0 @@ -import nuke -import qargparse -from pprint import pformat -from copy import deepcopy -from openpype.lib import Logger -from openpype.client import ( - get_version_by_id, - get_last_version_by_subset_id, -) -from openpype.pipeline import ( - get_current_project_name, - get_representation_path, -) -from openpype.hosts.nuke.api.lib import ( - 
get_imageio_input_colorspace, - maintained_selection -) -from openpype.hosts.nuke.api import ( - containerise, - update_container, - viewer_update_and_undo_stop, - colorspace_exists_on_node -) -from openpype.lib.transcoding import ( - VIDEO_EXTENSIONS, - IMAGE_EXTENSIONS -) -from openpype.hosts.nuke.api import plugin - - -class LoadClip(plugin.NukeLoader): - """Load clip into Nuke - - Either it is image sequence or video file. - """ - log = Logger.get_logger(__name__) - - families = [ - "source", - "plate", - "render", - "prerender", - "review" - ] - representations = ["*"] - extensions = set( - ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) - ) - - label = "Load Clip" - order = -20 - icon = "file-video-o" - color = "white" - - # Loaded from settings - _representations = [] - - script_start = int(nuke.root()["first_frame"].value()) - - # option gui - options_defaults = { - "start_at_workfile": True, - "add_retime": True - } - - node_name_template = "{class_name}_{ext}" - - @classmethod - def get_options(cls, *args): - return [ - qargparse.Boolean( - "start_at_workfile", - help="Load at workfile start frame", - default=cls.options_defaults["start_at_workfile"] - ), - qargparse.Boolean( - "add_retime", - help="Load with retime", - default=cls.options_defaults["add_retime"] - ) - ] - - @classmethod - def get_representations(cls): - return cls._representations or cls.representations - - def load(self, context, name, namespace, options): - """Load asset via database - """ - representation = context["representation"] - # reset container id so it is always unique for each instance - self.reset_container_id() - - is_sequence = len(representation["files"]) > 1 - - if is_sequence: - context["representation"] = \ - self._representation_with_hash_in_frame( - representation - ) - - filepath = self.filepath_from_context(context) - filepath = filepath.replace("\\", "/") - self.log.debug("_ filepath: {}".format(filepath)) - - start_at_workfile = options.get( - "start_at_workfile", self.options_defaults["start_at_workfile"]) - - add_retime = options.get( - "add_retime", self.options_defaults["add_retime"]) - - version = context['version'] - version_data = version.get("data", {}) - repre_id = representation["_id"] - - self.log.debug("_ version_data: {}\n".format( - pformat(version_data))) - self.log.debug( - "Representation id `{}` ".format(repre_id)) - - self.handle_start = version_data.get("handleStart", 0) - self.handle_end = version_data.get("handleEnd", 0) - - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - first -= self.handle_start - last += self.handle_end - - if not is_sequence: - duration = last - first - first = 1 - last = first + duration - - # Fallback to asset name when namespace is None - if namespace is None: - namespace = context['asset']['name'] - - if not filepath: - self.log.warning( - "Representation id `{}` is failing to load".format(repre_id)) - return - - read_name = self._get_node_name(representation) - - # Create the Loader with the filename path set - read_node = nuke.createNode( - "Read", - "name {}".format(read_name), - inpanel=False - ) - - # to avoid multiple undo steps for rest of process - # we will switch off undo-ing - with viewer_update_and_undo_stop(): - read_node["file"].setValue(filepath) - - used_colorspace = self._set_colorspace( - read_node, version_data, representation["data"], filepath) - - self._set_range_to_node(read_node, first, last, start_at_workfile) - - # add additional metadata from the 
version to imprint Avalon knob - add_keys = ["frameStart", "frameEnd", - "source", "colorspace", "author", "fps", "version", - "handleStart", "handleEnd"] - - data_imprint = {} - for key in add_keys: - if key == 'version': - version_doc = context["version"] - if version_doc["type"] == "hero_version": - version = "hero" - else: - version = version_doc.get("name") - - if version: - data_imprint[key] = version - - elif key == 'colorspace': - colorspace = representation["data"].get(key) - colorspace = colorspace or version_data.get(key) - data_imprint["db_colorspace"] = colorspace - if used_colorspace: - data_imprint["used_colorspace"] = used_colorspace - else: - value_ = context["version"]['data'].get( - key, str(None)) - if isinstance(value_, (str)): - value_ = value_.replace("\\", "/") - data_imprint[key] = value_ - - if add_retime and version_data.get("retime", None): - data_imprint["addRetime"] = True - - read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) - - container = containerise( - read_node, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__, - data=data_imprint) - - if add_retime and version_data.get("retime", None): - self._make_retimes(read_node, version_data) - - self.set_as_member(read_node) - - return container - - def switch(self, container, representation): - self.update(container, representation) - - def _representation_with_hash_in_frame(self, representation): - """Convert frame key value to padded hash - - Args: - representation (dict): representation data - - Returns: - dict: altered representation data - """ - representation = deepcopy(representation) - context = representation["context"] - - # Get the frame from the context and hash it - frame = context["frame"] - hashed_frame = "#" * len(str(frame)) - - # Replace the frame with the hash in the originalBasename - if ( - "{originalBasename}" in representation["data"]["template"] - ): - origin_basename = context["originalBasename"] - context["originalBasename"] = origin_basename.replace( - frame, hashed_frame - ) - - # Replace the frame with the hash in the frame - representation["context"]["frame"] = hashed_frame - return representation - - def update(self, container, representation): - """Update the Loader's path - - Nuke automatically tries to reset some variables when changing - the loader's path to a new file. 
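A pure-Python sketch of the frame-token hashing done in `_representation_with_hash_in_frame` above; the file names are hypothetical:

```python
def hash_frame_token(basename, frame):
    """Replace an explicit frame number with a '#' padding token.

    Mirrors the hashing step above: the hash count matches the
    frame string length, so 'plate.1001.exr' becomes 'plate.####.exr'.
    """
    hashed = "#" * len(str(frame))
    return basename.replace(str(frame), hashed)


# usage sketch with hypothetical values
assert hash_frame_token("plate.1001.exr", "1001") == "plate.####.exr"
```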
These automatic changes are to its - inputs: - - """ - - is_sequence = len(representation["files"]) > 1 - - read_node = container["node"] - - if is_sequence: - representation = self._representation_with_hash_in_frame( - representation - ) - - filepath = get_representation_path(representation).replace("\\", "/") - self.log.debug("_ filepath: {}".format(filepath)) - - start_at_workfile = "start at" in read_node['frame_mode'].value() - - add_retime = [ - key for key in read_node.knobs().keys() - if "addRetime" in key - ] - - project_name = get_current_project_name() - version_doc = get_version_by_id(project_name, representation["parent"]) - - version_data = version_doc.get("data", {}) - repre_id = representation["_id"] - - # colorspace profile - colorspace = representation["data"].get("colorspace") - colorspace = colorspace or version_data.get("colorspace") - - self.handle_start = version_data.get("handleStart", 0) - self.handle_end = version_data.get("handleEnd", 0) - - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - first -= self.handle_start - last += self.handle_end - - if not is_sequence: - duration = last - first - first = 1 - last = first + duration - - if not filepath: - self.log.warning( - "Representation id `{}` is failing to load".format(repre_id)) - return - - read_node["file"].setValue(filepath) - - # to avoid multiple undo steps for rest of process - # we will switch off undo-ing - with viewer_update_and_undo_stop(): - used_colorspace = self._set_colorspace( - read_node, version_data, representation["data"], filepath) - - self._set_range_to_node(read_node, first, last, start_at_workfile) - - updated_dict = { - "representation": str(representation["_id"]), - "frameStart": str(first), - "frameEnd": str(last), - "version": str(version_doc.get("name")), - "db_colorspace": colorspace, - "source": version_data.get("source"), - "handleStart": str(self.handle_start), - "handleEnd": str(self.handle_end), - "fps": str(version_data.get("fps")), - "author": version_data.get("author") - } - - # add used colorspace if found any - if used_colorspace: - updated_dict["used_colorspace"] = used_colorspace - - last_version_doc = get_last_version_by_subset_id( - project_name, version_doc["parent"], fields=["_id"] - ) - # change color of read_node - if version_doc["_id"] == last_version_doc["_id"]: - color_value = "0x4ecd25ff" - else: - color_value = "0xd84f20ff" - read_node["tile_color"].setValue(int(color_value, 16)) - - # Update the imprinted representation - update_container( - read_node, - updated_dict - ) - self.log.info( - "updated to version: {}".format(version_doc.get("name")) - ) - - if add_retime and version_data.get("retime", None): - self._make_retimes(read_node, version_data) - else: - self.clear_members(read_node) - - self.set_as_member(read_node) - - def remove(self, container): - read_node = container["node"] - assert read_node.Class() == "Read", "Must be Read" - - with viewer_update_and_undo_stop(): - members = self.get_members(read_node) - nuke.delete(read_node) - for member in members: - nuke.delete(member) - - def _set_range_to_node(self, read_node, first, last, start_at_workfile): - read_node['origfirst'].setValue(int(first)) - read_node['first'].setValue(int(first)) - read_node['origlast'].setValue(int(last)) - read_node['last'].setValue(int(last)) - - # set start frame depending on workfile or version - self._loader_shift(read_node, start_at_workfile) - - def _make_retimes(self, parent_node, version_data): - ''' Create all retime and 
timewarping nodes with copied animation ''' - speed = version_data.get('speed', 1) - time_warp_nodes = version_data.get('timewarps', []) - last_node = None - source_id = self.get_container_id(parent_node) - self.log.debug("__ source_id: {}".format(source_id)) - self.log.debug("__ members: {}".format( - self.get_members(parent_node))) - - dependent_nodes = self.clear_members(parent_node) - - with maintained_selection(): - parent_node['selected'].setValue(True) - - if speed != 1: - rtn = nuke.createNode( - "Retime", - "speed {}".format(speed)) - - rtn["before"].setValue("continue") - rtn["after"].setValue("continue") - rtn["input.first_lock"].setValue(True) - rtn["input.first"].setValue( - self.script_start - ) - self.set_as_member(rtn) - last_node = rtn - - if time_warp_nodes != []: - start_anim = self.script_start + (self.handle_start / speed) - for timewarp in time_warp_nodes: - twn = nuke.createNode( - timewarp["Class"], - "name {}".format(timewarp["name"]) - ) - if isinstance(timewarp["lookup"], list): - # if array for animation - twn["lookup"].setAnimated() - for i, value in enumerate(timewarp["lookup"]): - twn["lookup"].setValueAt( - (start_anim + i) + value, - (start_anim + i)) - else: - # if static value `int` - twn["lookup"].setValue(timewarp["lookup"]) - - self.set_as_member(twn) - last_node = twn - - if dependent_nodes: - # connect to original inputs - for i, n in enumerate(dependent_nodes): - last_node.setInput(i, n) - - def _loader_shift(self, read_node, workfile_start=False): - """ Set start frame of read node to a workfile start - - Args: - read_node (nuke.Node): The nuke's read node - workfile_start (bool): set workfile start frame if true - - """ - if workfile_start: - read_node['frame_mode'].setValue("start at") - read_node['frame'].setValue(str(self.script_start)) - - def _get_node_name(self, representation): - - repre_cont = representation["context"] - name_data = { - "asset": repre_cont["asset"], - "subset": repre_cont["subset"], - "representation": representation["name"], - "ext": repre_cont["representation"], - "id": representation["_id"], - "class_name": self.__class__.__name__ - } - - return self.node_name_template.format(**name_data) - - def _set_colorspace(self, node, version_data, repre_data, path): - output_color = None - path = path.replace("\\", "/") - # get colorspace - colorspace = repre_data.get("colorspace") - colorspace = colorspace or version_data.get("colorspace") - - # colorspace from `project_settings/nuke/imageio/regexInputs` - iio_colorspace = get_imageio_input_colorspace(path) - - # Set colorspace defined in version data - if ( - colorspace is not None - and colorspace_exists_on_node(node, str(colorspace)) - ): - node["colorspace"].setValue(str(colorspace)) - output_color = str(colorspace) - elif iio_colorspace is not None: - node["colorspace"].setValue(iio_colorspace) - output_color = iio_colorspace - - return output_color diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py deleted file mode 100644 index cc048372d4..0000000000 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ /dev/null @@ -1,353 +0,0 @@ -import json -from collections import OrderedDict -import nuke -import six - -from openpype.client import ( - get_version_by_id, - get_last_version_by_subset_id, -) -from openpype.pipeline import ( - load, - get_current_project_name, - get_representation_path, -) -from openpype.hosts.nuke.api import ( - containerise, - update_container, - viewer_update_and_undo_stop -) - - -class 
LoadEffects(load.LoaderPlugin): - """Loading colorspace soft effect exported from nukestudio""" - - families = ["effect"] - representations = ["*"] - extensions = {"json"} - - label = "Load Effects - nodes" - order = 0 - icon = "cc" - color = "white" - ignore_attr = ["useLifetime"] - - - def load(self, context, name, namespace, data): - """ - Loading function to get the soft effects to particular read node - - Arguments: - context (dict): context of version - name (str): name of the version - namespace (str): asset name - data (dict): compulsory attribute > not used - - Returns: - nuke node: containerised nuke node object - """ - # get main variables - version = context['version'] - version_data = version.get("data", {}) - vname = version.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - workfile_first_frame = int(nuke.root()["first_frame"].getValue()) - namespace = namespace or context['asset']['name'] - colorspace = version_data.get("colorspace", None) - object_name = "{}_{}".format(name, namespace) - - # prepare data for imprinting - # add additional metadata from the version to imprint to Avalon knob - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] - - data_imprint = { - "frameStart": first, - "frameEnd": last, - "version": vname, - "colorspaceInput": colorspace, - } - - for k in add_keys: - data_imprint.update({k: version_data[k]}) - - # getting file path - file = self.filepath_from_context(context).replace("\\", "/") - - # getting data from json file with unicode conversion - with open(file, "r") as f: - json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).items()} - - # get correct order of nodes by positions on track and subtrack - nodes_order = self.reorder_nodes(json_f) - - # adding nodes to node graph - # just in case we are in group lets jump out of it - nuke.endGroup() - - GN = nuke.createNode( - "Group", - "name {}_1".format(object_name), - inpanel=False - ) - - # adding content to the group node - with GN: - pre_node = nuke.createNode("Input") - pre_node["name"].setValue("rgb") - - for ef_name, ef_val in nodes_order.items(): - node = nuke.createNode(ef_val["class"]) - for k, v in ef_val["node"].items(): - if k in self.ignore_attr: - continue - - try: - node[k].value() - except NameError as e: - self.log.warning(e) - continue - - if isinstance(v, list) and len(v) > 4: - node[k].setAnimated() - for i, value in enumerate(v): - if isinstance(value, list): - for ci, cv in enumerate(value): - node[k].setValueAt( - cv, - (workfile_first_frame + i), - ci) - else: - node[k].setValueAt( - value, - (workfile_first_frame + i)) - else: - node[k].setValue(v) - node.setInput(0, pre_node) - pre_node = node - - output = nuke.createNode("Output") - output.setInput(0, pre_node) - - # try to find parent read node - self.connect_read_node(GN, namespace, json_f["assignTo"]) - - GN["tile_color"].setValue(int("0x3469ffff", 16)) - - self.log.info("Loaded lut setup: `{}`".format(GN["name"].value())) - - return containerise( - node=GN, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__, - data=data_imprint) - - def update(self, container, representation): - """Update the Loader's path - - Nuke automatically tries to reset some variables when changing - the loader's path to a new file. 
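A condensed sketch of the static-versus-animated knob handling used in the loop above; the per-channel nested-list case from the original is omitted and the helper name is illustrative:

```python
import nuke


def apply_knob_value(node, knob_name, value, first_frame):
    """Set a knob either statically or as baked per-frame animation.

    Matches the loader's heuristic above: lists longer than four
    entries are treated as animation starting at `first_frame`.
    """
    if isinstance(value, list) and len(value) > 4:
        node[knob_name].setAnimated()
        for offset, frame_value in enumerate(value):
            # one key per frame, offset from the workfile start
            node[knob_name].setValueAt(frame_value, first_frame + offset)
    else:
        node[knob_name].setValue(value)
```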
These automatic changes are to its - inputs: - - """ - # get main variables - # Get version from io - project_name = get_current_project_name() - version_doc = get_version_by_id(project_name, representation["parent"]) - - # get corresponding node - GN = container["node"] - - file = get_representation_path(representation).replace("\\", "/") - name = container['name'] - version_data = version_doc.get("data", {}) - vname = version_doc.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - workfile_first_frame = int(nuke.root()["first_frame"].getValue()) - namespace = container['namespace'] - colorspace = version_data.get("colorspace", None) - object_name = "{}_{}".format(name, namespace) - - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] - - data_imprint = { - "representation": str(representation["_id"]), - "frameStart": first, - "frameEnd": last, - "version": vname, - "colorspaceInput": colorspace - } - - for k in add_keys: - data_imprint.update({k: version_data[k]}) - - # Update the imprinted representation - update_container( - GN, - data_imprint - ) - - # getting data from json file with unicode conversion - with open(file, "r") as f: - json_f = {self.byteify(key): self.byteify(value) - for key, value in json.load(f).items()} - - # get correct order of nodes by positions on track and subtrack - nodes_order = self.reorder_nodes(json_f) - - # adding nodes to node graph - # just in case we are in group lets jump out of it - nuke.endGroup() - - # adding content to the group node - with GN: - # first remove all nodes - [nuke.delete(n) for n in nuke.allNodes()] - - # create input node - pre_node = nuke.createNode("Input") - pre_node["name"].setValue("rgb") - - for _, ef_val in nodes_order.items(): - node = nuke.createNode(ef_val["class"]) - for k, v in ef_val["node"].items(): - if k in self.ignore_attr: - continue - - try: - node[k].value() - except NameError as e: - self.log.warning(e) - continue - - if isinstance(v, list) and len(v) > 4: - node[k].setAnimated() - for i, value in enumerate(v): - if isinstance(value, list): - for ci, cv in enumerate(value): - node[k].setValueAt( - cv, - (workfile_first_frame + i), - ci) - else: - node[k].setValueAt( - value, - (workfile_first_frame + i)) - else: - node[k].setValue(v) - node.setInput(0, pre_node) - pre_node = node - - # create output node - output = nuke.createNode("Output") - output.setInput(0, pre_node) - - # try to find parent read node - self.connect_read_node(GN, namespace, json_f["assignTo"]) - - last_version_doc = get_last_version_by_subset_id( - project_name, version_doc["parent"], fields=["_id"] - ) - - # change color of node - if version_doc["_id"] == last_version_doc["_id"]: - color_value = "0x3469ffff" - else: - color_value = "0xd84f20ff" - - GN["tile_color"].setValue(int(color_value, 16)) - - self.log.info("updated to version: {}".format(version_doc.get("name"))) - - def connect_read_node(self, group_node, asset, subset): - """ - Finds read node and selects it - - Arguments: - asset (str): asset name - - Returns: - nuke node: node is selected - None: if nothing found - """ - search_name = "{0}_{1}".format(asset, subset) - - node = [ - n for n in nuke.allNodes(filter="Read") - if search_name in n["file"].value() - ] - if len(node) > 0: - rn = node[0] - else: - rn = None - - # Parent read node has been found - # solving connections - if rn: - dep_nodes = rn.dependent() - - if len(dep_nodes) > 0: - for dn in dep_nodes: - 
dn.setInput(0, group_node) - - group_node.setInput(0, rn) - group_node.autoplace() - - def reorder_nodes(self, data): - new_order = OrderedDict() - trackNums = [v["trackIndex"] for k, v in data.items() - if isinstance(v, dict)] - subTrackNums = [v["subTrackIndex"] for k, v in data.items() - if isinstance(v, dict)] - - for trackIndex in range( - min(trackNums), max(trackNums) + 1): - for subTrackIndex in range( - min(subTrackNums), max(subTrackNums) + 1): - item = self.get_item(data, trackIndex, subTrackIndex) - if item is not {}: - new_order.update(item) - return new_order - - def get_item(self, data, trackIndex, subTrackIndex): - return {key: val for key, val in data.items() - if isinstance(val, dict) - if subTrackIndex == val["subTrackIndex"] - if trackIndex == val["trackIndex"]} - - def byteify(self, input): - """ - Converts unicode strings to strings - It goes through all dictionary - - Arguments: - input (dict/str): input - - Returns: - dict: with fixed values and keys - - """ - - if isinstance(input, dict): - return {self.byteify(key): self.byteify(value) - for key, value in input.items()} - elif isinstance(input, list): - return [self.byteify(element) for element in input] - elif isinstance(input, six.text_type): - return str(input) - else: - return input - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - node = container["node"] - with viewer_update_and_undo_stop(): - nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py deleted file mode 100644 index 411a61d77b..0000000000 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ /dev/null @@ -1,256 +0,0 @@ -import nuke - -import qargparse - -from openpype.client import ( - get_version_by_id, - get_last_version_by_subset_id, -) -from openpype.pipeline import ( - load, - get_current_project_name, - get_representation_path, -) -from openpype.hosts.nuke.api.lib import ( - get_imageio_input_colorspace -) -from openpype.hosts.nuke.api import ( - containerise, - update_container, - viewer_update_and_undo_stop -) -from openpype.lib.transcoding import ( - IMAGE_EXTENSIONS -) - - -class LoadImage(load.LoaderPlugin): - """Load still image into Nuke""" - - families = [ - "render2d", - "source", - "plate", - "render", - "prerender", - "review", - "image" - ] - representations = ["*"] - extensions = set( - ext.lstrip(".") for ext in IMAGE_EXTENSIONS - ) - - label = "Load Image" - order = -10 - icon = "image" - color = "white" - - # Loaded from settings - _representations = [] - - node_name_template = "{class_name}_{ext}" - - options = [ - qargparse.Integer( - "frame_number", - label="Frame Number", - default=int(nuke.root()["first_frame"].getValue()), - min=1, - max=999999, - help="What frame is reading from?" 
- ) - ] - - @classmethod - def get_representations(cls): - return cls._representations or cls.representations - - def load(self, context, name, namespace, options): - self.log.info("__ options: `{}`".format(options)) - frame_number = options.get( - "frame_number", int(nuke.root()["first_frame"].getValue()) - ) - - version = context['version'] - version_data = version.get("data", {}) - repr_id = context["representation"]["_id"] - - self.log.info("version_data: {}\n".format(version_data)) - self.log.debug( - "Representation id `{}` ".format(repr_id)) - - last = first = int(frame_number) - - # Fallback to asset name when namespace is None - if namespace is None: - namespace = context['asset']['name'] - - file = self.filepath_from_context(context) - - if not file: - repr_id = context["representation"]["_id"] - self.log.warning( - "Representation id `{}` is failing to load".format(repr_id)) - return - - file = file.replace("\\", "/") - - representation = context["representation"] - repr_cont = representation["context"] - frame = repr_cont.get("frame") - if frame: - padding = len(frame) - file = file.replace( - frame, - format(frame_number, "0{}".format(padding))) - - read_name = self._get_node_name(representation) - - # Create the Loader with the filename path set - with viewer_update_and_undo_stop(): - r = nuke.createNode( - "Read", - "name {}".format(read_name), - inpanel=False - ) - - r["file"].setValue(file) - - # Set colorspace defined in version data - colorspace = context["version"]["data"].get("colorspace") - if colorspace: - r["colorspace"].setValue(str(colorspace)) - - preset_clrsp = get_imageio_input_colorspace(file) - - if preset_clrsp is not None: - r["colorspace"].setValue(preset_clrsp) - - r["origfirst"].setValue(first) - r["first"].setValue(first) - r["origlast"].setValue(last) - r["last"].setValue(last) - - # add additional metadata from the version to imprint Avalon knob - add_keys = ["source", "colorspace", "author", "fps", "version"] - - data_imprint = { - "frameStart": first, - "frameEnd": last - } - for k in add_keys: - if k == 'version': - data_imprint.update({k: context["version"]['name']}) - else: - data_imprint.update( - {k: context["version"]['data'].get(k, str(None))}) - - r["tile_color"].setValue(int("0x4ecd25ff", 16)) - - return containerise(r, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__, - data=data_imprint) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """Update the Loader's path - - Nuke automatically tries to reset some variables when changing - the loader's path to a new file. 
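A standalone pure-Python sketch of the padded-frame substitution used by `load()` above; paths and values are hypothetical:

```python
def path_for_frame(path, frame_token, frame_number):
    """Swap the representation's frame token for a padded frame.

    Padding width is inferred from the token, as in the loader above.
    """
    padding = len(frame_token)
    return path.replace(
        frame_token, format(frame_number, "0{}".format(padding)))


# usage sketch with hypothetical values
assert path_for_frame("img.1001.exr", "1001", 7) == "img.0007.exr"
```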
These automatic changes are to its - inputs: - - """ - node = container["node"] - frame_number = node["first"].value() - - assert node.Class() == "Read", "Must be Read" - - repr_cont = representation["context"] - - file = get_representation_path(representation) - - if not file: - repr_id = representation["_id"] - self.log.warning( - "Representation id `{}` is failing to load".format(repr_id)) - return - - file = file.replace("\\", "/") - - frame = repr_cont.get("frame") - if frame: - padding = len(frame) - file = file.replace( - frame, - format(frame_number, "0{}".format(padding))) - - # Get start frame from version data - project_name = get_current_project_name() - version_doc = get_version_by_id(project_name, representation["parent"]) - last_version_doc = get_last_version_by_subset_id( - project_name, version_doc["parent"], fields=["_id"] - ) - - version_data = version_doc.get("data", {}) - - last = first = int(frame_number) - - # Set the global in to the start frame of the sequence - node["file"].setValue(file) - node["origfirst"].setValue(first) - node["first"].setValue(first) - node["origlast"].setValue(last) - node["last"].setValue(last) - - updated_dict = {} - updated_dict.update({ - "representation": str(representation["_id"]), - "frameStart": str(first), - "frameEnd": str(last), - "version": str(version_doc.get("name")), - "colorspace": version_data.get("colorspace"), - "source": version_data.get("source"), - "fps": str(version_data.get("fps")), - "author": version_data.get("author") - }) - - # change color of node - if version_doc["_id"] == last_version_doc["_id"]: - color_value = "0x4ecd25ff" - else: - color_value = "0xd84f20ff" - node["tile_color"].setValue(int(color_value, 16)) - - # Update the imprinted representation - update_container( - node, - updated_dict - ) - self.log.info("updated to version: {}".format(version_doc.get("name"))) - - def remove(self, container): - node = container["node"] - assert node.Class() == "Read", "Must be Read" - - with viewer_update_and_undo_stop(): - nuke.delete(node) - - def _get_node_name(self, representation): - - repre_cont = representation["context"] - name_data = { - "asset": repre_cont["asset"], - "subset": repre_cont["subset"], - "representation": representation["name"], - "ext": repre_cont["representation"], - "id": representation["_id"], - "class_name": self.__class__.__name__ - } - - return self.node_name_template.format(**name_data) diff --git a/openpype/hosts/nuke/plugins/load/load_matchmove.py b/openpype/hosts/nuke/plugins/load/load_matchmove.py deleted file mode 100644 index 14ddf20dc3..0000000000 --- a/openpype/hosts/nuke/plugins/load/load_matchmove.py +++ /dev/null @@ -1,30 +0,0 @@ -import nuke -from openpype.pipeline import load - - -class MatchmoveLoader(load.LoaderPlugin): - """ - This will run matchmove script to create track in script. 
- """ - - families = ["matchmove"] - representations = ["*"] - extensions = {"py"} - - defaults = ["Camera", "Object"] - - label = "Run matchmove script" - icon = "empire" - color = "orange" - - def load(self, context, name, namespace, data): - path = self.filepath_from_context(context) - if path.lower().endswith(".py"): - exec(open(path).read()) - - else: - msg = "Unsupported script type" - self.log.error(msg) - nuke.message(msg) - - return True diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py deleted file mode 100644 index 3fe92b74d0..0000000000 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ /dev/null @@ -1,211 +0,0 @@ -import nuke - -from openpype.client import ( - get_version_by_id, - get_last_version_by_subset_id, -) -from openpype.pipeline import ( - load, - get_current_project_name, - get_representation_path, -) -from openpype.hosts.nuke.api.lib import maintained_selection -from openpype.hosts.nuke.api import ( - containerise, - update_container, - viewer_update_and_undo_stop -) - - -class AlembicModelLoader(load.LoaderPlugin): - """ - This will load alembic model or anim into script. - """ - - families = ["model", "pointcache", "animation"] - representations = ["*"] - extensions = {"abc"} - - label = "Load Alembic" - icon = "cube" - color = "orange" - node_color = "0x4ecd91ff" - - def load(self, context, name, namespace, data): - # get main variables - version = context['version'] - version_data = version.get("data", {}) - vname = version.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - fps = version_data.get("fps") or nuke.root()["fps"].getValue() - namespace = namespace or context['asset']['name'] - object_name = "{}_{}".format(name, namespace) - - # prepare data for imprinting - # add additional metadata from the version to imprint to Avalon knob - add_keys = ["source", "author", "fps"] - - data_imprint = { - "frameStart": first, - "frameEnd": last, - "version": vname - } - - for k in add_keys: - data_imprint.update({k: version_data[k]}) - - # getting file path - file = self.filepath_from_context(context).replace("\\", "/") - - with maintained_selection(): - model_node = nuke.createNode( - "ReadGeo2", - "name {} file {} ".format( - object_name, file), - inpanel=False - ) - - model_node.forceValidate() - - # Ensure all items are imported and selected. - scene_view = model_node.knob('scene_view') - scene_view.setImportedItems(scene_view.getAllItems()) - scene_view.setSelectedItems(scene_view.getAllItems()) - - model_node["frame_rate"].setValue(float(fps)) - - # workaround because nuke's bug is not adding - # animation keys properly - xpos = model_node.xpos() - ypos = model_node.ypos() - nuke.nodeCopy("%clipboard%") - nuke.delete(model_node) - nuke.nodePaste("%clipboard%") - model_node = nuke.toNode(object_name) - model_node.setXYpos(xpos, ypos) - - # color node by correct color by actual version - self.node_version_color(version, model_node) - - return containerise( - node=model_node, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__, - data=data_imprint) - - def update(self, container, representation): - """ - Called by Scene Inventory when look should be updated to current - version. - If any reference edits cannot be applied, eg. shader renamed and - material not present, reference is unloaded and cleaned. - All failed edits are highlighted to the user via message box. 
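A minimal sketch of the `scene_view` handling the model loader applies to its 'ReadGeo2' node above, assuming it runs inside Nuke (the helper name is illustrative):

```python
import nuke


def import_all_scene_items(read_geo):
    """Import and enable every item in a ReadGeo2 scene view."""
    scene_view = read_geo.knob("scene_view")
    all_items = scene_view.getAllItems()    # every path in the .abc
    scene_view.setImportedItems(all_items)  # mark all for import
    scene_view.setSelectedItems(all_items)  # and make them active
```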
- - Args: - container: object that has look to be updated - representation: (dict): relationship data to get proper - representation from DB and persisted - data in .json - Returns: - None - """ - # Get version from io - project_name = get_current_project_name() - version_doc = get_version_by_id(project_name, representation["parent"]) - - # get corresponding node - model_node = container["node"] - - # get main variables - version_data = version_doc.get("data", {}) - vname = version_doc.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - fps = version_data.get("fps") or nuke.root()["fps"].getValue() - - # prepare data for imprinting - # add additional metadata from the version to imprint to Avalon knob - add_keys = ["source", "author", "fps"] - - data_imprint = { - "representation": str(representation["_id"]), - "frameStart": first, - "frameEnd": last, - "version": vname - } - - for k in add_keys: - data_imprint.update({k: version_data[k]}) - - # getting file path - file = get_representation_path(representation).replace("\\", "/") - - with maintained_selection(): - model_node['selected'].setValue(True) - - # collect input output dependencies - dependencies = model_node.dependencies() - dependent = model_node.dependent() - - model_node["frame_rate"].setValue(float(fps)) - model_node["file"].setValue(file) - - # Ensure all items are imported and selected. - scene_view = model_node.knob('scene_view') - scene_view.setImportedItems(scene_view.getAllItems()) - scene_view.setSelectedItems(scene_view.getAllItems()) - - # workaround because nuke's bug is - # not adding animation keys properly - xpos = model_node.xpos() - ypos = model_node.ypos() - nuke.nodeCopy("%clipboard%") - nuke.delete(model_node) - - # paste the node back and set the position - nuke.nodePaste("%clipboard%") - model_node = nuke.selectedNode() - model_node.setXYpos(xpos, ypos) - - # link to original input nodes - for i, input in enumerate(dependencies): - model_node.setInput(i, input) - # link to original output nodes - for d in dependent: - index = next((i for i, dpcy in enumerate( - d.dependencies()) - if model_node is dpcy), 0) - d.setInput(index, model_node) - - # color node by correct color by actual version - self.node_version_color(version_doc, model_node) - - self.log.info("updated to version: {}".format(version_doc.get("name"))) - - return update_container(model_node, data_imprint) - - def node_version_color(self, version, node): - """ Coloring a node by correct color by actual version""" - - project_name = get_current_project_name() - last_version_doc = get_last_version_by_subset_id( - project_name, version["parent"], fields=["_id"] - ) - - # change color of node - if version["_id"] == last_version_doc["_id"]: - color_value = self.node_color - else: - color_value = "0xd88467ff" - node["tile_color"].setValue(int(color_value, 16)) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - node = nuke.toNode(container['objectName']) - with viewer_update_and_undo_stop(): - nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/publish/extract_camera.py b/openpype/hosts/nuke/plugins/publish/extract_camera.py deleted file mode 100644 index 3ec85c1f11..0000000000 --- a/openpype/hosts/nuke/plugins/publish/extract_camera.py +++ /dev/null @@ -1,184 +0,0 @@ -import os -import math -from pprint import pformat - -import nuke - -import pyblish.api - -from openpype.pipeline import publish -from 
openpype.hosts.nuke.api.lib import maintained_selection
-
-
-class ExtractCamera(publish.Extractor):
-    """ 3D camera extractor
-    """
-    label = 'Extract Camera'
-    order = pyblish.api.ExtractorOrder
-    families = ["camera"]
-    hosts = ["nuke"]
-
-    # presets
-    write_geo_knobs = [
-        ("file_type", "abc"),
-        ("storageFormat", "Ogawa"),
-        ("writeGeometries", False),
-        ("writePointClouds", False),
-        ("writeAxes", False)
-    ]
-
-    def process(self, instance):
-        camera_node = instance.data["transientData"]["node"]
-        handle_start = instance.context.data["handleStart"]
-        handle_end = instance.context.data["handleEnd"]
-        first_frame = int(nuke.root()["first_frame"].getValue())
-        last_frame = int(nuke.root()["last_frame"].getValue())
-        step = 1
-        output_range = str(nuke.FrameRange(first_frame, last_frame, step))
-
-        rm_nodes = []
-        self.log.debug("Creating additional nodes for 3D Camera Extractor")
-        subset = instance.data["subset"]
-        staging_dir = self.staging_dir(instance)
-
-        # get extension from preset
-        extension = next((k[1] for k in self.write_geo_knobs
-                          if k[0] == "file_type"), None)
-        if not extension:
-            raise RuntimeError(
-                "Bad config for extension in presets. "
-                "Talk to your supervisor or pipeline admin")
-
-        # create file name and path
-        filename = subset + ".{}".format(extension)
-        file_path = os.path.join(staging_dir, filename).replace("\\", "/")
-
-        with maintained_selection():
-            # bake camera and its axis parents onto world XYZ coordinates
-            rm_n = bakeCameraWithAxeses(
-                camera_node, output_range)
-            rm_nodes.append(rm_n)
-
-            # create scene node
-            rm_n = nuke.createNode("Scene")
-            rm_nodes.append(rm_n)
-
-            # create write geo node
-            wg_n = nuke.createNode("WriteGeo")
-            wg_n["file"].setValue(file_path)
-            # add path to write to
-            for k, v in self.write_geo_knobs:
-                wg_n[k].setValue(v)
-            rm_nodes.append(wg_n)
-
-            # write out camera
-            nuke.execute(
-                wg_n,
-                int(first_frame),
-                int(last_frame)
-            )
-            # erase additional nodes
-            for n in rm_nodes:
-                nuke.delete(n)
-
-        # create representation data
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            'name': extension,
-            'ext': extension,
-            'files': filename,
-            "stagingDir": staging_dir,
-            "frameStart": first_frame,
-            "frameEnd": last_frame
-        }
-        instance.data["representations"].append(representation)
-
-        instance.data.update({
-            "path": file_path,
-            "outputDir": staging_dir,
-            "ext": extension,
-            "handleStart": handle_start,
-            "handleEnd": handle_end,
-            "frameStart": first_frame + handle_start,
-            "frameEnd": last_frame - handle_end,
-            "frameStartHandle": first_frame,
-            "frameEndHandle": last_frame,
-        })
-
-        self.log.debug("Extracted instance '{0}' to: {1}".format(
-            instance.name, file_path))
-
-
-def bakeCameraWithAxeses(camera_node, output_range):
-    """ Bake the camera's whole parent hierarchy of Axis nodes
-    into the camera, transposed onto world XYZ coordinates
-    """
-    bakeFocal = False
-    bakeHaperture = False
-    bakeVaperture = False
-
-    camera_matrix = camera_node['world_matrix']
-
-    new_cam_n = nuke.createNode("Camera2")
-    new_cam_n.setInput(0, None)
-    new_cam_n['rotate'].setAnimated()
-    new_cam_n['translate'].setAnimated()
-
-    old_focal = camera_node['focal']
-    if old_focal.isAnimated() and not (old_focal.animation(0).constant()):
-        new_cam_n['focal'].setAnimated()
-        bakeFocal = True
-    else:
-        new_cam_n['focal'].setValue(old_focal.value())
-
-    old_haperture = camera_node['haperture']
-    if old_haperture.isAnimated() and not (
-            old_haperture.animation(0).constant()):
-        new_cam_n['haperture'].setAnimated()
-
bakeHaperture = True - else: - new_cam_n['haperture'].setValue(old_haperture.value()) - - old_vaperture = camera_node['vaperture'] - if old_vaperture.isAnimated() and not ( - old_vaperture.animation(0).constant()): - new_cam_n['vaperture'].setAnimated() - bakeVaperture = True - else: - new_cam_n['vaperture'].setValue(old_vaperture.value()) - - new_cam_n['win_translate'].setValue(camera_node['win_translate'].value()) - new_cam_n['win_scale'].setValue(camera_node['win_scale'].value()) - - for x in nuke.FrameRange(output_range): - math_matrix = nuke.math.Matrix4() - for y in range(camera_matrix.height()): - for z in range(camera_matrix.width()): - matrix_pointer = z + (y * camera_matrix.width()) - math_matrix[matrix_pointer] = camera_matrix.getValueAt( - x, (y + (z * camera_matrix.width()))) - - rot_matrix = nuke.math.Matrix4(math_matrix) - rot_matrix.rotationOnly() - rot = rot_matrix.rotationsZXY() - - new_cam_n['rotate'].setValueAt(math.degrees(rot[0]), x, 0) - new_cam_n['rotate'].setValueAt(math.degrees(rot[1]), x, 1) - new_cam_n['rotate'].setValueAt(math.degrees(rot[2]), x, 2) - new_cam_n['translate'].setValueAt( - camera_matrix.getValueAt(x, 3), x, 0) - new_cam_n['translate'].setValueAt( - camera_matrix.getValueAt(x, 7), x, 1) - new_cam_n['translate'].setValueAt( - camera_matrix.getValueAt(x, 11), x, 2) - - if bakeFocal: - new_cam_n['focal'].setValueAt(old_focal.getValueAt(x), x) - if bakeHaperture: - new_cam_n['haperture'].setValueAt(old_haperture.getValueAt(x), x) - if bakeVaperture: - new_cam_n['vaperture'].setValueAt(old_vaperture.getValueAt(x), x) - - return new_cam_n diff --git a/openpype/hosts/nuke/plugins/publish/extract_model.py b/openpype/hosts/nuke/plugins/publish/extract_model.py deleted file mode 100644 index a8b37fb173..0000000000 --- a/openpype/hosts/nuke/plugins/publish/extract_model.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -from pprint import pformat -import nuke -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.nuke.api.lib import ( - maintained_selection, - select_nodes -) - - -class ExtractModel(publish.Extractor): - """ 3D model extractor - """ - label = 'Extract Model' - order = pyblish.api.ExtractorOrder - families = ["model"] - hosts = ["nuke"] - - # presets - write_geo_knobs = [ - ("file_type", "abc"), - ("storageFormat", "Ogawa"), - ("writeGeometries", True), - ("writePointClouds", False), - ("writeAxes", False) - ] - - def process(self, instance): - handle_start = instance.context.data["handleStart"] - handle_end = instance.context.data["handleEnd"] - first_frame = int(nuke.root()["first_frame"].getValue()) - last_frame = int(nuke.root()["last_frame"].getValue()) - - self.log.debug("instance.data: `{}`".format( - pformat(instance.data))) - - rm_nodes = [] - model_node = instance.data["transientData"]["node"] - - self.log.debug("Creating additional nodes for Extract Model") - subset = instance.data["subset"] - staging_dir = self.staging_dir(instance) - - extension = next((k[1] for k in self.write_geo_knobs - if k[0] == "file_type"), None) - if not extension: - raise RuntimeError( - "Bad config for extension in presets. 
" - "Talk to your supervisor or pipeline admin") - - # create file name and path - filename = subset + ".{}".format(extension) - file_path = os.path.join(staging_dir, filename).replace("\\", "/") - - with maintained_selection(): - # select model node - select_nodes([model_node]) - - # create write geo node - wg_n = nuke.createNode("WriteGeo") - wg_n["file"].setValue(file_path) - # add path to write to - for k, v in self.write_geo_knobs: - wg_n[k].setValue(v) - rm_nodes.append(wg_n) - - # write out model - nuke.execute( - wg_n, - int(first_frame), - int(last_frame) - ) - # erase additional nodes - for n in rm_nodes: - nuke.delete(n) - - self.log.debug("Filepath: {}".format(file_path)) - - # create representation data - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': extension, - 'ext': extension, - 'files': filename, - "stagingDir": staging_dir, - "frameStart": first_frame, - "frameEnd": last_frame - } - instance.data["representations"].append(representation) - - instance.data.update({ - "path": file_path, - "outputDir": staging_dir, - "ext": extension, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": first_frame + handle_start, - "frameEnd": last_frame - handle_end, - "frameStartHandle": first_frame, - "frameEndHandle": last_frame, - }) - - self.log.debug("Extracted instance '{0}' to: {1}".format( - instance.name, file_path)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_render_local.py b/openpype/hosts/nuke/plugins/publish/extract_render_local.py deleted file mode 100644 index ff04367e20..0000000000 --- a/openpype/hosts/nuke/plugins/publish/extract_render_local.py +++ /dev/null @@ -1,209 +0,0 @@ -import os -import shutil - -import pyblish.api -import clique -import nuke -from openpype.hosts.nuke import api as napi -from openpype.pipeline import publish -from openpype.lib import collect_frames - - -class NukeRenderLocal(publish.Extractor, - publish.ColormanagedPyblishPluginMixin): - """Render the current Nuke composition locally. - - Extract the result of savers by starting a comp render - This will run the local render of Fusion. - - Allows to use last published frames and overwrite only specific ones - (set in instance.data.get("frames_to_fix")) - """ - - order = pyblish.api.ExtractorOrder - label = "Render Local" - hosts = ["nuke"] - families = ["render.local", "prerender.local", "image.local"] - - def process(self, instance): - child_nodes = ( - instance.data.get("transientData", {}).get("childNodes") - or instance - ) - - node = None - for x in child_nodes: - if x.Class() == "Write": - node = x - - self.log.debug("instance collected: {}".format(instance.data)) - - node_subset_name = instance.data.get("name", None) - - first_frame = instance.data.get("frameStartHandle", None) - last_frame = instance.data.get("frameEndHandle", None) - - filenames = [] - node_file = node["file"] - # Collect expected filepaths for each frame - # - for cases that output is still image is first created set of - # paths which is then sorted and converted to list - expected_paths = list(sorted({ - node_file.evaluate(frame) - for frame in range(first_frame, last_frame + 1) - })) - # Extract only filenames for representation - filenames.extend([ - os.path.basename(filepath) - for filepath in expected_paths - ]) - - # Ensure output directory exists. 
- out_dir = os.path.dirname(expected_paths[0]) - if not os.path.exists(out_dir): - os.makedirs(out_dir) - - frames_to_render = [(first_frame, last_frame)] - - frames_to_fix = instance.data.get("frames_to_fix") - if instance.data.get("last_version_published_files") and frames_to_fix: - frames_to_render = self._get_frames_to_render(frames_to_fix) - anatomy = instance.context.data["anatomy"] - self._copy_last_published(anatomy, instance, out_dir, - filenames) - - for render_first_frame, render_last_frame in frames_to_render: - - self.log.info("Starting render") - self.log.info("Start frame: {}".format(render_first_frame)) - self.log.info("End frame: {}".format(render_last_frame)) - - # Render frames - nuke.execute( - str(node_subset_name), - int(render_first_frame), - int(render_last_frame) - ) - - ext = node["file_type"].value() - colorspace = napi.get_colorspace_from_node(node) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - if len(filenames) == 1: - repre = { - 'name': ext, - 'ext': ext, - 'files': filenames[0], - "stagingDir": out_dir - } - else: - repre = { - 'name': ext, - 'ext': ext, - 'frameStart': ( - "{{:0>{}}}" - .format(len(str(last_frame))) - .format(first_frame) - ), - 'files': filenames, - "stagingDir": out_dir - } - - # inject colorspace data - self.set_representation_colorspace( - repre, instance.context, - colorspace=colorspace - ) - - instance.data["representations"].append(repre) - - self.log.debug("Extracted instance '{0}' to: {1}".format( - instance.name, - out_dir - )) - - families = instance.data["families"] - # redefinition of families - if "render.local" in families: - instance.data['family'] = 'render' - families.remove('render.local') - families.insert(0, "render2d") - instance.data["anatomyData"]["family"] = "render" - elif "prerender.local" in families: - instance.data['family'] = 'prerender' - families.remove('prerender.local') - families.insert(0, "prerender") - instance.data["anatomyData"]["family"] = "prerender" - elif "image.local" in families: - instance.data['family'] = 'image' - families.remove('image.local') - instance.data["anatomyData"]["family"] = "image" - instance.data["families"] = families - - collections, remainder = clique.assemble(filenames) - self.log.debug('collections: {}'.format(str(collections))) - - if collections: - collection = collections[0] - instance.data['collection'] = collection - - self.log.info('Finished render') - - self.log.debug("_ instance.data: {}".format(instance.data)) - - def _copy_last_published(self, anatomy, instance, out_dir, - expected_filenames): - """Copies last published files to temporary out_dir. - - These are base of files which will be extended/fixed for specific - frames. - Renames published file to expected file name based on frame, eg. 
- test_project_test_asset_subset_v005.1001.exr > new_render.1001.exr - """ - last_published = instance.data["last_version_published_files"] - last_published_and_frames = collect_frames(last_published) - - expected_and_frames = collect_frames(expected_filenames) - frames_and_expected = {v: k for k, v in expected_and_frames.items()} - for file_path, frame in last_published_and_frames.items(): - file_path = anatomy.fill_root(file_path) - if not os.path.exists(file_path): - continue - target_file_name = frames_and_expected.get(frame) - if not target_file_name: - continue - - out_path = os.path.join(out_dir, target_file_name) - self.log.debug("Copying '{}' -> '{}'".format(file_path, out_path)) - shutil.copy(file_path, out_path) - - # TODO shouldn't this be uncommented - # instance.context.data["cleanupFullPaths"].append(out_path) - - def _get_frames_to_render(self, frames_to_fix): - """Return list of frame range tuples to render - - Args: - frames_to_fix (str): specific or range of frames to be rerendered - (1005, 1009-1010) - Returns: - (list): [(1005, 1005), (1009-1010)] - """ - frames_to_render = [] - - for frame_range in frames_to_fix.split(","): - if frame_range.isdigit(): - render_first_frame = frame_range - render_last_frame = frame_range - elif '-' in frame_range: - frames = frame_range.split('-') - render_first_frame = int(frames[0]) - render_last_frame = int(frames[1]) - else: - raise ValueError("Wrong format of frames to fix {}" - .format(frames_to_fix)) - frames_to_render.append((render_first_frame, - render_last_frame)) - return frames_to_render diff --git a/openpype/hosts/nuke/startup/menu.py b/openpype/hosts/nuke/startup/menu.py deleted file mode 100644 index 613d508387..0000000000 --- a/openpype/hosts/nuke/startup/menu.py +++ /dev/null @@ -1,5 +0,0 @@ -from openpype.pipeline import install_host -from openpype.hosts.nuke.api import NukeHost - -host = NukeHost() -install_host(host) diff --git a/openpype/hosts/photoshop/addon.py b/openpype/hosts/photoshop/addon.py deleted file mode 100644 index 965a545ac5..0000000000 --- a/openpype/hosts/photoshop/addon.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -PHOTOSHOP_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class PhotoshopAddon(OpenPypeModule, IHostAddon): - name = "photoshop" - host_name = "photoshop" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - """Modify environments to contain all required for implementation.""" - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True", - "WEBSOCKET_URL": "ws://localhost:8099/ws/" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_workfile_extensions(self): - return [".psd", ".psb"] diff --git a/openpype/hosts/photoshop/api/README.md b/openpype/hosts/photoshop/api/README.md deleted file mode 100644 index a207d21f2f..0000000000 --- a/openpype/hosts/photoshop/api/README.md +++ /dev/null @@ -1,257 +0,0 @@ -# Photoshop Integration - -## Setup - -The Photoshop integration requires two components to work; `extension` and `server`. - -### Extension - -To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd). 
- -``` -ExManCmd /install {path to addon}/api/extension.zxp -``` - -### Server - -The easiest way to get the server and Photoshop launch is with: - -``` -python -c ^"import openpype.hosts.photoshop;openpype.hosts.photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^" -``` - -`avalon.photoshop.launch` launches the application and server, and also closes the server when Photoshop exists. - -## Usage - -The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched you should be presented with a panel like this: - -![Ayon Panel](panel.png "AYON Panel") - - -## Developing - -### Extension -When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions). - -When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide). - -``` -ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12 -ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon -``` - -### Plugin Examples - -These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py). - -#### Creator Plugin -```python -from avalon import photoshop - - -class CreateImage(photoshop.Creator): - """Image folder for publish.""" - - name = "imageDefault" - label = "Image" - family = "image" - - def __init__(self, *args, **kwargs): - super(CreateImage, self).__init__(*args, **kwargs) -``` - -#### Collector Plugin -```python -import pythoncom - -import pyblish.api - - -class CollectInstances(pyblish.api.ContextPlugin): - """Gather instances by LayerSet and file metadata - - This collector takes into account assets that are associated with - an LayerSet and marked with a unique identifier; - - Identifier: - id (str): "pyblish.avalon.instance" - """ - - label = "Instances" - order = pyblish.api.CollectorOrder - hosts = ["photoshop"] - families_mapping = { - "image": [] - } - - def process(self, context): - # Necessary call when running in a different thread which pyblish-qml - # can be. - pythoncom.CoInitialize() - - photoshop_client = PhotoshopClientStub() - layers = photoshop_client.get_layers() - layers_meta = photoshop_client.get_layers_metadata() - for layer in layers: - layer_data = photoshop_client.read(layer, layers_meta) - - # Skip layers without metadata. - if layer_data is None: - continue - - # Skip containers. - if "container" in layer_data["id"]: - continue - - # child_layers = [*layer.Layers] - # self.log.debug("child_layers {}".format(child_layers)) - # if not child_layers: - # self.log.info("%s skipped, it was empty." % layer.Name) - # continue - - instance = context.create_instance(layer.name) - instance.append(layer) - instance.data.update(layer_data) - instance.data["families"] = self.families_mapping[ - layer_data["family"] - ] - instance.data["publish"] = layer.visible - - # Produce diagnostic message for any graphical - # user interface interested in visualising it. 
- self.log.info("Found: \"%s\" " % instance.data["name"]) -``` - -#### Extractor Plugin -```python -import os - -from openpype.pipeline import publish -from openpype.hosts.photoshop import api as photoshop - - -class ExtractImage(publish.Extractor): - """Produce a flattened image file from instance - - This plug-in takes into account only the layers in the group. - """ - - label = "Extract Image" - hosts = ["photoshop"] - families = ["image"] - formats = ["png", "jpg"] - - def process(self, instance): - - staging_dir = self.staging_dir(instance) - self.log.info("Outputting image to {}".format(staging_dir)) - - # Perform extraction - stub = photoshop.stub() - files = {} - with photoshop.maintained_selection(): - self.log.info("Extracting %s" % str(list(instance))) - with photoshop.maintained_visibility(): - # Hide all other layers. - extract_ids = set([ll.id for ll in stub. - get_layers_in_layers([instance[0]])]) - - for layer in stub.get_layers(): - # limit unnecessary calls to client - if layer.visible and layer.id not in extract_ids: - stub.set_visible(layer.id, False) - - save_options = [] - if "png" in self.formats: - save_options.append('png') - if "jpg" in self.formats: - save_options.append('jpg') - - file_basename = os.path.splitext( - stub.get_active_document_name() - )[0] - for extension in save_options: - _filename = "{}.{}".format(file_basename, extension) - files[extension] = _filename - - full_filename = os.path.join(staging_dir, _filename) - stub.saveAs(full_filename, extension, True) - - representations = [] - for extension, filename in files.items(): - representations.append({ - "name": extension, - "ext": extension, - "files": filename, - "stagingDir": staging_dir - }) - instance.data["representations"] = representations - instance.data["stagingDir"] = staging_dir - - self.log.info(f"Extracted {instance} to {staging_dir}") -``` - -#### Loader Plugin -```python -from avalon import api, photoshop -from openpype.pipeline import load, get_representation_path - -stub = photoshop.stub() - - -class ImageLoader(load.LoaderPlugin): - """Load images - - Stores the imported asset in a container named after the asset. 
- """ - - families = ["image"] - representations = ["*"] - - def load(self, context, name=None, namespace=None, data=None): - path = self.filepath_from_context(context) - with photoshop.maintained_selection(): - layer = stub.import_smart_object(path) - - self[:] = [layer] - - return photoshop.containerise( - name, - namespace, - layer, - context, - self.__class__.__name__ - ) - - def update(self, container, representation): - layer = container.pop("layer") - - with photoshop.maintained_selection(): - stub.replace_smart_object( - layer, get_representation_path(representation) - ) - - stub.imprint( - layer, {"representation": str(representation["_id"])} - ) - - def remove(self, container): - container["layer"].Delete() - - def switch(self, container, representation): - self.update(container, representation) -``` -For easier debugging of Javascript: -https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1 -Add --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome -then localhost:8078 (port set in `photoshop\extension\.debug`) - -Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 - -Or install CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x -## Resources - - https://github.com/lohriialo/photoshop-scripting-python - - https://www.adobe.com/devnet/photoshop/scripting.html - - https://github.com/Adobe-CEP/Getting-Started-guides - - https://github.com/Adobe-CEP/CEP-Resources diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py deleted file mode 100644 index 25732446b5..0000000000 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ /dev/null @@ -1,405 +0,0 @@ -import os -import subprocess -import collections -import asyncio - -from wsrpc_aiohttp import ( - WebSocketRoute, - WebSocketAsync -) - -from qtpy import QtCore - -from openpype.lib import Logger, StringTemplate -from openpype.pipeline import ( - registered_host, - Anatomy, -) -from openpype.pipeline.workfile import ( - get_workfile_template_key_from_context, - get_last_workfile, -) -from openpype.pipeline.template_data import get_template_data_with_names -from openpype.tools.utils import host_tools -from openpype.tools.adobe_webserver.app import WebServerTool -from openpype.pipeline.context_tools import change_current_context -from openpype.client import get_asset_by_name - -from .ws_stub import PhotoshopServerStub - -log = Logger.get_logger(__name__) - - -class ConnectionNotEstablishedYet(Exception): - pass - - -class MainThreadItem: - """Structure to store information about callback in main thread. - - Item should be used to execute callback in main thread which may be needed - for execution of Qt objects. - - Item store callback (callable variable), arguments and keyword arguments - for the callback. Item hold information about it's process. - """ - not_set = object() - - def __init__(self, callback, *args, **kwargs): - self._done = False - self._exception = self.not_set - self._result = self.not_set - self._callback = callback - self._args = args - self._kwargs = kwargs - - @property - def done(self): - return self._done - - @property - def exception(self): - return self._exception - - @property - def result(self): - return self._result - - def execute(self): - """Execute callback and store its result. - - Method must be called from main thread. Item is marked as `done` - when callback execution finished. 
Store output of callback of exception - information when callback raises one. - """ - log.debug("Executing process in main thread") - if self.done: - log.warning("- item is already processed") - return - - log.info("Running callback: {}".format(str(self._callback))) - try: - result = self._callback(*self._args, **self._kwargs) - self._result = result - - except Exception as exc: - self._exception = exc - - finally: - self._done = True - - -def stub(): - """ - Convenience function to get server RPC stub to call methods directed - for host (Photoshop). - It expects already created connection, started from client. - Currently created when panel is opened (PS: Window>Extensions>Avalon) - :return: where functions could be called from - """ - ps_stub = PhotoshopServerStub() - if not ps_stub.client: - raise ConnectionNotEstablishedYet("Connection is not created yet") - - return ps_stub - - -def show_tool_by_name(tool_name): - kwargs = {} - if tool_name == "loader": - kwargs["use_context"] = True - - host_tools.show_tool_by_name(tool_name, **kwargs) - - -class ProcessLauncher(QtCore.QObject): - route_name = "Photoshop" - _main_thread_callbacks = collections.deque() - - def __init__(self, subprocess_args): - self._subprocess_args = subprocess_args - self._log = None - - super(ProcessLauncher, self).__init__() - - # Keep track if launcher was already started - self._started = False - - self._process = None - self._websocket_server = None - - start_process_timer = QtCore.QTimer() - start_process_timer.setInterval(100) - - loop_timer = QtCore.QTimer() - loop_timer.setInterval(200) - - start_process_timer.timeout.connect(self._on_start_process_timer) - loop_timer.timeout.connect(self._on_loop_timer) - - self._start_process_timer = start_process_timer - self._loop_timer = loop_timer - - @property - def log(self): - if self._log is None: - self._log = Logger.get_logger( - "{}-launcher".format(self.route_name) - ) - return self._log - - @property - def websocket_server_is_running(self): - if self._websocket_server is not None: - return self._websocket_server.is_running - return False - - @property - def is_process_running(self): - if self._process is not None: - return self._process.poll() is None - return False - - @property - def is_host_connected(self): - """Returns True if connected, False if app is not running at all.""" - if not self.is_process_running: - return False - - try: - _stub = stub() - if _stub: - return True - except Exception: - pass - - return None - - @classmethod - def execute_in_main_thread(cls, callback, *args, **kwargs): - item = MainThreadItem(callback, *args, **kwargs) - cls._main_thread_callbacks.append(item) - return item - - def start(self): - if self._started: - return - self.log.info("Started launch logic of Photoshop") - self._started = True - self._start_process_timer.start() - - def exit(self): - """ Exit whole application. 
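-
-        Stops the Qt timers, the websocket server and the host process
-        before quitting the Qt application.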
""" - if self._start_process_timer.isActive(): - self._start_process_timer.stop() - if self._loop_timer.isActive(): - self._loop_timer.stop() - - if self._websocket_server is not None: - self._websocket_server.stop() - - if self._process: - self._process.kill() - self._process.wait() - - QtCore.QCoreApplication.exit() - - def _on_loop_timer(self): - # TODO find better way and catch errors - # Run only callbacks that are in queue at the moment - cls = self.__class__ - for _ in range(len(cls._main_thread_callbacks)): - if cls._main_thread_callbacks: - item = cls._main_thread_callbacks.popleft() - item.execute() - - if not self.is_process_running: - self.log.info("Host process is not running. Closing") - self.exit() - - elif not self.websocket_server_is_running: - self.log.info("Websocket server is not running. Closing") - self.exit() - - def _on_start_process_timer(self): - # TODO add try except validations for each part in this method - # Start server as first thing - if self._websocket_server is None: - self._init_server() - return - - # TODO add waiting time - # Wait for webserver - if not self.websocket_server_is_running: - return - - # Start application process - if self._process is None: - self._start_process() - self.log.info("Waiting for host to connect") - return - - # TODO add waiting time - # Wait until host is connected - if self.is_host_connected: - self._start_process_timer.stop() - self._loop_timer.start() - elif ( - not self.is_process_running - or not self.websocket_server_is_running - ): - self.exit() - - def _init_server(self): - if self._websocket_server is not None: - return - - self.log.debug( - "Initialization of websocket server for host communication" - ) - - self._websocket_server = websocket_server = WebServerTool() - if websocket_server.port_occupied( - websocket_server.host_name, - websocket_server.port - ): - self.log.info( - "Server already running, sending actual context and exit." - ) - asyncio.run(websocket_server.send_context_change(self.route_name)) - self.exit() - return - - # Add Websocket route - websocket_server.add_route("*", "/ws/", WebSocketAsync) - # Add after effects route to websocket handler - - print("Adding {} route".format(self.route_name)) - WebSocketAsync.add_route( - self.route_name, PhotoshopRoute - ) - self.log.info("Starting websocket server for host communication") - websocket_server.start_server() - - def _start_process(self): - if self._process is not None: - return - self.log.info("Starting host process") - try: - self._process = subprocess.Popen( - self._subprocess_args, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL - ) - except Exception: - self.log.info("exce", exc_info=True) - self.exit() - - -class PhotoshopRoute(WebSocketRoute): - """ - One route, mimicking external application (like Harmony, etc). - All functions could be called from client. - 'do_notify' function calls function on the client - mimicking - notification after long running job on the server or similar - """ - instance = None - - def init(self, **kwargs): - # Python __init__ must be return "self". - # This method might return anything. - log.debug("someone called Photoshop route") - self.instance = self - return kwargs - - # server functions - async def ping(self): - log.debug("someone called Photoshop route ping") - - # This method calls function on the client side - # client functions - async def set_context(self, project, asset, task): - """ - Sets 'project' and 'asset' to envs, eg. setting context. 
- - Opens last workile from that context if exists. - - Args: - project (str) - asset (str) - task (str - """ - log.info("Setting context change") - log.info(f"project {project} asset {asset} task {task}") - - asset_doc = get_asset_by_name(project, asset) - change_current_context(asset_doc, task) - - last_workfile_path = self._get_last_workfile_path(project, - asset, - task) - if last_workfile_path and os.path.exists(last_workfile_path): - ProcessLauncher.execute_in_main_thread( - lambda: stub().open(last_workfile_path)) - - - async def read(self): - log.debug("photoshop.read client calls server server calls " - "photoshop client") - return await self.socket.call('photoshop.read') - - # panel routes for tools - async def workfiles_route(self): - self._tool_route("workfiles") - - async def loader_route(self): - self._tool_route("loader") - - async def publish_route(self): - self._tool_route("publisher") - - async def sceneinventory_route(self): - self._tool_route("sceneinventory") - - async def experimental_tools_route(self): - self._tool_route("experimental_tools") - - def _tool_route(self, _tool_name): - """The address accessed when clicking on the buttons.""" - - ProcessLauncher.execute_in_main_thread(show_tool_by_name, _tool_name) - - # Required return statement. - return "nothing" - - def _get_last_workfile_path(self, project_name, asset_name, task_name): - """Returns last workfile path if exists""" - host = registered_host() - host_name = "photoshop" - template_key = get_workfile_template_key_from_context( - asset_name, - task_name, - host_name, - project_name=project_name - ) - anatomy = Anatomy(project_name) - - data = get_template_data_with_names( - project_name, asset_name, task_name, host_name - ) - data["root"] = anatomy.roots - - file_template = anatomy.templates[template_key]["file"] - - # Define saving file extension - extensions = host.get_workfile_extensions() - - folder_template = anatomy.templates[template_key]["folder"] - work_root = StringTemplate.format_strict_template( - folder_template, data - ) - last_workfile_path = get_last_workfile( - work_root, file_template, data, extensions, True - ) - - return last_workfile_path diff --git a/openpype/hosts/photoshop/api/lib.py b/openpype/hosts/photoshop/api/lib.py deleted file mode 100644 index d4d4995e6d..0000000000 --- a/openpype/hosts/photoshop/api/lib.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -import sys -import contextlib -import traceback - -from openpype.lib import env_value_to_bool, Logger -from openpype.modules import ModulesManager -from openpype.pipeline import install_host -from openpype.tools.utils import host_tools -from openpype.tools.utils import get_openpype_qt_app -from openpype.tests.lib import is_in_tests - -from .launch_logic import ProcessLauncher, stub - -log = Logger.get_logger(__name__) - - -def safe_excepthook(*args): - traceback.print_exception(*args) - - -def main(*subprocess_args): - from openpype.hosts.photoshop.api import PhotoshopHost - - host = PhotoshopHost() - install_host(host) - - sys.excepthook = safe_excepthook - - # coloring in StdOutBroker - os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" - app = get_openpype_qt_app() - app.setQuitOnLastWindowClosed(False) - - launcher = ProcessLauncher(subprocess_args) - launcher.start() - - if env_value_to_bool("HEADLESS_PUBLISH"): - manager = ModulesManager() - webpublisher_addon = manager["webpublisher"] - launcher.execute_in_main_thread( - webpublisher_addon.headless_publish, - log, - "ClosePS", - is_in_tests() - ) - elif 
env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", - default=True): - - launcher.execute_in_main_thread( - host_tools.show_workfiles, - save=env_value_to_bool("WORKFILES_SAVE_AS") - ) - - sys.exit(app.exec_()) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context.""" - selection = stub().get_selected_layers() - try: - yield selection - finally: - stub().select_layers(selection) - - -@contextlib.contextmanager -def maintained_visibility(layers=None): - """Maintain visibility during context. - - Args: - layers (list) of PSItem (used for caching) - """ - visibility = {} - if not layers: - layers = stub().get_layers() - for layer in layers: - visibility[layer.id] = layer.visible - try: - yield - finally: - for layer in layers: - stub().set_visible(layer.id, visibility[layer.id]) - pass diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py deleted file mode 100644 index 4e0dbcad06..0000000000 --- a/openpype/hosts/photoshop/api/pipeline.py +++ /dev/null @@ -1,281 +0,0 @@ -import os - -from qtpy import QtWidgets - -import pyblish.api - -from openpype.lib import register_event_callback, Logger -from openpype.pipeline import ( - register_loader_plugin_path, - register_creator_plugin_path, - AVALON_CONTAINER_ID, -) - -from openpype.host import ( - HostBase, - IWorkfileHost, - ILoadHost, - IPublishHost -) - -from openpype.pipeline.load import any_outdated_containers -from openpype.hosts.photoshop import PHOTOSHOP_HOST_DIR -from openpype.tools.utils import get_openpype_qt_app - -from . import lib - -log = Logger.get_logger(__name__) - -PLUGINS_DIR = os.path.join(PHOTOSHOP_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - - -class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "photoshop" - - def install(self): - """Install Photoshop-specific functionality needed for integration. - - This function is called automatically on calling - `api.install(photoshop)`. 
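-
-        Registers publish, load and create plugin paths and the
-        "application.launched" event callback.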
- """ - log.info("Installing OpenPype Photoshop...") - pyblish.api.register_host("photoshop") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - register_event_callback("application.launched", on_application_launch) - - def current_file(self): - try: - full_name = lib.stub().get_active_document_full_name() - if full_name and full_name != "null": - return os.path.normpath(full_name).replace("\\", "/") - except Exception: - pass - - return None - - def work_root(self, session): - return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") - - def open_workfile(self, filepath): - lib.stub().open(filepath) - - return True - - def save_workfile(self, filepath=None): - _, ext = os.path.splitext(filepath) - lib.stub().saveAs(filepath, ext[1:], True) - - def get_current_workfile(self): - return self.current_file() - - def workfile_has_unsaved_changes(self): - if self.current_file(): - return not lib.stub().is_saved() - - return False - - def get_workfile_extensions(self): - return [".psd", ".psb"] - - def get_containers(self): - return ls() - - def get_context_data(self): - """Get stored values for context (validation enable/disable etc)""" - meta = _get_stub().get_layers_metadata() - for item in meta: - if item.get("id") == "publish_context": - item.pop("id") - return item - - return {} - - def update_context_data(self, data, changes): - """Store value needed for context""" - item = data - item["id"] = "publish_context" - _get_stub().imprint(item["id"], item) - - def list_instances(self): - """List all created instances to publish from current workfile. - - Pulls from File > File Info - - Returns: - (list) of dictionaries matching instances format - """ - stub = _get_stub() - - if not stub: - return [] - - instances = [] - layers_meta = stub.get_layers_metadata() - if layers_meta: - for instance in layers_meta: - if instance.get("id") == "pyblish.avalon.instance": - instances.append(instance) - - return instances - - def remove_instance(self, instance): - """Remove instance from current workfile metadata. - - Updates metadata of current file in File > File Info and removes - icon highlight on group layer. - - Args: - instance (dict): instance representation from subsetmanager model - """ - stub = _get_stub() - - if not stub: - return - - inst_id = instance.get("instance_id") or instance.get("uuid") # legacy - if not inst_id: - log.warning("No instance identifier for {}".format(instance)) - return - - stub.remove_instance(inst_id) - - if instance.get("members"): - item = stub.get_layer(instance["members"][0]) - if item: - stub.rename_layer(item.id, - item.name.replace(stub.PUBLISH_ICON, '')) - - -def check_inventory(): - if not any_outdated_containers(): - return - - # Warn about outdated containers. - _app = get_openpype_qt_app() - - message_box = QtWidgets.QMessageBox() - message_box.setIcon(QtWidgets.QMessageBox.Warning) - msg = "There are outdated containers in the scene." 
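-    # Modal dialog; execution blocks until the artist confirms the warning.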
- message_box.setText(msg) - message_box.exec_() - - -def on_application_launch(): - check_inventory() - - -def ls(): - """Yields containers from active Photoshop document - - This is the host-equivalent of api.ls(), but instead of listing - assets on disk, it lists assets already loaded in Photoshop; once loaded - they are called 'containers' - - Yields: - dict: container - - """ - try: - stub = lib.stub() # only after Photoshop is up - except lib.ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - if not stub.get_active_document_name(): - return - - layers_meta = stub.get_layers_metadata() # minimalize calls to PS - for layer in stub.get_layers(): - data = stub.read(layer, layers_meta) - - # Skip non-tagged layers. - if not data: - continue - - # Filter to only containers. - if "container" not in data["id"]: - continue - - # Append transient data - data["objectName"] = layer.name.replace(stub.LOADED_ICON, '') - data["layer"] = layer - - yield data - - -def _get_stub(): - """Handle pulling stub from PS to run operations on host - - Returns: - (PhotoshopServerStub) or None - """ - try: - stub = lib.stub() # only after Photoshop is up - except lib.ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - if not stub.get_active_document_name(): - return - - return stub - - -def containerise( - name, namespace, layer, context, loader=None, suffix="_CON" -): - """Imprint layer with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - layer (PSItem): Layer to containerise - context (dict): Asset information - loader (str, optional): Name of loader used to produce this container. - suffix (str, optional): Suffix of container, defaults to `_CON`. - - Returns: - container (str): Name of container assembly - """ - layer.name = name + suffix - - data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - "members": [str(layer.id)] - } - stub = lib.stub() - stub.imprint(layer.id, data) - - return layer - - -def cache_and_get_instances(creator): - """Cache instances in shared data. - - Storing all instances as a list as legacy instances might be still present. - Args: - creator (Creator): Plugin which would like to get instances from host. - Returns: - List[]: list of all instances stored in metadata - """ - shared_key = "openpype.photoshop.instances" - if shared_key not in creator.collection_shared_data: - creator.collection_shared_data[shared_key] = \ - creator.host.list_instances() - return creator.collection_shared_data[shared_key] diff --git a/openpype/hosts/photoshop/api/plugin.py b/openpype/hosts/photoshop/api/plugin.py deleted file mode 100644 index c80e6bbd06..0000000000 --- a/openpype/hosts/photoshop/api/plugin.py +++ /dev/null @@ -1,35 +0,0 @@ -import re - -from openpype.pipeline import LoaderPlugin -from .launch_logic import stub - - -def get_unique_layer_name(layers, asset_name, subset_name): - """ - Gets all layer names and if 'asset_name_subset_name' is present, it - increases suffix by 1 (eg. creates unique layer name - for Loader) - Args: - layers (list) of dict with layers info (name, id etc.) 
- asset_name (string): - subset_name (string): - - Returns: - (string): name_00X (without version) - """ - name = "{}_{}".format(asset_name, subset_name) - names = {} - for layer in layers: - layer_name = re.sub(r'_\d{3}$', '', layer.name) - if layer_name in names.keys(): - names[layer_name] = names[layer_name] + 1 - else: - names[layer_name] = 1 - occurrences = names.get(name, 0) - - return "{}_{:0>3d}".format(name, occurrences + 1) - - -class PhotoshopLoader(LoaderPlugin): - @staticmethod - def get_stub(): - return stub() diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py deleted file mode 100644 index 2c4d0ad5fc..0000000000 --- a/openpype/hosts/photoshop/api/ws_stub.py +++ /dev/null @@ -1,571 +0,0 @@ -""" - Stub handling connection from server to client. - Used anywhere solution is calling client methods. -""" -import json -import attr -from wsrpc_aiohttp import WebSocketAsync - -from openpype.tools.adobe_webserver.app import WebServerTool - - -@attr.s -class PSItem(object): - """ - Object denoting layer or group item in PS. Each item is created in - PS by any Loader, but contains same fields, which are being used - in later processing. - """ - # metadata - id = attr.ib() # id created by AE, could be used for querying - name = attr.ib() # name of item - group = attr.ib(default=None) # item type (footage, folder, comp) - parents = attr.ib(factory=list) - visible = attr.ib(default=True) - type = attr.ib(default=None) - # all imported elements, single for - members = attr.ib(factory=list) - long_name = attr.ib(default=None) - color_code = attr.ib(default=None) # color code of layer - instance_id = attr.ib(default=None) - - @property - def clean_name(self): - """Returns layer name without publish icon highlight - - Returns: - (str) - """ - return (self.name.replace(PhotoshopServerStub.PUBLISH_ICON, '') - .replace(PhotoshopServerStub.LOADED_ICON, '')) - - -class PhotoshopServerStub: - """ - Stub for calling function on client (Photoshop js) side. - Expects that client is already connected (started when avalon menu - is opened). - 'self.websocketserver.call' is used as async wrapper - """ - PUBLISH_ICON = '\u2117 ' - LOADED_ICON = '\u25bc' - - def __init__(self): - self.websocketserver = WebServerTool.get_instance() - self.client = self.get_client() - - @staticmethod - def get_client(): - """ - Return first connected client to WebSocket - TODO implement selection by Route - :return: client - """ - clients = WebSocketAsync.get_clients() - client = None - if len(clients) > 0: - key = list(clients.keys())[0] - client = clients.get(key) - - return client - - def open(self, path): - """Open file located at 'path' (local). - - Args: - path(string): file path locally - Returns: None - """ - self.websocketserver.call( - self.client.call('Photoshop.open', path=path) - ) - - def read(self, layer, layers_meta=None): - """Parses layer metadata from Headline field of active document. 
- - Args: - layer: (PSItem) - layers_meta: full list from Headline (for performance in loops) - Returns: - (dict) of layer metadata stored in PS file - - Example: - { - 'id': 'pyblish.avalon.container', - 'loader': 'ImageLoader', - 'members': ['64'], - 'name': 'imageMainMiddle', - 'namespace': 'Hero_imageMainMiddle_001', - 'representation': '6203dc91e80934d9f6ee7d96', - 'schema': 'openpype:container-2.0' - } - """ - if layers_meta is None: - layers_meta = self.get_layers_metadata() - - for layer_meta in layers_meta: - layer_id = layer_meta.get("uuid") # legacy - if layer_meta.get("members"): - layer_id = layer_meta["members"][0] - if str(layer.id) == str(layer_id): - return layer_meta - print("Unable to find layer metadata for {}".format(layer.id)) - - def imprint(self, item_id, data, all_layers=None, items_meta=None): - """Save layer metadata to Headline field of active document - - Stores metadata in format: - [{ - "active":true, - "subset":"imageBG", - "family":"image", - "id":"pyblish.avalon.instance", - "asset":"Town", - "uuid": "8" - }] - for created instances - OR - [{ - "schema": "openpype:container-2.0", - "id": "pyblish.avalon.instance", - "name": "imageMG", - "namespace": "Jungle_imageMG_001", - "loader": "ImageLoader", - "representation": "5fbfc0ee30a946093c6ff18a", - "members": [ - "40" - ] - }] - for loaded instances - - Args: - item_id (str): - data(string): json representation for single layer - all_layers (list of PSItem): for performance, could be - injected for usage in loop, if not, single call will be - triggered - items_meta(string): json representation from Headline - (for performance - provide only if imprint is in - loop - value should be same) - Returns: None - """ - if not items_meta: - items_meta = self.get_layers_metadata() - - # json.dumps writes integer values in a dictionary to string, so - # anticipating it here. - item_id = str(item_id) - is_new = True - result_meta = [] - for item_meta in items_meta: - if ((item_meta.get('members') and - item_id == str(item_meta.get('members')[0])) or - item_meta.get("instance_id") == item_id): - is_new = False - if data: - item_meta.update(data) - result_meta.append(item_meta) - else: - result_meta.append(item_meta) - - if is_new: - result_meta.append(data) - - # Ensure only valid ids are stored. - if not all_layers: - all_layers = self.get_layers() - layer_ids = [layer.id for layer in all_layers] - cleaned_data = [] - - for item in result_meta: - if item.get("members"): - if int(item["members"][0]) not in layer_ids: - continue - - cleaned_data.append(item) - - payload = json.dumps(cleaned_data, indent=4) - self.websocketserver.call( - self.client.call('Photoshop.imprint', payload=payload) - ) - - def get_layers(self): - """Returns JSON document with all(?) layers in active document. - - Returns: - Format of tuple: { 'id':'123', - 'name': 'My Layer 1', - 'type': 'GUIDE'|'FG'|'BG'|'OBJ' - 'visible': 'true'|'false' - """ - res = self.websocketserver.call( - self.client.call('Photoshop.get_layers') - ) - - return self._to_records(res) - - def get_layer(self, layer_id): - """ - Returns PSItem for specific 'layer_id' or None if not found - Args: - layer_id (string): unique layer id, stored in 'uuid' field - - Returns: - (PSItem) or None - """ - layers = self.get_layers() - for layer in layers: - if str(layer.id) == str(layer_id): - return layer - - def get_layers_in_layers(self, layers): - """Return all layers that belong to layers (might be groups). 
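-
-        Membership is resolved by intersecting each layer's `parents` ids
-        with the ids of the passed layers.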
- - Args: - layers : - - Returns: - - """ - parent_ids = set([lay.id for lay in layers]) - - return self._get_layers_in_layers(parent_ids) - - def get_layers_in_layers_ids(self, layers_ids, layers=None): - """Return all layers that belong to layers (might be groups). - - Args: - layers_ids - layers : - - Returns: - - """ - parent_ids = set(layers_ids) - - return self._get_layers_in_layers(parent_ids, layers) - - def _get_layers_in_layers(self, parent_ids, layers=None): - if not layers: - layers = self.get_layers() - - all_layers = layers - ret = [] - - for layer in all_layers: - parents = set(layer.parents) - if len(parent_ids & parents) > 0: - ret.append(layer) - if layer.id in parent_ids: - ret.append(layer) - - return ret - - def create_group(self, name): - """Create new group (eg. LayerSet) - - Returns: - - """ - enhanced_name = self.PUBLISH_ICON + name - ret = self.websocketserver.call( - self.client.call('Photoshop.create_group', name=enhanced_name) - ) - # create group on PS is asynchronous, returns only id - return PSItem(id=ret, name=name, group=True) - - def group_selected_layers(self, name): - """Group selected layers into new LayerSet (eg. group) - - Returns: - (Layer) - """ - enhanced_name = self.PUBLISH_ICON + name - res = self.websocketserver.call( - self.client.call( - 'Photoshop.group_selected_layers', name=enhanced_name - ) - ) - res = self._to_records(res) - if res: - rec = res.pop() - rec.name = rec.name.replace(self.PUBLISH_ICON, '') - return rec - raise ValueError("No group record returned") - - def get_selected_layers(self): - """Get a list of actually selected layers. - - Returns: - """ - res = self.websocketserver.call( - self.client.call('Photoshop.get_selected_layers') - ) - return self._to_records(res) - - def select_layers(self, layers): - """Selects specified layers in Photoshop by its ids. 
- - Args: - layers: - """ - layers_id = [str(lay.id) for lay in layers] - self.websocketserver.call( - self.client.call( - 'Photoshop.select_layers', - layers=json.dumps(layers_id) - ) - ) - - def get_active_document_full_name(self): - """Returns full name with path of active document via ws call - - Returns(string): - full path with name - """ - res = self.websocketserver.call( - self.client.call('Photoshop.get_active_document_full_name') - ) - - return res - - def get_active_document_name(self): - """Returns just a name of active document via ws call - - Returns(string): - file name - """ - return self.websocketserver.call( - self.client.call('Photoshop.get_active_document_name') - ) - - def is_saved(self): - """Returns true if no changes in active document - - Returns: - - """ - return self.websocketserver.call( - self.client.call('Photoshop.is_saved') - ) - - def save(self): - """Saves active document""" - self.websocketserver.call( - self.client.call('Photoshop.save') - ) - - def saveAs(self, image_path, ext, as_copy): - """Saves active document to psd (copy) or png or jpg - - Args: - image_path(string): full local path - ext: - as_copy: - Returns: None - """ - self.websocketserver.call( - self.client.call( - 'Photoshop.saveAs', - image_path=image_path, - ext=ext, - as_copy=as_copy - ) - ) - - def set_visible(self, layer_id, visibility): - """Set layer with 'layer_id' to 'visibility' - - Args: - layer_id: - visibility: - Returns: None - """ - self.websocketserver.call( - self.client.call( - 'Photoshop.set_visible', - layer_id=layer_id, - visibility=visibility - ) - ) - - def hide_all_others_layers(self, layers): - """hides all layers that are not part of the list or that are not - children of this list - - Args: - layers (list): list of PSItem - highest hierarchy - """ - extract_ids = set([ll.id for ll in self.get_layers_in_layers(layers)]) - - self.hide_all_others_layers_ids(extract_ids) - - def hide_all_others_layers_ids(self, extract_ids, layers=None): - """hides all layers that are not part of the list or that are not - children of this list - - Args: - extract_ids (list): list of integer that should be visible - layers (list) of PSItem (used for caching) - """ - if not layers: - layers = self.get_layers() - for layer in layers: - if layer.visible and layer.id not in extract_ids: - self.set_visible(layer.id, False) - - def get_layers_metadata(self): - """Reads layers metadata from Headline from active document in PS. - (Headline accessible by File > File Info) - - Returns: - (list) - example: - {"8":{"active":true,"subset":"imageBG", - "family":"image","id":"pyblish.avalon.instance", - "asset":"Town"}} - 8 is layer(group) id - used for deletion, update etc. - """ - res = self.websocketserver.call(self.client.call('Photoshop.read')) - layers_data = [] - try: - if res: - layers_data = json.loads(res) - except json.decoder.JSONDecodeError: - raise ValueError("{} cannot be parsed, recreate meta".format(res)) - # format of metadata changed from {} to [] because of standardization - # keep current implementation logic as its working - if isinstance(layers_data, dict): - for layer_id, layer_meta in layers_data.items(): - if layer_meta.get("schema") != "openpype:container-2.0": - layer_meta["members"] = [str(layer_id)] - layers_data = list(layers_data.values()) - return layers_data - - def import_smart_object(self, path, layer_name, as_reference=False): - """Import the file at `path` as a smart object to active document. - - Args: - path (str): File path to import. 
- layer_name (str): Unique layer name to differentiate how many times - same smart object was loaded - as_reference (bool): pull in content or reference - """ - enhanced_name = self.LOADED_ICON + layer_name - res = self.websocketserver.call( - self.client.call( - 'Photoshop.import_smart_object', - path=path, - name=enhanced_name, - as_reference=as_reference - ) - ) - rec = self._to_records(res).pop() - if rec: - rec.name = rec.name.replace(self.LOADED_ICON, '') - return rec - - def replace_smart_object(self, layer, path, layer_name): - """Replace the smart object `layer` with file at `path` - - Args: - layer (PSItem): - path (str): File to import. - layer_name (str): Unique layer name to differentiate how many times - same smart object was loaded - """ - enhanced_name = self.LOADED_ICON + layer_name - self.websocketserver.call( - self.client.call( - 'Photoshop.replace_smart_object', - layer_id=layer.id, - path=path, - name=enhanced_name - ) - ) - - def delete_layer(self, layer_id): - """Deletes specific layer by it's id. - - Args: - layer_id (int): id of layer to delete - """ - self.websocketserver.call( - self.client.call('Photoshop.delete_layer', layer_id=layer_id) - ) - - def rename_layer(self, layer_id, name): - """Renames specific layer by it's id. - - Args: - layer_id (int): id of layer to delete - name (str): new name - """ - self.websocketserver.call( - self.client.call( - 'Photoshop.rename_layer', - layer_id=layer_id, - name=name - ) - ) - - def remove_instance(self, instance_id): - cleaned_data = [] - - for item in self.get_layers_metadata(): - inst_id = item.get("instance_id") or item.get("uuid") - if inst_id != instance_id: - cleaned_data.append(item) - - payload = json.dumps(cleaned_data, indent=4) - - self.websocketserver.call( - self.client.call('Photoshop.imprint', payload=payload) - ) - - def get_extension_version(self): - """Returns version number of installed extension.""" - return self.websocketserver.call( - self.client.call('Photoshop.get_extension_version') - ) - - def close(self): - """Shutting down PS and process too. - - For webpublishing only. - """ - # TODO change client.call to method with checks for client - self.websocketserver.call(self.client.call('Photoshop.close')) - - def _to_records(self, res): - """Converts string json representation into list of PSItem for - dot notation access to work. 
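-
-        Example (hypothetical payload):
-            '{"id": 10, "name": "layer1"}' is parsed into
-            [PSItem(id=10, name="layer1", ...)]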
- - Args: - res (string): valid json - - Returns: - - """ - try: - layers_data = json.loads(res) - except json.decoder.JSONDecodeError: - raise ValueError("Received broken JSON {}".format(res)) - ret = [] - - # convert to AEItem to use dot donation - if isinstance(layers_data, dict): - layers_data = [layers_data] - for d in layers_data: - # currently implemented and expected fields - ret.append(PSItem( - d.get('id'), - d.get('name'), - d.get('group'), - d.get('parents'), - d.get('visible'), - d.get('type'), - d.get('members'), - d.get('long_name'), - d.get("color_code"), - d.get("instance_id") - )) - return ret diff --git a/openpype/hosts/photoshop/lib.py b/openpype/hosts/photoshop/lib.py deleted file mode 100644 index 5c8dff947d..0000000000 --- a/openpype/hosts/photoshop/lib.py +++ /dev/null @@ -1,112 +0,0 @@ -import re - -from openpype import AYON_SERVER_ENABLED -import openpype.hosts.photoshop.api as api -from openpype.client import get_asset_by_name -from openpype.lib import prepare_template_data -from openpype.pipeline import ( - AutoCreator, - CreatedInstance -) -from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances - - -class PSAutoCreator(AutoCreator): - """Generic autocreator to extend.""" - def get_instance_attr_defs(self): - return [] - - def collect_instances(self): - for instance_data in cache_and_get_instances(self): - creator_id = instance_data.get("creator_identifier") - - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - self.log.debug("update_list:: {}".format(update_list)) - for created_inst, _changes in update_list: - api.stub().imprint(created_inst.get("instance_id"), - created_inst.data_to_store()) - - def create(self, options=None): - existing_instance = None - for instance in self.create_context.instances: - if instance.family == self.family: - existing_instance = instance - break - - context = self.create_context - project_name = context.get_current_project_name() - asset_name = context.get_current_asset_name() - task_name = context.get_current_task_name() - host_name = context.host_name - - if existing_instance is None: - existing_instance_asset = None - elif AYON_SERVER_ENABLED: - existing_instance_asset = existing_instance["folderPath"] - else: - existing_instance_asset = existing_instance["asset"] - - if existing_instance is None: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, task_name, asset_doc, - project_name, host_name - ) - data = { - "task": task_name, - "variant": self.default_variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - data.update(self.get_dynamic_data( - self.default_variant, task_name, asset_doc, - project_name, host_name, None - )) - - if not self.active_on_create: - data["active"] = False - - new_instance = CreatedInstance( - self.family, subset_name, data, self - ) - self._add_instance_to_context(new_instance) - api.stub().imprint(new_instance.get("instance_id"), - new_instance.data_to_store()) - - elif ( - existing_instance_asset != asset_name - or existing_instance["task"] != task_name - ): - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, task_name, asset_doc, - project_name, host_name - ) - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - 
existing_instance["asset"] = asset_name - existing_instance["task"] = task_name - existing_instance["subset"] = subset_name - - -def clean_subset_name(subset_name): - """Clean all variants leftover {layer} from subset name.""" - dynamic_data = prepare_template_data({"layer": "{layer}"}) - for value in dynamic_data.values(): - if value in subset_name: - subset_name = (subset_name.replace(value, "") - .replace("__", "_") - .replace("..", ".")) - # clean trailing separator as Main_ - pattern = r'[\W_]+$' - replacement = '' - return re.sub(pattern, replacement, subset_name) diff --git a/openpype/hosts/photoshop/plugins/create/create_review.py b/openpype/hosts/photoshop/plugins/create/create_review.py deleted file mode 100644 index 63751d94e4..0000000000 --- a/openpype/hosts/photoshop/plugins/create/create_review.py +++ /dev/null @@ -1,28 +0,0 @@ -from openpype.hosts.photoshop.lib import PSAutoCreator - - -class ReviewCreator(PSAutoCreator): - """Creates review instance which might be disabled from publishing.""" - identifier = "review" - family = "review" - - default_variant = "Main" - - def get_detail_description(self): - return """Auto creator for review. - - Photoshop review is created from all published images or from all - visible layers if no `image` instances got created. - - Review might be disabled by an artist (instance shouldn't be deleted as - it will get recreated in next publish either way). - """ - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["photoshop"]["create"]["ReviewCreator"] - ) - - self.default_variant = plugin_settings["default_variant"] - self.active_on_create = plugin_settings["active_on_create"] - self.enabled = plugin_settings["enabled"] diff --git a/openpype/hosts/photoshop/plugins/create/create_workfile.py b/openpype/hosts/photoshop/plugins/create/create_workfile.py deleted file mode 100644 index 1b255de3a3..0000000000 --- a/openpype/hosts/photoshop/plugins/create/create_workfile.py +++ /dev/null @@ -1,28 +0,0 @@ -from openpype.hosts.photoshop.lib import PSAutoCreator - - -class WorkfileCreator(PSAutoCreator): - identifier = "workfile" - family = "workfile" - - default_variant = "Main" - - def get_detail_description(self): - return """Auto creator for workfile. - - It is expected that each publish will also publish its source workfile - for safekeeping. This creator triggers automatically without need for - an artist to remember and trigger it explicitly. - - Workfile instance could be disabled if it is not required to publish - workfile. (Instance shouldn't be deleted though as it will be recreated - in next publish automatically). - """ - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["photoshop"]["create"]["WorkfileCreator"] - ) - - self.active_on_create = plugin_settings["active_on_create"] - self.enabled = plugin_settings["enabled"] diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py deleted file mode 100644 index eb770bbd20..0000000000 --- a/openpype/hosts/photoshop/plugins/load/load_image.py +++ /dev/null @@ -1,84 +0,0 @@ -import re - -from openpype.pipeline import get_representation_path -from openpype.hosts.photoshop import api as photoshop -from openpype.hosts.photoshop.api import get_unique_layer_name - - -class ImageLoader(photoshop.PhotoshopLoader): - """Load images - - Stores the imported asset in a container named after the asset. 
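-
-    Layer names are made unique with `get_unique_layer_name`, eg.
-    "Town_imageMain_001".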
- """ - - families = ["image", "render"] - representations = ["*"] - - def load(self, context, name=None, namespace=None, data=None): - stub = self.get_stub() - layer_name = get_unique_layer_name( - stub.get_layers(), - context["asset"]["name"], - name - ) - with photoshop.maintained_selection(): - path = self.filepath_from_context(context) - layer = self.import_layer(path, layer_name, stub) - - self[:] = [layer] - namespace = namespace or layer_name - - return photoshop.containerise( - name, - namespace, - layer, - context, - self.__class__.__name__ - ) - - def update(self, container, representation): - """ Switch asset or change version """ - stub = self.get_stub() - - layer = container.pop("layer") - - context = representation.get("context", {}) - - namespace_from_container = re.sub(r'_\d{3}$', '', - container["namespace"]) - layer_name = "{}_{}".format(context["asset"], context["subset"]) - # switching assets - if namespace_from_container != layer_name: - layer_name = get_unique_layer_name( - stub.get_layers(), context["asset"], context["subset"] - ) - else: # switching version - keep same name - layer_name = container["namespace"] - - path = get_representation_path(representation) - with photoshop.maintained_selection(): - stub.replace_smart_object( - layer, path, layer_name - ) - - stub.imprint( - layer.id, {"representation": str(representation["_id"])} - ) - - def remove(self, container): - """ - Removes element from scene: deletes layer + removes from Headline - Args: - container (dict): container to be removed - used to get layer_id - """ - stub = self.get_stub() - - layer = container.pop("layer") - stub.imprint(layer.id, {}) - stub.delete_layer(layer.id) - - def switch(self, container, representation): - self.update(container, representation) - - def import_layer(self, file_name, layer_name, stub): - return stub.import_smart_object(file_name, layer_name) diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py deleted file mode 100644 index 5772e243d5..0000000000 --- a/openpype/hosts/photoshop/plugins/load/load_reference.py +++ /dev/null @@ -1,85 +0,0 @@ -import re - -from openpype.pipeline import get_representation_path -from openpype.hosts.photoshop import api as photoshop -from openpype.hosts.photoshop.api import get_unique_layer_name - - -class ReferenceLoader(photoshop.PhotoshopLoader): - """Load reference images - - Stores the imported asset in a container named after the asset. - - Inheriting from 'load_image' didn't work because of - "Cannot write to closing transport", possible refactor. 
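-
-    Unlike ImageLoader, `import_layer` here passes `as_reference=True`,
-    so the smart object references the source file instead of copying it.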
- """ - - families = ["image", "render"] - representations = ["*"] - - def load(self, context, name=None, namespace=None, data=None): - stub = self.get_stub() - layer_name = get_unique_layer_name( - stub.get_layers(), context["asset"]["name"], name - ) - with photoshop.maintained_selection(): - path = self.filepath_from_context(context) - layer = self.import_layer(path, layer_name, stub) - - self[:] = [layer] - namespace = namespace or layer_name - - return photoshop.containerise( - name, - namespace, - layer, - context, - self.__class__.__name__ - ) - - def update(self, container, representation): - """ Switch asset or change version """ - stub = self.get_stub() - layer = container.pop("layer") - - context = representation.get("context", {}) - - namespace_from_container = re.sub(r'_\d{3}$', '', - container["namespace"]) - layer_name = "{}_{}".format(context["asset"], context["subset"]) - # switching assets - if namespace_from_container != layer_name: - layer_name = get_unique_layer_name( - stub.get_layers(), context["asset"], context["subset"] - ) - else: # switching version - keep same name - layer_name = container["namespace"] - - path = get_representation_path(representation) - with photoshop.maintained_selection(): - stub.replace_smart_object( - layer, path, layer_name - ) - - stub.imprint( - layer.id, {"representation": str(representation["_id"])} - ) - - def remove(self, container): - """Removes element from scene: deletes layer + removes from Headline - - Args: - container (dict): container to be removed - used to get layer_id - """ - stub = self.get_stub() - layer = container.pop("layer") - stub.imprint(layer.id, {}) - stub.delete_layer(layer.id) - - def switch(self, container, representation): - self.update(container, representation) - - def import_layer(self, file_name, layer_name, stub): - return stub.import_smart_object( - file_name, layer_name, as_reference=True - ) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py deleted file mode 100644 index b13ff5e476..0000000000 --- a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Parses batch context from json and continues in publish process. - -Provides: - context -> Loaded batch file. - - asset - - task (task name) - - taskType - - project_name - - variant - -Code is practically copy of `openype/hosts/webpublish/collect_batch_data` as -webpublisher should be eventually ejected as an addon, eg. mentioned plugin -shouldn't be pushed into general publish plugins. -""" - -import os - -import pyblish.api - -from openpype.pipeline import legacy_io -from openpype_modules.webpublisher.lib import ( - get_batch_asset_task_info, - parse_json -) -from openpype.tests.lib import is_in_tests - - -class CollectBatchData(pyblish.api.ContextPlugin): - """Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir. - - The directory must contain 'manifest.json' file where batch data should be - stored. 
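-
-    Example 'manifest.json' (hypothetical values, only the keys used
-    below are shown):
-        {
-            "context": {...},
-            "variant": "Main"
-        }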
-    """
-    # must be really early, context values are only in json file
-    order = pyblish.api.CollectorOrder - 0.495
-    label = "Collect batch data"
-    hosts = ["photoshop"]
-    targets = ["webpublish"]
-
-    def process(self, context):
-        self.log.info("CollectBatchData")
-        batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
-        if is_in_tests():
-            self.log.debug("Automatic testing, no batch data, skipping")
-            return
-
-        assert batch_dir, (
-            "Missing `OPENPYPE_PUBLISH_DATA`")
-
-        assert os.path.exists(batch_dir), \
-            "Folder {} doesn't exist".format(batch_dir)
-
-        project_name = os.environ.get("AVALON_PROJECT")
-        if project_name is None:
-            raise AssertionError(
-                "Environment `AVALON_PROJECT` was not found. "
-                "Could not set project `root` which may cause issues."
-            )
-
-        batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
-
-        context.data["batchDir"] = batch_dir
-        context.data["batchData"] = batch_data
-
-        asset_name, task_name, task_type = get_batch_asset_task_info(
-            batch_data["context"]
-        )
-
-        os.environ["AVALON_ASSET"] = asset_name
-        os.environ["AVALON_TASK"] = task_name
-        legacy_io.Session["AVALON_ASSET"] = asset_name
-        legacy_io.Session["AVALON_TASK"] = task_name
-
-        context.data["asset"] = asset_name
-        context.data["task"] = task_name
-        context.data["taskType"] = task_type
-        context.data["project_name"] = project_name
-        context.data["variant"] = batch_data["variant"]
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_current_file.py b/openpype/hosts/photoshop/plugins/publish/collect_current_file.py
deleted file mode 100644
index 5daf47c6ac..0000000000
--- a/openpype/hosts/photoshop/plugins/publish/collect_current_file.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-
-import pyblish.api
-
-from openpype.hosts.photoshop import api as photoshop
-
-
-class CollectCurrentFile(pyblish.api.ContextPlugin):
-    """Inject the current working file into context"""
-
-    order = pyblish.api.CollectorOrder - 0.49
-    label = "Current File"
-    hosts = ["photoshop"]
-
-    def process(self, context):
-        context.data["currentFile"] = os.path.normpath(
-            photoshop.stub().get_active_document_full_name()
-        ).replace("\\", "/")
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py b/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py
deleted file mode 100644
index dc0678c9af..0000000000
--- a/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-import re
-import pyblish.api
-
-from openpype.hosts.photoshop import api as photoshop
-
-
-class CollectExtensionVersion(pyblish.api.ContextPlugin):
-    """ Pulls and compares version of installed extension.
-
-    It is recommended to use the same extension version as provided with the
-    OpenPype code.
-
-    Please use Anastasiy's Extension Manager or ZXPInstaller to update the
-    extension in case of an error.
-
-    You can locate extension.zxp in your installed OpenPype code in
-    `repos/avalon-core/avalon/photoshop`
-    """
-    # This technically should be a validator, but other collectors might be
-    # impacted by usage of an obsolete extension, so a collector that runs
-    # first was chosen
-    order = pyblish.api.CollectorOrder - 0.5
-    label = "Collect extension version"
-    hosts = ["photoshop"]
-
-    optional = True
-    active = True
-
-    def process(self, context):
-        installed_version = photoshop.stub().get_extension_version()
-
-        if not installed_version:
-            raise ValueError("Unknown version, probably old extension")
-
-        manifest_url = os.path.join(os.path.dirname(photoshop.__file__),
-                                    "extension", "CSXS", "manifest.xml")
-
-        if not os.path.exists(manifest_url):
-            self.log.debug("Unable to locate extension manifest, not checking")
-            return
-
-        expected_version = None
-        with open(manifest_url) as fp:
-            content = fp.read()
-
-            found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
-                               content)
-            if found:
-                expected_version = found[0][1]
-
-        if expected_version != installed_version:
-            msg = "Expected version '{}' found '{}'\n".format(
-                expected_version, installed_version)
-            msg += "Please update your installed extension, it might not work "
-            msg += "properly."
-
-            raise ValueError(msg)
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py
deleted file mode 100644
index 87ec4ee3f1..0000000000
--- a/openpype/hosts/photoshop/plugins/publish/collect_review.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-Requires:
-    None
-
-Provides:
-    instance     -> family ("review")
-"""
-
-import os
-
-import pyblish.api
-
-from openpype.pipeline.create import get_subset_name
-
-
-class CollectReview(pyblish.api.ContextPlugin):
-    """Adds review to families for instances marked to be reviewable.
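-
-    Appends "review" to `instance.data["families"]` for instances whose
-    creator attribute `mark_for_review` is enabled.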
-    """
-
-    label = "Collect Review"
-    hosts = ["photoshop"]
-    order = pyblish.api.CollectorOrder + 0.1
-
-    publish = True
-
-    def process(self, context):
-        for instance in context:
-            creator_attributes = instance.data["creator_attributes"]
-            if (creator_attributes.get("mark_for_review") and
-                    "review" not in instance.data["families"]):
-                instance.data["families"].append("review")
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
deleted file mode 100644
index 9625464499..0000000000
--- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import pyblish.api
-
-from openpype.pipeline.create import get_subset_name
-
-
-class CollectWorkfile(pyblish.api.ContextPlugin):
-    """Collect current script for publish."""
-
-    order = pyblish.api.CollectorOrder + 0.1
-    label = "Collect Workfile"
-    hosts = ["photoshop"]
-
-    default_variant = "Main"
-
-    def process(self, context):
-        for instance in context:
-            if instance.data["family"] == "workfile":
-                file_path = context.data["currentFile"]
-                staging_dir = os.path.dirname(file_path)
-                base_name = os.path.basename(file_path)
-
-                # creating representation
-                _, ext = os.path.splitext(file_path)
-                instance.data["representations"].append({
-                    "name": ext[1:],
-                    "ext": ext[1:],
-                    "files": base_name,
-                    "stagingDir": staging_dir,
-                })
-                return
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py
deleted file mode 100644
index 09c5d63aa5..0000000000
--- a/openpype/hosts/photoshop/plugins/publish/extract_review.py
+++ /dev/null
@@ -1,319 +0,0 @@
-import os
-import shutil
-from PIL import Image
-
-from openpype.lib import (
-    run_subprocess,
-    get_ffmpeg_tool_args,
-)
-from openpype.pipeline import publish
-from openpype.hosts.photoshop import api as photoshop
-
-
-class ExtractReview(publish.Extractor):
-    """
-    Produce flattened or sequence image files from all 'image' instances.
-
-    If no 'image' instance is created, it produces a flattened image from
-    all visible layers.
-
-    It creates review, thumbnail and mov representations.
-
-    The 'review' family could be used in other steps as a reference, as it
-    contains a flattened image by default. (Eg. an artist could load this
-    review as a single item and see the full image. In most cases the 'image'
-    family is separated by layers for better use in animation or comp.)
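-
-    Outputs a "jpg" representation (single image or sequence), a
-    "thumbnail" and, when there are multiple frames, a "mov".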
- """ - - label = "Extract Review" - hosts = ["photoshop"] - families = ["review"] - - # Extract Options - jpg_options = None - mov_options = None - make_image_sequence = None - max_downscale_size = 8192 - - def process(self, instance): - staging_dir = self.staging_dir(instance) - self.log.info("Outputting image to {}".format(staging_dir)) - - fps = instance.data.get("fps", 25) - stub = photoshop.stub() - self.output_seq_filename = os.path.splitext( - stub.get_active_document_name())[0] + ".%04d.jpg" - - layers = self._get_layers_from_image_instances(instance) - self.log.info("Layers image instance found: {}".format(layers)) - - repre_name = "jpg" - repre_skeleton = { - "name": repre_name, - "ext": "jpg", - "stagingDir": staging_dir, - "tags": self.jpg_options['tags'], - } - - if instance.data["family"] != "review": - self.log.debug("Existing extracted file from image family used.") - # enable creation of review, without this jpg review would clash - # with jpg of the image family - output_name = repre_name - repre_name = "{}_{}".format(repre_name, output_name) - repre_skeleton.update({"name": repre_name, - "outputName": output_name}) - - img_file = self.output_seq_filename % 0 - self._prepare_file_for_image_family(img_file, instance, - staging_dir) - repre_skeleton.update({ - "files": img_file, - }) - processed_img_names = [img_file] - elif self.make_image_sequence and len(layers) > 1: - self.log.debug("Extract layers to image sequence.") - img_list = self._save_sequence_images(staging_dir, layers) - - repre_skeleton.update({ - "frameStart": 0, - "frameEnd": len(img_list), - "fps": fps, - "files": img_list, - }) - processed_img_names = img_list - else: - self.log.debug("Extract layers to flatten image.") - img_file = self._save_flatten_image(staging_dir, layers) - - repre_skeleton.update({ - "files": img_file, - }) - processed_img_names = [img_file] - - instance.data["representations"].append(repre_skeleton) - - ffmpeg_args = get_ffmpeg_tool_args("ffmpeg") - - instance.data["stagingDir"] = staging_dir - - source_files_pattern = os.path.join(staging_dir, - self.output_seq_filename) - source_files_pattern = self._check_and_resize(processed_img_names, - source_files_pattern, - staging_dir) - self._generate_thumbnail( - list(ffmpeg_args), - instance, - source_files_pattern, - staging_dir) - - no_of_frames = len(processed_img_names) - if no_of_frames > 1: - self._generate_mov( - list(ffmpeg_args), - instance, - fps, - no_of_frames, - source_files_pattern, - staging_dir) - - self.log.info(f"Extracted {instance} to {staging_dir}") - - def _prepare_file_for_image_family(self, img_file, instance, staging_dir): - """Converts existing file for image family to .jpg - - Image instance could have its own separate review (instance per layer - for example). This uses extracted file instead of extracting again. 
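-
-        Non-jpg sources are converted with Pillow; alpha channels are
-        composited onto white to avoid messy low quality jpgs.
-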
- Args: - img_file (str): name of output file (with 0000 value for ffmpeg - later) - instance: - staging_dir (str): temporary folder where extracted file is located - """ - repre_file = instance.data["representations"][0] - source_file_path = os.path.join(repre_file["stagingDir"], - repre_file["files"]) - if not os.path.exists(source_file_path): - raise RuntimeError(f"{source_file_path} doesn't exist for " - "review to create from") - _, ext = os.path.splitext(repre_file["files"]) - if ext != ".jpg": - im = Image.open(source_file_path) - if (im.mode in ('RGBA', 'LA') or ( - im.mode == 'P' and 'transparency' in im.info)): - # without this it produces messy low quality jpg - rgb_im = Image.new("RGBA", (im.width, im.height), "#ffffff") - rgb_im.alpha_composite(im) - rgb_im.convert("RGB").save(os.path.join(staging_dir, img_file)) - else: - im.save(os.path.join(staging_dir, img_file)) - else: - # handles already .jpg - shutil.copy(source_file_path, - os.path.join(staging_dir, img_file)) - - def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames, - source_files_pattern, staging_dir): - """Generates .mov to upload to Ftrack. - - Args: - ffmpeg_path (str): path to ffmpeg - instance (Pyblish Instance) - fps (str) - no_of_frames (int): - source_files_pattern (str): name of source file - staging_dir (str): temporary location to store thumbnail - Updates: - instance - adds representation portion - """ - # Generate mov. - mov_path = os.path.join(staging_dir, "review.mov") - self.log.info(f"Generate mov review: {mov_path}") - args = ffmpeg_path + [ - "-y", - "-i", source_files_pattern, - "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", - "-vframes", str(no_of_frames), - mov_path - ] - self.log.debug("mov args:: {}".format(args)) - _output = run_subprocess(args) - instance.data["representations"].append({ - "name": "mov", - "ext": "mov", - "files": os.path.basename(mov_path), - "stagingDir": staging_dir, - "frameStart": 1, - "frameEnd": no_of_frames, - "fps": fps, - "tags": self.mov_options['tags'] - }) - - def _generate_thumbnail( - self, ffmpeg_args, instance, source_files_pattern, staging_dir - ): - """Generates scaled down thumbnail and adds it as representation. - - Args: - ffmpeg_path (str): path to ffmpeg - instance (Pyblish Instance) - source_files_pattern (str): name of source file - staging_dir (str): temporary location to store thumbnail - Updates: - instance - adds representation portion - """ - # Generate thumbnail - thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") - self.log.info(f"Generate thumbnail {thumbnail_path}") - args = ffmpeg_args + [ - "-y", - "-i", source_files_pattern, - "-vf", "scale=300:-1", - "-vframes", "1", - thumbnail_path - ] - self.log.debug("thumbnail args:: {}".format(args)) - _output = run_subprocess(args) - instance.data["representations"].append({ - "name": "thumbnail", - "ext": "jpg", - "outputName": "thumb", - "files": os.path.basename(thumbnail_path), - "stagingDir": staging_dir, - "tags": ["thumbnail", "delete"] - }) - instance.data["thumbnailPath"] = thumbnail_path - - def _check_and_resize(self, processed_img_names, source_files_pattern, - staging_dir): - """Check if saved image could be used in ffmpeg. - - Ffmpeg has max size 16384x16384. Saved image(s) must be resized to be - used as a source for thumbnail or review mov. 
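# A quick runnable illustration of the in-place downscale performed below:
# PIL's `thumbnail` clamps both edges while preserving the aspect ratio
# (8192 mirrors this plugin's default `max_downscale_size`; the input size
# is an arbitrary example):
from PIL import Image

img = Image.new("RGB", (20000, 10000))
img.thumbnail((8192, 8192))  # resizes in place, returns None
print(img.size)  # (8192, 4096)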
- """ - Image.MAX_IMAGE_PIXELS = None - first_url = os.path.join(staging_dir, processed_img_names[0]) - with Image.open(first_url) as im: - width, height = im.size - - if width > self.max_downscale_size or height > self.max_downscale_size: - resized_dir = os.path.join(staging_dir, "resized") - os.mkdir(resized_dir) - source_files_pattern = os.path.join(resized_dir, - self.output_seq_filename) - for file_name in processed_img_names: - source_url = os.path.join(staging_dir, file_name) - with Image.open(source_url) as res_img: - # 'thumbnail' automatically keeps aspect ratio - res_img.thumbnail((self.max_downscale_size, - self.max_downscale_size), - Image.ANTIALIAS) - res_img.save(os.path.join(resized_dir, file_name)) - - return source_files_pattern - - def _get_layers_from_image_instances(self, instance): - """Collect all layers from 'instance'. - - Returns: - (list) of PSItem - """ - layers = [] - # creating review for existing 'image' instance - if instance.data["family"] == "image" and instance.data.get("layer"): - layers.append(instance.data["layer"]) - return layers - - for image_instance in instance.context: - if image_instance.data["family"] != "image": - continue - if not image_instance.data.get("layer"): - # dummy instance for flatten image - continue - layers.append(image_instance.data.get("layer")) - - return sorted(layers) - - def _save_flatten_image(self, staging_dir, layers): - """Creates flat image from 'layers' into 'staging_dir'. - - Returns: - (str): path to new image - """ - img_filename = self.output_seq_filename % 0 - output_image_path = os.path.join(staging_dir, img_filename) - stub = photoshop.stub() - - with photoshop.maintained_visibility(): - self.log.info("Extracting {}".format(layers)) - if layers: - stub.hide_all_others_layers(layers) - - stub.saveAs(output_image_path, 'jpg', True) - - return img_filename - - def _save_sequence_images(self, staging_dir, layers): - """Creates separate flat images from 'layers' into 'staging_dir'. - - Used as source for multi frames .mov to review at once. 
- Returns: - (list): paths to new images - """ - stub = photoshop.stub() - - list_img_filename = [] - with photoshop.maintained_visibility(): - for i, layer in enumerate(layers): - self.log.info("Extracting {}".format(layer)) - - img_filename = self.output_seq_filename % i - output_image_path = os.path.join(staging_dir, img_filename) - list_img_filename.append(img_filename) - - with photoshop.maintained_visibility(): - stub.hide_all_others_layers([layer]) - stub.saveAs(output_image_path, 'jpg', True) - - return list_img_filename diff --git a/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py b/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py deleted file mode 100644 index aa900fec9f..0000000000 --- a/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py +++ /dev/null @@ -1,14 +0,0 @@ -from openpype.pipeline import publish -from openpype.hosts.photoshop import api as photoshop - - -class ExtractSaveScene(publish.Extractor): - """Save scene before extraction.""" - - order = publish.Extractor.order - 0.49 - label = "Extract Save Scene" - hosts = ["photoshop"] - families = ["workfile"] - - def process(self, instance): - photoshop.stub().save() diff --git a/openpype/hosts/photoshop/plugins/publish/increment_workfile.py b/openpype/hosts/photoshop/plugins/publish/increment_workfile.py deleted file mode 100644 index 665dd67fc5..0000000000 --- a/openpype/hosts/photoshop/plugins/publish/increment_workfile.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline.publish import get_errored_plugins_from_context -from openpype.lib import version_up - -from openpype.hosts.photoshop import api as photoshop - - -class IncrementWorkfile(pyblish.api.InstancePlugin): - """Increment the current workfile. - - Saves the current scene with an increased version number. - """ - - label = "Increment Workfile" - order = pyblish.api.IntegratorOrder + 9.0 - hosts = ["photoshop"] - families = ["workfile"] - optional = True - - def process(self, instance): - errored_plugins = get_errored_plugins_from_context(instance.context) - if errored_plugins: - raise RuntimeError( - "Skipping incrementing current file because publishing failed." 
-            )
-
-        scene_path = version_up(instance.context.data["currentFile"])
-        _, ext = os.path.splitext(scene_path)
-        photoshop.stub().saveAs(scene_path, ext[1:], True)
-
-        self.log.info("Incremented workfile to: {}".format(scene_path))
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
deleted file mode 100644
index 1a4932fe99..0000000000
--- a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import pyblish.api
-
-from openpype.pipeline import get_current_asset_name
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-    OptionalPyblishPluginMixin
-)
-from openpype.hosts.photoshop import api as photoshop
-
-
-class ValidateInstanceAssetRepair(pyblish.api.Action):
-    """Repair the instance asset."""
-
-    label = "Repair"
-    icon = "wrench"
-    on = "failed"
-
-    def process(self, context, plugin):
-
-        # Get the errored instances
-        failed = []
-        for result in context.data["results"]:
-            if (result["error"] is not None and result["instance"] is not None
-                    and result["instance"] not in failed):
-                failed.append(result["instance"])
-
-        # Apply pyblish.logic to get the instances for the plug-in
-        instances = pyblish.api.instances_by_plugin(failed, plugin)
-        stub = photoshop.stub()
-        current_asset_name = get_current_asset_name()
-        for instance in instances:
-            data = stub.read(instance[0])
-            data["asset"] = current_asset_name
-            stub.imprint(instance[0], data)
-
-
-class ValidateInstanceAsset(OptionalPyblishPluginMixin,
-                            pyblish.api.InstancePlugin):
-    """Validate the instance asset is the current selected context asset.
-
-    As it might happen that multiple workfiles are opened, switching
-    between them would mess with the selected context.
-    In that case outputs might be published under the wrong asset!
-
-    Repair action will use the Context asset value (Workfiles or Launcher).
-    Closing and reopening with Workfiles will refresh the Context value.
-    """
-
-    label = "Validate Instance Asset"
-    hosts = ["photoshop"]
-    optional = True
-    actions = [ValidateInstanceAssetRepair]
-    order = ValidateContentsOrder
-
-    def process(self, instance):
-        instance_asset = instance.data["asset"]
-        current_asset = get_current_asset_name()
-
-        if instance_asset != current_asset:
-            msg = (
                f"Instance asset {instance_asset} is not the same "
                f"as current context {current_asset}."
- - ) - repair_msg = ( - f"Repair with 'Repair' button to use '{current_asset}'.\n" - ) - formatting_data = {"msg": msg, - "repair_msg": repair_msg} - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) diff --git a/openpype/hosts/resolve/addon.py b/openpype/hosts/resolve/addon.py deleted file mode 100644 index 02c1d7957f..0000000000 --- a/openpype/hosts/resolve/addon.py +++ /dev/null @@ -1,23 +0,0 @@ -import os - -from openpype.modules import OpenPypeModule, IHostAddon - -from .utils import RESOLVE_ROOT_DIR - - -class ResolveAddon(OpenPypeModule, IHostAddon): - name = "resolve" - host_name = "resolve" - - def initialize(self, module_settings): - self.enabled = True - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(RESOLVE_ROOT_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".drp"] diff --git a/openpype/hosts/resolve/api/action.py b/openpype/hosts/resolve/api/action.py deleted file mode 100644 index d1dffca7cc..0000000000 --- a/openpype/hosts/resolve/api/action.py +++ /dev/null @@ -1,52 +0,0 @@ -# absolute_import is needed to counter the `module has no cmds error` in Maya -from __future__ import absolute_import - -import pyblish.api - - -from openpype.pipeline.publish import get_errored_instances_from_context - - -class SelectInvalidAction(pyblish.api.Action): - """Select invalid clips in Resolve timeline when plug-in failed. - - To retrieve the invalid nodes this assumes a static `get_invalid()` - method is available on the plugin. - - """ - label = "Select invalid" - on = "failed" # This action is only available on a failed plug-in - icon = "search" # Icon from Awesome Icon - - def process(self, context, plugin): - - try: - from .lib import get_project_manager - pm = get_project_manager() - self.log.debug(pm) - except ImportError: - raise ImportError("Current host is not Resolve") - - errored_instances = get_errored_instances_from_context(context, - plugin=plugin) - - # Get the invalid nodes for the plug-ins - self.log.info("Finding invalid clips..") - invalid = list() - for instance in errored_instances: - invalid_nodes = plugin.get_invalid(instance) - if invalid_nodes: - if isinstance(invalid_nodes, (list, tuple)): - invalid.extend(invalid_nodes) - else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") - - # Ensure unique (process each node only once) - invalid = list(set(invalid)) - - if invalid: - self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid)) - # TODO: select resolve timeline track items in current timeline - else: - self.log.info("No invalid nodes found.") diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py deleted file mode 100644 index 3866477c77..0000000000 --- a/openpype/hosts/resolve/api/lib.py +++ /dev/null @@ -1,938 +0,0 @@ -import sys -import json -import re -import os -import contextlib -from opentimelineio import opentime - -from openpype.lib import Logger -from openpype.pipeline.editorial import ( - is_overlapping_otio_ranges, - frames_to_timecode -) - -from ..otio import davinci_export as otio_export - -log = Logger.get_logger(__name__) - -self = sys.modules[__name__] -self.project_manager = None -self.media_storage = None - -# OpenPype sequential rename variables -self.rename_index = 0 -self.rename_add = 0 - -self.publish_clip_color = "Pink" -self.pype_marker_workflow = True - -# OpenPype compound clip workflow variable -self.pype_tag_name = "VFX Notes" - -# OpenPype 
marker workflow variables -self.pype_marker_name = "OpenPypeData" -self.pype_marker_duration = 1 -self.pype_marker_color = "Mint" -self.temp_marker_frame = None - -# OpenPype default timeline -self.pype_timeline_name = "OpenPypeTimeline" - - -@contextlib.contextmanager -def maintain_current_timeline(to_timeline: object, - from_timeline: object = None): - """Maintain current timeline selection during context - - Attributes: - from_timeline (resolve.Timeline)[optional]: - Example: - >>> print(from_timeline.GetName()) - timeline1 - >>> print(to_timeline.GetName()) - timeline2 - - >>> with maintain_current_timeline(to_timeline): - ... print(get_current_timeline().GetName()) - timeline2 - - >>> print(get_current_timeline().GetName()) - timeline1 - """ - project = get_current_project() - working_timeline = from_timeline or project.GetCurrentTimeline() - - # switch to the input timeline - project.SetCurrentTimeline(to_timeline) - - try: - # do a work - yield - finally: - # put the original working timeline to context - project.SetCurrentTimeline(working_timeline) - - -def get_project_manager(): - from . import bmdvr - if not self.project_manager: - self.project_manager = bmdvr.GetProjectManager() - return self.project_manager - - -def get_media_storage(): - from . import bmdvr - if not self.media_storage: - self.media_storage = bmdvr.GetMediaStorage() - return self.media_storage - - -def get_current_project(): - """Get current project object. - """ - return get_project_manager().GetCurrentProject() - - -def get_current_timeline(new=False): - """Get current timeline object. - - Args: - new (bool)[optional]: [DEPRECATED] if True it will create - new timeline if none exists - - Returns: - TODO: will need to reflect future `None` - object: resolve.Timeline - """ - project = get_current_project() - timeline = project.GetCurrentTimeline() - - # return current timeline if any - if timeline: - return timeline - - # TODO: [deprecated] and will be removed in future - if new: - return get_new_timeline() - - -def get_any_timeline(): - """Get any timeline object. - - Returns: - object | None: resolve.Timeline - """ - project = get_current_project() - timeline_count = project.GetTimelineCount() - if timeline_count > 0: - return project.GetTimelineByIndex(1) - - -def get_new_timeline(timeline_name: str = None): - """Get new timeline object. - - Arguments: - timeline_name (str): New timeline name. - - Returns: - object: resolve.Timeline - """ - project = get_current_project() - media_pool = project.GetMediaPool() - new_timeline = media_pool.CreateEmptyTimeline( - timeline_name or self.pype_timeline_name) - project.SetCurrentTimeline(new_timeline) - return new_timeline - - -def create_bin(name: str, root: object = None) -> object: - """ - Create media pool's folder. - - Return folder object and if the name does not exist it will create a new. 
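# Usage sketch (only meaningful inside a running Resolve session; the bin
# names are hypothetical): a slash-separated name creates every
# intermediate bin and returns the innermost one, while a plain name
# creates a single bin under the root folder.
shot_bin = create_bin("Loader/shots/sq01/sh010")
ref_bin = create_bin("Reference")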
-    If the input name contains forward or backward slashes, it will create
-    all parents and return the last child bin object.
-
-    Args:
-        name (str): name of folder / bin, or hierarchical name "parent/name"
-        root (resolve.Folder)[optional]: root folder / bin object
-
-    Returns:
-        object: resolve.Folder
-    """
-    # get all variables
-    media_pool = get_current_project().GetMediaPool()
-    root_bin = root or media_pool.GetRootFolder()
-
-    # create hierarchy of bins in case there is slash in name
-    if "/" in name.replace("\\", "/"):
-        child_bin = None
-        for bname in name.split("/"):
-            child_bin = create_bin(bname, child_bin or root_bin)
-        if child_bin:
-            return child_bin
-    else:
-        created_bin = None
-        for subfolder in root_bin.GetSubFolderList():
-            if subfolder.GetName() in name:
-                created_bin = subfolder
-
-        if not created_bin:
-            new_folder = media_pool.AddSubFolder(root_bin, name)
-            media_pool.SetCurrentFolder(new_folder)
-        else:
-            media_pool.SetCurrentFolder(created_bin)
-
-        return media_pool.GetCurrentFolder()
-
-
-def remove_media_pool_item(media_pool_item: object) -> bool:
-    media_pool = get_current_project().GetMediaPool()
-    return media_pool.DeleteClips([media_pool_item])
-
-
-def create_media_pool_item(
-        files: list,
-        root: object = None,
-) -> object:
-    """
-    Create media pool item.
-
-    Args:
-        files (list[str]): list of absolute paths to files
-        root (resolve.Folder)[optional]: root folder / bin object
-
-    Returns:
-        object: resolve.MediaPoolItem
-    """
-    # get all variables
-    media_pool = get_current_project().GetMediaPool()
-    root_bin = root or media_pool.GetRootFolder()
-
-    # make sure files list is not empty and first available file exists
-    filepath = next((f for f in files if os.path.isfile(f)), None)
-    if not filepath:
-        raise FileNotFoundError("No file found in input files list")
-
-    # try to find the clip in the bin in case it already exists
-    existing_mpi = get_media_pool_item(filepath, root_bin)
-
-    if existing_mpi:
-        return existing_mpi
-
-    # add all data in folder to media pool
-    media_pool_items = media_pool.ImportMedia(files)
-
-    return media_pool_items.pop() if media_pool_items else False
-
-
-def get_media_pool_item(filepath, root: object = None) -> object:
-    """
-    Return a clip from the folder matching the input file path.
-
-    Args:
-        filepath (str): absolute path to a file
-        root (resolve.Folder)[optional]: root folder / bin object
-
-    Returns:
-        object: resolve.MediaPoolItem
-    """
-    media_pool = get_current_project().GetMediaPool()
-    root = root or media_pool.GetRootFolder()
-    fname = os.path.basename(filepath)
-
-    for _mpi in root.GetClipList():
-        _mpi_name = _mpi.GetClipProperty("File Name")
-        _mpi_name = get_reformated_path(_mpi_name, first=True)
-        if fname in _mpi_name:
-            return _mpi
-    return None
-
-
-def create_timeline_item(
-        media_pool_item: object,
-        timeline: object = None,
-        timeline_in: int = None,
-        source_start: int = None,
-        source_end: int = None,
-) -> object:
-    """
-    Add media pool item to current or defined timeline.
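# Sketch of the timing math done below when `timeline_in`, `source_start`
# and `source_end` are all provided (values are illustrative;
# `frames_to_timecode` is the helper imported at the top of this module):
fps = 24.0
timeline_in, source_start, source_end = 1001, 0, 48
duration = source_end - source_start
timecode_in = frames_to_timecode(timeline_in, fps)              # "00:00:41:17"
timecode_out = frames_to_timecode(timeline_in + duration, fps)  # "00:00:43:17"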
- - Args: - media_pool_item (resolve.MediaPoolItem): resolve's object - timeline (Optional[resolve.Timeline]): resolve's object - timeline_in (Optional[int]): timeline input frame (sequence frame) - source_start (Optional[int]): media source input frame (sequence frame) - source_end (Optional[int]): media source output frame (sequence frame) - - Returns: - object: resolve.TimelineItem - """ - # get all variables - project = get_current_project() - media_pool = project.GetMediaPool() - _clip_property = media_pool_item.GetClipProperty - clip_name = _clip_property("File Name") - timeline = timeline or get_current_timeline() - - # timing variables - if all([timeline_in, source_start, source_end]): - fps = timeline.GetSetting("timelineFrameRate") - duration = source_end - source_start - timecode_in = frames_to_timecode(timeline_in, fps) - timecode_out = frames_to_timecode(timeline_in + duration, fps) - else: - timecode_in = None - timecode_out = None - - # if timeline was used then switch it to current timeline - with maintain_current_timeline(timeline): - # Add input mediaPoolItem to clip data - clip_data = { - "mediaPoolItem": media_pool_item, - } - - if source_start: - clip_data["startFrame"] = source_start - if source_end: - clip_data["endFrame"] = source_end - if timecode_in: - clip_data["recordFrame"] = timeline_in - - # add to timeline - media_pool.AppendToTimeline([clip_data]) - - output_timeline_item = get_timeline_item( - media_pool_item, timeline) - - assert output_timeline_item, AssertionError(( - "Clip name '{}' was't created on the timeline: '{}' \n\n" - "Please check if correct track position is activated, \n" - "or if a clip is not already at the timeline in \n" - "position: '{}' out: '{}'. \n\n" - "Clip data: {}" - ).format( - clip_name, timeline.GetName(), timecode_in, timecode_out, clip_data - )) - return output_timeline_item - - -def get_timeline_item(media_pool_item: object, - timeline: object = None) -> object: - """ - Returns clips related to input mediaPoolItem. 
- - Args: - media_pool_item (resolve.MediaPoolItem): resolve's object - timeline (resolve.Timeline)[optional]: resolve's object - - Returns: - object: resolve.TimelineItem - """ - _clip_property = media_pool_item.GetClipProperty - clip_name = _clip_property("File Name") - output_timeline_item = None - timeline = timeline or get_current_timeline() - - with maintain_current_timeline(timeline): - # search the timeline for the added clip - - for _ti_data in get_current_timeline_items(): - _ti_clip = _ti_data["clip"]["item"] - _ti_clip_property = _ti_clip.GetMediaPoolItem().GetClipProperty - if clip_name in _ti_clip_property("File Name"): - output_timeline_item = _ti_clip - - return output_timeline_item - - -def get_video_track_names() -> list: - tracks = list() - track_type = "video" - timeline = get_current_timeline() - - # get all tracks count filtered by track type - selected_track_count = timeline.GetTrackCount(track_type) - - # loop all tracks and get items - track_index: int - for track_index in range(1, (int(selected_track_count) + 1)): - track_name = timeline.GetTrackName("video", track_index) - tracks.append(track_name) - - return tracks - - -def get_current_timeline_items( - filter: bool = False, - track_type: str = None, - track_name: str = None, - selecting_color: str = None) -> list: - """ Gets all available current timeline track items - """ - track_type = track_type or "video" - selecting_color = selecting_color or "Chocolate" - project = get_current_project() - - # get timeline anyhow - timeline = ( - get_current_timeline() or - get_any_timeline() or - get_new_timeline() - ) - selected_clips = [] - - # get all tracks count filtered by track type - selected_track_count = timeline.GetTrackCount(track_type) - - # loop all tracks and get items - _clips = {} - for track_index in range(1, (int(selected_track_count) + 1)): - _track_name = timeline.GetTrackName(track_type, track_index) - - # filter out all unmathed track names - if track_name and _track_name not in track_name: - continue - - timeline_items = timeline.GetItemListInTrack( - track_type, track_index) - _clips[track_index] = timeline_items - - _data = { - "project": project, - "timeline": timeline, - "track": { - "name": _track_name, - "index": track_index, - "type": track_type} - } - # get track item object and its color - for clip_index, ti in enumerate(_clips[track_index]): - data = _data.copy() - data["clip"] = { - "item": ti, - "index": clip_index - } - ti_color = ti.GetClipColor() - if filter and selecting_color in ti_color or not filter: - selected_clips.append(data) - return selected_clips - - -def get_pype_timeline_item_by_name(name: str) -> object: - """Get timeline item by name. - - Args: - name (str): name of timeline item - - Returns: - object: resolve.TimelineItem - """ - for _ti_data in get_current_timeline_items(): - _ti_clip = _ti_data["clip"]["item"] - tag_data = get_timeline_item_pype_tag(_ti_clip) - tag_name = tag_data.get("namespace") - if not tag_name: - continue - if tag_name in name: - return _ti_clip - return None - - -def get_timeline_item_pype_tag(timeline_item): - """ - Get openpype track item tag created by creator or loader plugin. 
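# Illustrative shape of the returned tag data: the keys come from the
# creator plugin plus the `publish` flag; all values below are made up.
tag_data = {
    "asset": "sq020sh0280",
    "subset": "plateMain",
    "family": "plate",
    "publish": True,
}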
- - Attributes: - trackItem (resolve.TimelineItem): resolve object - - Returns: - dict: openpype tag data - """ - return_tag = None - - if self.pype_marker_workflow: - return_tag = get_pype_marker(timeline_item) - else: - media_pool_item = timeline_item.GetMediaPoolItem() - - # get all tags from track item - _tags = media_pool_item.GetMetadata() - if not _tags: - return None - for key, data in _tags.items(): - # return only correct tag defined by global name - if key in self.pype_tag_name: - return_tag = json.loads(data) - - return return_tag - - -def set_timeline_item_pype_tag(timeline_item, data=None): - """ - Set openpype track item tag to input timeline_item. - - Attributes: - trackItem (resolve.TimelineItem): resolve api object - - Returns: - dict: json loaded data - """ - data = data or dict() - - # get available openpype tag if any - tag_data = get_timeline_item_pype_tag(timeline_item) - - if self.pype_marker_workflow: - # delete tag as it is not updatable - if tag_data: - delete_pype_marker(timeline_item) - - tag_data.update(data) - set_pype_marker(timeline_item, tag_data) - else: - if tag_data: - media_pool_item = timeline_item.GetMediaPoolItem() - # it not tag then create one - tag_data.update(data) - media_pool_item.SetMetadata( - self.pype_tag_name, json.dumps(tag_data)) - else: - tag_data = data - # if openpype tag available then update with input data - # add it to the input track item - timeline_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data)) - - return tag_data - - -def imprint(timeline_item, data=None): - """ - Adding `Avalon data` into a hiero track item tag. - - Also including publish attribute into tag. - - Arguments: - timeline_item (hiero.core.TrackItem): hiero track item object - data (dict): Any data which needs to be imprinted - - Examples: - data = { - 'asset': 'sq020sh0280', - 'family': 'render', - 'subset': 'subsetMain' - } - """ - data = data or {} - - set_timeline_item_pype_tag(timeline_item, data) - - # add publish attribute - set_publish_attribute(timeline_item, True) - - -def set_publish_attribute(timeline_item, value): - """ Set Publish attribute in input Tag object - - Attribute: - tag (hiero.core.Tag): a tag object - value (bool): True or False - """ - tag_data = get_timeline_item_pype_tag(timeline_item) - tag_data["publish"] = value - # set data to the publish attribute - set_timeline_item_pype_tag(timeline_item, tag_data) - - -def get_publish_attribute(timeline_item): - """ Get Publish attribute from input Tag object - - Attribute: - tag (hiero.core.Tag): a tag object - value (bool): True or False - """ - tag_data = get_timeline_item_pype_tag(timeline_item) - return tag_data["publish"] - - -def set_pype_marker(timeline_item, tag_data): - source_start = timeline_item.GetLeftOffset() - item_duration = timeline_item.GetDuration() - frame = int(source_start + (item_duration / 2)) - - # marker attributes - frameId = (frame / 10) * 10 - color = self.pype_marker_color - name = self.pype_marker_name - note = json.dumps(tag_data) - duration = (self.pype_marker_duration / 10) * 10 - - timeline_item.AddMarker( - frameId, - color, - name, - note, - duration - ) - - -def get_pype_marker(timeline_item): - timeline_item_markers = timeline_item.GetMarkers() - for marker_frame, marker in timeline_item_markers.items(): - color = marker["color"] - name = marker["name"] - if name == self.pype_marker_name and color == self.pype_marker_color: - note = marker["note"] - self.temp_marker_frame = marker_frame - return json.loads(note) - - return dict() - - -def 
delete_pype_marker(timeline_item): - timeline_item.DeleteMarkerAtFrame(self.temp_marker_frame) - self.temp_marker_frame = None - - -def create_compound_clip(clip_data, name, folder): - """ - Convert timeline object into nested timeline object - - Args: - clip_data (dict): timeline item object packed into dict - with project, timeline (sequence) - folder (resolve.MediaPool.Folder): media pool folder object, - name (str): name for compound clip - - Returns: - resolve.MediaPoolItem: media pool item with compound clip timeline(cct) - """ - # get basic objects form data - project = clip_data["project"] - timeline = clip_data["timeline"] - clip = clip_data["clip"] - - # get details of objects - clip_item = clip["item"] - - mp = project.GetMediaPool() - - # get clip attributes - clip_attributes = get_clip_attributes(clip_item) - - mp_item = clip_item.GetMediaPoolItem() - _mp_props = mp_item.GetClipProperty - - mp_first_frame = int(_mp_props("Start")) - mp_last_frame = int(_mp_props("End")) - - # initialize basic source timing for otio - ci_l_offset = clip_item.GetLeftOffset() - ci_duration = clip_item.GetDuration() - rate = float(_mp_props("FPS")) - - # source rational times - mp_in_rc = opentime.RationalTime((ci_l_offset), rate) - mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate) - - # get frame in and out for clip swapping - in_frame = opentime.to_frames(mp_in_rc) - out_frame = opentime.to_frames(mp_out_rc) - - # keep original sequence - tl_origin = timeline - - # Set current folder to input media_pool_folder: - mp.SetCurrentFolder(folder) - - # check if clip doesn't exist already: - clips = folder.GetClipList() - cct = next((c for c in clips - if c.GetName() in name), None) - - if cct: - print(f"Compound clip exists: {cct}") - else: - # Create empty timeline in current folder and give name: - cct = mp.CreateEmptyTimeline(name) - - # check if clip doesn't exist already: - clips = folder.GetClipList() - cct = next((c for c in clips - if c.GetName() in name), None) - print(f"Compound clip created: {cct}") - - with maintain_current_timeline(cct, tl_origin): - # Add input clip to the current timeline: - mp.AppendToTimeline([{ - "mediaPoolItem": mp_item, - "startFrame": mp_first_frame, - "endFrame": mp_last_frame - }]) - - # Add collected metadata and attributes to the comound clip: - if mp_item.GetMetadata(self.pype_tag_name): - clip_attributes[self.pype_tag_name] = mp_item.GetMetadata( - self.pype_tag_name)[self.pype_tag_name] - - # stringify - clip_attributes = json.dumps(clip_attributes) - - # add attributes to metadata - for k, v in mp_item.GetMetadata().items(): - cct.SetMetadata(k, v) - - # add metadata to cct - cct.SetMetadata(self.pype_tag_name, clip_attributes) - - # reset start timecode of the compound clip - cct.SetClipProperty("Start TC", _mp_props("Start TC")) - - # swap clips on timeline - swap_clips(clip_item, cct, in_frame, out_frame) - - cct.SetClipColor("Pink") - return cct - - -def swap_clips(from_clip, to_clip, to_in_frame, to_out_frame): - """ - Swapping clips on timeline in timelineItem - - It will add take and activate it to the frame range which is inputted - - Args: - from_clip (resolve.TimelineItem) - to_clip (resolve.mediaPoolItem) - to_clip_name (str): name of to_clip - to_in_frame (float): cut in frame, usually `GetLeftOffset()` - to_out_frame (float): cut out frame, usually left offset plus duration - - Returns: - bool: True if successfully replaced - - """ - _clip_prop = to_clip.GetClipProperty - to_clip_name = _clip_prop("File Name") - # add 
clip item as take to timeline - take = from_clip.AddTake( - to_clip, - float(to_in_frame), - float(to_out_frame) - ) - - if not take: - return False - - for take_index in range(1, (int(from_clip.GetTakesCount()) + 1)): - take_item = from_clip.GetTakeByIndex(take_index) - take_mp_item = take_item["mediaPoolItem"] - if to_clip_name in take_mp_item.GetName(): - from_clip.SelectTakeByIndex(take_index) - from_clip.FinalizeTake() - return True - return False - - -def _validate_tc(x): - # Validate and reformat timecode string - - if len(x) != 11: - print('Invalid timecode. Try again.') - - c = ':' - colonized = x[:2] + c + x[3:5] + c + x[6:8] + c + x[9:] - - if colonized.replace(':', '').isdigit(): - print(f"_ colonized: {colonized}") - return colonized - else: - print('Invalid timecode. Try again.') - - -def get_pype_clip_metadata(clip): - """ - Get openpype metadata created by creator plugin - - Attributes: - clip (resolve.TimelineItem): resolve's object - - Returns: - dict: hierarchy, orig clip attributes - """ - mp_item = clip.GetMediaPoolItem() - metadata = mp_item.GetMetadata() - - return metadata.get(self.pype_tag_name) - - -def get_clip_attributes(clip): - """ - Collect basic attributes from resolve timeline item - - Args: - clip (resolve.TimelineItem): timeline item object - - Returns: - dict: all collected attributres as key: values - """ - mp_item = clip.GetMediaPoolItem() - - return { - "clipIn": clip.GetStart(), - "clipOut": clip.GetEnd(), - "clipLeftOffset": clip.GetLeftOffset(), - "clipRightOffset": clip.GetRightOffset(), - "clipMarkers": clip.GetMarkers(), - "clipFlags": clip.GetFlagList(), - "sourceId": mp_item.GetMediaId(), - "sourceProperties": mp_item.GetClipProperty() - } - - -def set_project_manager_to_folder_name(folder_name): - """ - Sets context of Project manager to given folder by name. - - Searching for folder by given name from root folder to nested. - If no existing folder by name it will create one in root folder. - - Args: - folder_name (str): name of searched folder - - Returns: - bool: True if success - - Raises: - Exception: Cannot create folder in root - - """ - # initialize project manager - get_project_manager() - - set_folder = False - - # go back to root folder - if self.project_manager.GotoRootFolder(): - log.info(f"Testing existing folder: {folder_name}") - folders = _convert_resolve_list_type( - self.project_manager.GetFoldersInCurrentFolder()) - log.info(f"Testing existing folders: {folders}") - # get me first available folder object - # with the same name as in `folder_name` else return False - if next((f for f in folders if f in folder_name), False): - log.info(f"Found existing folder: {folder_name}") - set_folder = self.project_manager.OpenFolder(folder_name) - - if set_folder: - return True - - # if folder by name is not existent then create one - # go back to root folder - log.info(f"Folder `{folder_name}` not found and will be created") - if self.project_manager.GotoRootFolder(): - try: - # create folder by given name - self.project_manager.CreateFolder(folder_name) - self.project_manager.OpenFolder(folder_name) - return True - except NameError as e: - log.error((f"Folder with name `{folder_name}` cannot be created!" - f"Error: {e}")) - return False - - -def _convert_resolve_list_type(resolve_list): - """ Resolve is using indexed dictionary as list type. 
- `{1.0: 'vaule'}` - This will convert it to normal list class - """ - assert isinstance(resolve_list, dict), ( - "Input argument should be dict() type") - - return [resolve_list[i] for i in sorted(resolve_list.keys())] - - -def create_otio_time_range_from_timeline_item_data(timeline_item_data): - timeline_item = timeline_item_data["clip"]["item"] - project = timeline_item_data["project"] - timeline = timeline_item_data["timeline"] - timeline_start = timeline.GetStartFrame() - - frame_start = int(timeline_item.GetStart() - timeline_start) - frame_duration = int(timeline_item.GetDuration()) - fps = project.GetSetting("timelineFrameRate") - - return otio_export.create_otio_time_range( - frame_start, frame_duration, fps) - - -def get_otio_clip_instance_data(otio_timeline, timeline_item_data): - """ - Return otio objects for timeline, track and clip - - Args: - timeline_item_data (dict): timeline_item_data from list returned by - resolve.get_current_timeline_items() - otio_timeline (otio.schema.Timeline): otio object - - Returns: - dict: otio clip object - - """ - - timeline_item = timeline_item_data["clip"]["item"] - track_name = timeline_item_data["track"]["name"] - timeline_range = create_otio_time_range_from_timeline_item_data( - timeline_item_data) - - for otio_clip in otio_timeline.each_clip(): - track_name = otio_clip.parent().name - parent_range = otio_clip.range_in_parent() - if track_name not in track_name: - continue - if otio_clip.name not in timeline_item.GetName(): - continue - if is_overlapping_otio_ranges( - parent_range, timeline_range, strict=True): - - # add pypedata marker to otio_clip metadata - for marker in otio_clip.markers: - if self.pype_marker_name in marker.name: - otio_clip.metadata.update(marker.metadata) - return {"otioClip": otio_clip} - - return None - - -def get_reformated_path(path, padded=False, first=False): - """ - Return fixed python expression path - - Args: - path (str): path url or simple file name - - Returns: - type: string with reformated path - - Example: - get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr - - """ - first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]") - - if "[" in path: - padding_pattern = r"(\d+)(?=-)" - padding = len(re.findall(padding_pattern, path).pop()) - num_pattern = r"(\[\d+\-\d+\])" - if padded: - path = re.sub(num_pattern, f"%0{padding}d", path) - elif first: - first_frame = re.findall(first_frame_pattern, path, flags=0) - if len(first_frame) >= 1: - first_frame = first_frame[0] - path = re.sub(num_pattern, first_frame, path) - else: - path = re.sub(num_pattern, "%d", path) - return path diff --git a/openpype/hosts/resolve/api/menu.py b/openpype/hosts/resolve/api/menu.py deleted file mode 100644 index 2210178a67..0000000000 --- a/openpype/hosts/resolve/api/menu.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import sys - -from qtpy import QtWidgets, QtCore, QtGui - -from openpype.tools.utils import host_tools -from openpype.pipeline import registered_host - - -MENU_LABEL = os.environ["AVALON_LABEL"] - - -def load_stylesheet(): - path = os.path.join(os.path.dirname(__file__), "menu_style.qss") - if not os.path.exists(path): - print("Unable to load stylesheet, file not found in resources") - return "" - - with open(path, "r") as file_stream: - stylesheet = file_stream.read() - return stylesheet - - -class Spacer(QtWidgets.QWidget): - def __init__(self, height, *args, **kwargs): - super(Spacer, self).__init__(*args, **kwargs) - - self.setFixedHeight(height) - - real_spacer = QtWidgets.QWidget(self) - 
real_spacer.setObjectName("Spacer") - real_spacer.setFixedHeight(height) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(real_spacer) - - self.setLayout(layout) - - -class OpenPypeMenu(QtWidgets.QWidget): - def __init__(self, *args, **kwargs): - super(OpenPypeMenu, self).__init__(*args, **kwargs) - - self.setObjectName(f"{MENU_LABEL}Menu") - - self.setWindowFlags( - QtCore.Qt.Window - | QtCore.Qt.CustomizeWindowHint - | QtCore.Qt.WindowTitleHint - | QtCore.Qt.WindowCloseButtonHint - | QtCore.Qt.WindowStaysOnTopHint - ) - - self.setWindowTitle(f"{MENU_LABEL}") - save_current_btn = QtWidgets.QPushButton("Save current file", self) - workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self) - create_btn = QtWidgets.QPushButton("Create ...", self) - publish_btn = QtWidgets.QPushButton("Publish ...", self) - load_btn = QtWidgets.QPushButton("Load ...", self) - inventory_btn = QtWidgets.QPushButton("Manager ...", self) - subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self) - libload_btn = QtWidgets.QPushButton("Library ...", self) - experimental_btn = QtWidgets.QPushButton( - "Experimental tools ...", self - ) - # rename_btn = QtWidgets.QPushButton("Rename", self) - # set_colorspace_btn = QtWidgets.QPushButton( - # "Set colorspace from presets", self - # ) - # reset_resolution_btn = QtWidgets.QPushButton( - # "Set Resolution from presets", self - # ) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(10, 20, 10, 20) - - layout.addWidget(save_current_btn) - - layout.addWidget(Spacer(15, self)) - - layout.addWidget(workfiles_btn) - layout.addWidget(create_btn) - layout.addWidget(publish_btn) - layout.addWidget(load_btn) - layout.addWidget(inventory_btn) - layout.addWidget(subsetm_btn) - - layout.addWidget(Spacer(15, self)) - - layout.addWidget(libload_btn) - - # layout.addWidget(Spacer(15, self)) - - # layout.addWidget(rename_btn) - - # layout.addWidget(Spacer(15, self)) - - # layout.addWidget(set_colorspace_btn) - # layout.addWidget(reset_resolution_btn) - layout.addWidget(Spacer(15, self)) - layout.addWidget(experimental_btn) - - self.setLayout(layout) - - save_current_btn.clicked.connect(self.on_save_current_clicked) - save_current_btn.setShortcut(QtGui.QKeySequence.Save) - workfiles_btn.clicked.connect(self.on_workfile_clicked) - create_btn.clicked.connect(self.on_create_clicked) - publish_btn.clicked.connect(self.on_publish_clicked) - load_btn.clicked.connect(self.on_load_clicked) - inventory_btn.clicked.connect(self.on_inventory_clicked) - subsetm_btn.clicked.connect(self.on_subsetm_clicked) - libload_btn.clicked.connect(self.on_libload_clicked) - # rename_btn.clicked.connect(self.on_rename_clicked) - # set_colorspace_btn.clicked.connect(self.on_set_colorspace_clicked) - # reset_resolution_btn.clicked.connect(self.on_set_resolution_clicked) - experimental_btn.clicked.connect(self.on_experimental_clicked) - - def on_save_current_clicked(self): - host = registered_host() - current_file = host.get_current_workfile() - if not current_file: - print("Current project is not saved. 
" - "Please save once first via workfiles tool.") - host_tools.show_workfiles() - return - - print(f"Saving current file to: {current_file}") - host.save_workfile(current_file) - - def on_workfile_clicked(self): - print("Clicked Workfile") - host_tools.show_workfiles() - - def on_create_clicked(self): - print("Clicked Create") - host_tools.show_creator() - - def on_publish_clicked(self): - print("Clicked Publish") - host_tools.show_publish(parent=None) - - def on_load_clicked(self): - print("Clicked Load") - host_tools.show_loader(use_context=True) - - def on_inventory_clicked(self): - print("Clicked Inventory") - host_tools.show_scene_inventory() - - def on_subsetm_clicked(self): - print("Clicked Subset Manager") - host_tools.show_subset_manager() - - def on_libload_clicked(self): - print("Clicked Library") - host_tools.show_library_loader() - - def on_rename_clicked(self): - print("Clicked Rename") - - def on_set_colorspace_clicked(self): - print("Clicked Set Colorspace") - - def on_set_resolution_clicked(self): - print("Clicked Set Resolution") - - def on_experimental_clicked(self): - host_tools.show_experimental_tools_dialog() - - -def launch_pype_menu(): - app = QtWidgets.QApplication(sys.argv) - - pype_menu = OpenPypeMenu() - - stylesheet = load_stylesheet() - pype_menu.setStyleSheet(stylesheet) - - pype_menu.show() - - sys.exit(app.exec_()) diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py deleted file mode 100644 index 93dec300fb..0000000000 --- a/openpype/hosts/resolve/api/pipeline.py +++ /dev/null @@ -1,303 +0,0 @@ -""" -Basic avalon integration -""" -import os -import contextlib -from collections import OrderedDict - -from pyblish import api as pyblish - -from openpype.lib import Logger -from openpype.pipeline import ( - schema, - register_loader_plugin_path, - register_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.host import ( - HostBase, - IWorkfileHost, - ILoadHost -) - -from . import lib -from .utils import get_resolve_module -from .workio import ( - open_file, - save_file, - file_extensions, - has_unsaved_changes, - work_root, - current_file -) - -log = Logger.get_logger(__name__) - -HOST_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - -AVALON_CONTAINERS = ":AVALON_CONTAINERS" - - -class ResolveHost(HostBase, IWorkfileHost, ILoadHost): - name = "resolve" - - def install(self): - """Install resolve-specific functionality of avalon-core. - - This is where you install menus and register families, data - and loaders into resolve. - - It is called automatically when installing via `api.install(resolve)`. - - See the Maya equivalent for inspiration on how to implement this. 
- - """ - - log.info("openpype.hosts.resolve installed") - - pyblish.register_host(self.name) - pyblish.register_plugin_path(PUBLISH_PATH) - print("Registering DaVinci Resolve plug-ins..") - - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - # register callback for switching publishable - pyblish.register_callback("instanceToggled", - on_pyblish_instance_toggled) - - get_resolve_module() - - def open_workfile(self, filepath): - return open_file(filepath) - - def save_workfile(self, filepath=None): - return save_file(filepath) - - def work_root(self, session): - return work_root(session) - - def get_current_workfile(self): - return current_file() - - def workfile_has_unsaved_changes(self): - return has_unsaved_changes() - - def get_workfile_extensions(self): - return file_extensions() - - def get_containers(self): - return ls() - - -def containerise(timeline_item, - name, - namespace, - context, - loader=None, - data=None): - """Bundle Hiero's object into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - timeline_item (hiero.core.TrackItem): object to imprint as container - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - context (dict): Asset information - loader (str, optional): Name of node used to produce this container. - - Returns: - timeline_item (hiero.core.TrackItem): containerised object - - """ - - data_imprint = OrderedDict({ - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": str(name), - "namespace": str(namespace), - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - }) - - if data: - data_imprint.update(data) - - lib.set_timeline_item_pype_tag(timeline_item, data_imprint) - - return timeline_item - - -def ls(): - """List available containers. - - This function is used by the Container Manager in Nuke. You'll - need to implement a for-loop that then *yields* one Container at - a time. - - See the `container.json` schema for details on how it should look, - and the Maya equivalent, which is in `avalon.maya.pipeline` - """ - - # get all track items from current timeline - all_timeline_items = lib.get_current_timeline_items(filter=False) - - for timeline_item_data in all_timeline_items: - timeline_item = timeline_item_data["clip"]["item"] - container = parse_container(timeline_item) - if container: - yield container - - -def parse_container(timeline_item, validate=True): - """Return container data from timeline_item's openpype tag. - - Args: - timeline_item (hiero.core.TrackItem): A containerised track item. - validate (bool)[optional]: validating with avalon scheme - - Returns: - dict: The container schema data for input containerized track item. 
- - """ - # convert tag metadata to normal keys names - data = lib.get_timeline_item_pype_tag(timeline_item) - - if validate and data and data.get("schema"): - schema.validate(data) - - if not isinstance(data, dict): - return - - # If not all required data return the empty container - required = ['schema', 'id', 'name', - 'namespace', 'loader', 'representation'] - - if not all(key in data for key in required): - return - - container = {key: data[key] for key in required} - - container["objectName"] = timeline_item.GetName() - - # Store reference to the node object - container["_timeline_item"] = timeline_item - - return container - - -def update_container(timeline_item, data=None): - """Update container data to input timeline_item's openpype tag. - - Args: - timeline_item (hiero.core.TrackItem): A containerised track item. - data (dict)[optional]: dictionery with data to be updated - - Returns: - bool: True if container was updated correctly - - """ - data = data or dict() - - container = lib.get_timeline_item_pype_tag(timeline_item) - - for _key, _value in container.items(): - try: - container[_key] = data[_key] - except KeyError: - pass - - log.info("Updating container: `{}`".format(timeline_item)) - return bool(lib.set_timeline_item_pype_tag(timeline_item, container)) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context - - Example: - >>> with maintained_selection(): - ... node['selected'].setValue(True) - >>> print(node['selected'].value()) - False - """ - try: - # do the operation - yield - finally: - pass - - -def reset_selection(): - """Deselect all selected nodes - """ - pass - - -def on_pyblish_instance_toggled(instance, old_value, new_value): - """Toggle node passthrough states on instance toggles.""" - - log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( - instance, old_value, new_value)) - - from openpype.hosts.resolve.api import ( - set_publish_attribute - ) - - # Whether instances should be passthrough based on new value - timeline_item = instance.data["item"] - set_publish_attribute(timeline_item, new_value) - - -def remove_instance(instance): - """Remove instance marker from track item.""" - instance_id = instance.get("uuid") - - selected_timeline_items = lib.get_current_timeline_items( - filter=True, selecting_color=lib.publish_clip_color) - - found_ti = None - for timeline_item_data in selected_timeline_items: - timeline_item = timeline_item_data["clip"]["item"] - - # get openpype tag data - tag_data = lib.get_timeline_item_pype_tag(timeline_item) - _ti_id = tag_data.get("uuid") - if _ti_id == instance_id: - found_ti = timeline_item - break - - if found_ti is None: - return - - # removing instance by marker color - print(f"Removing instance: {found_ti.GetName()}") - found_ti.DeleteMarkersByColor(lib.pype_marker_color) - - -def list_instances(): - """List all created instances from current workfile.""" - listed_instances = [] - selected_timeline_items = lib.get_current_timeline_items( - filter=True, selecting_color=lib.publish_clip_color) - - for timeline_item_data in selected_timeline_items: - timeline_item = timeline_item_data["clip"]["item"] - ti_name = timeline_item.GetName().split(".")[0] - - # get openpype tag data - tag_data = lib.get_timeline_item_pype_tag(timeline_item) - - if tag_data: - asset = tag_data.get("asset") - subset = tag_data.get("subset") - tag_data["label"] = f"{ti_name} [{asset}-{subset}]" - listed_instances.append(tag_data) - - return listed_instances diff --git 
a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py deleted file mode 100644 index a00933405f..0000000000 --- a/openpype/hosts/resolve/api/plugin.py +++ /dev/null @@ -1,901 +0,0 @@ -import re -import uuid -import copy - -import qargparse -from qtpy import QtWidgets, QtCore - -from openpype.settings import get_current_project_settings -from openpype.pipeline import ( - LegacyCreator, - LoaderPlugin, - Anatomy -) - -from . import lib -from .menu import load_stylesheet - - -class CreatorWidget(QtWidgets.QDialog): - - # output items - items = {} - - def __init__(self, name, info, ui_inputs, parent=None): - super(CreatorWidget, self).__init__(parent) - - self.setObjectName(name) - - self.setWindowFlags( - QtCore.Qt.Window - | QtCore.Qt.CustomizeWindowHint - | QtCore.Qt.WindowTitleHint - | QtCore.Qt.WindowCloseButtonHint - | QtCore.Qt.WindowStaysOnTopHint - ) - self.setWindowTitle(name or "OpenPype Creator Input") - self.resize(500, 700) - - # Where inputs and labels are set - self.content_widget = [QtWidgets.QWidget(self)] - top_layout = QtWidgets.QFormLayout(self.content_widget[0]) - top_layout.setObjectName("ContentLayout") - top_layout.addWidget(Spacer(5, self)) - - # first add widget tag line - top_layout.addWidget(QtWidgets.QLabel(info)) - - # main dynamic layout - self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAsNeeded) - self.scroll_area.setVerticalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOn) - self.scroll_area.setHorizontalScrollBarPolicy( - QtCore.Qt.ScrollBarAlwaysOff) - self.scroll_area.setWidgetResizable(True) - - self.content_widget.append(self.scroll_area) - - scroll_widget = QtWidgets.QWidget(self) - in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) - self.content_layout = [in_scroll_area] - - # add preset data into input widget layout - self.items = self.populate_widgets(ui_inputs) - self.scroll_area.setWidget(scroll_widget) - - # Confirmation buttons - btns_widget = QtWidgets.QWidget(self) - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - - cancel_btn = QtWidgets.QPushButton("Cancel") - btns_layout.addWidget(cancel_btn) - - ok_btn = QtWidgets.QPushButton("Ok") - btns_layout.addWidget(ok_btn) - - # Main layout of the dialog - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(10, 10, 10, 10) - main_layout.setSpacing(0) - - # adding content widget - for w in self.content_widget: - main_layout.addWidget(w) - - main_layout.addWidget(btns_widget) - - ok_btn.clicked.connect(self._on_ok_clicked) - cancel_btn.clicked.connect(self._on_cancel_clicked) - - stylesheet = load_stylesheet() - self.setStyleSheet(stylesheet) - - def _on_ok_clicked(self): - self.result = self.value(self.items) - self.close() - - def _on_cancel_clicked(self): - self.result = None - self.close() - - def value(self, data, new_data=None): - new_data = new_data or {} - for k, v in data.items(): - new_data[k] = { - "target": None, - "value": None - } - if v["type"] == "dict": - new_data[k]["target"] = v["target"] - new_data[k]["value"] = self.value(v["value"]) - if v["type"] == "section": - new_data.pop(k) - new_data = self.value(v["value"], new_data) - elif getattr(v["value"], "currentText", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].currentText() - elif getattr(v["value"], "isChecked", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].isChecked() - elif getattr(v["value"], "value", None): - 
new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].value() - elif getattr(v["value"], "text", None): - new_data[k]["target"] = v["target"] - new_data[k]["value"] = v["value"].text() - - return new_data - - def camel_case_split(self, text): - matches = re.finditer( - '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) - return " ".join([str(m.group(0)).capitalize() for m in matches]) - - def create_row(self, layout, type, text, **kwargs): - # get type attribute from qwidgets - attr = getattr(QtWidgets, type) - - # convert label text to normal capitalized text with spaces - label_text = self.camel_case_split(text) - - # assign the new text to label widget - label = QtWidgets.QLabel(label_text) - label.setObjectName("LineLabel") - - # create attribute name text strip of spaces - attr_name = text.replace(" ", "") - - # create attribute and assign default values - setattr( - self, - attr_name, - attr(parent=self)) - - # assign the created attribute to variable - item = getattr(self, attr_name) - for func, val in kwargs.items(): - if getattr(item, func): - func_attr = getattr(item, func) - if isinstance(val, tuple): - func_attr(*val) - else: - func_attr(val) - - # add to layout - layout.addRow(label, item) - - return item - - def populate_widgets(self, data, content_layout=None): - """ - Populate widget from input dict. - - Each plugin has its own set of widget rows defined in dictionary - each row values should have following keys: `type`, `target`, - `label`, `order`, `value` and optionally also `toolTip`. - - Args: - data (dict): widget rows or organized groups defined - by types `dict` or `section` - content_layout (QtWidgets.QFormLayout)[optional]: used when nesting - - Returns: - dict: redefined data dict updated with created widgets - - """ - - content_layout = content_layout or self.content_layout[-1] - # fix order of process by defined order value - ordered_keys = list(data.keys()) - for k, v in data.items(): - try: - # try removing a key from index which should - # be filled with new - ordered_keys.pop(v["order"]) - except IndexError: - pass - # add key into correct order - ordered_keys.insert(v["order"], k) - - # process ordered - for k in ordered_keys: - v = data[k] - tool_tip = v.get("toolTip", "") - if v["type"] == "dict": - # adding spacer between sections - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - if v["type"] == "section": - # adding spacer between sections - self.content_layout.append(QtWidgets.QWidget(self)) - content_layout.addWidget(self.content_layout[-1]) - self.content_layout[-1].setObjectName("sectionHeadline") - - headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) - headline.addWidget(Spacer(20, self)) - headline.addWidget(QtWidgets.QLabel(v["label"])) - - # adding nested layout with label - 
self.content_layout.append(QtWidgets.QWidget(self)) - self.content_layout[-1].setObjectName("sectionContent") - - nested_content_layout = QtWidgets.QFormLayout( - self.content_layout[-1]) - nested_content_layout.setObjectName("NestedContentLayout") - content_layout.addWidget(self.content_layout[-1]) - - # add nested key as label - data[k]["value"] = self.populate_widgets( - v["value"], nested_content_layout) - - elif v["type"] == "QLineEdit": - data[k]["value"] = self.create_row( - content_layout, "QLineEdit", v["label"], - setText=v["value"], setToolTip=tool_tip) - elif v["type"] == "QComboBox": - data[k]["value"] = self.create_row( - content_layout, "QComboBox", v["label"], - addItems=v["value"], setToolTip=tool_tip) - elif v["type"] == "QCheckBox": - data[k]["value"] = self.create_row( - content_layout, "QCheckBox", v["label"], - setChecked=v["value"], setToolTip=tool_tip) - elif v["type"] == "QSpinBox": - data[k]["value"] = self.create_row( - content_layout, "QSpinBox", v["label"], - setRange=(0, 99999), - setValue=v["value"], - setToolTip=tool_tip) - return data - - -class Spacer(QtWidgets.QWidget): - def __init__(self, height, *args, **kwargs): - super(self.__class__, self).__init__(*args, **kwargs) - - self.setFixedHeight(height) - - real_spacer = QtWidgets.QWidget(self) - real_spacer.setObjectName("Spacer") - real_spacer.setFixedHeight(height) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(real_spacer) - - self.setLayout(layout) - - -class ClipLoader: - - active_bin = None - data = {} - - def __init__(self, loader_obj, context, **options): - """ Initialize object - - Arguments: - loader_obj (openpype.pipeline.load.LoaderPlugin): plugin object - context (dict): loader plugin context - options (dict)[optional]: possible keys: - projectBinPath: "path/to/binItem" - - """ - self.__dict__.update(loader_obj.__dict__) - self.context = context - self.active_project = lib.get_current_project() - - # try to get value from options or evaluate key value for `handles` - self.with_handles = options.get("handles") is True - - # try to get value from options or evaluate key value for `load_to` - self.new_timeline = ( - options.get("newTimeline") or - options.get("load_to") == "New timeline" - ) - # try to get value from options or evaluate key value for `load_how` - self.sequential_load = ( - options.get("sequentially") or - options.get("load_how") == "Sequentially in order" - ) - - assert self._populate_data(), str( - "Cannot Load selected data, look into database " - "or call your supervisor") - - # inject asset data to representation dict - self._get_asset_data() - - # add active components to class - if self.new_timeline: - loader_cls = loader_obj.__class__ - if loader_cls.timeline: - # if multiselection is set then use options sequence - self.active_timeline = loader_cls.timeline - else: - # create new sequence - self.active_timeline = lib.get_new_timeline( - "{}_{}".format( - self.data["timeline_basename"], - str(uuid.uuid4())[:8] - ) - ) - loader_cls.timeline = self.active_timeline - - else: - self.active_timeline = lib.get_current_timeline() - - def _populate_data(self): - """ Gets context and convert it to self.data - data structure: - { - "name": "assetName_subsetName_representationName" - "binPath": "projectBinPath", - } - """ - # create name - representation = self.context["representation"] - representation_context = representation["context"] - asset = str(representation_context["asset"]) - subset = 
str(representation_context["subset"]) - representation_name = str(representation_context["representation"]) - self.data["clip_name"] = "_".join([ - asset, - subset, - representation_name - ]) - self.data["versionData"] = self.context["version"]["data"] - - self.data["timeline_basename"] = "timeline_{}_{}".format( - subset, representation_name) - - # solve project bin structure path - hierarchy = str("/".join(( - "Loader", - representation_context["hierarchy"].replace("\\", "/"), - asset - ))) - - self.data["binPath"] = hierarchy - - return True - - def _get_asset_data(self): - """ Get all available asset data - - joint `data` key with asset.data dict into the representation - - """ - - self.data["assetData"] = copy.deepcopy(self.context["asset"]["data"]) - - def load(self, files): - """Load clip into timeline - - Arguments: - files (list[str]): list of files to load into timeline - """ - # create project bin for the media to be imported into - self.active_bin = lib.create_bin(self.data["binPath"]) - - # create mediaItem in active project bin - # create clip media - media_pool_item = lib.create_media_pool_item( - files, - self.active_bin - ) - _clip_property = media_pool_item.GetClipProperty - source_in = int(_clip_property("Start")) - source_out = int(_clip_property("End")) - source_duration = int(_clip_property("Frames")) - - if not self.with_handles: - # Load file without the handles of the source media - # We remove the handles from the source in and source out - # so that the handles are excluded in the timeline - handle_start = 0 - handle_end = 0 - - # get version data frame data from db - version_data = self.data["versionData"] - frame_start = version_data.get("frameStart") - frame_end = version_data.get("frameEnd") - - # The version data usually stored the frame range + handles of the - # media however certain representations may be shorter because they - # exclude those handles intentionally. Unfortunately the - # representation does not store that in the database currently; - # so we should compensate for those cases. 
If the media is shorter - # than the frame range specified in the database we assume it is - # without handles and thus we do not need to remove the handles - # from source and out - if frame_start is not None and frame_end is not None: - # Version has frame range data, so we can compare media length - handle_start = version_data.get("handleStart", 0) - handle_end = version_data.get("handleEnd", 0) - frame_start_handle = frame_start - handle_start - frame_end_handle = frame_end + handle_end - database_frame_duration = int( - frame_end_handle - frame_start_handle + 1 - ) - if source_duration >= database_frame_duration: - source_in += handle_start - source_out -= handle_end - - # get timeline in - timeline_start = self.active_timeline.GetStartFrame() - if self.sequential_load: - # set timeline start frame - timeline_in = int(timeline_start) - else: - # set timeline start frame + original clip in frame - timeline_in = int( - timeline_start + self.data["assetData"]["clipIn"]) - - # make track item from source in bin as item - timeline_item = lib.create_timeline_item( - media_pool_item, - self.active_timeline, - timeline_in, - source_in, - source_out, - ) - - print("Loading clips: `{}`".format(self.data["clip_name"])) - return timeline_item - - def update(self, timeline_item, files): - # create project bin for the media to be imported into - self.active_bin = lib.create_bin(self.data["binPath"]) - - # create mediaItem in active project bin - # create clip media - media_pool_item = lib.create_media_pool_item( - files, - self.active_bin - ) - _clip_property = media_pool_item.GetClipProperty - - source_in = int(_clip_property("Start")) - source_out = int(_clip_property("End")) - - lib.swap_clips( - timeline_item, - media_pool_item, - source_in, - source_out - ) - - print("Loading clips: `{}`".format(self.data["clip_name"])) - return timeline_item - - -class TimelineItemLoader(LoaderPlugin): - """A basic SequenceLoader for Resolve - - This implements the basic behavior for a loader to inherit from: it - containerizes the reference and implements the `remove` and - `update` logic. - - """ - - options = [ - qargparse.Boolean( - "handles", - label="Include handles", - default=0, - help="Load with handles or without?" - ), - qargparse.Choice( - "load_to", - label="Where to load clips", - items=[ - "Current timeline", - "New timeline" - ], - default=0, - help="Where do you want clips to be loaded?" - ), - qargparse.Choice( - "load_how", - label="How to load clips", - items=[ - "Original timing", - "Sequentially in order" - ], - default="Original timing", - help="Would you like to place it at original timing?"
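The handle compensation in `load()` above is plain arithmetic and can be checked in isolation. A minimal sketch with invented numbers; the field names follow the version-data keys used above:

def compensate_handles(source_duration, frame_start, frame_end,
                       handle_start, handle_end, source_in, source_out):
    # Media long enough to contain the database range plus handles is
    # assumed to include them, so they are trimmed off the source range;
    # shorter media is assumed to be handle-less already.
    expected = (frame_end + handle_end) - (frame_start - handle_start) + 1
    if source_duration >= expected:
        return source_in + handle_start, source_out - handle_end
    return source_in, source_out

# 120 frames of media, db range 1001-1100 with 10-frame handles on each side:
print(compensate_handles(120, 1001, 1100, 10, 10, 0, 119))  # (10, 109)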
- ) - ] - - def load( - self, - context, - name=None, - namespace=None, - options=None - ): - pass - - def update(self, container, representation): - """Update an existing `container` - """ - pass - - def remove(self, container): - """Remove an existing `container` - """ - pass - - -class Creator(LegacyCreator): - """Creator class wrapper - """ - marker_color = "Purple" - - def __init__(self, *args, **kwargs): - super(Creator, self).__init__(*args, **kwargs) - - resolve_p_settings = get_current_project_settings().get("resolve") - self.presets = {} - if resolve_p_settings: - self.presets = resolve_p_settings["create"].get( - self.__class__.__name__, {}) - - # adding basic current context resolve objects - self.project = lib.get_current_project() - self.timeline = lib.get_current_timeline() - - if (self.options or {}).get("useSelection"): - self.selected = lib.get_current_timeline_items(filter=True) - else: - self.selected = lib.get_current_timeline_items(filter=False) - - self.widget = CreatorWidget - - -class PublishClip: - """ - Convert a track item to publishable instance - - Args: - timeline_item (hiero.core.TrackItem): hiero track item object - kwargs (optional): additional data needed for rename=True (presets) - - Returns: - hiero.core.TrackItem: hiero track item object with openpype tag - """ - vertical_clip_match = {} - tag_data = {} - types = { - "shot": "shot", - "folder": "folder", - "episode": "episode", - "sequence": "sequence", - "track": "sequence", - } - - # parents search pattern - parents_search_pattern = r"\{([a-z]*?)\}" - - # default templates for non-ui use - rename_default = False - hierarchy_default = "{_folder_}/{_sequence_}/{_track_}" - clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" - subset_name_default = "" - review_track_default = "< none >" - subset_family_default = "plate" - count_from_default = 10 - count_steps_default = 10 - vertical_sync_default = False - driving_layer_default = "" - - def __init__(self, cls, timeline_item_data, **kwargs): - # populate input cls attribute onto self.[attr] - self.__dict__.update(cls.__dict__) - - # get main parent objects - self.timeline_item_data = timeline_item_data - self.timeline_item = timeline_item_data["clip"]["item"] - timeline_name = timeline_item_data["timeline"].GetName() - self.timeline_name = str(timeline_name).replace(" ", "_") - - # track item (clip) main attributes - self.ti_name = self.timeline_item.GetName() - self.ti_index = int(timeline_item_data["clip"]["index"]) - - # get track name and index - track_name = timeline_item_data["track"]["name"] - self.track_name = str(track_name).replace(" ", "_") - self.track_index = int(timeline_item_data["track"]["index"]) - - # adding tag.family into tag - if kwargs.get("avalon"): - self.tag_data.update(kwargs["avalon"]) - - # adding ui inputs if any - self.ui_inputs = kwargs.get("ui_inputs", {}) - - # adding media pool folder if any - self.mp_folder = kwargs.get("mp_folder") - - # populate default data before we get other attributes - self._populate_timeline_item_default_data() - - # use all populated default data to create all important attributes - self._populate_attributes() - - # create parents with correct types - self._create_parents() - - def convert(self): - # solve track item data and add them to tag data - self._convert_to_tag_data() - - # if track name is in review track name and also if driving track name - # is not in review track name: skip tag creation - if (self.track_name in self.review_layer) and ( - self.driving_layer not in 
self.review_layer): - return - - # deal with clip name - new_name = self.tag_data.pop("newClipName") - - if self.rename: - self.tag_data["asset_name"] = new_name - else: - self.tag_data["asset_name"] = self.ti_name - - # AYON unique identifier - folder_path = "/{}/{}".format( - self.tag_data["hierarchy"], - self.tag_data["asset_name"] - ) - self.tag_data["folder_path"] = folder_path - - # create new name for track item - if not lib.pype_marker_workflow: - # create compound clip workflow - lib.create_compound_clip( - self.timeline_item_data, - self.tag_data["asset_name"], - self.mp_folder - ) - - # add timeline_item_data selection to tag - self.tag_data.update({ - "track_data": self.timeline_item_data["track"] - }) - - # create openpype tag on timeline_item and add data - lib.imprint(self.timeline_item, self.tag_data) - - return self.timeline_item - - def _populate_timeline_item_default_data(self): - """ Populate default formatting data from track item. """ - - self.timeline_item_default_data = { - "_folder_": "shots", - "_sequence_": self.timeline_name, - "_track_": self.track_name, - "_clip_": self.ti_name, - "_trackIndex_": self.track_index, - "_clipIndex_": self.ti_index - } - - def _populate_attributes(self): - """ Populate main object attributes. """ - # track item frame range and parent track name for vertical sync check - self.clip_in = int(self.timeline_item.GetStart()) - self.clip_out = int(self.timeline_item.GetEnd()) - - # define ui inputs if non gui mode was used - self.shot_num = self.ti_index - - # ui_inputs data or default values if gui was not used - self.rename = self.ui_inputs.get( - "clipRename", {}).get("value") or self.rename_default - self.clip_name = self.ui_inputs.get( - "clipName", {}).get("value") or self.clip_name_default - self.hierarchy = self.ui_inputs.get( - "hierarchy", {}).get("value") or self.hierarchy_default - self.hierarchy_data = self.ui_inputs.get( - "hierarchyData", {}).get("value") or \ - self.timeline_item_default_data.copy() - self.count_from = self.ui_inputs.get( - "countFrom", {}).get("value") or self.count_from_default - self.count_steps = self.ui_inputs.get( - "countSteps", {}).get("value") or self.count_steps_default - self.subset_name = self.ui_inputs.get( - "subsetName", {}).get("value") or self.subset_name_default - self.subset_family = self.ui_inputs.get( - "subsetFamily", {}).get("value") or self.subset_family_default - self.vertical_sync = self.ui_inputs.get( - "vSyncOn", {}).get("value") or self.vertical_sync_default - self.driving_layer = self.ui_inputs.get( - "vSyncTrack", {}).get("value") or self.driving_layer_default - self.review_track = self.ui_inputs.get( - "reviewTrack", {}).get("value") or self.review_track_default - - # build subset name from layer name - if self.subset_name == "": - self.subset_name = self.track_name - - # create subset for publishing - self.subset = self.subset_family + self.subset_name.capitalize() - - def _replace_hash_to_expression(self, name, text): - """ Replace hash with number in correct padding. """ - _spl = text.split("#") - _len = (len(_spl) - 1) - _repl = "{{{0}:0>{1}}}".format(name, _len) - new_text = text.replace(("#" * _len), _repl) - return new_text - - def _convert_to_tag_data(self): - """ Convert internal data to tag data. 
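`_replace_hash_to_expression` above turns hash padding tokens into Python format expressions; a standalone sketch of the same transformation (hypothetical helper name, same logic):

def replace_hash_to_expression(name, text):
    # "sh###" with name "shot" becomes "sh{shot:0>3}".
    padding = len(text.split("#")) - 1
    if not padding:
        return text
    replacement = "{{{0}:0>{1}}}".format(name, padding)
    return text.replace("#" * padding, replacement)

template = replace_hash_to_expression("shot", "sh###")
print(template)                  # sh{shot:0>3}
print(template.format(shot=10))  # sh010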
- - Populating the tag data into internal variable self.tag_data - """ - # define vertical sync attributes - hero_track = True - self.review_layer = "" - if self.vertical_sync: - # check if track name is not in driving layer - if self.track_name not in self.driving_layer: - # if it is not then define vertical sync as None - hero_track = False - - # increasing steps by index of rename iteration - self.count_steps *= self.rename_index - - hierarchy_formatting_data = {} - _data = self.timeline_item_default_data.copy() - if self.ui_inputs: - # adding tag metadata from ui - for _k, _v in self.ui_inputs.items(): - if _v["target"] == "tag": - self.tag_data[_k] = _v["value"] - - # driving layer is set as positive match - if hero_track or self.vertical_sync: - # mark review layer - if self.review_track and ( - self.review_track not in self.review_track_default): - # if review layer is defined and not the same as default - self.review_layer = self.review_track - # shot num calculate - if self.rename_index == 0: - self.shot_num = self.count_from - else: - self.shot_num = self.count_from + self.count_steps - - # clip name sequence number - _data.update({"shot": self.shot_num}) - - # solve # in text to pythonic expression - for _k, _v in self.hierarchy_data.items(): - if "#" not in _v["value"]: - continue - self.hierarchy_data[ - _k]["value"] = self._replace_hash_to_expression( - _k, _v["value"]) - - # fill up pythonic expressions in hierarchy data - for k, _v in self.hierarchy_data.items(): - hierarchy_formatting_data[k] = _v["value"].format(**_data) - else: - # if no gui mode then just pass default data - hierarchy_formatting_data = self.hierarchy_data - - tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formatting_data - ) - - tag_hierarchy_data.update({"heroTrack": True}) - if hero_track and self.vertical_sync: - self.vertical_clip_match.update({ - (self.clip_in, self.clip_out): tag_hierarchy_data - }) - - if not hero_track and self.vertical_sync: - # driving layer is set as negative match - for (_in, _out), hero_data in self.vertical_clip_match.items(): - hero_data.update({"heroTrack": False}) - if _in == self.clip_in and _out == self.clip_out: - data_subset = hero_data["subset"] - # add track index in case of duplicate names in hero data - if self.subset in data_subset: - hero_data["subset"] = self.subset + str( - self.track_index) - # in case track name and subset name are the same - if self.subset_name == self.track_name: - hero_data["subset"] = self.subset - # assign data to return hierarchy data to tag - tag_hierarchy_data = hero_data - - # add data to return data dict - self.tag_data.update(tag_hierarchy_data) - - # add uuid to tag data - self.tag_data["uuid"] = str(uuid.uuid4()) - - # add review track only to hero track - if hero_track and self.review_layer: - self.tag_data.update({"reviewTrack": self.review_layer}) - else: - self.tag_data.update({"reviewTrack": None}) - - def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): - """ Solve tag data from hierarchy data and templates.
""" - # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) - clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) - - return { - "newClipName": clip_name_filled, - "hierarchy": hierarchy_filled, - "parents": self.parents, - "hierarchyData": hierarchy_formatting_data, - "subset": self.subset, - "family": self.subset_family - } - - def _convert_to_entity(self, key): - """ Converting input key to key with type. """ - # convert to entity type - entity_type = self.types.get(key) - - assert entity_type, "Missing entity type for `{}`".format( - key - ) - - return { - "entity_type": entity_type, - "entity_name": self.hierarchy_data[key]["value"].format( - **self.timeline_item_default_data - ) - } - - def _create_parents(self): - """ Create parents and return it in list. """ - self.parents = [] - - pattern = re.compile(self.parents_search_pattern) - par_split = [pattern.findall(t).pop() - for t in self.hierarchy.split("/")] - - for key in par_split: - parent = self._convert_to_entity(key) - self.parents.append(parent) - - -def get_representation_files(representation): - anatomy = Anatomy() - files = [] - for file_data in representation["files"]: - path = anatomy.fill_root(file_data["path"]) - files.append(path) - return files diff --git a/openpype/hosts/resolve/api/utils.py b/openpype/hosts/resolve/api/utils.py deleted file mode 100644 index 851851a3b3..0000000000 --- a/openpype/hosts/resolve/api/utils.py +++ /dev/null @@ -1,83 +0,0 @@ -#! python3 - -""" -Resolve's tools for setting environment -""" - -import os -import sys - -from openpype.lib import Logger - -log = Logger.get_logger(__name__) - - -def get_resolve_module(): - from openpype.hosts.resolve import api - # dont run if already loaded - if api.bmdvr: - log.info(("resolve module is assigned to " - f"`openpype.hosts.resolve.api.bmdvr`: {api.bmdvr}")) - return api.bmdvr - try: - """ - The PYTHONPATH needs to be set correctly for this import - statement to work. An alternative is to import the - DaVinciResolveScript by specifying absolute path - (see ExceptionHandler logic) - """ - import DaVinciResolveScript as bmd - except ImportError: - if sys.platform.startswith("darwin"): - expected_path = ("/Library/Application Support/Blackmagic Design" - "/DaVinci Resolve/Developer/Scripting/Modules") - elif sys.platform.startswith("win") \ - or sys.platform.startswith("cygwin"): - expected_path = os.path.normpath( - os.getenv('PROGRAMDATA') + ( - "/Blackmagic Design/DaVinci Resolve/Support/Developer" - "/Scripting/Modules" - ) - ) - elif sys.platform.startswith("linux"): - expected_path = "/opt/resolve/libs/Fusion/Modules" - else: - raise NotImplementedError( - "Unsupported platform: {}".format(sys.platform) - ) - - # check if the default path has it... - print(("Unable to find module DaVinciResolveScript from " - "$PYTHONPATH - trying default locations")) - - module_path = os.path.normpath( - os.path.join( - expected_path, - "DaVinciResolveScript.py" - ) - ) - - try: - import imp - bmd = imp.load_source('DaVinciResolveScript', module_path) - except ImportError: - # No fallbacks ... 
report error: - log.error( - ("Unable to find module DaVinciResolveScript - please " - "ensure that the module DaVinciResolveScript is " - "discoverable by python") - ) - log.error( - ("For a default DaVinci Resolve installation, the " - f"module is expected to be located in: {expected_path}") - ) - sys.exit() - # assign global var and return - bmdvr = bmd.scriptapp("Resolve") - bmdvf = bmd.scriptapp("Fusion") - api.bmdvr = bmdvr - api.bmdvf = bmdvf - log.info(("Assigning resolve module to " - f"`openpype.hosts.resolve.api.bmdvr`: {api.bmdvr}")) - log.info(("Assigning resolve module to " - f"`openpype.hosts.resolve.api.bmdvf`: {api.bmdvf}")) diff --git a/openpype/hosts/resolve/api/workio.py b/openpype/hosts/resolve/api/workio.py deleted file mode 100644 index 9101d6fce6..0000000000 --- a/openpype/hosts/resolve/api/workio.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Host API required Work Files tool""" - -import os -from openpype.lib import Logger -from .lib import ( - get_project_manager, - get_current_project -) - - -log = Logger.get_logger(__name__) - - -def file_extensions(): - return [".drp"] - - -def has_unsaved_changes(): - get_project_manager().SaveProject() - return False - - -def save_file(filepath): - pm = get_project_manager() - file = os.path.basename(filepath) - fname, _ = os.path.splitext(file) - project = get_current_project() - name = project.GetName() - - response = False - if name == "Untitled Project": - response = pm.CreateProject(fname) - log.info("New project created: {}".format(response)) - pm.SaveProject() - elif name != fname: - response = project.SetName(fname) - log.info("Project renamed: {}".format(response)) - - exported = pm.ExportProject(fname, filepath) - log.info("Project exported: {}".format(exported)) - - -def open_file(filepath): - """ - Loading project - """ - - from . import bmdvr - - pm = get_project_manager() - page = bmdvr.GetCurrentPage() - if page is not None: - # Save current project only if Resolve has an active page, otherwise - # we consider Resolve being in a pre-launch state (no open UI yet) - project = pm.GetCurrentProject() - print(f"Saving current project: {project}") - pm.SaveProject() - - file = os.path.basename(filepath) - fname, _ = os.path.splitext(file) - - try: - # load project from input path - project = pm.LoadProject(fname) - log.info(f"Project {project.GetName()} opened...") - - except AttributeError: - log.warning((f"Project with name `{fname}` does not exist! 
It will " - f"be imported from {filepath} and then loaded...")) - if pm.ImportProject(filepath): - # load project from input path - project = pm.LoadProject(fname) - log.info(f"Project imported/loaded {project.GetName()}...") - return True - return False - return True - - -def current_file(): - pm = get_project_manager() - file_ext = file_extensions()[0] - workdir_path = os.getenv("AVALON_WORKDIR") - project = pm.GetCurrentProject() - project_name = project.GetName() - file_name = project_name + file_ext - - # create current file path - current_file_path = os.path.join(workdir_path, file_name) - - # return current file path if it exists - if os.path.exists(current_file_path): - return os.path.normpath(current_file_path) - - -def work_root(session): - return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") diff --git a/openpype/hosts/resolve/hooks/pre_resolve_startup.py b/openpype/hosts/resolve/hooks/pre_resolve_startup.py deleted file mode 100644 index 6dbfd09a37..0000000000 --- a/openpype/hosts/resolve/hooks/pre_resolve_startup.py +++ /dev/null @@ -1,25 +0,0 @@ -import os - -from openpype.lib.applications import PreLaunchHook, LaunchTypes -import openpype.hosts.resolve - - -class PreLaunchResolveStartup(PreLaunchHook): - """Special hook to configure startup script. - - """ - order = 11 - app_groups = {"resolve"} - launch_types = {LaunchTypes.local} - - def execute(self): - # Set the openpype prelaunch startup script path for easy access - # in the LUA .scriptlib code - op_resolve_root = os.path.dirname(openpype.hosts.resolve.__file__) - script_path = os.path.join(op_resolve_root, "startup.py") - key = "OPENPYPE_RESOLVE_STARTUP_SCRIPT" - self.launch_context.env[key] = script_path - - self.log.info( - f"Setting OPENPYPE_RESOLVE_STARTUP_SCRIPT to: {script_path}" - ) diff --git a/openpype/hosts/resolve/plugins/create/create_shot_clip.py b/openpype/hosts/resolve/plugins/create/create_shot_clip.py deleted file mode 100644 index 4b14f2493f..0000000000 --- a/openpype/hosts/resolve/plugins/create/create_shot_clip.py +++ /dev/null @@ -1,272 +0,0 @@ -# from pprint import pformat -from openpype.hosts.resolve.api import plugin, lib -from openpype.hosts.resolve.api.lib import ( - get_video_track_names, - create_bin, -) - - -class CreateShotClip(plugin.Creator): - """Publishable clip""" - - label = "Create Publishable Clip" - family = "clip" - icon = "film" - defaults = ["Main"] - - gui_tracks = get_video_track_names() - gui_name = "OpenPype publish attributes creator" - gui_info = "Define sequential rename and fill hierarchy data." 
- gui_inputs = { - "renameHierarchy": { - "type": "section", - "label": "Shot Hierarchy And Rename Settings", - "target": "ui", - "order": 0, - "value": { - "hierarchy": { - "value": "{folder}/{sequence}", - "type": "QLineEdit", - "label": "Shot Parent Hierarchy", - "target": "tag", - "toolTip": "Parent folder for shot root folder; template filled with `Hierarchy Data` section", # noqa - "order": 0}, - "clipRename": { - "value": False, - "type": "QCheckBox", - "label": "Rename clips", - "target": "ui", - "toolTip": "Renaming selected clips on the fly", # noqa - "order": 1}, - "clipName": { - "value": "{sequence}{shot}", - "type": "QLineEdit", - "label": "Clip Name Template", - "target": "ui", - "toolTip": "template for creating shot names used for renaming (use rename: on)", # noqa - "order": 2}, - "countFrom": { - "value": 10, - "type": "QSpinBox", - "label": "Count sequence from", - "target": "ui", - "toolTip": "Set the number the sequence counting starts from", # noqa - "order": 3}, - "countSteps": { - "value": 10, - "type": "QSpinBox", - "label": "Stepping number", - "target": "ui", - "toolTip": "What number is added with every new step", # noqa - "order": 4}, - } - }, - "hierarchyData": { - "type": "dict", - "label": "Shot Template Keywords", - "target": "tag", - "order": 1, - "value": { - "folder": { - "value": "shots", - "type": "QLineEdit", - "label": "{folder}", - "target": "tag", - "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 0}, - "episode": { - "value": "ep01", - "type": "QLineEdit", - "label": "{episode}", - "target": "tag", - "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 1}, - "sequence": { - "value": "sq01", - "type": "QLineEdit", - "label": "{sequence}", - "target": "tag", - "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 2}, - "track": { - "value": "{_track_}", - "type": "QLineEdit", - "label": "{track}", - "target": "tag", - "toolTip": "Name of parent track layer.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 3}, - "shot": { - "value": "sh###", - "type": "QLineEdit", - "label": "{shot}", - "target": "tag", - "toolTip": "Name of shot. `#` is converted to padded number.
\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 4} - } - }, - "verticalSync": { - "type": "section", - "label": "Vertical Synchronization Of Attributes", - "target": "ui", - "order": 2, - "value": { - "vSyncOn": { - "value": True, - "type": "QCheckBox", - "label": "Enable Vertical Sync", - "target": "ui", - "toolTip": "Switch on if you want clips above each other to share their attributes", # noqa - "order": 0}, - "vSyncTrack": { - "value": gui_tracks, # noqa - "type": "QComboBox", - "label": "Hero track", - "target": "ui", - "toolTip": "Select driving track name which should master all the others", # noqa - "order": 1 - } - } - }, - "publishSettings": { - "type": "section", - "label": "Publish Settings", - "target": "ui", - "order": 3, - "value": { - "subsetName": { - "value": ["", "main", "bg", "fg", "bg", - "animatic"], - "type": "QComboBox", - "label": "Subset Name", - "target": "ui", - "toolTip": "choose subset name pattern; if the empty value is selected, name of track layer will be used", # noqa - "order": 0}, - "subsetFamily": { - "value": ["plate", "take"], - "type": "QComboBox", - "label": "Subset Family", - "target": "ui", "toolTip": "What this subset will be used for", # noqa - "order": 1}, - "reviewTrack": { - "value": ["< none >"] + gui_tracks, - "type": "QComboBox", - "label": "Use Review Track", - "target": "ui", - "toolTip": "Generate preview videos on the fly; if `< none >` is selected, nothing will be generated.", # noqa - "order": 2}, - "audio": { - "value": False, - "type": "QCheckBox", - "label": "Include audio", - "target": "tag", - "toolTip": "Process subsets with corresponding audio", # noqa - "order": 3}, - "sourceResolution": { - "value": False, - "type": "QCheckBox", - "label": "Source resolution", - "target": "tag", - "toolTip": "Is resolution taken from timeline or source?", # noqa - "order": 4}, - } - }, - "shotAttr": { - "type": "section", - "label": "Shot Attributes", - "target": "ui", - "order": 4, - "value": { - "workfileFrameStart": { - "value": 1001, - "type": "QSpinBox", - "label": "Workfiles Start Frame", - "target": "tag", - "toolTip": "Set workfile starting frame number", # noqa - "order": 0 - }, - "handleStart": { - "value": 0, - "type": "QSpinBox", - "label": "Handle start (head)", - "target": "tag", - "toolTip": "Handle at start of clip", # noqa - "order": 1 - }, - "handleEnd": { - "value": 0, - "type": "QSpinBox", - "label": "Handle end (tail)", - "target": "tag", - "toolTip": "Handle at end of clip", # noqa - "order": 2 - } - } - } - } - - presets = None - - def process(self): - # get key pairs from presets and match them to ui inputs - for k, v in self.gui_inputs.items(): - if v["type"] in ("dict", "section"): - # nested dictionary (only one level allowed - # for sections and dict) - for _k, _v in v["value"].items(): - if self.presets.get(_k) is not None: - self.gui_inputs[k][ - "value"][_k]["value"] = self.presets[_k] - if self.presets.get(k): - self.gui_inputs[k]["value"] = self.presets[k] - - # open widget for plugins inputs - widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs) - widget.exec_() - - if len(self.selected) < 1: - return - - if not widget.result: - print("Operation aborted") - return - - self.rename_add = 0 - - # get ui output for track name for vertical sync - v_sync_track = widget.result["vSyncTrack"]["value"] - - # sort selected track items by vertical sync track - sorted_selected_track_items = [] -
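The preset matching at the top of `process()` above is a one-level merge; the same traversal isolated as a function (hypothetical input, behavior as read from the loop above):

def apply_presets(gui_inputs, presets):
    # Presets override the default "value" of a matching input, including
    # inputs nested one level deep inside "dict"/"section" groups.
    for key, item in gui_inputs.items():
        if item["type"] in ("dict", "section"):
            for sub_key, sub_item in item["value"].items():
                if presets.get(sub_key) is not None:
                    sub_item["value"] = presets[sub_key]
        if presets.get(key):
            gui_inputs[key]["value"] = presets[key]
    return gui_inputs

inputs = {"countFrom": {"type": "QSpinBox", "value": 10}}
print(apply_presets(inputs, {"countFrom": 1001})["countFrom"]["value"])  # 1001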
unsorted_selected_track_items = [] - print("_____ selected ______") - print(self.selected) - for track_item_data in self.selected: - if track_item_data["track"]["name"] in v_sync_track: - sorted_selected_track_items.append(track_item_data) - else: - unsorted_selected_track_items.append(track_item_data) - - sorted_selected_track_items.extend(unsorted_selected_track_items) - - # sequence attrs - sq_frame_start = self.timeline.GetStartFrame() - sq_markers = self.timeline.GetMarkers() - - # create media bin for compound clips (trackItems) - mp_folder = create_bin(self.timeline.GetName()) - - kwargs = { - "ui_inputs": widget.result, - "avalon": self.data, - "mp_folder": mp_folder, - "sq_frame_start": sq_frame_start, - "sq_markers": sq_markers - } - print(kwargs) - for i, track_item_data in enumerate(sorted_selected_track_items): - self.rename_index = i - self.log.info(track_item_data) - # convert track item to timeline media pool item - track_item = plugin.PublishClip( - self, track_item_data, **kwargs).convert() - track_item.SetClipColor(lib.publish_clip_color) diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py deleted file mode 100644 index d3f83c7f24..0000000000 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ /dev/null @@ -1,167 +0,0 @@ -from openpype.client import get_last_version_by_subset_id -from openpype.pipeline import ( - get_representation_context, - get_current_project_name -) -from openpype.hosts.resolve.api import lib, plugin -from openpype.hosts.resolve.api.pipeline import ( - containerise, - update_container, -) -from openpype.lib.transcoding import ( - VIDEO_EXTENSIONS, - IMAGE_EXTENSIONS -) - - -class LoadClip(plugin.TimelineItemLoader): - """Load a subset to timeline as clip - - Place clip to timeline on its asset origin timings collected - during conforming to project - """ - - families = ["render2d", "source", "plate", "render", "review"] - - representations = ["*"] - extensions = set( - ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) - ) - - label = "Load as clip" - order = -10 - icon = "code-fork" - color = "orange" - - # for loader multiselection - timeline = None - - # presets - clip_color_last = "Olive" - clip_color = "Orange" - - def load(self, context, name, namespace, options): - - # load clip to timeline and get main variables - files = plugin.get_representation_files(context["representation"]) - - timeline_item = plugin.ClipLoader( - self, context, **options).load(files) - namespace = namespace or timeline_item.GetName() - - # update color of clip regarding the version order - self.set_item_color(timeline_item, version=context["version"]) - - data_imprint = self.get_tag_data(context, name, namespace) - return containerise( - timeline_item, - name, namespace, context, - self.__class__.__name__, - data_imprint) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """ Updating previously loaded clips - """ - - context = get_representation_context(representation) - name = container['name'] - namespace = container['namespace'] - timeline_item = container["_timeline_item"] - - media_pool_item = timeline_item.GetMediaPoolItem() - - files = plugin.get_representation_files(representation) - - loader = plugin.ClipLoader(self, context) - timeline_item = loader.update(timeline_item, files) - - # update color of clip regarding the version order - self.set_item_color(timeline_item, 
version=context["version"]) - - # if original media pool item has no remaining usages left - # remove it from the media pool - if int(media_pool_item.GetClipProperty("Usage")) == 0: - lib.remove_media_pool_item(media_pool_item) - - data_imprint = self.get_tag_data(context, name, namespace) - return update_container(timeline_item, data_imprint) - - def get_tag_data(self, context, name, namespace): - """Return data to be imprinted on the timeline item marker""" - - representation = context["representation"] - version = context['version'] - version_data = version.get("data", {}) - version_name = version.get("name", None) - colorspace = version_data.get("colorspace", None) - object_name = "{}_{}".format(name, namespace) - - # add additional metadata from the version to imprint Avalon knob - # move all version data keys to tag data - add_version_data_keys = [ - "frameStart", "frameEnd", "source", "author", - "fps", "handleStart", "handleEnd" - ] - data = { - key: version_data.get(key, "None") for key in add_version_data_keys - } - - # add variables related to version context - data.update({ - "representation": str(representation["_id"]), - "version": version_name, - "colorspace": colorspace, - "objectName": object_name - }) - return data - - @classmethod - def set_item_color(cls, timeline_item, version): - """Color timeline item based on whether it is outdated or latest""" - # define version name - version_name = version.get("name", None) - # get all versions in list - project_name = get_current_project_name() - last_version_doc = get_last_version_by_subset_id( - project_name, - version["parent"], - fields=["name"] - ) - if last_version_doc: - last_version = last_version_doc["name"] - else: - last_version = None - - # set clip colour - if version_name == last_version: - timeline_item.SetClipColor(cls.clip_color_last) - else: - timeline_item.SetClipColor(cls.clip_color) - - def remove(self, container): - timeline_item = container["_timeline_item"] - media_pool_item = timeline_item.GetMediaPoolItem() - timeline = lib.get_current_timeline() - - # DeleteClips function was added in Resolve 18.5+ - # by checking None we can detect whether the - # function exists in Resolve - if timeline.DeleteClips is not None: - timeline.DeleteClips([timeline_item]) - else: - # Resolve versions older than 18.5 can't delete clips via API - # so all we can do is just remove the pype marker to 'untag' it - if lib.get_pype_marker(timeline_item): - # Note: We must call `get_pype_marker` because - # `delete_pype_marker` uses a global variable set by - # `get_pype_marker` to delete the right marker - # TODO: Improve code to avoid the global `temp_marker_frame` - lib.delete_pype_marker(timeline_item) - - # if media pool item has no remaining usages left - # remove it from the media pool - if int(media_pool_item.GetClipProperty("Usage")) == 0: - lib.remove_media_pool_item(media_pool_item) diff --git a/openpype/hosts/resolve/plugins/publish/extract_workfile.py b/openpype/hosts/resolve/plugins/publish/extract_workfile.py deleted file mode 100644 index db63487405..0000000000 --- a/openpype/hosts/resolve/plugins/publish/extract_workfile.py +++ /dev/null @@ -1,52 +0,0 @@ -import os -import pyblish.api - -from openpype.pipeline import publish -from openpype.hosts.resolve.api.lib import get_project_manager - - -class ExtractWorkfile(publish.Extractor): - """ - Extractor export DRP workfile file representation - """ - - label = "Extract Workfile" - order = pyblish.api.ExtractorOrder - families = ["workfile"] - hosts = ["resolve"] - 
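Back in `LoadClip`, `set_item_color` above reduces to a latest-vs-outdated comparison. A standalone sketch using the class preset colors (function name is illustrative):

def pick_clip_color(version_name, last_version_name,
                    clip_color_last="Olive", clip_color="Orange"):
    # The latest version keeps the "latest" color; anything else is
    # flagged as outdated.
    if version_name == last_version_name:
        return clip_color_last
    return clip_color

print(pick_clip_color(3, 5))  # Orange -> loaded version is outdated
print(pick_clip_color(5, 5))  # Olive  -> loaded version is the latest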
- def process(self, instance): - # create representation data - if "representations" not in instance.data: - instance.data["representations"] = [] - - name = instance.data["name"] - project = instance.context.data["activeProject"] - staging_dir = self.staging_dir(instance) - - resolve_workfile_ext = ".drp" - drp_file_name = name + resolve_workfile_ext - - drp_file_path = os.path.normpath( - os.path.join(staging_dir, drp_file_name)) - - # write out the drp workfile - get_project_manager().ExportProject( - project.GetName(), drp_file_path) - - # create drp workfile representation - representation_drp = { - 'name': resolve_workfile_ext[1:], - 'ext': resolve_workfile_ext[1:], - 'files': drp_file_name, - "stagingDir": staging_dir, - } - - instance.data["representations"].append(representation_drp) - - # add sourcePath attribute to instance - if not instance.data.get("sourcePath"): - instance.data["sourcePath"] = drp_file_path - - self.log.info("Added Resolve file representation: {}".format( - representation_drp)) diff --git a/openpype/hosts/resolve/plugins/publish/precollect_instances.py b/openpype/hosts/resolve/plugins/publish/precollect_instances.py deleted file mode 100644 index bca6734848..0000000000 --- a/openpype/hosts/resolve/plugins/publish/precollect_instances.py +++ /dev/null @@ -1,148 +0,0 @@ -from pprint import pformat - -import pyblish - -from openpype.hosts.resolve.api.lib import ( - get_current_timeline_items, - get_timeline_item_pype_tag, - publish_clip_color, - get_publish_attribute, - get_otio_clip_instance_data, -) -from openpype import AYON_SERVER_ENABLED - - -class PrecollectInstances(pyblish.api.ContextPlugin): - """Collect all Track items selection.""" - - order = pyblish.api.CollectorOrder - 0.49 - label = "Precollect Instances" - hosts = ["resolve"] - - def process(self, context): - otio_timeline = context.data["otioTimeline"] - selected_timeline_items = get_current_timeline_items( - filter=True, selecting_color=publish_clip_color) - - self.log.info( - "Processing enabled track items: {}".format( - len(selected_timeline_items))) - - for timeline_item_data in selected_timeline_items: - - data = {} - timeline_item = timeline_item_data["clip"]["item"] - - # get pype tag data - tag_data = get_timeline_item_pype_tag(timeline_item) - self.log.debug(f"__ tag_data: {pformat(tag_data)}") - - if not tag_data: - continue - - if tag_data.get("id") != "pyblish.avalon.instance": - continue - - media_pool_item = timeline_item.GetMediaPoolItem() - source_duration = int(media_pool_item.GetClipProperty("Frames")) - - # solve handles length - handle_start = min( - tag_data["handleStart"], int(timeline_item.GetLeftOffset())) - handle_end = min( - tag_data["handleEnd"], int( - source_duration - timeline_item.GetRightOffset())) - - self.log.debug("Handles: <{}, {}>".format(handle_start, handle_end)) - - # add tag data to instance data - data.update({ - k: v for k, v in tag_data.items() - if k not in ("id", "applieswhole", "label") - }) - - if AYON_SERVER_ENABLED: - asset = tag_data["folder_path"] - else: - asset = tag_data["asset_name"] - - subset = tag_data["subset"] - - data.update({ - "name": "{}_{}".format(asset, subset), - "label": "{} {}".format(asset, subset), - "asset": asset, - "item": timeline_item, - "publish": get_publish_attribute(timeline_item), - "fps": context.data["fps"], - "handleStart": handle_start, - "handleEnd": handle_end, - "newAssetPublishing": True, - "families": ["clip"], - }) - - # otio clip data - otio_data = get_otio_clip_instance_data( - otio_timeline, 
timeline_item_data) or {} - data.update(otio_data) - - # add resolution - self.get_resolution_to_data(data, context) - - # create instance - instance = context.create_instance(**data) - - # create shot instance for shot attributes create/update - self.create_shot_instance(context, timeline_item, **data) - - self.log.info("Creating instance: {}".format(instance)) - self.log.debug( - "_ instance.data: {}".format(pformat(instance.data))) - - def get_resolution_to_data(self, data, context): - assert data.get("otioClip"), "Missing `otioClip` data" - - # solve source resolution option - if data.get("sourceResolution", None): - otio_clip_metadata = data[ - "otioClip"].media_reference.metadata - data.update({ - "resolutionWidth": otio_clip_metadata["width"], - "resolutionHeight": otio_clip_metadata["height"], - "pixelAspect": otio_clip_metadata["pixelAspect"] - }) - else: - otio_tl_metadata = context.data["otioTimeline"].metadata - data.update({ - "resolutionWidth": otio_tl_metadata["width"], - "resolutionHeight": otio_tl_metadata["height"], - "pixelAspect": otio_tl_metadata["pixelAspect"] - }) - - def create_shot_instance(self, context, timeline_item, **data): - hero_track = data.get("heroTrack") - hierarchy_data = data.get("hierarchyData") - - if not hero_track: - return - - if not hierarchy_data: - return - - asset = data["asset"] - subset = "shotMain" - - # insert family into families - family = "shot" - - data.update({ - "name": "{}_{}".format(asset, subset), - "label": "{} {}".format(asset, subset), - "subset": subset, - "asset": asset, - "family": family, - "families": [], - "publish": get_publish_attribute(timeline_item) - }) - - context.create_instance(**data) diff --git a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py deleted file mode 100644 index ccc5fd86ff..0000000000 --- a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py +++ /dev/null @@ -1,56 +0,0 @@ -import pyblish.api -from pprint import pformat - -from openpype import AYON_SERVER_ENABLED -from openpype.pipeline import get_current_asset_name - -from openpype.hosts.resolve import api as rapi -from openpype.hosts.resolve.otio import davinci_export - - -class PrecollectWorkfile(pyblish.api.ContextPlugin): - """Precollect the current working file into context""" - - label = "Precollect Workfile" - order = pyblish.api.CollectorOrder - 0.5 - - def process(self, context): - current_asset_name = asset_name = get_current_asset_name() - - if AYON_SERVER_ENABLED: - asset_name = current_asset_name.split("/")[-1] - - subset = "workfileMain" - project = rapi.get_current_project() - fps = project.GetSetting("timelineFrameRate") - video_tracks = rapi.get_video_track_names() - - # adding otio timeline to context - otio_timeline = davinci_export.create_otio_timeline(project) - - instance_data = { - "name": "{}_{}".format(asset_name, subset), - "label": "{} {}".format(current_asset_name, subset), - "asset": current_asset_name, - "subset": subset, - "item": project, - "family": "workfile", - "families": [] - } - - # create instance with workfile - instance = context.create_instance(**instance_data) - - # update context with main project attributes - context_data = { - "activeProject": project, - "otioTimeline": otio_timeline, - "videoTracks": video_tracks, - "currentFile": project.GetName(), - "fps": fps, - } - context.data.update(context_data) - - self.log.info("Creating instance: {}".format(instance)) - self.log.debug("__ instance.data: 
{}".format(pformat(instance.data))) - self.log.debug("__ context_data: {}".format(pformat(context_data))) diff --git a/openpype/hosts/resolve/startup.py b/openpype/hosts/resolve/startup.py deleted file mode 100644 index 5ac3c99524..0000000000 --- a/openpype/hosts/resolve/startup.py +++ /dev/null @@ -1,70 +0,0 @@ -"""This script is used as a startup script in Resolve through a .scriptlib file - -It triggers directly after the launch of Resolve and it's recommended to keep -it optimized for fast performance since the Resolve UI is actually interactive -while this is running. As such, there's nothing ensuring the user isn't -continuing manually before any of the logic here runs. As such we also try -to delay any imports as much as possible. - -This code runs in a separate process to the main Resolve process. - -""" -import os -from openpype.lib import Logger -import openpype.hosts.resolve.api - -log = Logger.get_logger(__name__) - - -def ensure_installed_host(): - """Install resolve host with openpype and return the registered host. - - This function can be called multiple times without triggering an - additional install. - """ - from openpype.pipeline import install_host, registered_host - host = registered_host() - if host: - return host - - host = openpype.hosts.resolve.api.ResolveHost() - install_host(host) - return registered_host() - - -def launch_menu(): - print("Launching Resolve OpenPype menu..") - ensure_installed_host() - openpype.hosts.resolve.api.launch_pype_menu() - - -def open_workfile(path): - # Avoid the need to "install" the host - host = ensure_installed_host() - host.open_workfile(path) - - -def main(): - # Open last workfile - workfile_path = os.environ.get("OPENPYPE_RESOLVE_OPEN_ON_LAUNCH") - - if workfile_path and os.path.exists(workfile_path): - log.info(f"Opening last workfile: {workfile_path}") - open_workfile(workfile_path) - else: - log.info("No last workfile set to open. 
Skipping..") - - # Launch OpenPype menu - from openpype.settings import get_project_settings - from openpype.pipeline.context_tools import get_current_project_name - project_name = get_current_project_name() - log.info(f"Current project name in context: {project_name}") - - settings = get_project_settings(project_name) - if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True): - log.info("Launching OpenPype menu..") - launch_menu() - - -if __name__ == "__main__": - main() diff --git a/openpype/hosts/resolve/utility_scripts/AYON__Menu.py b/openpype/hosts/resolve/utility_scripts/AYON__Menu.py deleted file mode 100644 index 4f14927074..0000000000 --- a/openpype/hosts/resolve/utility_scripts/AYON__Menu.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys - -from openpype.pipeline import install_host -from openpype.lib import Logger - -log = Logger.get_logger(__name__) - - -def main(env): - from openpype.hosts.resolve.api import ResolveHost, launch_pype_menu - - # activate resolve from openpype - host = ResolveHost() - install_host(host) - - launch_pype_menu() - - -if __name__ == "__main__": - result = main(os.environ) - sys.exit(not bool(result)) diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype__Menu.py b/openpype/hosts/resolve/utility_scripts/OpenPype__Menu.py deleted file mode 100644 index 4f14927074..0000000000 --- a/openpype/hosts/resolve/utility_scripts/OpenPype__Menu.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys - -from openpype.pipeline import install_host -from openpype.lib import Logger - -log = Logger.get_logger(__name__) - - -def main(env): - from openpype.hosts.resolve.api import ResolveHost, launch_pype_menu - - # activate resolve from openpype - host = ResolveHost() - install_host(host) - - launch_pype_menu() - - -if __name__ == "__main__": - result = main(os.environ) - sys.exit(not bool(result)) diff --git a/openpype/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py deleted file mode 100644 index 8f3917bece..0000000000 --- a/openpype/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -import os -import sys - -from openpype.pipeline import install_host - - -def main(env): - from openpype.hosts.resolve.utils import setup - import openpype.hosts.resolve.api as bmdvr - # Registers openpype's Global pyblish plugins - install_host(bmdvr) - setup(env) - - -if __name__ == "__main__": - result = main(os.environ) - sys.exit(not bool(result)) diff --git a/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib deleted file mode 100644 index ec9b30a18d..0000000000 --- a/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib +++ /dev/null @@ -1,21 +0,0 @@ --- Run OpenPype's Python launch script for resolve -function file_exists(name) - local f = io.open(name, "r") - return f ~= nil and io.close(f) -end - - -openpype_startup_script = os.getenv("OPENPYPE_RESOLVE_STARTUP_SCRIPT") -if openpype_startup_script ~= nil then - script = fusion:MapPath(openpype_startup_script) - - if file_exists(script) then - -- We must use RunScript to ensure it runs in a separate - -- process to Resolve itself to avoid a deadlock for - -- certain imports of OpenPype libraries or Qt - print("Running launch script: " .. script) - fusion:RunScript(script) - else - print("Launch script not found at: " .. 
script) - end -end \ No newline at end of file diff --git a/openpype/hosts/resolve/utils.py b/openpype/hosts/resolve/utils.py deleted file mode 100644 index 9b91a14267..0000000000 --- a/openpype/hosts/resolve/utils.py +++ /dev/null @@ -1,80 +0,0 @@ -import os -import shutil -from openpype.lib import Logger, is_running_from_build - -from openpype import AYON_SERVER_ENABLED -RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -def setup(env): - log = Logger.get_logger("ResolveSetup") - scripts = {} - util_scripts_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") - util_scripts_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"] - - util_scripts_paths = [os.path.join( - RESOLVE_ROOT_DIR, - "utility_scripts" - )] - - # collect script dirs - if util_scripts_env: - log.info("Utility Scripts Env: `{}`".format(util_scripts_env)) - util_scripts_paths = util_scripts_env.split( - os.pathsep) + util_scripts_paths - - # collect scripts from dirs - for path in util_scripts_paths: - scripts.update({path: os.listdir(path)}) - - log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths)) - log.info("Utility Scripts: `{}`".format(scripts)) - - # Make sure scripts dir exists - os.makedirs(util_scripts_dir, exist_ok=True) - - # make sure no script file is in folder - for script in os.listdir(util_scripts_dir): - path = os.path.join(util_scripts_dir, script) - log.info("Removing `{}`...".format(path)) - if os.path.isdir(path): - shutil.rmtree(path, onerror=None) - else: - os.remove(path) - - # copy scripts into Resolve's utility scripts dir - for directory, scripts in scripts.items(): - for script in scripts: - if ( - is_running_from_build() and - script in ["tests", "develop"] - ): - # only copy those if started from build - continue - - src = os.path.join(directory, script) - dst = os.path.join(util_scripts_dir, script) - - # TODO: remove this once we have a proper solution - if AYON_SERVER_ENABLED: - if "OpenPype__Menu.py" == script: - continue - else: - if "AYON__Menu.py" == script: - continue - - # TODO: Make this a less hacky workaround - if script == "openpype_startup.scriptlib": - # Handle special case for scriptlib that needs to be a folder - # up from the Comp folder in the Fusion scripts - dst = os.path.join(os.path.dirname(util_scripts_dir), - script) - - log.info("Copying `{}` to `{}`...".format(src, dst)) - if os.path.isdir(src): - shutil.copytree( - src, dst, symlinks=False, - ignore=None, ignore_dangling_symlinks=False - ) - else: - shutil.copy2(src, dst) diff --git a/openpype/hosts/standalonepublisher/__init__.py b/openpype/hosts/standalonepublisher/__init__.py deleted file mode 100644 index f47fa6b573..0000000000 --- a/openpype/hosts/standalonepublisher/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .addon import StandAlonePublishAddon - - -__all__ = ( - "StandAlonePublishAddon", -) diff --git a/openpype/hosts/standalonepublisher/addon.py b/openpype/hosts/standalonepublisher/addon.py deleted file mode 100644 index 607c4ecdae..0000000000 --- a/openpype/hosts/standalonepublisher/addon.py +++ /dev/null @@ -1,59 +0,0 @@ -import os - -from openpype.lib import get_openpype_execute_args -from openpype.lib.execute import run_detached_process -from openpype.modules import ( - click_wrap, - OpenPypeModule, - ITrayAction, - IHostAddon, -) - -STANDALONEPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class StandAlonePublishAddon(OpenPypeModule, ITrayAction, IHostAddon): - label = "Publisher (legacy)" - name = "standalonepublisher" - host_name = "standalonepublisher" - - def 
initialize(self, modules_settings): - self.enabled = modules_settings["standalonepublish_tool"]["enabled"] - self.publish_paths = [ - os.path.join(STANDALONEPUBLISH_ROOT_DIR, "plugins", "publish") - ] - - def tray_init(self): - return - - def on_action_trigger(self): - self.run_standalone_publisher() - - def connect_with_modules(self, enabled_modules): - """Collect publish paths from other modules.""" - - publish_paths = self.manager.collect_plugin_paths()["publish"] - self.publish_paths.extend(publish_paths) - - def run_standalone_publisher(self): - args = get_openpype_execute_args("module", self.name, "launch") - run_detached_process(args) - - def cli(self, click_group): - click_group.add_command(cli_main.to_click_obj()) - - -@click_wrap.group( - StandAlonePublishAddon.name, - help="StandalonePublisher related commands.") -def cli_main(): - pass - - -@cli_main.command() -def launch(): - """Launch StandalonePublisher tool UI.""" - - from openpype.tools import standalonepublish - - standalonepublish.main() diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py deleted file mode 100644 index 857f3dca20..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py +++ /dev/null @@ -1,13 +0,0 @@ -import pyblish.api - - -class CollectSAAppName(pyblish.api.ContextPlugin): - """Collect app name and label.""" - - label = "Collect App Name/Label" - order = pyblish.api.CollectorOrder - 0.5 - hosts = ["standalonepublisher"] - - def process(self, context): - context.data["appName"] = "standalone publisher" - context.data["appLabel"] = "Standalone publisher" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py deleted file mode 100644 index 6c3b0c3efd..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py +++ /dev/null @@ -1,91 +0,0 @@ -import copy -import json -import pyblish.api - -from openpype.client import get_asset_by_name -from openpype.pipeline.create import get_subset_name - - -class CollectBulkMovInstances(pyblish.api.InstancePlugin): - """Collect all available instances for batch publish.""" - - label = "Collect Bulk Mov Instances" - order = pyblish.api.CollectorOrder + 0.489 - hosts = ["standalonepublisher"] - families = ["render_mov_batch"] - - new_instance_family = "render" - instance_task_names = [ - "compositing", - "comp" - ] - default_task_name = "compositing" - subset_name_variant = "Default" - - def process(self, instance): - context = instance.context - project_name = context.data["projectEntity"]["name"] - asset_name = instance.data["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - if not asset_doc: - raise AssertionError(( - "Couldn't find Asset document with name \"{}\"" - ).format(asset_name)) - - available_task_names = {} - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - for task_name in asset_tasks.keys(): - available_task_names[task_name.lower()] = task_name - - task_name = self.default_task_name - for _task_name in self.instance_task_names: - _task_name_low = _task_name.lower() - if _task_name_low in available_task_names: - task_name = available_task_names[_task_name_low] - break - - subset_name = get_subset_name( - self.new_instance_family, - self.subset_name_variant, - task_name, - asset_doc, - project_name, - host_name=context.data["hostName"], - 
project_settings=context.data["project_settings"] - ) - instance_name = f"{asset_name}_{subset_name}" - - # create new instance - new_instance = context.create_instance(instance_name) - new_instance_data = { - "name": instance_name, - "label": instance_name, - "family": self.new_instance_family, - "subset": subset_name, - "task": task_name - } - new_instance.data.update(new_instance_data) - # add original instance data except name key - for key, value in instance.data.items(): - if key in new_instance_data: - continue - # Make sure value is copy since value may be object which - # can be shared across all new created objects - new_instance.data[key] = copy.deepcopy(value) - - # Add `render_mov_batch` for specific validators - if "families" not in new_instance.data: - new_instance.data["families"] = [] - new_instance.data["families"].append("render_mov_batch") - - # delete original instance - context.remove(instance) - - self.log.info(f"Created new instance: {instance_name}") - - def converter(value): - return str(value) - - self.log.debug("Instance data: {}".format( - json.dumps(new_instance.data, indent=4, default=converter) - )) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py deleted file mode 100644 index 8fa53f5f48..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ /dev/null @@ -1,276 +0,0 @@ -""" -Requires: - environment -> SAPUBLISH_INPATH - environment -> SAPUBLISH_OUTPATH - -Provides: - context -> returnJsonPath (str) - context -> project - context -> asset - instance -> destination_list (list) - instance -> representations (list) - instance -> source (list) - instance -> representations -""" - -import os -import json -import copy -from pprint import pformat -import clique -import pyblish.api - -from openpype.pipeline import legacy_io - - -class CollectContextDataSAPublish(pyblish.api.ContextPlugin): - """ - Collecting temp json data sent from a host context - and path for returning json data back to the host itself.
- """ - - label = "Collect Context - SA Publish" - order = pyblish.api.CollectorOrder - 0.49 - hosts = ["standalonepublisher"] - - # presets - batch_extensions = ["edl", "xml", "psd"] - - def process(self, context): - # get json paths from os and load them - legacy_io.install() - - # get json file context - input_json_path = os.environ.get("SAPUBLISH_INPATH") - - with open(input_json_path, "r") as f: - in_data = json.load(f) - self.log.debug(f"_ in_data: {pformat(in_data)}") - - self.add_files_to_ignore_cleanup(in_data, context) - # exception for editorial - if in_data["family"] == "render_mov_batch": - in_data_list = self.prepare_mov_batch_instances(in_data) - - elif in_data["family"] in ["editorial", "background_batch"]: - in_data_list = self.multiple_instances(context, in_data) - - else: - in_data_list = [in_data] - - self.log.debug(f"_ in_data_list: {pformat(in_data_list)}") - - for in_data in in_data_list: - # create instance - self.create_instance(context, in_data) - - def add_files_to_ignore_cleanup(self, in_data, context): - all_filepaths = context.data.get("skipCleanupFilepaths") or [] - for repre in in_data["representations"]: - files = repre["files"] - if not isinstance(files, list): - files = [files] - - dirpath = repre["stagingDir"] - for filename in files: - filepath = os.path.normpath(os.path.join(dirpath, filename)) - if filepath not in all_filepaths: - all_filepaths.append(filepath) - - context.data["skipCleanupFilepaths"] = all_filepaths - - def multiple_instances(self, context, in_data): - # avoid subset name duplicity - if not context.data.get("subsetNamesCheck"): - context.data["subsetNamesCheck"] = list() - - in_data_list = list() - representations = in_data.pop("representations") - for repr in representations: - in_data_copy = copy.deepcopy(in_data) - ext = repr["ext"][1:] - subset = in_data_copy["subset"] - # filter out non editorial files - if ext not in self.batch_extensions: - in_data_copy["representations"] = [repr] - in_data_copy["subset"] = f"{ext}{subset}" - in_data_list.append(in_data_copy) - - files = repr.get("files") - - # delete unneeded keys - delete_repr_keys = ["frameStart", "frameEnd"] - for k in delete_repr_keys: - if repr.get(k): - repr.pop(k) - - # convert files to list if it isn't - if not isinstance(files, (tuple, list)): - files = [files] - - self.log.debug(f"_ files: {files}") - for index, f in enumerate(files): - index += 1 - # copy dictionaries - in_data_copy = copy.deepcopy(in_data_copy) - repr_new = copy.deepcopy(repr) - - repr_new["files"] = f - repr_new["name"] = ext - in_data_copy["representations"] = [repr_new] - - # create subset Name - new_subset = f"{ext}{index}{subset}" - while new_subset in context.data["subsetNamesCheck"]: - index += 1 - new_subset = f"{ext}{index}{subset}" - - context.data["subsetNamesCheck"].append(new_subset) - in_data_copy["subset"] = new_subset - in_data_list.append(in_data_copy) - self.log.info(f"Creating subset: {ext}{index}{subset}") - - return in_data_list - - def prepare_mov_batch_instances(self, in_data): - """Copy of `multiple_instances` method. - - Method was copied because `batch_extensions` is used in - `multiple_instances` but without any family filtering. Since usage - of the filtering is unknown and modification of that part may break - editorial or PSD batch publishing it was decided to create a copy with - this family specific filtering. Also "frameStart" and "frameEnd" keys - are removed from instance which is needed for this processing. - - Instance data will also care about families. 
- - TODO: - - Merge possible logic with `multiple_instances` method. - """ - self.log.info("Preparing data for mov batch processing.") - in_data_list = [] - - representations = in_data.pop("representations") - for repre in representations: - self.log.debug("Processing representation with files {}".format( - str(repre["files"]) - )) - ext = repre["ext"][1:] - - # Rename representation name - repre_name = repre["name"] - if repre_name.startswith(ext + "_"): - repre["name"] = ext - # Skip files that are not available for mov batch publishing - # TODO add dynamic expected extensions by family from `in_data` - # - with this modification it would be possible to use only - # `multiple_instances` method - expected_exts = ["mov"] - if ext not in expected_exts: - self.log.warning(( - "Skipping representation." - " Does not match expected extensions <{}>. {}" - ).format(", ".join(expected_exts), str(repre))) - continue - - files = repre["files"] - # Convert files to list if it isn't - if not isinstance(files, (tuple, list)): - files = [files] - - # Loop through files and create new instance per each file - for filename in files: - # Create copy of representation and change its files and name - new_repre = copy.deepcopy(repre) - new_repre["files"] = filename - new_repre["name"] = ext - new_repre["thumbnail"] = True - - if "tags" not in new_repre: - new_repre["tags"] = [] - new_repre["tags"].append("review") - - # Prepare new subset name (temporary name) - # - subset name will be changed in batch specific plugins - new_subset_name = "{}{}".format( - in_data["subset"], - os.path.basename(filename) - ) - # Create copy of instance data as new instance and pass in new - # representation - in_data_copy = copy.deepcopy(in_data) - in_data_copy["representations"] = [new_repre] - in_data_copy["subset"] = new_subset_name - if "families" not in in_data_copy: - in_data_copy["families"] = [] - in_data_copy["families"].append("review") - - in_data_list.append(in_data_copy) - - return in_data_list - - def create_instance(self, context, in_data): - subset = in_data["subset"] - # If instance data already contains families then use them - instance_families = in_data.get("families") or [] - - instance = context.create_instance(subset) - instance.data.update( - { - "subset": subset, - "asset": in_data["asset"], - "label": subset, - "name": subset, - "family": in_data["family"], - "frameStart": in_data.get("representations", [None])[0].get( - "frameStart", None - ), - "frameEnd": in_data.get("representations", [None])[0].get( - "frameEnd", None - ), - "families": instance_families - } - ) - # Fill version only if 'use_next_available_version' is disabled - # and version is filled in instance data - version = in_data.get("version") - use_next_available_version = in_data.get( - "use_next_available_version", True) - if not use_next_available_version and version is not None: - instance.data["version"] = version - - self.log.info("collected instance: {}".format(pformat(instance.data))) - self.log.info("parsing data: {}".format(pformat(in_data))) - - instance.data["destination_list"] = list() - instance.data["representations"] = list() - instance.data["source"] = "standalone publisher" - - for component in in_data["representations"]: - component["destination"] = component["files"] - component["stagingDir"] = component["stagingDir"] - - if isinstance(component["files"], list): - collections, _remainder = clique.assemble(component["files"]) - self.log.debug("collecting sequence: {}".format(collections)) - instance.data["frameStart"]
= int(component["frameStart"]) - instance.data["frameEnd"] = int(component["frameEnd"]) - if component.get("fps"): - instance.data["fps"] = int(component["fps"]) - - ext = component["ext"] - if ext.startswith("."): - component["ext"] = ext[1:] - - # Remove 'preview' key from representation data - preview = component.pop("preview") - if preview: - instance.data["families"].append("review") - component["tags"] = ["review"] - self.log.debug("Adding review family") - - if "psd" in component["name"]: - instance.data["source"] = component["files"] - self.log.debug("Adding image:background_batch family") - - instance.data["representations"].append(component) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py deleted file mode 100644 index 391cace761..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py +++ /dev/null @@ -1,125 +0,0 @@ -""" -Optional: - presets -> extensions ( - example of use: - ["mov", "mp4"] - ) - presets -> source_dir ( - example of use: - "C:/pathToFolder" - "{root}/{project[name]}/inputs" - "{root[work]}/{project[name]}/inputs" - "./input" - "../input" - "" - ) -""" - -import os -import opentimelineio as otio -import pyblish.api -from openpype import lib as plib -from openpype.pipeline.context_tools import get_current_project_asset - - -class OTIO_View(pyblish.api.Action): - """Currently disabled because OTIO requires PySide2. Issue on Qt.py: - https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289 - """ - - label = "OTIO View" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - instance = context[0] - representation = instance.data["representations"][0] - file_path = os.path.join( - representation["stagingDir"], representation["files"] - ) - plib.run_subprocess(["otioview", file_path]) - - -class CollectEditorial(pyblish.api.InstancePlugin): - """Collect Editorial OTIO timeline""" - - order = pyblish.api.CollectorOrder - label = "Collect Editorial" - hosts = ["standalonepublisher"] - families = ["editorial"] - actions = [] - - # presets - extensions = ["mov", "mp4"] - source_dir = None - - def process(self, instance): - root_dir = None - # remove context test attribute - if instance.context.data.get("subsetNamesCheck"): - instance.context.data.pop("subsetNamesCheck") - - self.log.debug(f"__ instance: `{instance}`") - # get representation with editorial file - for representation in instance.data["representations"]: - self.log.debug(f"__ representation: `{representation}`") - # make editorial sequence file path - staging_dir = representation["stagingDir"] - file_path = os.path.join( - staging_dir, str(representation["files"]) - ) - instance.context.data["currentFile"] = file_path - - # get video file path - video_path = None - basename = os.path.splitext(os.path.basename(file_path))[0] - - if self.source_dir != "": - source_dir = self.source_dir.replace("\\", "/") - if ("./" in source_dir) or ("../" in source_dir): - # get current working dir - cwd = os.getcwd() - # set cwd to staging dir for absolute path solving - os.chdir(staging_dir) - root_dir = os.path.abspath(source_dir) - # set back original cwd - os.chdir(cwd) - elif "{" in source_dir: - root_dir = source_dir - else: - root_dir = os.path.normpath(source_dir) - - if root_dir: - # search for source data will need to be done - instance.data["editorialSourceRoot"] = root_dir - instance.data["editorialSourcePath"] = None - else: - # source data 
are already found - for f in os.listdir(staging_dir): - # filter out by not sharing the same name - if os.path.splitext(f)[0] not in basename: - continue - # filter out by accepted extensions - if os.path.splitext(f)[1][1:] not in self.extensions: - continue - video_path = os.path.join( - staging_dir, f - ) - self.log.debug(f"__ video_path: `{video_path}`") - instance.data["editorialSourceRoot"] = staging_dir - instance.data["editorialSourcePath"] = video_path - - instance.data["stagingDir"] = staging_dir - - # get editorial sequence file into otio timeline object - extension = os.path.splitext(file_path)[1] - kwargs = {} - if extension == ".edl": - # EDL has no frame rate embedded so needs explicit - # frame rate else 24 is assumed. - kwargs["rate"] = get_current_project_asset()["data"]["fps"] - - instance.data["otio_timeline"] = otio.adapters.read_from_file( - file_path, **kwargs) - - self.log.info(f"Added OTIO timeline from: `{file_path}`") diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py deleted file mode 100644 index 75c260bad7..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py +++ /dev/null @@ -1,214 +0,0 @@ -import os -from copy import deepcopy - -import opentimelineio as otio -import pyblish.api - -from openpype import lib as plib -from openpype.pipeline.context_tools import get_current_project_asset - - -class CollectInstances(pyblish.api.InstancePlugin): - """Collect instances from editorial's OTIO sequence""" - - order = pyblish.api.CollectorOrder + 0.01 - label = "Collect Editorial Instances" - hosts = ["standalonepublisher"] - families = ["editorial"] - - # presets - subsets = { - "referenceMain": { - "family": "review", - "families": ["clip"], - "extensions": ["mp4"] - }, - "audioMain": { - "family": "audio", - "families": ["clip"], - "extensions": ["wav"], - } - } - timeline_frame_start = 900000 # standard EDL default (10:00:00:00) - timeline_frame_offset = None - custom_start_frame = None - - def process(self, instance): - # get context - context = instance.context - - instance_data_filter = [ - "editorialSourceRoot", - "editorialSourcePath" - ] - - # attribute for checking duplicates during creation - if not context.data.get("assetNameCheck"): - context.data["assetNameCheck"] = list() - - # create asset_names conversion table - if not context.data.get("assetsShared"): - context.data["assetsShared"] = dict() - - # get timeline otio data - timeline = instance.data["otio_timeline"] - fps = get_current_project_asset()["data"]["fps"] - - tracks = timeline.each_child( - descended_from_type=otio.schema.Track - ) - - # get data from avalon - asset_entity = instance.context.data["assetEntity"] - asset_data = asset_entity["data"] - asset_name = asset_entity["name"] - - # Timeline data.
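# --- Editor's aside (not part of the removed plugin; the function name is
# invented for illustration). The frame-range arithmetic in the clip loop
# below is dense, so this is a minimal, self-contained sketch of the same
# math: a clip placed at `clip_in` on the timeline with `duration` frames
# and handles yields the task frame range and its handle-extended variants.
def sketch_clip_ranges(clip_in, duration, handle_start, handle_end,
                       custom_start_frame=None):
    # clip_out mirrors otio's end_time_inclusive(): last frame of the clip
    clip_out = clip_in + (duration - 1)
    frame_start = clip_in if custom_start_frame is None else custom_start_frame
    frame_end = frame_start + (duration - 1)
    return {
        "clipIn": clip_in,
        "clipOut": clip_out,
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "frameStartH": frame_start - handle_start,
        "frameEndH": frame_end + handle_end,
    }
# e.g. sketch_clip_ranges(1001, 48, 10, 10) gives frameEnd 1048,
# frameStartH 991 and frameEndH 1058.
# --- end of editor's aside ------------------------------------------------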
- handle_start = int(asset_data["handleStart"]) - handle_end = int(asset_data["handleEnd"]) - - for track in tracks: - self.log.debug(f"track.name: {track.name}") - try: - track_start_frame = ( - abs(track.source_range.start_time.value) - ) - self.log.debug(f"track_start_frame: {track_start_frame}") - track_start_frame -= self.timeline_frame_start - except AttributeError: - track_start_frame = 0 - - self.log.debug(f"track_start_frame: {track_start_frame}") - - for clip in track.each_child(): - if clip.name is None: - continue - - if isinstance(clip, otio.schema.Gap): - continue - - # skip all generators like black empty - if isinstance( - clip.media_reference, - otio.schema.GeneratorReference): - continue - - # Transitions are ignored, because Clips have the full frame - # range. - if isinstance(clip, otio.schema.Transition): - continue - - # basic unique asset name - clip_name = os.path.splitext(clip.name)[0].lower() - name = f"{asset_name.split('_')[0]}_{clip_name}" - - if name not in context.data["assetNameCheck"]: - context.data["assetNameCheck"].append(name) - else: - self.log.warning(f"duplicate shot name: {name}") - - # frame ranges data - clip_in = clip.range_in_parent().start_time.value - clip_in += track_start_frame - clip_out = clip.range_in_parent().end_time_inclusive().value - clip_out += track_start_frame - self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}") - - # add offset in case there is any - if self.timeline_frame_offset: - clip_in += self.timeline_frame_offset - clip_out += self.timeline_frame_offset - - clip_duration = clip.duration().value - self.log.info(f"clip duration: {clip_duration}") - - source_in = clip.trimmed_range().start_time.value - source_out = source_in + clip_duration - source_in_h = source_in - handle_start - source_out_h = source_out + handle_end - - clip_in_h = clip_in - handle_start - clip_out_h = clip_out + handle_end - - # define starting frame for future shot - if self.custom_start_frame is not None: - frame_start = self.custom_start_frame - else: - frame_start = clip_in - - frame_end = frame_start + (clip_duration - 1) - - # create shared new instance data - instance_data = { - # shared attributes - "asset": name, - "assetShareName": name, - "item": clip, - "clipName": clip_name, - - # parent time properties - "trackStartFrame": track_start_frame, - "handleStart": handle_start, - "handleEnd": handle_end, - "fps": fps, - - # media source - "sourceIn": source_in, - "sourceOut": source_out, - "sourceInH": source_in_h, - "sourceOutH": source_out_h, - - # timeline - "clipIn": clip_in, - "clipOut": clip_out, - "clipDuration": clip_duration, - "clipInH": clip_in_h, - "clipOutH": clip_out_h, - "clipDurationH": clip_duration + handle_start + handle_end, - - # task - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartH": frame_start - handle_start, - "frameEndH": frame_end + handle_end, - "newAssetPublishing": True - } - - for data_key in instance_data_filter: - instance_data.update({ - data_key: instance.data.get(data_key)}) - - # adding subsets to context as instances - self.subsets.update({ - "shotMain": { - "family": "shot", - "families": [] - } - }) - for subset, properties in self.subsets.items(): - version = properties.get("version") - if version == 0: - properties.pop("version") - - # adding Review-able instance - subset_instance_data = deepcopy(instance_data) - subset_instance_data.update(deepcopy(properties)) - subset_instance_data.update({ - # unique attributes - "name": f"{name}_{subset}", - "label": f"{name} {subset} 
({clip_in}-{clip_out})", - "subset": subset - }) - # create new instance - _instance = instance.context.create_instance( - **subset_instance_data) - self.log.debug( - f"Instance: `{_instance}` | " - f"families: `{subset_instance_data['families']}`") - - context.data["assetsShared"][name] = { - "_clipIn": clip_in, - "_clipOut": clip_out - } - - self.log.debug("Instance: `{}` | families: `{}`") diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py deleted file mode 100644 index 4d7a13fcf2..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py +++ /dev/null @@ -1,271 +0,0 @@ -import os -import re -import tempfile -import pyblish.api -from copy import deepcopy -import clique - - -class CollectInstanceResources(pyblish.api.InstancePlugin): - """Collect instance's resources""" - - # must be after `CollectInstances` - order = pyblish.api.CollectorOrder + 0.011 - label = "Collect Editorial Resources" - hosts = ["standalonepublisher"] - families = ["clip"] - - def process(self, instance): - self.context = instance.context - self.log.info(f"Processing instance: {instance}") - self.new_instances = [] - subset_files = dict() - subset_dirs = list() - anatomy = self.context.data["anatomy"] - anatomy_data = deepcopy(self.context.data["anatomyData"]) - anatomy_data.update({"root": anatomy.roots}) - - subset = instance.data["subset"] - clip_name = instance.data["clipName"] - - editorial_source_root = instance.data["editorialSourceRoot"] - editorial_source_path = instance.data["editorialSourcePath"] - - # if `editorial_source_path` then loop through - if editorial_source_path: - # add family if mov or mp4 found which is longer for - # cutting `trimming` to enable `ExtractTrimmingVideoAudio` plugin - staging_dir = os.path.normpath( - tempfile.mkdtemp(prefix="pyblish_tmp_") - ) - instance.data["stagingDir"] = staging_dir - instance.data["families"] += ["trimming"] - return - - # if template pattern in path then fill it with `anatomy_data` - if "{" in editorial_source_root: - editorial_source_root = editorial_source_root.format( - **anatomy_data) - - self.log.debug(f"root: {editorial_source_root}") - # loop `editorial_source_root` and find clip name in folders - # and look for any subset name alternatives - for root, dirs, _files in os.walk(editorial_source_root): - # search only for directories related to clip name - correct_clip_dir = None - for _d_search in dirs: - # avoid all non clip dirs - if _d_search not in clip_name: - continue - # found correct dir for clip - correct_clip_dir = _d_search - - # continue if clip dir was not found - if not correct_clip_dir: - continue - - clip_dir_path = os.path.join(root, correct_clip_dir) - subset_files_items = list() - # list content of clip dir and search for subset items - for subset_item in os.listdir(clip_dir_path): - # avoid all items which are not defined as subsets by name - if subset not in subset_item: - continue - - subset_item_path = os.path.join( - clip_dir_path, subset_item) - # if it is dir store it to `subset_dirs` list - if os.path.isdir(subset_item_path): - subset_dirs.append(subset_item_path) - - # if it is file then store it to `subset_files` list - if os.path.isfile(subset_item_path): - subset_files_items.append(subset_item_path) - - if subset_files_items: - subset_files.update({clip_dir_path: subset_files_items}) - - # break the loop if correct_clip_dir was captured - # no need 
to carry on if correct folder was found - if correct_clip_dir: - break - - if subset_dirs: - # look through all dirs and check for subset name alternatives - for _dir in subset_dirs: - instance_data = deepcopy( - {k: v for k, v in instance.data.items()}) - sub_dir = os.path.basename(_dir) - # if subset name is only alternative then create new instance - if sub_dir != subset: - instance_data = self.duplicate_instance( - instance_data, subset, sub_dir) - - # create all representations - self.create_representations( - os.listdir(_dir), instance_data, _dir) - - if sub_dir == subset: - self.new_instances.append(instance_data) - # instance.data.update(instance_data) - - if subset_files: - unique_subset_names = list() - root_dir = list(subset_files.keys()).pop() - files_list = subset_files[root_dir] - search_pattern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])" - for _file in files_list: - pattern = re.compile(search_pattern) - match = pattern.findall(_file) - if not match: - continue - match_subset = match.pop() - if match_subset in unique_subset_names: - continue - unique_subset_names.append(match_subset) - - self.log.debug(f"unique_subset_names: {unique_subset_names}") - - for _un_subs in unique_subset_names: - instance_data = self.duplicate_instance( - instance.data, subset, _un_subs) - - # create all representations - self.create_representations( - [os.path.basename(f) for f in files_list - if _un_subs in f], - instance_data, root_dir) - - # remove the original instance as it had been used only - # as template and is duplicated - self.context.remove(instance) - - # create all instances in self.new_instances into context - for new_instance in self.new_instances: - _new_instance = self.context.create_instance( - new_instance["name"]) - _new_instance.data.update(new_instance) - - def duplicate_instance(self, instance_data, subset, new_subset): - - new_instance_data = dict() - for _key, _value in instance_data.items(): - new_instance_data[_key] = _value - if not isinstance(_value, str): - continue - if subset in _value: - new_instance_data[_key] = _value.replace( - subset, new_subset) - - self.log.info(f"Creating new instance: {new_instance_data['name']}") - self.new_instances.append(new_instance_data) - return new_instance_data - - def create_representations( - self, files_list, instance_data, staging_dir): - """ Create representations from Collection object - """ - # collecting frames for later frame start/end reset - frames = list() - # break down Collection object to collections and remainders - collections, remainder = clique.assemble(files_list) - # add staging_dir to instance_data - instance_data["stagingDir"] = staging_dir - # add representations to instance_data - instance_data["representations"] = list() - - collection_head_name = None - # loop through collections and create representations - for _collection in collections: - ext = _collection.tail[1:] - collection_head_name = _collection.head - frame_start = list(_collection.indexes)[0] - frame_end = list(_collection.indexes)[-1] - repre_data = { - "frameStart": frame_start, - "frameEnd": frame_end, - "name": ext, - "ext": ext, - "files": [item for item in _collection], - "stagingDir": staging_dir - } - - if instance_data.get("keepSequence"): - repre_data_keep = deepcopy(repre_data) - instance_data["representations"].append(repre_data_keep) - - if "review" in instance_data["families"]: - repre_data.update({ - "thumbnail": True, - "frameStartFtrack": frame_start, - "frameEndFtrack": frame_end, - "step": 1, - "fps": self.context.data.get("fps"),
"name": "review", - "tags": ["review", "ftrackreview", "delete"], - }) - instance_data["representations"].append(repre_data) - - # add to frames for frame range reset - frames.append(frame_start) - frames.append(frame_end) - - # loop through reminders and create representations - for _reminding_file in remainder: - ext = os.path.splitext(_reminding_file)[-1][1:] - if ext not in instance_data["extensions"]: - continue - if collection_head_name and ( - (collection_head_name + ext) not in _reminding_file - ) and (ext in ["mp4", "mov"]): - self.log.info(f"Skipping file: {_reminding_file}") - continue - frame_start = 1 - frame_end = 1 - - repre_data = { - "name": ext, - "ext": ext, - "files": _reminding_file, - "stagingDir": staging_dir - } - - # exception for thumbnail - if "thumb" in _reminding_file: - repre_data.update({ - 'name': "thumbnail", - 'thumbnail': True - }) - - # exception for mp4 preview - if ext in ["mp4", "mov"]: - frame_start = 0 - frame_end = ( - (instance_data["frameEnd"] - instance_data["frameStart"]) - + 1) - # add review ftrack family into families - for _family in ["review", "ftrack"]: - if _family not in instance_data["families"]: - instance_data["families"].append(_family) - repre_data.update({ - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartFtrack": frame_start, - "frameEndFtrack": frame_end, - "step": 1, - "fps": self.context.data.get("fps"), - "name": "review", - "thumbnail": True, - "tags": ["review", "ftrackreview", "delete"], - }) - - # add to frames for frame range reset only if no collection - if not collections: - frames.append(frame_start) - frames.append(frame_end) - - instance_data["representations"].append(repre_data) - - # reset frame start / end - instance_data["frameStart"] = min(frames) - instance_data["frameEnd"] = max(frames) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_scenes.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_scenes.py deleted file mode 100644 index c435ca2096..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_scenes.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect Harmony scenes in Standalone Publisher.""" -import copy -import glob -import os -from pprint import pformat - -import pyblish.api - - -class CollectHarmonyScenes(pyblish.api.InstancePlugin): - """Collect Harmony xstage files.""" - - order = pyblish.api.CollectorOrder + 0.498 - label = "Collect Harmony Scene" - hosts = ["standalonepublisher"] - families = ["harmony.scene"] - - # presets - ignored_instance_data_keys = ("name", "label", "stagingDir", "version") - - def process(self, instance): - """Plugin entry point.""" - context = instance.context - asset_data = instance.context.data["assetEntity"] - asset_name = instance.data["asset"] - subset_name = instance.data.get("subset", "sceneMain") - anatomy_data = instance.context.data["anatomyData"] - repres = instance.data["representations"] - staging_dir = repres[0]["stagingDir"] - files = repres[0]["files"] - - if not files.endswith(".zip"): - # A harmony project folder / .xstage was dropped - instance_name = f"{asset_name}_{subset_name}" - task = instance.data.get("task", "harmonyIngest") - - # create new instance - new_instance = context.create_instance(instance_name) - - # add original instance data except name key - for key, value in instance.data.items(): - # Make sure value is copy since value may be object which - # can be shared across all new created objects - if key not in 
self.ignored_instance_data_keys: - new_instance.data[key] = copy.deepcopy(value) - - self.log.info("Copied data: {}".format(new_instance.data)) - - # fix anatomy data - anatomy_data_new = copy.deepcopy(anatomy_data) - - project_entity = context.data["projectEntity"] - asset_entity = context.data["assetEntity"] - - task_type = asset_entity["data"]["tasks"].get(task, {}).get("type") - project_task_types = project_entity["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") - - # updating hierarchy data - anatomy_data_new.update({ - "asset": asset_data["name"], - "folder": { - "name": asset_data["name"], - }, - "task": { - "name": task, - "type": task_type, - "short": task_code, - }, - "subset": subset_name - }) - - new_instance.data["label"] = f"{instance_name}" - new_instance.data["subset"] = subset_name - new_instance.data["extension"] = ".zip" - new_instance.data["anatomyData"] = anatomy_data_new - new_instance.data["publish"] = True - - # When a project folder was dropped vs. just an xstage file, find - # the latest file xstage version and update the instance - if not files.endswith(".xstage"): - - source_dir = os.path.join( - staging_dir, files - ).replace("\\", "/") - - latest_file = max(glob.iglob(source_dir + "/*.xstage"), - key=os.path.getctime).replace("\\", "/") - - new_instance.data["representations"][0]["stagingDir"] = ( - source_dir - ) - new_instance.data["representations"][0]["files"] = ( - os.path.basename(latest_file) - ) - self.log.info(f"Created new instance: {instance_name}") - self.log.debug(f"_ inst_data: {pformat(new_instance.data)}") - - # set original instance for removal - self.log.info("Context data: {}".format(context.data)) - instance.data["remove"] = True diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_zips.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_zips.py deleted file mode 100644 index d90215e767..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_harmony_zips.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect zips as Harmony scene files.""" -import copy -from pprint import pformat - -import pyblish.api - - -class CollectHarmonyZips(pyblish.api.InstancePlugin): - """Collect Harmony zipped projects.""" - - order = pyblish.api.CollectorOrder + 0.497 - label = "Collect Harmony Zipped Projects" - hosts = ["standalonepublisher"] - families = ["harmony.scene"] - extensions = ["zip"] - - # presets - ignored_instance_data_keys = ("name", "label", "stagingDir", "version") - - def process(self, instance): - """Plugin entry point.""" - context = instance.context - asset_data = instance.context.data["assetEntity"] - asset_name = instance.data["asset"] - subset_name = instance.data.get("subset", "sceneMain") - anatomy_data = instance.context.data["anatomyData"] - repres = instance.data["representations"] - files = repres[0]["files"] - project_entity = context.data["projectEntity"] - - if files.endswith(".zip"): - # A zip file was dropped - instance_name = f"{asset_name}_{subset_name}" - task = instance.data.get("task", "harmonyIngest") - - # create new instance - new_instance = context.create_instance(instance_name) - - # add original instance data except name key - for key, value in instance.data.items(): - # Make sure value is copy since value may be object which - # can be shared across all new created objects - if key not in self.ignored_instance_data_keys: - new_instance.data[key] = copy.deepcopy(value) - - self.log.info("Copied 
data: {}".format(new_instance.data)) - - task_type = asset_data["data"]["tasks"].get(task, {}).get("type") - project_task_types = project_entity["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") - - # fix anatomy data - anatomy_data_new = copy.deepcopy(anatomy_data) - # updating hierarchy data - anatomy_data_new.update( - { - "asset": asset_data["name"], - "folder": { - "name": asset_data["name"], - }, - "task": { - "name": task, - "type": task_type, - "short": task_code, - }, - "subset": subset_name - } - ) - - new_instance.data["label"] = f"{instance_name}" - new_instance.data["subset"] = subset_name - new_instance.data["extension"] = ".zip" - new_instance.data["anatomyData"] = anatomy_data_new - new_instance.data["publish"] = True - - self.log.info(f"Created new instance: {instance_name}") - self.log.debug(f"_ inst_data: {pformat(new_instance.data)}") - - # set original instance for removal - self.log.info("Context data: {}".format(context.data)) - instance.data["remove"] = True diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py deleted file mode 100644 index eb06875601..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py +++ /dev/null @@ -1,304 +0,0 @@ -import os -from pprint import pformat -import re -from copy import deepcopy -import pyblish.api - -from openpype.client import get_asset_by_id - - -class CollectHierarchyInstance(pyblish.api.ContextPlugin): - """Collecting hierarchy context from `parents` and `hierarchy` data - present in `clip` family instances coming from the request json data file - - It will add `hierarchical_context` into each instance for integrate - plugins to be able to create needed parents for the context if they - don't exist yet - """ - - label = "Collect Hierarchy Clip" - order = pyblish.api.CollectorOrder + 0.101 - hosts = ["standalonepublisher"] - families = ["shot"] - - # presets - shot_rename = True - shot_rename_template = None - shot_rename_search_patterns = None - shot_add_hierarchy = None - shot_add_tasks = None - - def convert_to_entity(self, key, value): - # ftrack compatible entity types - types = {"shot": "Shot", - "folder": "Folder", - "episode": "Episode", - "sequence": "Sequence", - "track": "Sequence", - } - # convert to entity type - entity_type = types.get(key, None) - - # return if any - if entity_type: - return {"entity_type": entity_type, "entity_name": value} - - def rename_with_hierarchy(self, instance): - search_text = "" - parent_name = instance.context.data["assetEntity"]["name"] - clip = instance.data["item"] - clip_name = os.path.splitext(clip.name)[0].lower() - if self.shot_rename_search_patterns and self.shot_rename: - search_text += parent_name + clip_name - instance.data["anatomyData"].update({"clip_name": clip_name}) - for type, pattern in self.shot_rename_search_patterns.items(): - p = re.compile(pattern) - match = p.findall(search_text) - if not match: - continue - instance.data["anatomyData"][type] = match[-1] - - # format to new shot name - instance.data["asset"] = self.shot_rename_template.format( - **instance.data["anatomyData"]) - - def create_hierarchy(self, instance): - asset_doc = instance.context.data["assetEntity"] - project_doc = instance.context.data["projectEntity"] - project_name = project_doc["name"] - visual_hierarchy = [asset_doc] - current_doc = asset_doc - while True: - visual_parent_id = current_doc["data"]["visualParent"] 
- visual_parent = None - if visual_parent_id: - visual_parent = get_asset_by_id(project_name, visual_parent_id) - - if not visual_parent: - visual_hierarchy.append(project_doc) - break - visual_hierarchy.append(visual_parent) - current_doc = visual_parent - - # add current selection context hierarchy from standalonepublisher - parents = list() - for entity in reversed(visual_hierarchy): - parents.append({ - "entity_type": entity["data"]["entityType"], - "entity_name": entity["name"] - }) - - hierarchy = list() - if self.shot_add_hierarchy.get("enabled"): - parent_template_pattern = re.compile(r"\{([a-z]*?)\}") - # fill the parents parts from presets - shot_add_hierarchy = self.shot_add_hierarchy.copy() - hierarchy_parents = shot_add_hierarchy["parents"].copy() - - # fill parent keys data template from anatomy data - for parent_key in hierarchy_parents: - hierarchy_parents[parent_key] = hierarchy_parents[ - parent_key].format(**instance.data["anatomyData"]) - - for _index, _parent in enumerate( - shot_add_hierarchy["parents_path"].split("/")): - parent_filled = _parent.format(**hierarchy_parents) - parent_key = parent_template_pattern.findall(_parent).pop() - - # in case SP context is set to the same folder - if (_index == 0) and ("folder" in parent_key) \ - and (parents[-1]["entity_name"] == parent_filled): - self.log.debug(f" skipping : {parent_filled}") - continue - - # in case first parent is project then start parents from start - if (_index == 0) and ("project" in parent_key): - self.log.debug("rebuilding parents from scratch") - project_parent = parents[0] - parents = [project_parent] - self.log.debug(f"project_parent: {project_parent}") - self.log.debug(f"parents: {parents}") - continue - - prnt = self.convert_to_entity( - parent_key, parent_filled) - parents.append(prnt) - hierarchy.append(parent_filled) - - # convert hierarchy to string - hierarchy = "/".join(hierarchy) - - # assign to instance data - instance.data["hierarchy"] = hierarchy - instance.data["parents"] = parents - - # print - self.log.warning(f"Hierarchy: {hierarchy}") - self.log.info(f"parents: {parents}") - - tasks_to_add = dict() - if self.shot_add_tasks: - project_tasks = project_doc["config"]["tasks"] - for task_name, task_data in self.shot_add_tasks.items(): - _task_data = deepcopy(task_data) - - # fixing enumerator from settings - _task_data["type"] = task_data["type"][0] - - # check if task type in project task types - if _task_data["type"] in project_tasks.keys(): - tasks_to_add.update({task_name: _task_data}) - else: - raise KeyError( - "Wrong FtrackTaskType `{}` for `{}` does not" - " exist in `{}`".format( - _task_data["type"], - task_name, - list(project_tasks.keys()))) - - instance.data["tasks"] = tasks_to_add - - # updating hierarchy data - instance.data["anatomyData"].update({ - "asset": instance.data["asset"], - "task": "conform" - }) - - def process(self, context): - self.log.info("self.shot_add_hierarchy: {}".format( - pformat(self.shot_add_hierarchy) - )) - for instance in context: - if instance.data["family"] in self.families: - self.processing_instance(instance) - - def processing_instance(self, instance): - self.log.info(f"_ instance: {instance}") - # adding anatomyData for burnins - instance.data["anatomyData"] = deepcopy( - instance.context.data["anatomyData"]) - - asset = instance.data["asset"] - assets_shared = instance.context.data.get("assetsShared") - - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - - if self.shot_rename_template:
self.rename_with_hierarchy(instance) - - self.create_hierarchy(instance) - - shot_name = instance.data["asset"] - self.log.debug(f"Shot Name: {shot_name}") - - label = f"{shot_name} ({frame_start}-{frame_end})" - instance.data["label"] = label - - # dealing with shared attributes across instances - # with the same asset name - if assets_shared.get(asset): - asset_shared = assets_shared.get(asset) - else: - asset_shared = assets_shared[asset] - - asset_shared.update({ - "asset": instance.data["asset"], - "hierarchy": instance.data["hierarchy"], - "parents": instance.data["parents"], - "tasks": instance.data["tasks"], - "anatomyData": instance.data["anatomyData"] - }) - - -class CollectHierarchyContext(pyblish.api.ContextPlugin): - '''Collecting Hierarchy from instances and building - context hierarchy tree - ''' - - label = "Collect Hierarchy Context" - order = pyblish.api.CollectorOrder + 0.102 - hosts = ["standalonepublisher"] - families = ["shot"] - - def update_dict(self, ex_dict, new_dict): - for key in ex_dict: - if key in new_dict and isinstance(ex_dict[key], dict): - new_dict[key] = self.update_dict(ex_dict[key], new_dict[key]) - else: - if ex_dict.get(key) and new_dict.get(key): - continue - else: - new_dict[key] = ex_dict[key] - - return new_dict - - def process(self, context): - instances = context - # create hierarchyContext attr if context has none - assets_shared = context.data.get("assetsShared") - final_context = {} - for instance in instances: - if 'editorial' in instance.data.get('family', ''): - continue - # inject assetsShared to other instances with - # the same `assetShareName` attribute in data - asset_shared_name = instance.data.get("assetShareName") - - s_asset_data = assets_shared.get(asset_shared_name) - if s_asset_data: - instance.data["asset"] = s_asset_data["asset"] - instance.data["parents"] = s_asset_data["parents"] - instance.data["hierarchy"] = s_asset_data["hierarchy"] - instance.data["tasks"] = s_asset_data["tasks"] - instance.data["anatomyData"] = s_asset_data["anatomyData"] - - # generate hierarchy data only on shot instances - if 'shot' not in instance.data.get('family', ''): - continue - - # get handles - handle_start = int(instance.data["handleStart"]) - handle_end = int(instance.data["handleEnd"]) - - in_info = {} - - # suppose that all instances are Shots - in_info['entity_type'] = 'Shot' - - # get custom attributes of the shot - - in_info['custom_attributes'] = { - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": instance.data["frameStart"], - "frameEnd": instance.data["frameEnd"], - "clipIn": instance.data["clipIn"], - "clipOut": instance.data["clipOut"], - 'fps': instance.data["fps"] - } - - in_info['tasks'] = instance.data['tasks'] - - from pprint import pformat - parents = instance.data.get('parents', []) - self.log.debug(f"parents: {pformat(parents)}") - - # Split by '/' for AYON where asset is a path - name = instance.data["asset"].split("/")[-1] - actual = {name: in_info} - - for parent in reversed(parents): - next_dict = {} - parent_name = parent["entity_name"] - next_dict[parent_name] = {} - next_dict[parent_name]["entity_type"] = parent["entity_type"] - next_dict[parent_name]["childs"] = actual - actual = next_dict - - final_context = self.update_dict(final_context, actual) - - # adding hierarchy context to instance - context.data["hierarchyContext"] = final_context - self.log.debug(f"hierarchyContext: {pformat(final_context)}") - self.log.info("Hierarchy instance collected") diff --git 
a/openpype/hosts/standalonepublisher/plugins/publish/collect_instance_data.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_instance_data.py deleted file mode 100644 index be87e72302..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_instance_data.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Requires: - Nothing - -Provides: - Instance -""" - -import pyblish.api -from pprint import pformat - - -class CollectInstanceData(pyblish.api.InstancePlugin): - """ - Collector with only one reason for its existence - remove 'ftrack' - family implicitly added by Standalone Publisher - """ - - label = "Collect instance data" - order = pyblish.api.CollectorOrder + 0.49 - families = ["render", "plate", "review"] - hosts = ["standalonepublisher"] - - def process(self, instance): - fps = instance.context.data["fps"] - - instance.data.update({ - "fps": fps - }) - self.log.debug(f"instance.data: {pformat(instance.data)}") diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py deleted file mode 100644 index 82d7247b2b..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ /dev/null @@ -1,127 +0,0 @@ -import os -import re -import collections -import pyblish.api -from pprint import pformat - -from openpype.client import get_assets - - -class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): - """ - Find the asset document matching the instance's source file name. - """ - - label = "Collect Matching Asset to Instance" - order = pyblish.api.CollectorOrder - 0.05 - hosts = ["standalonepublisher"] - families = ["background_batch", "render_mov_batch"] - - # Version regex to parse asset name and version from filename - version_regex = re.compile(r"^(.+)_v([0-9]+)$") - - def process(self, instance): - source_filename = self.get_source_filename(instance) - self.log.info("Looking for asset document for file \"{}\"".format( - source_filename - )) - asset_name = os.path.splitext(source_filename)[0].lower() - - asset_docs_by_name = self.selection_children_by_name(instance) - - version_number = None - # Always first check if source filename is in assets - matching_asset_doc = asset_docs_by_name.get(asset_name) - if matching_asset_doc is None: - # Check if source file contains a version in its name - self.log.debug(( - "Asset doc by \"{}\" was not found, trying version regex." - ).format(asset_name)) - regex_result = self.version_regex.findall(asset_name) - if regex_result: - _asset_name, _version_number = regex_result[0] - matching_asset_doc = asset_docs_by_name.get(_asset_name) - if matching_asset_doc: - version_number = int(_version_number) - - if matching_asset_doc is None: - for asset_name_low, asset_doc in asset_docs_by_name.items(): - if asset_name_low in asset_name: - matching_asset_doc = asset_doc - break - - if not matching_asset_doc: - self.log.debug("Available asset names {}".format( - str(list(asset_docs_by_name.keys())) - )) - # TODO better error message - raise AssertionError(( - "Filename \"{}\" does not match" - " any name of asset documents in the database for your selection."
- ).format(source_filename) - - instance.data["asset"] = matching_asset_doc["name"] - instance.data["assetEntity"] = matching_asset_doc - if version_number is not None: - instance.data["version"] = version_number - - self.log.info( - f"Matching asset found: {pformat(matching_asset_doc)}" - ) - - def get_source_filename(self, instance): - if instance.data["family"] == "background_batch": - return os.path.basename(instance.data["source"]) - - if len(instance.data["representations"]) != 1: - raise ValueError(( - "Implementation bug: Instance data contains" - " more than one representation." - )) - - repre = instance.data["representations"][0] - repre_files = repre["files"] - if not isinstance(repre_files, str): - raise ValueError(( - "Implementation bug: Instance's representation contains" - " an unexpected value (expected a single file). {}" - ).format(str(repre_files))) - return repre_files - - def selection_children_by_name(self, instance): - storing_key = "childrenDocsForSelection" - - children_docs = instance.context.data.get(storing_key) - if children_docs is None: - top_asset_doc = instance.context.data["assetEntity"] - assets_by_parent_id = self._asset_docs_by_parent_id(instance) - _children_docs = self._children_docs( - assets_by_parent_id, top_asset_doc - ) - children_docs = { - children_doc["name"].lower(): children_doc - for children_doc in _children_docs - } - instance.context.data[storing_key] = children_docs - return children_docs - - def _children_docs(self, documents_by_parent_id, parent_doc): - # Find all children in reverse order; the last child comes first. - output = [] - children = documents_by_parent_id.get(parent_doc["_id"]) or tuple() - for child in children: - output.extend( - self._children_docs(documents_by_parent_id, child) - ) - output.append(parent_doc) - return output - - def _asset_docs_by_parent_id(self, instance): - # Query all assets for project and store them by parent's id to list - project_name = instance.context.data["projectEntity"]["name"] - asset_docs_by_parent_id = collections.defaultdict(list) - for asset_doc in get_assets(project_name): - parent_id = asset_doc["data"]["visualParent"] - asset_docs_by_parent_id[parent_id].append(asset_doc) - return asset_docs_by_parent_id diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_remove_marked.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_remove_marked.py deleted file mode 100644 index 4279d67655..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_remove_marked.py +++ /dev/null @@ -1,21 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect instances that are marked for removal and remove them.""" -import pyblish.api - - -class CollectRemoveMarked(pyblish.api.ContextPlugin): - """Clean up instances marked for removal. - - Note: - This is a workaround for race conditions when removing instances
- """ - - order = pyblish.api.CollectorOrder + 0.499 - label = 'Remove Marked Instances' - - def process(self, context): - """Plugin entry point.""" - for instance in context: - if instance.data.get('remove'): - context.remove(instance) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_representation_names.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_representation_names.py deleted file mode 100644 index 82dbba3345..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_representation_names.py +++ /dev/null @@ -1,31 +0,0 @@ -import re -import os -import pyblish.api - - -class CollectRepresentationNames(pyblish.api.InstancePlugin): - """ - Sets the representation names for given families based on RegEx filter - """ - - label = "Collect Representation Names" - order = pyblish.api.CollectorOrder - families = [] - hosts = ["standalonepublisher"] - name_filter = "" - - def process(self, instance): - for repre in instance.data['representations']: - new_repre_name = None - if isinstance(repre['files'], list): - shortened_name = os.path.splitext(repre['files'][0])[0] - new_repre_name = re.search(self.name_filter, - shortened_name).group() - else: - new_repre_name = re.search(self.name_filter, - repre['files']).group() - - if new_repre_name: - repre['name'] = new_repre_name - - repre['outputName'] = repre['name'] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py deleted file mode 100644 index c1c48ec72d..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py +++ /dev/null @@ -1,524 +0,0 @@ -import os -import re -import pyblish.api -import json - -from openpype.lib import ( - prepare_template_data, - StringTemplate, -) - - -class CollectTextures(pyblish.api.ContextPlugin): - """Collect workfile (and its resource_files) and textures. - - Currently implements use case with Mari and Substance Painter, where - one workfile is main (.mra - Mari) with possible additional workfiles - (.spp - Substance) - - - Provides: - 1 instance per workfile (with 'resources' filled if needed) - (workfile family) - 1 instance per group of textures - (textures family) - """ - - order = pyblish.api.CollectorOrder - label = "Collect Textures" - hosts = ["standalonepublisher"] - families = ["texture_batch"] - actions = [] - - # from presets - main_workfile_extensions = ['mra'] - other_workfile_extensions = ['spp', 'psd'] - texture_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga", - "gif", "svg"] - - # additional families (ftrack etc.) 
- workfile_families = [] - textures_families = [] - - color_space = ["linsRGB", "raw", "acesg"] - - # currently implemented placeholders ["color_space"] - # describing patterns in file names split by regex groups - input_naming_patterns = { - # workfile: corridorMain_v001.mra > - # texture: corridorMain_aluminiumID_v001_baseColor_linsRGB_1001.exr - "workfile": r'^([^.]+)(_[^_.]*)?_v([0-9]{3,}).+', - "textures": r'^([^_.]+)_([^_.]+)_v([0-9]{3,})_([^_.]+)_({color_space})_(1[0-9]{3}).+', # noqa - } - # matching regex group position to 'input_naming_patterns' - input_naming_groups = { - "workfile": ('asset', 'filler', 'version'), - "textures": ('asset', 'shader', 'version', 'channel', 'color_space', - 'udim') - } - - workfile_subset_template = "textures{Subset}Workfile" - # implemented keys: ["color_space", "channel", "subset", "shader"] - texture_subset_template = "textures{Subset}_{Shader}_{Channel}" - - def process(self, context): - self.context = context - - resource_files = {} - workfile_files = {} - representations = {} - version_data = {} - asset_builds = set() - asset = None - for instance in context: - if not self.input_naming_patterns: - raise ValueError("Naming patterns are not configured. \n" - "Ask admin to provide naming conventions " - "for workfiles and textures.") - - if not asset: - asset = instance.data["asset"] # selected from SP - - parsed_subset = instance.data["subset"].replace( - instance.data["family"], '') - - explicit_data = { - "subset": parsed_subset - } - - processed_instance = False - for repre in instance.data["representations"]: - ext = repre["ext"].replace('.', '') - asset_build = version = None - - if isinstance(repre["files"], list): - repre_file = repre["files"][0] - else: - repre_file = repre["files"] - - if ext in self.main_workfile_extensions or \ - ext in self.other_workfile_extensions: - - formatting_data = self._get_parsed_groups( - repre_file, - self.input_naming_patterns["workfile"], - self.input_naming_groups["workfile"], - self.color_space - ) - self.log.info("Parsed groups from workfile " - "name '{}': {}".format(repre_file, - formatting_data)) - - formatting_data.update(explicit_data) - fill_pairs = prepare_template_data(formatting_data) - workfile_subset = StringTemplate.format_strict_template( - self.workfile_subset_template, fill_pairs - ) - - asset_build = self._get_asset_build( - repre_file, - self.input_naming_patterns["workfile"], - self.input_naming_groups["workfile"], - self.color_space - ) - version = self._get_version( - repre_file, - self.input_naming_patterns["workfile"], - self.input_naming_groups["workfile"], - self.color_space - ) - asset_builds.add((asset_build, version, - workfile_subset, 'workfile')) - processed_instance = True - - if not representations.get(workfile_subset): - representations[workfile_subset] = [] - - if ext in self.main_workfile_extensions: - # workfiles can have only a single representation - # currently OP does not support different extensions in - # representation files - representations[workfile_subset] = [repre] - - workfile_files[asset_build] = repre_file - - if ext in self.other_workfile_extensions: - # add only if not added already from main - if not representations.get(workfile_subset): - representations[workfile_subset] = [repre] - - # only overwrite if not present - if not workfile_files.get(asset_build): - workfile_files[asset_build] = repre_file - - if not resource_files.get(workfile_subset): - resource_files[workfile_subset] = [] - item = { - "files": [os.path.join(repre["stagingDir"],
repre["files"])], - "source": "standalone publisher" - } - resource_files[workfile_subset].append(item) - - if ext in self.texture_extensions: - formatting_data = self._get_parsed_groups( - repre_file, - self.input_naming_patterns["textures"], - self.input_naming_groups["textures"], - self.color_space - ) - - self.log.info("Parsed groups from texture " - "name '{}': {}".format(repre_file, - formatting_data)) - - c_space = self._get_color_space( - repre_file, - self.color_space - ) - - # optional value - channel = self._get_channel_name( - repre_file, - self.input_naming_patterns["textures"], - self.input_naming_groups["textures"], - self.color_space - ) - - # optional value - shader = self._get_shader_name( - repre_file, - self.input_naming_patterns["textures"], - self.input_naming_groups["textures"], - self.color_space - ) - - explicit_data = { - "color_space": c_space or '', # None throws exception - "channel": channel or '', - "shader": shader or '', - "subset": parsed_subset or '' - } - - formatting_data.update(explicit_data) - - fill_pairs = prepare_template_data(formatting_data) - subset = StringTemplate.format_strict_template( - self.texture_subset_template, fill_pairs - ) - - asset_build = self._get_asset_build( - repre_file, - self.input_naming_patterns["textures"], - self.input_naming_groups["textures"], - self.color_space - ) - version = self._get_version( - repre_file, - self.input_naming_patterns["textures"], - self.input_naming_groups["textures"], - self.color_space - ) - if not representations.get(subset): - representations[subset] = [] - representations[subset].append(repre) - - ver_data = { - "color_space": c_space or '', - "channel_name": channel or '', - "shader_name": shader or '' - } - version_data[subset] = ver_data - - asset_builds.add( - (asset_build, version, subset, "textures")) - processed_instance = True - - if processed_instance: - self.context.remove(instance) - - self._create_new_instances(context, - asset, - asset_builds, - resource_files, - representations, - version_data, - workfile_files) - - def _create_new_instances(self, context, asset, asset_builds, - resource_files, representations, - version_data, workfile_files): - """Prepare new instances from collected data. 
-
-        Args:
-            context (ContextPlugin)
-            asset (str): selected asset from SP
-            asset_builds (set): tuples of
-                (asset_build, version, subset, family)
-            resource_files (list): resource dicts storing additional
-                files for the main workfile
-            representations (list): dicts storing workfile info OR
-                all collected texture files, key is asset_build
-            version_data (dict): prepared to be stored in the version
-                doc in DB
-            workfile_files (dict): workfiles to add to textures,
-                key is asset_build
-        """
-        # sort workfiles first
-        asset_builds = sorted(asset_builds,
-                              key=lambda tup: tup[3], reverse=True)
-
-        # a workfile must have a version, textures might not have one
-        main_version = None
-        for asset_build, version, subset, family in asset_builds:
-            if not main_version:
-                main_version = version
-
-            try:
-                version_int = int(version or main_version or 1)
-            except ValueError:
-                self.log.error("Parsed version {} is not "
-                               "a number".format(version))
-
-            new_instance = context.create_instance(subset)
-            new_instance.data.update(
-                {
-                    "subset": subset,
-                    "asset": asset,
-                    "label": subset,
-                    "name": subset,
-                    "family": family,
-                    "version": version_int,
-                    "asset_build": asset_build  # removed in validator
-                }
-            )
-
-            workfile = workfile_files.get(asset_build)
-
-            if resource_files.get(subset):
-                # add resources only when the workfile is main style
-                for ext in self.main_workfile_extensions:
-                    if ext in workfile:
-                        new_instance.data.update({
-                            "resources": resource_files.get(subset)
-                        })
-                        break
-
-            # store origin
-            if family == 'workfile':
-                families = self.workfile_families
-                families.append("texture_batch_workfile")
-
-                new_instance.data["source"] = "standalone publisher"
-            else:
-                families = self.textures_families
-
-            repre = representations.get(subset)[0]
-            new_instance.context.data["currentFile"] = os.path.join(
-                repre["stagingDir"], workfile or 'dummy.txt')
-
-            new_instance.data["families"] = families
-
-            # add data for the version document
-            ver_data = version_data.get(subset)
-            if ver_data:
-                if workfile:
-                    ver_data['workfile'] = workfile
-
-                new_instance.data.update(
-                    {"versionData": ver_data}
-                )
-
-            upd_representations = representations.get(subset)
-            if upd_representations and family != 'workfile':
-                upd_representations = self._update_representations(
-                    upd_representations)
-
-            new_instance.data["representations"] = upd_representations
-
-            self.log.debug("new instance - {}:: {}".format(
-                family,
-                json.dumps(new_instance.data, indent=4)))
-
-    def _get_asset_build(self, name,
-                         input_naming_patterns, input_naming_groups,
-                         color_spaces):
-        """Loops through configured workfile patterns to find the asset name.
-
-        The asset name is used to bind a workfile to its textures.
-
-        Args:
-            name (str): workfile name
-            input_naming_patterns (list):
-                [workfile_pattern] or [texture_pattern]
-            input_naming_groups (list):
-                ordinal positions of regex groups matching
-                'input_naming_patterns'
-
-            color_spaces (list): predefined color spaces
-        """
-        asset_name = "NOT_AVAIL"
-
-        return (self._parse_key(name, input_naming_patterns,
-                                input_naming_groups, color_spaces, 'asset') or
-                asset_name)
-
-    def _get_version(self, name, input_naming_patterns, input_naming_groups,
-                     color_spaces):
-        found = self._parse_key(name, input_naming_patterns,
-                                input_naming_groups, color_spaces, 'version')
-
-        if found:
-            return found.replace('v', '')
-
-        self.log.info("No version found in the name {}".format(name))
-
-    def _get_udim(self, name, input_naming_patterns, input_naming_groups,
-                  color_spaces):
-        """Parses the UDIM value from 'name'."""
-        found = self._parse_key(name, input_naming_patterns,
-                                input_naming_groups, color_spaces, 'udim')
-        if found:
-            return found
-
-        self.log.warning("Didn't find UDIM in {}".format(name))
-
-    def _get_color_space(self, name, color_spaces):
-        """Looks for a color space from a list in a file name.
-
-        A color space is not reliably recognizable by a regex pattern, so a
-        set of known color spaces must be provided.
-        """
-        color_space = None
-        found = [cs for cs in color_spaces if
-                 re.search("_{}_".format(cs), name)]
-
-        if not found:
-            self.log.warning("No color space found in {}".format(name))
-        else:
-            if len(found) > 1:
-                msg = "Multiple color spaces found in {}->{}".format(name,
-                                                                     found)
-                self.log.warning(msg)
-
-            color_space = found[0]
-
-        return color_space
-
-    def _get_shader_name(self, name, input_naming_patterns,
-                         input_naming_groups, color_spaces):
-        """Return the parsed shader name.
-
-        The shader name is needed for overlapping udims (eg. udims might be
-        used for different materials; the shader name prevents overwriting).
-
-        The format of the channel name is unknown, but color spaces come
-        from a known list - 'color_space' is used as a placeholder.
-        """
-        found = None
-        try:
-            found = self._parse_key(name, input_naming_patterns,
-                                    input_naming_groups, color_spaces,
-                                    'shader')
-        except ValueError:
-            self.log.warning("Didn't find shader in {}".format(name))
-
-        return found
-
-    def _get_channel_name(self, name, input_naming_patterns,
-                          input_naming_groups, color_spaces):
-        """Return the parsed channel name.
-
-        The format of the channel name is unknown, but color spaces come
-        from a known list - 'color_space' is used as a placeholder.
-        """
-        found = None
-        try:
-            found = self._parse_key(name, input_naming_patterns,
-                                    input_naming_groups, color_spaces,
-                                    'channel')
-        except ValueError:
-            self.log.warning("Didn't find channel in {}".format(name))
-
-        return found
-
-    def _parse_key(self, name, input_naming_patterns, input_naming_groups,
-                   color_spaces, key):
-        """Universal way to parse 'name' with configurable regex groups.
-
-        Args:
-            name (str): workfile name
-            input_naming_patterns (list):
-                [workfile_pattern] or [texture_pattern]
-            input_naming_groups (list):
-                ordinal positions of regex groups matching
-                'input_naming_patterns'
-            color_spaces (list): predefined color spaces
-
-        Raises:
-            ValueError - if 'input_naming_groups' is broken
-        """
-        parsed_groups = self._get_parsed_groups(name,
-                                                input_naming_patterns,
-                                                input_naming_groups,
-                                                color_spaces)
-
-        try:
-            parsed_value = parsed_groups[key]
-            return parsed_value
-        except (IndexError, KeyError):
-            msg = ("'Textures group positions' must " +
-                   "have '{}' key".format(key))
-            raise ValueError(msg)
-
-    def _get_parsed_groups(self, name, input_naming_patterns,
-                           input_naming_groups, color_spaces):
-        """Universal way to parse 'name' with configurable regex groups.
-
-        Args:
-            name (str): workfile name or texture name
-            input_naming_patterns (list):
-                [workfile_pattern] or [texture_pattern]
-            input_naming_groups (list):
-                ordinal positions of regex groups matching
-                'input_naming_patterns'
-            color_spaces (list): predefined color spaces
-
-        Returns:
-            (dict) {group_name: parsed_value}
-        """
-        for input_pattern in input_naming_patterns:
-            for cs in color_spaces:
-                pattern = input_pattern.replace('{color_space}', cs)
-                regex_result = re.findall(pattern, name)
-                if regex_result:
-                    if len(regex_result[0]) == len(input_naming_groups):
-                        return dict(zip(input_naming_groups, regex_result[0]))
-                    else:
-                        self.log.warning("Number of parsed groups doesn't "
-                                         "match number of group labels")
-
-        raise ValueError("Name '{}' cannot be parsed by any "
-                         "'{}' patterns".format(name, input_naming_patterns))
-
-    def _update_representations(self, upd_representations):
-        """Frames don't make sense for textures; add collected udims instead."""
-        udims = []
-        for repre in upd_representations:
-            repre.pop("frameStart", None)
-            repre.pop("frameEnd", None)
-            repre.pop("fps", None)
-
-            # ignore unique name from SP, use extension instead
-            # SP enforces unique names, here different subsets >> unique repres
-            repre["name"] = repre["ext"].replace('.', '')
-
-            files = repre.get("files", [])
-            if not isinstance(files, list):
-                files = [files]
-
-            for file_name in files:
-                udim = self._get_udim(file_name,
-                                      self.input_naming_patterns["textures"],
-                                      self.input_naming_groups["textures"],
-                                      self.color_space)
-                udims.append(udim)
-
-            repre["udim"] = udims  # must be this way, used for filling path
-
-        return upd_representations
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py
deleted file mode 100644
index 1183180833..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import pyblish.api
-
-
-class ExtractResources(pyblish.api.InstancePlugin):
-    """
-    Extracts files from instance.data["resources"].
-
-    These files are additional (textures etc.), currently not stored in
-    representations!
-
-    Expects collected 'resourcesDir' and 'resources'
(list of dicts with a 'files' key and
-    a list of source urls).
-
-    Provides filled 'transfers' (list of tuples (source_url, target_url)).
-    """
-
-    label = "Extract Resources SP"
-    hosts = ["standalonepublisher"]
-    order = pyblish.api.ExtractorOrder
-
-    families = ["workfile"]
-
-    def process(self, instance):
-        if not instance.data.get("resources"):
-            self.log.info("No resources")
-            return
-
-        if not instance.data.get("transfers"):
-            instance.data["transfers"] = []
-
-        publish_dir = instance.data["resourcesDir"]
-
-        transfers = []
-        for resource in instance.data["resources"]:
-            for file_url in resource.get("files", []):
-                file_name = os.path.basename(file_url)
-                dest_url = os.path.join(publish_dir, file_name)
-                transfers.append((file_url, dest_url))
-
-        self.log.info("transfers:: {}".format(transfers))
-        instance.data["transfers"].extend(transfers)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py
deleted file mode 100644
index a2afd160fa..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import os
-import subprocess
-import tempfile
-import pyblish.api
-from openpype.lib import (
-    get_ffmpeg_tool_args,
-    get_ffprobe_streams,
-    path_to_subprocess_arg,
-    run_subprocess,
-)
-
-
-class ExtractThumbnailSP(pyblish.api.InstancePlugin):
-    """Extract a jpeg thumbnail from component input in Standalone Publisher.
-
-    Uses a jpeg file from the component if possible (when one or more jpegs
-    are loaded to the component selected as thumbnail), otherwise extracts
-    a single jpeg from the input file(s) to a temp location.
-    """
-
-    label = "Extract Thumbnail SP"
-    hosts = ["standalonepublisher"]
-    order = pyblish.api.ExtractorOrder
-
-    # Presetable attribute
-    ffmpeg_args = None
-
-    def process(self, instance):
-        repres = instance.data.get('representations')
-        if not repres:
-            return
-
-        thumbnail_repre = None
-        for repre in repres:
-            if repre.get("thumbnail"):
-                thumbnail_repre = repre
-                break
-
-        if not thumbnail_repre:
-            return
-
-        thumbnail_repre.pop("thumbnail")
-        files = thumbnail_repre.get("files")
-        if not files:
-            return
-
-        if isinstance(files, list):
-            first_filename = str(files[0])
-        else:
-            first_filename = files
-
-        # Convert to jpeg if not yet
-        full_input_path = os.path.join(
-            thumbnail_repre["stagingDir"], first_filename
-        )
-        self.log.info("input {}".format(full_input_path))
-        with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp:
-            full_thumbnail_path = tmp.name
-
-        self.log.info("output {}".format(full_thumbnail_path))
-
-        instance.context.data["cleanupFullPaths"].append(full_thumbnail_path)
-
-        ffmpeg_executable_args = get_ffmpeg_tool_args("ffmpeg")
-
-        ffmpeg_args = self.ffmpeg_args or {}
-
-        jpeg_items = [
-            subprocess.list2cmdline(ffmpeg_executable_args),
-            # override file if already exists
-            "-y"
-        ]
-
-        # add input filters from presets
-        jpeg_items.extend(ffmpeg_args.get("input") or [])
-        # input file
-        jpeg_items.extend([
-            "-i", path_to_subprocess_arg(full_input_path),
-            # extract only single file
-            "-frames:v", "1",
-            # Add black background for transparent images
-            "-filter_complex", (
-                "\"color=black,format=rgb24[c]"
-                ";[c][0]scale2ref[c][i]"
-                ";[c][i]overlay=format=auto:shortest=1,setsar=1\""
-            ),
-        ])
-
-        jpeg_items.extend(ffmpeg_args.get("output") or [])
-
-        # output file
-        jpeg_items.append(path_to_subprocess_arg(full_thumbnail_path))
-
-        subprocess_jpeg = " ".join(jpeg_items)
-
-        # run subprocess
self.log.debug("Executing: {}".format(subprocess_jpeg)) - run_subprocess( - subprocess_jpeg, shell=True, logger=self.log - ) - - # remove thumbnail key from origin repre - streams = get_ffprobe_streams(full_thumbnail_path) - width = height = None - for stream in streams: - if "width" in stream and "height" in stream: - width = stream["width"] - height = stream["height"] - break - - staging_dir, filename = os.path.split(full_thumbnail_path) - - # create new thumbnail representation - representation = { - 'name': 'thumbnail', - 'ext': 'jpg', - 'files': filename, - "stagingDir": staging_dir, - "tags": ["thumbnail", "delete"], - "thumbnail": True - } - if width and height: - representation["width"] = width - representation["height"] = height - - self.log.info(f"New representation {representation}") - instance.data["representations"].append(representation) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py deleted file mode 100644 index 9ff84e32fb..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import pyblish.api - - -class ExtractWorkfileUrl(pyblish.api.ContextPlugin): - """ - Modifies 'workfile' field to contain link to published workfile. - - Expects that batch contains only single workfile and matching - (multiple) textures. - """ - - label = "Extract Workfile Url SP" - hosts = ["standalonepublisher"] - order = pyblish.api.ExtractorOrder - - families = ["textures"] - - def process(self, context): - filepath = None - - # first loop for workfile - for instance in context: - if instance.data["family"] == 'workfile': - anatomy = context.data['anatomy'] - template_data = instance.data.get("anatomyData") - rep_name = instance.data.get("representations")[0].get("name") - template_data["representation"] = rep_name - template_data["ext"] = rep_name - template_obj = anatomy.templates_obj["publish"]["path"] - template_filled = template_obj.format_strict(template_data) - filepath = os.path.normpath(template_filled) - self.log.info("Using published scene for render {}".format( - filepath)) - break - - if not filepath: - self.log.info("Texture batch doesn't contain workfile.") - return - - # then apply to all textures - for instance in context: - if instance.data["family"] == 'textures': - instance.data["versionData"]["workfile"] = filepath diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml deleted file mode 100644 index 803de6bf11..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_editorial_resources.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - -Missing source video file - -## No attached video file found - -Process expects presence of source video file with same name prefix as an editorial file in same folder. -(example `simple_editorial_setup_Layer1.edl` expects `simple_editorial_setup.mp4` in same folder) - - -### How to repair? - -Copy source video file to the folder next to `.edl` file. (On a disk, do not put it into Standalone Publisher.) 
-
-
-
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml
deleted file mode 100644
index 77b8727162..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_shot_duplicates.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-
-Duplicate shots
-
-## Duplicate shot names
-
-Process contains duplicated shot names '{duplicates_str}'.
-
-### How to repair?
-
-Remove shot duplicates.
-
-
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml
deleted file mode 100644
index b65d274fe5..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-
-Invalid texture name
-
-## Invalid file name
-
-Submitted file has an invalid name:
-'{invalid_file}'
-
-### How to repair?
-
- Texture file must adhere to the naming conventions for Unreal:
- T_{asset}_*.ext
-
-
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml
deleted file mode 100644
index d527d2173e..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_sources.xml
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-
-Files not found
-
-## Source files not found
-
-Process contains source files that were not found:
-'{files_not_found}'
-
-### How to repair?
-
-Add missing files or run Publish again to collect new publishable files.
-
-
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml
deleted file mode 100644
index a943f560d0..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_task_existence.xml
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-
-Task not found
-
-## Task not found in database
-
-Process contains tasks that don't exist in database:
-'{task_not_found}'
-
-### How to repair?
-
-Remove the set task or add the task into the database in the proper place.
-
-
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml
deleted file mode 100644
index a645df8d02..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_batch.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-
-No texture files found
-
-## Batch doesn't contain texture files
-
-Batch must contain at least one texture file.
-
-### How to repair?
-
-Add a texture file to the batch, or check that the name follows the naming convention used to match texture files to the batch.
-
-
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml
deleted file mode 100644
index 077987a96d..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_has_workfile.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-
-No workfile found
-
-## Batch should contain workfile
-
-It is expected that the publish contains the workfile that served as a source for the textures.
-
-### How to repair?
-
-Add the workfile to the batch, or disable this validator if you do not want the workfile published.
-
-
\ No newline at end of file
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml
deleted file mode 100644
index 2610917736..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_name.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-
-Asset name not found
-
-## Couldn't parse asset name from a file
-
-Unable to parse asset name from '{file_name}'. File name doesn't match the configured naming convention.
-
-### How to repair?
-
-Check Settings: project_settings/standalonepublisher/publish/CollectTextures for the naming convention.
-
-
-### __Detailed Info__ (optional)
-
-This error happens when parsing cannot figure out the name of the asset the texture files belong under.
-
-
-
-Missing keys
-
-## Texture file name is missing some required keys
-
-Texture '{file_name}' is missing values for {missing_str} keys.
-
-### How to repair?
-
-Fix the name of the texture file and Publish again.
-
-
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml
deleted file mode 100644
index 1e536e604f..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_versions.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
-
-Texture version
-
-## Texture version mismatch with workfile
-
-Workfile '{file_name}' version doesn't match texture version '{version}'.
-
-### How to repair?
-
-Rename either the workfile or the texture so that their versions match.
-
-
-### __Detailed Info__ (optional)
-
-This might happen if you are trying to publish textures for an older version of the workfile (or the other way around).
-(Eg. publishing 'workfile_v001' and 'texture_file_v002')
-
-
-
-Too many versions
-
-## Too many versions published at same time
-
-It is currently expected that only a batch with a single version is published.
-
-Found {found} versions.
-
-### How to repair?
-
-Please remove files with different versions and split publishing into multiple steps.
-
-
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml
deleted file mode 100644
index 8187eb0bc8..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_texture_workfiles.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
-No secondary workfile
-
-## No secondary workfile found
-
-The current process expects that a primary workfile (for example with an extension '{extension}') will also come with a 'secondary' workfile.
-
-Secondary workfile for '{file_name}' wasn't found.
-
-### How to repair?
-
-Attach the secondary workfile or disable this validator and Publish again.
-
-
-### __Detailed Info__ (optional)
-
-This process was implemented for a possible use case of the primary workfile coming from Mari and the secondary workfile for textures from Substance.
-Publish should contain both if the primary workfile is present.
-
-
-
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py
deleted file mode 100644
index 3d2b6d04ad..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import pyblish.api
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-)
-
-
-class ValidateEditorialResources(pyblish.api.InstancePlugin):
-    """Validate there is a "mov" next to the editorial file."""
-
-    label = "Validate Editorial Resources"
-    hosts = ["standalonepublisher"]
-    families = ["clip", "trimming"]
-
-    # make sure it is enabled only if at least both families are available
-    match = pyblish.api.Subset
-
-    order = ValidateContentsOrder
-
-    def process(self, instance):
-        self.log.debug(
-            f"Instance: {instance}, Families: "
-            f"{[instance.data['family']] + instance.data['families']}")
-        check_file = instance.data["editorialSourcePath"]
-        msg = "Missing source video file."
-
-        if not check_file:
-            raise PublishXmlValidationError(self, msg)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py
deleted file mode 100644
index e46fbe6098..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import re
-
-import pyblish.api
-
-from openpype.pipeline.context_tools import get_current_project_asset
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-)
-
-
-class ValidateFrameRange(pyblish.api.InstancePlugin):
-    """Validating frame range of rendered files against state in DB."""
-
-    label = "Validate Frame Range"
-    hosts = ["standalonepublisher"]
-    families = ["render"]
-    order = ValidateContentsOrder
-
-    optional = True
-    # published data might be a sequence (.mov, .mp4); in that case counting
-    # files doesn't make sense
-    check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga",
-                        "gif", "svg"]
-    skip_timelines_check = []  # skip for specific task names (regex)
-
-    def process(self, instance):
-        if any(re.search(pattern, instance.data["task"])
-               for pattern in self.skip_timelines_check):
-            self.log.info("Skipping for {} task".format(instance.data["task"]))
-            return
-
-        # TODO replace query with using 'instance.data["assetEntity"]'
-        asset_data = get_current_project_asset(instance.data["asset"])["data"]
-        frame_start = asset_data["frameStart"]
-        frame_end = asset_data["frameEnd"]
-        handle_start = asset_data["handleStart"]
-        handle_end = asset_data["handleEnd"]
-        duration = (frame_end - frame_start + 1) + handle_start + handle_end
-
-        repre = instance.data.get("representations", [None])
-        if not repre:
-            self.log.info("No representations, skipping.")
-            return
-
-        ext = repre[0]['ext'].replace(".", '')
-
-        if not ext or ext.lower() not in self.check_extensions:
-            self.log.warning("Cannot check for extension {}".format(ext))
-            return
-
-        files = instance.data.get("representations", [None])[0]["files"]
-        if isinstance(files, str):
-            files = [files]
-        frames = len(files)
-
-        msg = "Frame duration from DB:'{}' ".format(int(duration)) +\
-              " doesn't match number of files:'{}'".format(frames) +\
-              " Please change frame range for Asset or limit no.
of files"
-
-        formatting_data = {"duration": duration,
-                           "found": frames}
-        if frames != duration:
-            raise PublishXmlValidationError(self, msg,
-                                            formatting_data=formatting_data)
-
-        self.log.debug("Valid ranges expected '{}' - found '{}'".
-                       format(int(duration), frames))
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py
deleted file mode 100644
index df04ae3b66..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import pyblish.api
-
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-)
-
-
-class ValidateShotDuplicates(pyblish.api.ContextPlugin):
-    """Validates that there are no duplicate shot names in the context."""
-
-    label = "Validate Shot Duplicates"
-    hosts = ["standalonepublisher"]
-    order = ValidateContentsOrder
-
-    def process(self, context):
-        shot_names = []
-        duplicate_names = []
-        for instance in context:
-            name = instance.data["name"]
-            if name in shot_names:
-                duplicate_names.append(name)
-            else:
-                shot_names.append(name)
-
-        msg = "There are duplicate shot names:\n{}".format(duplicate_names)
-
-        formatting_data = {"duplicates_str": ','.join(duplicate_names)}
-        if duplicate_names:
-            raise PublishXmlValidationError(self, msg,
-                                            formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py
deleted file mode 100644
index 1782f53de2..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-
-import pyblish.api
-
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-)
-
-
-class ValidateSources(pyblish.api.InstancePlugin):
-    """Validates source files.
-
-    Loops through all 'files' in 'stagingDir' and checks that they actually
-    exist. They might have been deleted between starting SP and now.
- - """ - order = ValidateContentsOrder - label = "Check source files" - - optional = True # only for unforeseeable cases - - hosts = ["standalonepublisher"] - - def process(self, instance): - self.log.info("instance {}".format(instance.data)) - - missing_files = set() - for repre in instance.data.get("representations") or []: - files = [] - if isinstance(repre["files"], str): - files.append(repre["files"]) - else: - files = list(repre["files"]) - - for file_name in files: - source_file = os.path.join(repre["stagingDir"], - file_name) - - if not os.path.exists(source_file): - missing_files.add(source_file) - - msg = "Files '{}' not found".format(','.join(missing_files)) - formatting_data = {"files_not_found": ' - {}'.join(missing_files)} - if missing_files: - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py deleted file mode 100644 index 19ea1a4778..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py +++ /dev/null @@ -1,59 +0,0 @@ -import pyblish.api - -from openpype.client import get_assets -from openpype.pipeline import PublishXmlValidationError - - -class ValidateTaskExistence(pyblish.api.ContextPlugin): - """Validating tasks on instances are filled and existing.""" - - label = "Validate Task Existence" - order = pyblish.api.ValidatorOrder - - hosts = ["standalonepublisher"] - families = ["render_mov_batch"] - - def process(self, context): - asset_names = set() - for instance in context: - asset_names.add(instance.data["asset"]) - - project_name = context.data["projectEntity"]["name"] - asset_docs = get_assets( - project_name, - asset_names=asset_names, - fields=["name", "data.tasks"] - ) - tasks_by_asset_names = {} - for asset_doc in asset_docs: - asset_name = asset_doc["name"] - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - tasks_by_asset_names[asset_name] = list(asset_tasks.keys()) - - missing_tasks = [] - for instance in context: - asset_name = instance.data["asset"] - task_name = instance.data["task"] - task_names = tasks_by_asset_names.get(asset_name) or [] - if task_name and task_name in task_names: - continue - missing_tasks.append((asset_name, task_name)) - - # Everything is OK - if not missing_tasks: - return - - # Raise an exception - msg = "Couldn't find task name/s required for publishing.\n{}" - pair_msgs = [] - for missing_pair in missing_tasks: - pair_msgs.append( - "Asset: \"{}\" Task: \"{}\"".format(*missing_pair) - ) - - msg = msg.format("\n".join(pair_msgs)) - - formatting_data = {"task_not_found": ' - {}'.join(pair_msgs)} - if pair_msgs: - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py deleted file mode 100644 index 44f69e48f7..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py +++ /dev/null @@ -1,28 +0,0 @@ -import pyblish.api - -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, -) - - -class ValidateTextureBatch(pyblish.api.InstancePlugin): - """Validates that some texture files are present.""" - - label = "Validate Texture Presence" - hosts = ["standalonepublisher"] - order = ValidateContentsOrder - families = 
["texture_batch_workfile"] - optional = False - - def process(self, instance): - present = False - for instance in instance.context: - if instance.data["family"] == "textures": - self.log.info("At least some textures present.") - - return - - msg = "No textures found in published batch!" - if not present: - raise PublishXmlValidationError(self, msg) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py deleted file mode 100644 index f489d37f59..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py +++ /dev/null @@ -1,26 +0,0 @@ -import pyblish.api - -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, -) - - -class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): - """Validates that textures have appropriate workfile attached. - - Workfile is optional, disable this Validator after Refresh if you are - sure it is not needed. - """ - label = "Validate Texture Has Workfile" - hosts = ["standalonepublisher"] - order = ValidateContentsOrder - families = ["textures"] - optional = True - - def process(self, instance): - wfile = instance.data["versionData"].get("workfile") - - msg = "Textures are missing attached workfile" - if not wfile: - raise PublishXmlValidationError(self, msg) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py deleted file mode 100644 index 22f4a0eafc..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py +++ /dev/null @@ -1,63 +0,0 @@ -import pyblish.api - -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, -) - -class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): - """Validates that all instances had properly formatted name.""" - - label = "Validate Texture Batch Naming" - hosts = ["standalonepublisher"] - order = ValidateContentsOrder - families = ["texture_batch_workfile", "textures"] - optional = False - - def process(self, instance): - file_name = instance.data["representations"][0]["files"] - if isinstance(file_name, list): - file_name = file_name[0] - - msg = "Couldn't find asset name in '{}'\n".format(file_name) + \ - "File name doesn't follow configured pattern.\n" + \ - "Please rename the file." - - formatting_data = {"file_name": file_name} - if "NOT_AVAIL" in instance.data["asset_build"]: - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) - - instance.data.pop("asset_build") # not needed anymore - - if instance.data["family"] == "textures": - file_name = instance.data["representations"][0]["files"][0] - self._check_proper_collected(instance.data["versionData"], - file_name) - - def _check_proper_collected(self, versionData, file_name): - """ - Loop through collected versionData to check if name parsing was OK. 
-
-        Args:
-            versionData (dict): collected version data
-            file_name (str): name of the texture file
-
-        Raises:
-            PublishXmlValidationError - if any value is missing
-        """
-        missing_key_values = []
-        for key, value in versionData.items():
-            if not value:
-                missing_key_values.append(key)
-
-        msg = "Collected data {} doesn't contain values for {}".format(
-            versionData, missing_key_values) + "\n" + \
-            "Name of the texture file doesn't match expected pattern.\n" + \
-            "Please rename file(s) {}".format(file_name)
-
-        missing_str = ','.join(["'{}'".format(key)
-                                for key in missing_key_values])
-        formatting_data = {"file_name": file_name,
-                           "missing_str": missing_str}
-        if missing_key_values:
-            raise PublishXmlValidationError(self, msg, key="missing_values",
-                                            formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py
deleted file mode 100644
index dab160d537..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import pyblish.api
-
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-)
-
-
-class ValidateTextureBatchVersions(pyblish.api.InstancePlugin):
-    """Validates that versions match in workfile and textures.
-
-    Workfile is optional, so if you are sure, you can disable this
-    validator after Refresh.
-
-    Validates that only a single version is published at a time.
-    """
-    label = "Validate Texture Batch Versions"
-    hosts = ["standalonepublisher"]
-    order = ValidateContentsOrder
-    families = ["textures"]
-    optional = False
-
-    def process(self, instance):
-        wfile = instance.data["versionData"].get("workfile")
-
-        version_str = "v{:03d}".format(instance.data["version"])
-
-        if not wfile:  # no matching workfile, do not check versions
-            self.log.info("No workfile present for textures")
-            return
-
-        if version_str not in wfile:
-            msg = "Not matching version: texture v{:03d} - workfile {}"
-            msg = msg.format(
-                instance.data["version"], wfile
-            )
-            raise PublishXmlValidationError(self, msg)
-
-        present_versions = set()
-        for context_instance in instance.context:
-            present_versions.add(context_instance.data["version"])
-
-        if len(present_versions) != 1:
-            msg = "Too many versions in a batch!"
-            found = ','.join(["'{}'".format(val) for val in present_versions])
-            formatting_data = {"found": found}
-
-            raise PublishXmlValidationError(self, msg, key="too_many",
-                                            formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py
deleted file mode 100644
index fd2d4a9f36..0000000000
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import pyblish.api
-
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-)
-
-
-class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
-    """Validates that the textures workfile has collected resources (optional).
-
-    Collected resources mean secondary workfiles (in most cases).
- """ - - label = "Validate Texture Workfile Has Resources" - hosts = ["standalonepublisher"] - order = ValidateContentsOrder - families = ["texture_batch_workfile"] - optional = True - - def process(self, instance): - if instance.data["family"] != "workfile": - return - - ext = instance.data["representations"][0]["ext"] - main_workfile_extensions = self.get_main_workfile_extensions( - instance - ) - if ext not in main_workfile_extensions: - self.log.warning("Only secondary workfile present!") - return - - if not instance.data.get("resources"): - msg = "No secondary workfile present for workfile '{}'". \ - format(instance.data["name"]) - ext = main_workfile_extensions[0] - formatting_data = {"file_name": instance.data["name"], - "extension": ext} - - raise PublishXmlValidationError( - self, msg, formatting_data=formatting_data) - - @staticmethod - def get_main_workfile_extensions(instance): - project_settings = instance.context.data["project_settings"] - - try: - extensions = (project_settings["standalonepublisher"] - ["publish"] - ["CollectTextures"] - ["main_workfile_extensions"]) - except KeyError: - raise Exception("Setting 'Main workfile extensions' not found." - " The setting must be set for the" - " 'Collect Texture' publish plugin of the" - " 'Standalone Publish' tool.") - - return extensions diff --git a/openpype/hosts/substancepainter/addon.py b/openpype/hosts/substancepainter/addon.py deleted file mode 100644 index 2fbea139c5..0000000000 --- a/openpype/hosts/substancepainter/addon.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -SUBSTANCE_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class SubstanceAddon(OpenPypeModule, IHostAddon): - name = "substancepainter" - host_name = "substancepainter" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - # Add requirements to SUBSTANCE_PAINTER_PLUGINS_PATH - plugin_path = os.path.join(SUBSTANCE_HOST_DIR, "deploy") - plugin_path = plugin_path.replace("\\", "/") - if env.get("SUBSTANCE_PAINTER_PLUGINS_PATH"): - plugin_path += os.pathsep + env["SUBSTANCE_PAINTER_PLUGINS_PATH"] - - env["SUBSTANCE_PAINTER_PLUGINS_PATH"] = plugin_path - - # Log in Substance Painter doesn't support custom terminal colors - env["OPENPYPE_LOG_NO_COLORS"] = "Yes" - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(SUBSTANCE_HOST_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".spp", ".toc"] diff --git a/openpype/hosts/substancepainter/api/pipeline.py b/openpype/hosts/substancepainter/api/pipeline.py deleted file mode 100644 index a13075127f..0000000000 --- a/openpype/hosts/substancepainter/api/pipeline.py +++ /dev/null @@ -1,428 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pipeline tools for OpenPype Substance Painter integration.""" -import os -import logging -from functools import partial - -# Substance 3D Painter modules -import substance_painter.ui -import substance_painter.event -import substance_painter.project - -import pyblish.api - -from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -from openpype.settings import ( - get_current_project_settings, - get_system_settings -) - -from openpype.pipeline.template_data import get_template_data_with_names -from openpype.pipeline import ( - register_creator_plugin_path, - register_loader_plugin_path, - AVALON_CONTAINER_ID, - Anatomy -) -from openpype.lib import ( - StringTemplate, - 
register_event_callback, - emit_event, -) -from openpype.pipeline.load import any_outdated_containers -from openpype.hosts.substancepainter import SUBSTANCE_HOST_DIR - -from . import lib - -log = logging.getLogger("openpype.hosts.substance") - -PLUGINS_DIR = os.path.join(SUBSTANCE_HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") - -OPENPYPE_METADATA_KEY = "OpenPype" -OPENPYPE_METADATA_CONTAINERS_KEY = "containers" # child key -OPENPYPE_METADATA_CONTEXT_KEY = "context" # child key -OPENPYPE_METADATA_INSTANCES_KEY = "instances" # child key - - -class SubstanceHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): - name = "substancepainter" - - def __init__(self): - super(SubstanceHost, self).__init__() - self._has_been_setup = False - self.menu = None - self.callbacks = [] - self.shelves = [] - - def install(self): - pyblish.api.register_host("substancepainter") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - - log.info("Installing callbacks ... ") - # register_event_callback("init", on_init) - self._register_callbacks() - # register_event_callback("before.save", before_save) - # register_event_callback("save", on_save) - register_event_callback("open", on_open) - # register_event_callback("new", on_new) - - log.info("Installing menu ... ") - self._install_menu() - - project_settings = get_current_project_settings() - self._install_shelves(project_settings) - - self._has_been_setup = True - - def uninstall(self): - self._uninstall_shelves() - self._uninstall_menu() - self._deregister_callbacks() - - def workfile_has_unsaved_changes(self): - - if not substance_painter.project.is_open(): - return False - - return substance_painter.project.needs_saving() - - def get_workfile_extensions(self): - return [".spp", ".toc"] - - def save_workfile(self, dst_path=None): - - if not substance_painter.project.is_open(): - return False - - if not dst_path: - dst_path = self.get_current_workfile() - - full_save_mode = substance_painter.project.ProjectSaveMode.Full - substance_painter.project.save_as(dst_path, full_save_mode) - - return dst_path - - def open_workfile(self, filepath): - - if not os.path.exists(filepath): - raise RuntimeError("File does not exist: {}".format(filepath)) - - # We must first explicitly close current project before opening another - if substance_painter.project.is_open(): - substance_painter.project.close() - - substance_painter.project.open(filepath) - return filepath - - def get_current_workfile(self): - if not substance_painter.project.is_open(): - return None - - filepath = substance_painter.project.file_path() - if filepath and filepath.endswith(".spt"): - # When currently in a Substance Painter template assume our - # scene isn't saved. This can be the case directly after doing - # "New project", the path will then be the template used. This - # avoids Workfiles tool trying to save as .spt extension if the - # file hasn't been saved before. 
- return - - return filepath - - def get_containers(self): - - if not substance_painter.project.is_open(): - return - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) - if containers: - for key, container in containers.items(): - container["objectName"] = key - yield container - - def update_context_data(self, data, changes): - - if not substance_painter.project.is_open(): - return - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - metadata.set(OPENPYPE_METADATA_CONTEXT_KEY, data) - - def get_context_data(self): - - if not substance_painter.project.is_open(): - return - - metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) - return metadata.get(OPENPYPE_METADATA_CONTEXT_KEY) or {} - - def _install_menu(self): - from PySide2 import QtWidgets - from openpype.tools.utils import host_tools - - parent = substance_painter.ui.get_main_window() - - tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON" - menu = QtWidgets.QMenu(tab_menu_label) - - action = menu.addAction("Create...") - action.triggered.connect( - lambda: host_tools.show_publisher(parent=parent, - tab="create") - ) - - action = menu.addAction("Load...") - action.triggered.connect( - lambda: host_tools.show_loader(parent=parent, use_context=True) - ) - - action = menu.addAction("Publish...") - action.triggered.connect( - lambda: host_tools.show_publisher(parent=parent, - tab="publish") - ) - - action = menu.addAction("Manage...") - action.triggered.connect( - lambda: host_tools.show_scene_inventory(parent=parent) - ) - - action = menu.addAction("Library...") - action.triggered.connect( - lambda: host_tools.show_library_loader(parent=parent) - ) - - menu.addSeparator() - action = menu.addAction("Work Files...") - action.triggered.connect( - lambda: host_tools.show_workfiles(parent=parent) - ) - - substance_painter.ui.add_menu(menu) - - def on_menu_destroyed(): - self.menu = None - - menu.destroyed.connect(on_menu_destroyed) - - self.menu = menu - - def _uninstall_menu(self): - if self.menu: - self.menu.destroy() - self.menu = None - - def _register_callbacks(self): - # Prepare emit event callbacks - open_callback = partial(emit_event, "open") - - # Connect to the Substance Painter events - dispatcher = substance_painter.event.DISPATCHER - for event, callback in [ - (substance_painter.event.ProjectOpened, open_callback) - ]: - dispatcher.connect(event, callback) - # Keep a reference so we can deregister if needed - self.callbacks.append((event, callback)) - - def _deregister_callbacks(self): - for event, callback in self.callbacks: - substance_painter.event.DISPATCHER.disconnect(event, callback) - self.callbacks.clear() - - def _install_shelves(self, project_settings): - - shelves = project_settings["substancepainter"].get("shelves", {}) - if not shelves: - return - - # Prepare formatting data if we detect any path which might have - # template tokens like {asset} in there. 
-        formatting_data = {}
-        has_formatting_entries = any("{" in path for path in shelves.values())
-        if has_formatting_entries:
-            project_name = self.get_current_project_name()
-            asset_name = self.get_current_asset_name()
-            task_name = self.get_current_task_name()
-            system_settings = get_system_settings()
-            formatting_data = get_template_data_with_names(project_name,
-                                                           asset_name,
-                                                           task_name,
-                                                           system_settings)
-            anatomy = Anatomy(project_name)
-            formatting_data["root"] = anatomy.roots
-
-        for name, path in shelves.items():
-            shelf_name = None
-
-            # Allow formatting with anatomy for the paths
-            if "{" in path:
-                path = StringTemplate.format_template(path, formatting_data)
-
-            try:
-                shelf_name = lib.load_shelf(path, name=name)
-            except ValueError as exc:
-                print(f"Failed to load shelf -> {exc}")
-
-            if shelf_name:
-                self.shelves.append(shelf_name)
-
-    def _uninstall_shelves(self):
-        for shelf_name in self.shelves:
-            substance_painter.resource.Shelves.remove(shelf_name)
-        self.shelves.clear()
-
-
-def on_open():
-    log.info("Running callback on open..")
-
-    if any_outdated_containers():
-        from openpype.widgets import popup
-
-        log.warning("Scene has outdated content.")
-
-        # Get main window
-        parent = substance_painter.ui.get_main_window()
-        if parent is None:
-            log.info("Skipping outdated content pop-up "
-                     "because Substance window can't be found.")
-        else:
-
-            # Show outdated pop-up
-            def _on_show_inventory():
-                from openpype.tools.utils import host_tools
-                host_tools.show_scene_inventory(parent=parent)
-
-            dialog = popup.Popup(parent=parent)
-            dialog.setWindowTitle("Substance scene has outdated content")
-            dialog.setMessage("There are outdated containers in "
-                              "your Substance scene.")
-            dialog.on_clicked.connect(_on_show_inventory)
-            dialog.show()
-
-
-def imprint_container(container,
-                      name,
-                      namespace,
-                      context,
-                      loader):
-    """Imprint a loaded container with metadata.
-
-    Containerisation enables tracking of version, author and origin
-    for loaded assets.
-
-    Arguments:
-        container (dict): The (substance metadata) dictionary to imprint into.
-        name (str): Name of resulting assembly
-        namespace (str): Namespace under which to host container
-        context (dict): Asset information
-        loader (load.LoaderPlugin): loader instance used to produce container.
-
-    Returns:
-        None
-
-    """
-
-    data = [
-        ("schema", "openpype:container-2.0"),
-        ("id", AVALON_CONTAINER_ID),
-        ("name", str(name)),
-        ("namespace", str(namespace) if namespace else None),
-        ("loader", str(loader.__class__.__name__)),
-        ("representation", str(context["representation"]["_id"])),
-    ]
-    for key, value in data:
-        container[key] = value
-
-
-def set_container_metadata(object_name, container_data, update=False):
-    """Helper method to directly set the data for a specific container
-
-    Args:
-        object_name (str): The unique object name identifier for the container
-        container_data (dict): The data for the container.
-            Note: 'objectName' data is derived from `object_name`, and the key
-            in `container_data` will be ignored.
-        update (bool): Whether to only update the dict data.
-
-    """
-    # The objectName is derived from the key in the metadata, so it won't be
-    # stored in the metadata in the container's data.
-    container_data.pop("objectName", None)
-
-    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
-    containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) or {}
-    if update:
-        existing_data = containers.setdefault(object_name, {})
-        existing_data.update(container_data)  # mutable dict, in-place update
-    else:
-        containers[object_name] = container_data
-    metadata.set("containers", containers)
-
-
-def remove_container_metadata(object_name):
-    """Helper method to remove the data for a specific container"""
-    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
-    containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY)
-    if containers:
-        containers.pop(object_name, None)
-        metadata.set("containers", containers)
-
-
-def set_instance(instance_id, instance_data, update=False):
-    """Helper method to directly set the data for a specific instance
-
-    Args:
-        instance_id (str): Unique identifier for the instance
-        instance_data (dict): The instance data to store in the metadata.
-        update (bool): Whether to only update the dict data.
-    """
-    set_instances({instance_id: instance_data}, update=update)
-
-
-def set_instances(instance_data_by_id, update=False):
-    """Store data for multiple instances at the same time.
-
-    This is more optimal than querying and setting them in the metadata one
-    by one.
-    """
-    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
-    instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
-
-    for instance_id, instance_data in instance_data_by_id.items():
-        if update:
-            existing_data = instances.get(instance_id, {})
-            existing_data.update(instance_data)
-        else:
-            instances[instance_id] = instance_data
-
-    metadata.set("instances", instances)
-
-
-def remove_instance(instance_id):
-    """Helper method to remove the data for a specific instance"""
-    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
-    instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
-    instances.pop(instance_id, None)
-    metadata.set("instances", instances)
-
-
-def get_instances_by_id():
-    """Return all instances stored in the project instances metadata"""
-    if not substance_painter.project.is_open():
-        return {}
-
-    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
-    return metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
-
-
-def get_instances():
-    """Return all instances stored in the project instances as a list"""
-    return list(get_instances_by_id().values())
diff --git a/openpype/hosts/substancepainter/plugins/create/create_workfile.py b/openpype/hosts/substancepainter/plugins/create/create_workfile.py
deleted file mode 100644
index c73277e405..0000000000
--- a/openpype/hosts/substancepainter/plugins/create/create_workfile.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Creator plugin for creating workfiles."""
-
-from openpype import AYON_SERVER_ENABLED
-from openpype.pipeline import CreatedInstance, AutoCreator
-from openpype.client import get_asset_by_name
-
-from openpype.hosts.substancepainter.api.pipeline import (
-    set_instances,
-    set_instance,
-    get_instances
-)
-
-import substance_painter.project
-
-
-class CreateWorkfile(AutoCreator):
-    """Workfile auto-creator."""
-    identifier = "io.openpype.creators.substancepainter.workfile"
-    label = "Workfile"
-    family = "workfile"
-    icon = "document"
-
-    default_variant = "Main"
-
-    def create(self):
-
-        if not substance_painter.project.is_open():
-            return
-
-        variant = self.default_variant
-        project_name = self.project_name
-        asset_name =
self.create_context.get_current_asset_name() - task_name = self.create_context.get_current_task_name() - host_name = self.create_context.host_name - - # Workfile instance should always exist and must only exist once. - # As such we'll first check if it already exists and is collected. - current_instance = next( - ( - instance for instance in self.create_context.instances - if instance.creator_identifier == self.identifier - ), None) - - if current_instance is None: - current_instance_asset = None - elif AYON_SERVER_ENABLED: - current_instance_asset = current_instance["folderPath"] - else: - current_instance_asset = current_instance["asset"] - - if current_instance is None: - self.log.info("Auto-creating workfile instance...") - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - data = { - "task": task_name, - "variant": variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - current_instance = self.create_instance_in_context(subset_name, - data) - elif ( - current_instance_asset != asset_name - or current_instance["task"] != task_name - ): - # Update instance context if is not the same - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name - ) - if AYON_SERVER_ENABLED: - current_instance["folderPath"] = asset_name - else: - current_instance["asset"] = asset_name - current_instance["task"] = task_name - current_instance["subset"] = subset_name - - set_instance( - instance_id=current_instance.get("instance_id"), - instance_data=current_instance.data_to_store() - ) - - def collect_instances(self): - for instance in get_instances(): - if (instance.get("creator_identifier") == self.identifier or - instance.get("family") == self.family): - self.create_instance_in_context_from_existing(instance) - - def update_instances(self, update_list): - instance_data_by_id = {} - for instance, _changes in update_list: - # Persist the data - instance_id = instance.get("instance_id") - instance_data = instance.data_to_store() - instance_data_by_id[instance_id] = instance_data - set_instances(instance_data_by_id, update=True) - - # Helper methods (this might get moved into Creator class) - def create_instance_in_context(self, subset_name, data): - instance = CreatedInstance( - self.family, subset_name, data, self - ) - self.create_context.creator_adds_instance(instance) - return instance - - def create_instance_in_context_from_existing(self, data): - instance = CreatedInstance.from_existing(data, self) - self.create_context.creator_adds_instance(instance) - return instance diff --git a/openpype/hosts/substancepainter/plugins/publish/collect_current_file.py b/openpype/hosts/substancepainter/plugins/publish/collect_current_file.py deleted file mode 100644 index 9a37eb0d1c..0000000000 --- a/openpype/hosts/substancepainter/plugins/publish/collect_current_file.py +++ /dev/null @@ -1,17 +0,0 @@ -import pyblish.api - -from openpype.pipeline import registered_host - - -class CollectCurrentFile(pyblish.api.ContextPlugin): - """Inject the current working file into context""" - - order = pyblish.api.CollectorOrder - 0.49 - label = "Current Workfile" - hosts = ["substancepainter"] - - def process(self, context): - host = registered_host() - path = host.get_current_workfile() - context.data["currentFile"] = path - self.log.debug(f"Current workfile: {path}") diff --git 
a/openpype/hosts/substancepainter/plugins/publish/increment_workfile.py b/openpype/hosts/substancepainter/plugins/publish/increment_workfile.py deleted file mode 100644 index b45d66fbb1..0000000000 --- a/openpype/hosts/substancepainter/plugins/publish/increment_workfile.py +++ /dev/null @@ -1,23 +0,0 @@ -import pyblish.api - -from openpype.lib import version_up -from openpype.pipeline import registered_host - - -class IncrementWorkfileVersion(pyblish.api.ContextPlugin): - """Increment current workfile version.""" - - order = pyblish.api.IntegratorOrder + 1 - label = "Increment Workfile Version" - optional = True - hosts = ["substancepainter"] - - def process(self, context): - - assert all(result["success"] for result in context.data["results"]), ( - "Publishing not successful so version is not increased.") - - host = registered_host() - path = context.data["currentFile"] - self.log.info(f"Incrementing current workfile to: {path}") - host.save_workfile(version_up(path)) diff --git a/openpype/hosts/traypublisher/addon.py b/openpype/hosts/traypublisher/addon.py deleted file mode 100644 index ca60760bab..0000000000 --- a/openpype/hosts/traypublisher/addon.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -from openpype.lib import get_openpype_execute_args -from openpype.lib.execute import run_detached_process -from openpype.modules import ( - click_wrap, - OpenPypeModule, - ITrayAction, - IHostAddon, -) - -TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction): - label = "Publisher" - name = "traypublisher" - host_name = "traypublisher" - - def initialize(self, modules_settings): - self.enabled = True - self.publish_paths = [ - os.path.join(TRAYPUBLISH_ROOT_DIR, "plugins", "publish") - ] - - def tray_init(self): - return - - def on_action_trigger(self): - self.run_traypublisher() - - def connect_with_modules(self, enabled_modules): - """Collect publish paths from other modules.""" - publish_paths = self.manager.collect_plugin_paths()["publish"] - self.publish_paths.extend(publish_paths) - - def run_traypublisher(self): - args = get_openpype_execute_args( - "module", self.name, "launch" - ) - run_detached_process(args) - - def cli(self, click_group): - click_group.add_command(cli_main.to_click_obj()) - - -@click_wrap.group( - TrayPublishAddon.name, - help="TrayPublisher related commands.") -def cli_main(): - pass - - -@cli_main.command() -def launch(): - """Launch TrayPublish tool UI.""" - - from openpype.tools import traypublisher - - traypublisher.main() diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py deleted file mode 100644 index 3264f52b0f..0000000000 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import json -import tempfile -import atexit - -import pyblish.api - -from openpype.pipeline import ( - register_creator_plugin_path, - legacy_io, -) -from openpype.host import HostBase, IPublishHost - - -ROOT_DIR = os.path.dirname(os.path.dirname( - os.path.abspath(__file__) -)) -PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish") -CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create") - - -class TrayPublisherHost(HostBase, IPublishHost): - name = "traypublisher" - - def install(self): - os.environ["AVALON_APP"] = self.name - legacy_io.Session["AVALON_APP"] = self.name - - pyblish.api.register_host("traypublisher") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_creator_plugin_path(CREATE_PATH) 
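Earlier in this file set, `TrayPublishAddon.run_traypublisher` starts the publisher UI through `run_detached_process` so the tray process is not blocked by the tool. A rough standalone sketch of that launch pattern follows, using plain `subprocess` with a harmless placeholder command; this is an assumption-level illustration, not OpenPype's `run_detached_process` implementation:

```python
# Minimal sketch of launching a tool as a detached process. The command
# below is a harmless placeholder; OpenPype builds its own CLI arguments.
import subprocess
import sys


def launch_detached(args):
    """Start `args` without tying its lifetime to this process."""
    if sys.platform == "win32":
        # New process group + detached console on Windows
        flags = (
            subprocess.CREATE_NEW_PROCESS_GROUP
            | subprocess.DETACHED_PROCESS
        )
        return subprocess.Popen(args, creationflags=flags, close_fds=True)
    # On POSIX, a new session detaches from the controlling terminal
    return subprocess.Popen(args, start_new_session=True, close_fds=True)


launch_detached([sys.executable, "-c", "print('hello from detached process')"])
```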
- - def get_context_title(self): - return HostContext.get_project_name() - - def get_context_data(self): - return HostContext.get_context_data() - - def update_context_data(self, data, changes): - HostContext.save_context_data(data) - - def set_project_name(self, project_name): - # TODO Deregister project specific plugins and register new project - # plugins - os.environ["AVALON_PROJECT"] = project_name - legacy_io.Session["AVALON_PROJECT"] = project_name - legacy_io.install() - HostContext.set_project_name(project_name) - - -class HostContext: - _context_json_path = None - - @staticmethod - def _on_exit(): - if ( - HostContext._context_json_path - and os.path.exists(HostContext._context_json_path) - ): - os.remove(HostContext._context_json_path) - - @classmethod - def get_context_json_path(cls): - if cls._context_json_path is None: - output_file = tempfile.NamedTemporaryFile( - mode="w", prefix="traypub_", suffix=".json" - ) - output_file.close() - cls._context_json_path = output_file.name - atexit.register(HostContext._on_exit) - print(cls._context_json_path) - return cls._context_json_path - - @classmethod - def _get_data(cls, group=None): - json_path = cls.get_context_json_path() - data = {} - if not os.path.exists(json_path): - with open(json_path, "w") as json_stream: - json.dump(data, json_stream) - else: - with open(json_path, "r") as json_stream: - content = json_stream.read() - if content: - data = json.loads(content) - if group is None: - return data - return data.get(group) - - @classmethod - def _save_data(cls, group, new_data): - json_path = cls.get_context_json_path() - data = cls._get_data() - data[group] = new_data - with open(json_path, "w") as json_stream: - json.dump(data, json_stream) - - @classmethod - def add_instance(cls, instance): - instances = cls.get_instances() - instances.append(instance) - cls.save_instances(instances) - - @classmethod - def get_instances(cls): - return cls._get_data("instances") or [] - - @classmethod - def save_instances(cls, instances): - cls._save_data("instances", instances) - - @classmethod - def get_context_data(cls): - return cls._get_data("context") or {} - - @classmethod - def save_context_data(cls, data): - cls._save_data("context", data) - - @classmethod - def get_project_name(cls): - return cls._get_data("project_name") - - @classmethod - def set_project_name(cls, project_name): - cls._save_data("project_name", project_name) - - @classmethod - def get_data_to_store(cls): - return { - "project_name": cls.get_project_name(), - "instances": cls.get_instances(), - "context": cls.get_context_data(), - } - - -def list_instances(): - return HostContext.get_instances() - - -def update_instances(update_list): - updated_instances = {} - for instance, _changes in update_list: - updated_instances[instance.id] = instance.data_to_store() - - instances = HostContext.get_instances() - for instance_data in instances: - instance_id = instance_data["instance_id"] - if instance_id in updated_instances: - new_instance_data = updated_instances[instance_id] - old_keys = set(instance_data.keys()) - new_keys = set(new_instance_data.keys()) - instance_data.update(new_instance_data) - for key in (old_keys - new_keys): - instance_data.pop(key) - - HostContext.save_instances(instances) - - -def remove_instances(instances): - if not isinstance(instances, (tuple, list)): - instances = [instances] - - current_instances = HostContext.get_instances() - for instance in instances: - instance_id = instance.data["instance_id"] - found_idx = None - for idx, _instance 
in enumerate(current_instances): - if instance_id == _instance["instance_id"]: - found_idx = idx - break - - if found_idx is not None: - current_instances.pop(found_idx) - HostContext.save_instances(current_instances) - - -def get_context_data(): - return HostContext.get_context_data() - - -def update_context_data(data, changes): - HostContext.save_context_data(data) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py deleted file mode 100644 index 6859b85a46..0000000000 --- a/openpype/hosts/traypublisher/api/plugin.py +++ /dev/null @@ -1,345 +0,0 @@ -from openpype import AYON_SERVER_ENABLED -from openpype.client import ( - get_assets, - get_subsets, - get_last_versions, - get_asset_name_identifier, -) -from openpype.lib.attribute_definitions import ( - FileDef, - BoolDef, - NumberDef, - UISeparatorDef, -) -from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS -from openpype.pipeline.create import ( - Creator, - HiddenCreator, - CreatedInstance, - cache_and_get_instances, - PRE_CREATE_THUMBNAIL_KEY, -) -from .pipeline import ( - list_instances, - update_instances, - remove_instances, - HostContext, -) - -REVIEW_EXTENSIONS = set(IMAGE_EXTENSIONS) | set(VIDEO_EXTENSIONS) -SHARED_DATA_KEY = "openpype.traypublisher.instances" - - -class HiddenTrayPublishCreator(HiddenCreator): - host_name = "traypublisher" - - def collect_instances(self): - instances_by_identifier = cache_and_get_instances( - self, SHARED_DATA_KEY, list_instances - ) - for instance_data in instances_by_identifier[self.identifier]: - instance = CreatedInstance.from_existing(instance_data, self) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - update_instances(update_list) - - def remove_instances(self, instances): - remove_instances(instances) - for instance in instances: - self._remove_instance_from_context(instance) - - def _store_new_instance(self, new_instance): - """Tray publisher specific method to store instance. - - Instance is stored into "workfile" of traypublisher and also add it - to CreateContext. - - Args: - new_instance (CreatedInstance): Instance that should be stored. - """ - - # Host implementation of storing metadata about instance - HostContext.add_instance(new_instance.data_to_store()) - # Add instance to current context - self._add_instance_to_context(new_instance) - - -class TrayPublishCreator(Creator): - create_allow_context_change = True - host_name = "traypublisher" - - def collect_instances(self): - instances_by_identifier = cache_and_get_instances( - self, SHARED_DATA_KEY, list_instances - ) - for instance_data in instances_by_identifier[self.identifier]: - instance = CreatedInstance.from_existing(instance_data, self) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - update_instances(update_list) - - def remove_instances(self, instances): - remove_instances(instances) - for instance in instances: - self._remove_instance_from_context(instance) - - def _store_new_instance(self, new_instance): - """Tray publisher specific method to store instance. - - Instance is stored into "workfile" of traypublisher and also add it - to CreateContext. - - Args: - new_instance (CreatedInstance): Instance that should be stored. 
- """ - - # Host implementation of storing metadata about instance - HostContext.add_instance(new_instance.data_to_store()) - new_instance.mark_as_stored() - - # Add instance to current context - self._add_instance_to_context(new_instance) - - -class SettingsCreator(TrayPublishCreator): - create_allow_context_change = True - create_allow_thumbnail = True - allow_version_control = False - - extensions = [] - - def create(self, subset_name, data, pre_create_data): - # Pass precreate data to creator attributes - thumbnail_path = pre_create_data.pop(PRE_CREATE_THUMBNAIL_KEY, None) - - # Fill 'version_to_use' if version control is enabled - if self.allow_version_control: - if AYON_SERVER_ENABLED: - asset_name = data["folderPath"] - else: - asset_name = data["asset"] - subset_docs_by_asset_id = self._prepare_next_versions( - [asset_name], [subset_name]) - version = subset_docs_by_asset_id[asset_name].get(subset_name) - pre_create_data["version_to_use"] = version - data["_previous_last_version"] = version - - data["creator_attributes"] = pre_create_data - data["settings_creator"] = True - - # Create new instance - new_instance = CreatedInstance(self.family, subset_name, data, self) - - self._store_new_instance(new_instance) - - if thumbnail_path: - self.set_instance_thumbnail_path(new_instance.id, thumbnail_path) - - def _prepare_next_versions(self, asset_names, subset_names): - """Prepare next versions for given asset and subset names. - - Todos: - Expect combination of subset names by asset name to avoid - unnecessary server calls for unused subsets. - - Args: - asset_names (Iterable[str]): Asset names. - subset_names (Iterable[str]): Subset names. - - Returns: - dict[str, dict[str, int]]: Last versions by asset - and subset names. - """ - - # Prepare all versions for all combinations to '1' - subset_docs_by_asset_id = { - asset_name: { - subset_name: 1 - for subset_name in subset_names - } - for asset_name in asset_names - } - if not asset_names or not subset_names: - return subset_docs_by_asset_id - - asset_docs = get_assets( - self.project_name, - asset_names=asset_names, - fields=["_id", "name", "data.parents"] - ) - asset_names_by_id = { - asset_doc["_id"]: get_asset_name_identifier(asset_doc) - for asset_doc in asset_docs - } - subset_docs = list(get_subsets( - self.project_name, - asset_ids=asset_names_by_id.keys(), - subset_names=subset_names, - fields=["_id", "name", "parent"] - )) - - subset_ids = {subset_doc["_id"] for subset_doc in subset_docs} - last_versions = get_last_versions( - self.project_name, - subset_ids, - fields=["name", "parent"]) - - for subset_doc in subset_docs: - asset_id = subset_doc["parent"] - asset_name = asset_names_by_id[asset_id] - subset_name = subset_doc["name"] - subset_id = subset_doc["_id"] - last_version = last_versions.get(subset_id) - version = 0 - if last_version is not None: - version = last_version["name"] - subset_docs_by_asset_id[asset_name][subset_name] += version - return subset_docs_by_asset_id - - def _fill_next_versions(self, instances_data): - """Fill next version for instances. - - Instances have also stored previous next version to be able to - recognize if user did enter different version. If version was - not changed by user, or user set it to '0' the next version will be - updated by current database state. 
- """ - - filtered_instance_data = [] - for instance in instances_data: - previous_last_version = instance.get("_previous_last_version") - creator_attributes = instance["creator_attributes"] - use_next_version = creator_attributes.get( - "use_next_version", True) - version = creator_attributes.get("version_to_use", 0) - if ( - use_next_version - or version == 0 - or version == previous_last_version - ): - filtered_instance_data.append(instance) - - if AYON_SERVER_ENABLED: - asset_names = { - instance["folderPath"] - for instance in filtered_instance_data - } - else: - asset_names = { - instance["asset"] - for instance in filtered_instance_data - } - subset_names = { - instance["subset"] - for instance in filtered_instance_data} - subset_docs_by_asset_id = self._prepare_next_versions( - asset_names, subset_names - ) - for instance in filtered_instance_data: - if AYON_SERVER_ENABLED: - asset_name = instance["folderPath"] - else: - asset_name = instance["asset"] - subset_name = instance["subset"] - version = subset_docs_by_asset_id[asset_name][subset_name] - instance["creator_attributes"]["version_to_use"] = version - instance["_previous_last_version"] = version - - def collect_instances(self): - """Collect instances from host. - - Overriden to be able to manage version control attributes. If version - control is disabled, the attributes will be removed from instances, - and next versions are filled if is version control enabled. - """ - - instances_by_identifier = cache_and_get_instances( - self, SHARED_DATA_KEY, list_instances - ) - instances = instances_by_identifier[self.identifier] - if not instances: - return - - if self.allow_version_control: - self._fill_next_versions(instances) - - for instance_data in instances: - # Make sure that there are not data related to version control - # if plugin does not support it - if not self.allow_version_control: - instance_data.pop("_previous_last_version", None) - creator_attributes = instance_data["creator_attributes"] - creator_attributes.pop("version_to_use", None) - creator_attributes.pop("use_next_version", None) - - instance = CreatedInstance.from_existing(instance_data, self) - self._add_instance_to_context(instance) - - def get_instance_attr_defs(self): - defs = self.get_pre_create_attr_defs() - if self.allow_version_control: - defs += [ - UISeparatorDef(), - BoolDef( - "use_next_version", - default=True, - label="Use next version", - ), - NumberDef( - "version_to_use", - default=1, - minimum=0, - maximum=999, - label="Version to use", - ) - ] - return defs - - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attributes - return [ - FileDef( - "representation_files", - folders=False, - extensions=self.extensions, - allow_sequences=self.allow_sequences, - single_item=not self.allow_multiple_items, - label="Representations", - ), - FileDef( - "reviewable", - folders=False, - extensions=REVIEW_EXTENSIONS, - allow_sequences=True, - single_item=True, - label="Reviewable representations", - extensions_label="Single reviewable item" - ) - ] - - @classmethod - def from_settings(cls, item_data): - identifier = item_data["identifier"] - family = item_data["family"] - if not identifier: - identifier = "settings_{}".format(family) - return type( - "{}{}".format(cls.__name__, identifier), - (cls, ), - { - "family": family, - "identifier": identifier, - "label": item_data["label"].strip(), - "icon": item_data["icon"], - "description": item_data["description"], - "detailed_description": item_data["detailed_description"], - 
"extensions": item_data["extensions"], - "allow_sequences": item_data["allow_sequences"], - "allow_multiple_items": item_data["allow_multiple_items"], - "allow_version_control": item_data.get( - "allow_version_control", False), - "default_variants": item_data["default_variants"], - } - ) diff --git a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py deleted file mode 100644 index df6253b0c2..0000000000 --- a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -from openpype.lib import Logger -from openpype.settings import get_project_settings - -log = Logger.get_logger(__name__) - - -def initialize(): - from openpype.hosts.traypublisher.api.plugin import SettingsCreator - - project_name = os.environ["AVALON_PROJECT"] - project_settings = get_project_settings(project_name) - - simple_creators = project_settings["traypublisher"]["simple_creators"] - - global_variables = globals() - for item in simple_creators: - - dynamic_plugin = SettingsCreator.from_settings(item) - global_variables[dynamic_plugin.__name__] = dynamic_plugin - - -initialize() diff --git a/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml deleted file mode 100644 index 933df1c7c5..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - -Invalid frame range - -## Invalid frame range - -Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames. - -### How to repair? - -Modify configuration in the database or tweak frame range in the workfile. - - - \ No newline at end of file diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py deleted file mode 100644 index 56389a927f..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py +++ /dev/null @@ -1,81 +0,0 @@ -import re - -import pyblish.api - -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, - OptionalPyblishPluginMixin, -) - - -class ValidateFrameRange(OptionalPyblishPluginMixin, - pyblish.api.InstancePlugin): - """Validating frame range of rendered files against state in DB.""" - - label = "Validate Frame Range" - hosts = ["traypublisher"] - families = ["render", "plate"] - order = ValidateContentsOrder - - optional = True - # published data might be sequence (.mov, .mp4) in that counting files - # doesnt make sense - check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga", - "gif", "svg"] - skip_timelines_check = [] # skip for specific task names (regex) - - def process(self, instance): - # Skip the instance if is not active by data on the instance - if not self.is_active(instance.data): - return - - # editorial would fail since they might not be in database yet - new_asset_publishing = instance.data.get("newAssetPublishing") - if new_asset_publishing: - self.log.debug("Instance is creating new asset. 
Skipping.") - return - - if (self.skip_timelines_check and - any(re.search(pattern, instance.data["task"]) - for pattern in self.skip_timelines_check)): - self.log.info("Skipping for {} task".format(instance.data["task"])) - - asset_doc = instance.data["assetEntity"] - asset_data = asset_doc["data"] - frame_start = asset_data["frameStart"] - frame_end = asset_data["frameEnd"] - handle_start = asset_data["handleStart"] - handle_end = asset_data["handleEnd"] - duration = (frame_end - frame_start + 1) + handle_start + handle_end - - repres = instance.data.get("representations") - if not repres: - self.log.info("No representations, skipping.") - return - - first_repre = repres[0] - ext = first_repre['ext'].replace(".", '') - - if not ext or ext.lower() not in self.check_extensions: - self.log.warning("Cannot check for extension {}".format(ext)) - return - - files = first_repre["files"] - if isinstance(files, str): - files = [files] - frames = len(files) - - msg = ( - "Frame duration from DB:'{}' doesn't match number of files:'{}'" - " Please change frame range for Asset or limit no. of files" - ). format(int(duration), frames) - - formatting_data = {"duration": duration, - "found": frames} - if frames != duration: - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) - - self.log.debug("Valid ranges expected '{}' - found '{}'". - format(int(duration), frames)) diff --git a/openpype/hosts/tvpaint/addon.py b/openpype/hosts/tvpaint/addon.py deleted file mode 100644 index b695bf8ecc..0000000000 --- a/openpype/hosts/tvpaint/addon.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from openpype.modules import OpenPypeModule, IHostAddon - -TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -def get_launch_script_path(): - return os.path.join( - TVPAINT_ROOT_DIR, - "api", - "launch_script.py" - ) - - -class TVPaintAddon(OpenPypeModule, IHostAddon): - name = "tvpaint" - host_name = "tvpaint" - - def initialize(self, module_settings): - self.enabled = True - - def add_implementation_envs(self, env, _app): - """Modify environments to contain all required for implementation.""" - - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - def get_launch_hook_paths(self, app): - if app.host_name != self.host_name: - return [] - return [ - os.path.join(TVPAINT_ROOT_DIR, "hooks") - ] - - def get_workfile_extensions(self): - return [".tvpp"] diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py deleted file mode 100644 index c125da1533..0000000000 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ /dev/null @@ -1,511 +0,0 @@ -import os -import json -import tempfile -import logging - -import requests - -import pyblish.api - -from openpype.client import get_asset_by_name -from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost -from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR -from openpype.settings import get_current_project_settings -from openpype.lib import register_event_callback -from openpype.pipeline import ( - legacy_io, - register_loader_plugin_path, - register_creator_plugin_path, - AVALON_CONTAINER_ID, -) -from openpype.pipeline.context_tools import get_global_context - -from .lib import ( - execute_george, - execute_george_through_file -) - -log = logging.getLogger(__name__) - - -METADATA_SECTION = "avalon" -SECTION_NAME_CONTEXT = "context" -SECTION_NAME_CREATE_CONTEXT = "create_context" -SECTION_NAME_INSTANCES = "instances" 
diff --git a/openpype/hosts/tvpaint/addon.py b/openpype/hosts/tvpaint/addon.py
deleted file mode 100644
index b695bf8ecc..0000000000
--- a/openpype/hosts/tvpaint/addon.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import os
-from openpype.modules import OpenPypeModule, IHostAddon
-
-TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-def get_launch_script_path():
-    return os.path.join(
-        TVPAINT_ROOT_DIR,
-        "api",
-        "launch_script.py"
-    )
-
-
-class TVPaintAddon(OpenPypeModule, IHostAddon):
-    name = "tvpaint"
-    host_name = "tvpaint"
-
-    def initialize(self, module_settings):
-        self.enabled = True
-
-    def add_implementation_envs(self, env, _app):
-        """Modify environments to contain everything required for the implementation."""
-
-        defaults = {
-            "OPENPYPE_LOG_NO_COLORS": "True"
-        }
-        for key, value in defaults.items():
-            if not env.get(key):
-                env[key] = value
-
-    def get_launch_hook_paths(self, app):
-        if app.host_name != self.host_name:
-            return []
-        return [
-            os.path.join(TVPAINT_ROOT_DIR, "hooks")
-        ]
-
-    def get_workfile_extensions(self):
-        return [".tvpp"]
diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py
deleted file mode 100644
index c125da1533..0000000000
--- a/openpype/hosts/tvpaint/api/pipeline.py
+++ /dev/null
@@ -1,511 +0,0 @@
-import os
-import json
-import tempfile
-import logging
-
-import requests
-
-import pyblish.api
-
-from openpype.client import get_asset_by_name
-from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
-from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR
-from openpype.settings import get_current_project_settings
-from openpype.lib import register_event_callback
-from openpype.pipeline import (
-    legacy_io,
-    register_loader_plugin_path,
-    register_creator_plugin_path,
-    AVALON_CONTAINER_ID,
-)
-from openpype.pipeline.context_tools import get_global_context
-
-from .lib import (
-    execute_george,
-    execute_george_through_file
-)
-
-log = logging.getLogger(__name__)
-
-
-METADATA_SECTION = "avalon"
-SECTION_NAME_CONTEXT = "context"
-SECTION_NAME_CREATE_CONTEXT = "create_context"
-SECTION_NAME_INSTANCES = "instances"
-SECTION_NAME_CONTAINERS = "containers"
-# Maximum length of metadata chunk string
-# TODO find out the max (500 is safe enough)
-TVPAINT_CHUNK_LENGTH = 500
-
-"""TVPaint's Metadata
-
-Metadata are stored in TVPaint's workfile.
-
-The workfile format works similarly to an .ini file but has a few
-limitations. The most important one is that a value stored under a key has
-a limited length. Due to this limitation, each metadata section/key stores
-the number of "subkeys" that belong to the section.
-
-Example:
-Metadata key `"instances"` may have stored value "2". In that case it is
-expected that there are also keys `["instances0", "instances1"]`.
-
-Workfile data looks like:
-```
-[avalon]
-instances0=[{{__dq__}id{__dq__}: {__dq__}pyblish.avalon.instance{__dq__...
-instances1=...more data...
-instances=2
-```
-"""
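A short standalone sketch of the chunked key scheme documented above (pure Python, no TVPaint required; the chunk length mirrors `TVPAINT_CHUNK_LENGTH` as an assumption):

```python
# Standalone illustration of the chunked key scheme described above:
# the main key stores the chunk count, "keyN" stores chunk N.
CHUNK_LENGTH = 500  # mirrors TVPAINT_CHUNK_LENGTH above


def write_chunked(storage, key, value):
    chunks = [
        value[i:i + CHUNK_LENGTH]
        for i in range(0, len(value), CHUNK_LENGTH)
    ] or [""]
    storage[key] = str(len(chunks))
    for idx, chunk in enumerate(chunks):
        storage["{}{}".format(key, idx)] = chunk


def read_chunked(storage, key):
    count = int(storage[key])
    return "".join(
        storage["{}{}".format(key, idx)] for idx in range(count)
    )


store = {}
write_chunked(store, "instances", "x" * 1200)
assert store["instances"] == "3"  # 1200 chars -> 3 chunks of <= 500
assert len(read_chunked(store, "instances")) == 1200
```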
- george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/")) - return execute_george(george_script) - - def work_root(self, session): - return session["AVALON_WORKDIR"] - - def get_current_workfile(self): - return execute_george("tv_GetProjectName") - - def workfile_has_unsaved_changes(self): - return None - - def get_workfile_extensions(self): - return [".tvpp"] - - # --- Load --- - def get_containers(self): - return get_containers() - - def initial_launch(self): - # Setup project settings if its the template that's launched. - # TODO also check for template creation when it's possible to define - # templates - last_workfile = os.environ.get("AVALON_LAST_WORKFILE") - if not last_workfile or os.path.exists(last_workfile): - return - - log.info("Setting up project...") - global_context = get_global_context() - project_name = global_context.get("project_name") - asset_name = global_context.get("aset_name") - if not project_name or not asset_name: - return - - asset_doc = get_asset_by_name(project_name, asset_name) - - set_context_settings(project_name, asset_doc) - - def application_exit(self): - """Logic related to TimerManager. - - Todo: - This should be handled out of TVPaint integration logic. - """ - - data = get_current_project_settings() - stop_timer = data["tvpaint"]["stop_timer_on_application_exit"] - - if not stop_timer: - return - - # Stop application timer. - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) - - -def containerise( - name, namespace, members, context, loader, current_containers=None -): - """Add new container to metadata. - - Args: - name (str): Container name. - namespace (str): Container namespace. - members (list): List of members that were loaded and belongs - to the container (layer names). - current_containers (list): Preloaded containers. Should be used only - on update/switch when containers were modified during the process. - - Returns: - dict: Container data stored to workfile metadata. - """ - - container_data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, - "members": members, - "name": name, - "namespace": namespace, - "loader": str(loader), - "representation": str(context["representation"]["_id"]) - } - if current_containers is None: - current_containers = get_containers() - - # Add container to containers list - current_containers.append(container_data) - - # Store data to metadata - write_workfile_metadata(SECTION_NAME_CONTAINERS, current_containers) - - return container_data - - -def split_metadata_string(text, chunk_length=None): - """Split string by length. - - Split text to chunks by entered length. - Example: - ```python - text = "ABCDEFGHIJKLM" - result = split_metadata_string(text, 3) - print(result) - >>> ['ABC', 'DEF', 'GHI', 'JKL'] - ``` - - Args: - text (str): Text that will be split into chunks. - chunk_length (int): Single chunk size. Default chunk_length is - set to global variable `TVPAINT_CHUNK_LENGTH`. - - Returns: - list: List of strings with at least one item. - """ - if chunk_length is None: - chunk_length = TVPAINT_CHUNK_LENGTH - chunks = [] - for idx in range(chunk_length, len(text) + chunk_length, chunk_length): - start_idx = idx - chunk_length - chunks.append(text[start_idx:idx]) - return chunks - - -def get_workfile_metadata_string_for_keys(metadata_keys): - """Read metadata for specific keys from current project workfile. 
-
-
-def get_workfile_metadata_string_for_keys(metadata_keys):
-    """Read metadata for specific keys from current project workfile.
-
-    All values from the entered keys are stored into a single string without
-    a separator.
-
-    The function is designed to help get all values for one metadata key at
-    once, so the order of passed keys matters.
-
-    Args:
-        metadata_keys (list, str): Metadata keys for which data should be
-            retrieved. Order of keys matters! It is possible to enter only
-            a single key as a string.
-    """
-    # Add ability to pass only single key
-    if isinstance(metadata_keys, str):
-        metadata_keys = [metadata_keys]
-
-    output_file = tempfile.NamedTemporaryFile(
-        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
-    )
-    output_file.close()
-    output_filepath = output_file.name.replace("\\", "/")
-
-    george_script_parts = []
-    george_script_parts.append(
-        "output_path = \"{}\"".format(output_filepath)
-    )
-    # Store data for each index of metadata key
-    for metadata_key in metadata_keys:
-        george_script_parts.append(
-            "tv_readprojectstring \"{}\" \"{}\" \"\"".format(
-                METADATA_SECTION, metadata_key
-            )
-        )
-        george_script_parts.append(
-            "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result"
-        )
-
-    # Execute the script
-    george_script = "\n".join(george_script_parts)
-    execute_george_through_file(george_script)
-
-    # Load data from temp file
-    with open(output_filepath, "r") as stream:
-        file_content = stream.read()
-
-    # Remove `\n` from content
-    output_string = file_content.replace("\n", "")
-
-    # Delete temp file
-    os.remove(output_filepath)
-
-    return output_string
-
-
-def get_workfile_metadata_string(metadata_key):
-    """Read metadata for specific key from current project workfile."""
-    result = get_workfile_metadata_string_for_keys([metadata_key])
-    if not result:
-        return None
-
-    stripped_result = result.strip()
-    if not stripped_result:
-        return None
-
-    # NOTE Backwards compatibility when metadata key did not store range of
-    #   key indexes but the value itself
-    # NOTE We don't have to care about negative values with `isdecimal` check
-    if not stripped_result.isdecimal():
-        metadata_string = result
-    else:
-        keys = []
-        for idx in range(int(stripped_result)):
-            keys.append("{}{}".format(metadata_key, idx))
-        metadata_string = get_workfile_metadata_string_for_keys(keys)
-
-    # Replace quote placeholders with their values
-    metadata_string = (
-        metadata_string
-        .replace("{__sq__}", "'")
-        .replace("{__dq__}", "\"")
-    )
-    return metadata_string
-
-
-def get_workfile_metadata(metadata_key, default=None):
-    """Read and parse metadata for specific key from current project workfile.
-
-    The pipeline uses this function to store loaded containers and created
-    instances within the keys stored in the `SECTION_NAME_INSTANCES` and
-    `SECTION_NAME_CONTAINERS` constants.
-
-    Args:
-        metadata_key (str): Key defining which data should be read. It is
-            expected the value contains a JSON serializable string.
-    """
-    if default is None:
-        default = []
-
-    json_string = get_workfile_metadata_string(metadata_key)
-    if json_string:
-        try:
-            return json.loads(json_string)
-        except json.decoder.JSONDecodeError:
-            # TODO remove when backwards compatibility of storing metadata
-            #   will be removed
-            print((
-                "Fixed invalid metadata in workfile."
-                " Not serializable string was: {}"
-            ).format(json_string))
-            write_workfile_metadata(metadata_key, default)
-    return default
-
-
-def write_workfile_metadata(metadata_key, value):
-    """Write metadata for specific key into current project workfile.
-
-    George scripts have a specific way of working with quotes, which this
-    function handles automatically.
- - Args: - metadata_key (str): Key defying under which key value will be stored. - value (dict,list,str): Data to store they must be json serializable. - """ - if isinstance(value, (dict, list)): - value = json.dumps(value) - - if not value: - value = "" - - # Handle quotes in dumped json string - # - replace single and double quotes with placeholders - value = ( - value - .replace("'", "{__sq__}") - .replace("\"", "{__dq__}") - ) - chunks = split_metadata_string(value) - chunks_len = len(chunks) - - write_template = "tv_writeprojectstring \"{}\" \"{}\" \"{}\"" - george_script_parts = [] - # Add information about chunks length to metadata key itself - george_script_parts.append( - write_template.format(METADATA_SECTION, metadata_key, chunks_len) - ) - # Add chunk values to indexed metadata keys - for idx, chunk_value in enumerate(chunks): - sub_key = "{}{}".format(metadata_key, idx) - george_script_parts.append( - write_template.format(METADATA_SECTION, sub_key, chunk_value) - ) - - george_script = "\n".join(george_script_parts) - - return execute_george_through_file(george_script) - - -def get_current_workfile_context(): - """Return context in which was workfile saved.""" - return get_workfile_metadata(SECTION_NAME_CONTEXT, {}) - - -def save_current_workfile_context(context): - """Save context which was used to create a workfile.""" - return write_workfile_metadata(SECTION_NAME_CONTEXT, context) - - -def list_instances(): - """List all created instances from current workfile.""" - return get_workfile_metadata(SECTION_NAME_INSTANCES) - - -def write_instances(data): - return write_workfile_metadata(SECTION_NAME_INSTANCES, data) - - -def get_containers(): - output = get_workfile_metadata(SECTION_NAME_CONTAINERS) - if output: - for item in output: - if "objectName" not in item and "members" in item: - members = item["members"] - if isinstance(members, list): - members = "|".join([str(member) for member in members]) - item["objectName"] = members - return output - - -def set_context_settings(project_name, asset_doc): - """Set workfile settings by asset document data. - - Change fps, resolution and frame start/end. 
- """ - - width_key = "resolutionWidth" - height_key = "resolutionHeight" - - width = asset_doc["data"].get(width_key) - height = asset_doc["data"].get(height_key) - if width is None or height is None: - print("Resolution was not found!") - else: - execute_george( - "tv_resizepage {} {} 0".format(width, height) - ) - - framerate = asset_doc["data"].get("fps") - - if framerate is not None: - execute_george( - "tv_framerate {} \"timestretch\"".format(framerate) - ) - else: - print("Framerate was not found!") - - frame_start = asset_doc["data"].get("frameStart") - frame_end = asset_doc["data"].get("frameEnd") - - if frame_start is None or frame_end is None: - print("Frame range was not found!") - return - - handle_start = asset_doc["data"].get("handleStart") - handle_end = asset_doc["data"].get("handleEnd") - - # Always start from 0 Mark In and set only Mark Out - mark_in = 0 - mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end - - execute_george("tv_markin {} set".format(mark_in)) - execute_george("tv_markout {} set".format(mark_out)) diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py deleted file mode 100644 index 96b99199f2..0000000000 --- a/openpype/hosts/tvpaint/api/plugin.py +++ /dev/null @@ -1,189 +0,0 @@ -import re - -from openpype.pipeline import LoaderPlugin -from openpype.pipeline.create import ( - CreatedInstance, - get_subset_name, - AutoCreator, - Creator, -) -from openpype.pipeline.create.creator_plugins import cache_and_get_instances - -from .lib import get_layers_data - - -SHARED_DATA_KEY = "openpype.tvpaint.instances" - - -class TVPaintCreatorCommon: - @property - def subset_template_family_filter(self): - return self.family - - def _cache_and_get_instances(self): - return cache_and_get_instances( - self, SHARED_DATA_KEY, self.host.list_instances - ) - - def _collect_create_instances(self): - instances_by_identifier = self._cache_and_get_instances() - for instance_data in instances_by_identifier[self.identifier]: - instance = CreatedInstance.from_existing(instance_data, self) - self._add_instance_to_context(instance) - - def _update_create_instances(self, update_list): - if not update_list: - return - - cur_instances = self.host.list_instances() - cur_instances_by_id = {} - for instance_data in cur_instances: - instance_id = instance_data.get("instance_id") - if instance_id: - cur_instances_by_id[instance_id] = instance_data - - for instance, changes in update_list: - instance_data = changes.new_value - cur_instance_data = cur_instances_by_id.get(instance.id) - if cur_instance_data is None: - cur_instances.append(instance_data) - continue - for key in set(cur_instance_data) - set(instance_data): - cur_instance_data.pop(key) - cur_instance_data.update(instance_data) - self.host.write_instances(cur_instances) - - def _custom_get_subset_name( - self, - variant, - task_name, - asset_doc, - project_name, - host_name=None, - instance=None - ): - dynamic_data = self.get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name, instance - ) - - return get_subset_name( - self.family, - variant, - task_name, - asset_doc, - project_name, - host_name, - dynamic_data=dynamic_data, - project_settings=self.project_settings, - family_filter=self.subset_template_family_filter - ) - - -class TVPaintCreator(Creator, TVPaintCreatorCommon): - def collect_instances(self): - self._collect_create_instances() - - def update_instances(self, update_list): - self._update_create_instances(update_list) - - def 
remove_instances(self, instances): - ids_to_remove = { - instance.id - for instance in instances - } - cur_instances = self.host.list_instances() - changed = False - new_instances = [] - for instance_data in cur_instances: - if instance_data.get("instance_id") in ids_to_remove: - changed = True - else: - new_instances.append(instance_data) - - if changed: - self.host.write_instances(new_instances) - - for instance in instances: - self._remove_instance_from_context(instance) - - def get_dynamic_data(self, *args, **kwargs): - # Change asset and name by current workfile context - create_context = self.create_context - asset_name = create_context.get_current_asset_name() - task_name = create_context.get_current_task_name() - output = {} - if asset_name: - output["asset"] = asset_name - if task_name: - output["task"] = task_name - return output - - def get_subset_name(self, *args, **kwargs): - return self._custom_get_subset_name(*args, **kwargs) - - def _store_new_instance(self, new_instance): - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - - -class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon): - def collect_instances(self): - self._collect_create_instances() - - def update_instances(self, update_list): - self._update_create_instances(update_list) - - def get_subset_name(self, *args, **kwargs): - return self._custom_get_subset_name(*args, **kwargs) - - -class Loader(LoaderPlugin): - hosts = ["tvpaint"] - - @staticmethod - def get_members_from_container(container): - if "members" not in container and "objectName" in container: - # Backwards compatibility - layer_ids_str = container.get("objectName") - return [ - int(layer_id) for layer_id in layer_ids_str.split("|") - ] - return container["members"] - - def get_unique_layer_name(self, asset_name, name): - """Layer name with counter as suffix. - - Find higher 3 digit suffix from all layer names in scene matching regex - `{asset_name}_{name}_{suffix}`. Higher 3 digit suffix is used - as base for next number if scene does not contain layer matching regex - `0` is used ase base. - - Args: - asset_name (str): Name of subset's parent asset document. - name (str): Name of loaded subset. 
-
-        Returns:
-            (str): `{asset_name}_{name}_{highest suffix + 1}`
-        """
-        layer_name_base = "{}_{}".format(asset_name, name)
-
-        counter_regex = re.compile(r"_(\d{3})$")
-
-        higher_counter = 0
-        for layer in get_layers_data():
-            layer_name = layer["name"]
-            if not layer_name.startswith(layer_name_base):
-                continue
-            number_subpart = layer_name[len(layer_name_base):]
-            groups = counter_regex.findall(number_subpart)
-            if len(groups) != 1:
-                continue
-
-            counter = int(groups[0])
-            if counter > higher_counter:
-                higher_counter = counter
-
-        return "{}_{:0>3d}".format(layer_name_base, higher_counter + 1)
diff --git a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py
deleted file mode 100644
index 5cfa1faa50..0000000000
--- a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import collections
-
-from openpype.pipeline.create.creator_plugins import (
-    SubsetConvertorPlugin,
-    cache_and_get_instances,
-)
-from openpype.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
-from openpype.hosts.tvpaint.api.lib import get_groups_data
-
-
-class TVPaintLegacyConverted(SubsetConvertorPlugin):
-    """Conversion of legacy instances in the scene to new creators.
-
-    This convertor handles only instances created by core creators.
-
-    All instances that would be created using auto-creators are removed,
-    because by the time this convertor finds them the auto-creators have
-    already created their instances.
-    """
-
-    identifier = "tvpaint.legacy.converter"
-
-    def find_instances(self):
-        instances_by_identifier = cache_and_get_instances(
-            self, SHARED_DATA_KEY, self.host.list_instances
-        )
-        if instances_by_identifier[None]:
-            self.add_convertor_item("Convert legacy instances")
-
-    def convert(self):
-        current_instances = self.host.list_instances()
-        to_convert = collections.defaultdict(list)
-        converted = False
-        for instance in current_instances:
-            if instance.get("creator_identifier") is not None:
-                continue
-            converted = True
-
-            family = instance.get("family")
-            if family in (
-                "renderLayer",
-                "renderPass",
-                "renderScene",
-                "review",
-                "workfile",
-            ):
-                to_convert[family].append(instance)
-            else:
-                instance["keep"] = False
-
-        # Skip if nothing was changed
-        if not converted:
-            self.remove_convertor_item()
-            return
-
-        self._convert_render_layers(
-            to_convert["renderLayer"], current_instances)
-        self._convert_render_passes(
-            to_convert["renderPass"], current_instances)
-        self._convert_render_scenes(
-            to_convert["renderScene"], current_instances)
-        self._convert_workfiles(
-            to_convert["workfile"], current_instances)
-        self._convert_reviews(
-            to_convert["review"], current_instances)
-
-        new_instances = [
-            instance
-            for instance in current_instances
-            if instance.get("keep") is not False
-        ]
-        self.host.write_instances(new_instances)
-        # remove legacy item if all is fine
-        self.remove_convertor_item()
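The `_convert_*` methods that follow rewrite the legacy instance dicts in place: renaming keys, filling `creator_attributes`, and flagging unconvertible entries with `keep = False`. A compact standalone sketch of that pattern, with hypothetical minimal data:

```python
# Minimal sketch of the in-place legacy conversion pattern used by the
# methods below; the instance dicts are hypothetical examples.
legacy = [
    {"family": "renderLayer", "uuid": "a1", "group_id": 3},
    {"family": "review", "uuid": "b2"},  # auto-created family: dropped
]

for instance in legacy:
    if instance["family"] == "renderLayer":
        instance["creator_identifier"] = "render.layer"
        instance["instance_id"] = instance.pop("uuid")
        instance["creator_attributes"] = {
            "group_id": instance.pop("group_id")
        }
        instance["family"] = "render"
    else:
        instance["keep"] = False  # marked for removal

kept = [inst for inst in legacy if inst.get("keep") is not False]
assert len(kept) == 1 and kept[0]["creator_identifier"] == "render.layer"
```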
-
-    def _convert_render_layers(self, render_layers, current_instances):
-        if not render_layers:
-            return
-
-        # Look for possible existing render layers in scene
-        render_layers_by_group_id = {}
-        for instance in current_instances:
-            if instance.get("creator_identifier") == "render.layer":
-                group_id = instance["creator_attributes"]["group_id"]
-                render_layers_by_group_id[group_id] = instance
-
-        groups_by_id = {
-            group["group_id"]: group
-            for group in get_groups_data()
-        }
-        for render_layer in render_layers:
-            group_id = render_layer.pop("group_id")
-            # Just remove legacy instance if group is already occupied
-            if group_id in render_layers_by_group_id:
-                render_layer["keep"] = False
-                continue
-            # Add identifier
-            render_layer["creator_identifier"] = "render.layer"
-            # Change 'uuid' to 'instance_id'
-            render_layer["instance_id"] = render_layer.pop("uuid")
-            # Fill creator attributes
-            render_layer["creator_attributes"] = {
-                "group_id": group_id
-            }
-            render_layer["family"] = "render"
-            group = groups_by_id[group_id]
-            # Use group name for variant
-            render_layer["variant"] = group["name"]
-
-    def _convert_render_passes(self, render_passes, current_instances):
-        if not render_passes:
-            return
-
-        # Render passes must have available render layers so we look for
-        # render layers first
-        # - '_convert_render_layers' must be called before this method
-        render_layers_by_group_id = {}
-        for instance in current_instances:
-            if instance.get("creator_identifier") == "render.layer":
-                group_id = instance["creator_attributes"]["group_id"]
-                render_layers_by_group_id[group_id] = instance
-
-        for render_pass in render_passes:
-            group_id = render_pass.pop("group_id")
-            render_layer = render_layers_by_group_id.get(group_id)
-            if not render_layer:
-                render_pass["keep"] = False
-                continue
-
-            render_pass["creator_identifier"] = "render.pass"
-            render_pass["instance_id"] = render_pass.pop("uuid")
-            render_pass["family"] = "render"
-
-            render_pass["creator_attributes"] = {
-                "render_layer_instance_id": render_layer["instance_id"]
-            }
-            render_pass["variant"] = render_pass.pop("pass")
-            render_pass.pop("renderlayer")
-
-    # Rest of instances are just marked for deletion
-    def _convert_render_scenes(self, render_scenes, current_instances):
-        for render_scene in render_scenes:
-            render_scene["keep"] = False
-
-    def _convert_workfiles(self, workfiles, current_instances):
-        for workfile in workfiles:
-            workfile["keep"] = False
-
-    def _convert_reviews(self, reviews, current_instances):
-        for review in reviews:
-            review["keep"] = False
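The creators in create_render.py (next) drive TVPaint by assembling George one-liners such as `tv_layercolor` to tag layers with a color group. A small sketch of assembling such a script; the layer and group ids are hypothetical, while the command string is copied from the plugin code:

```python
# Sketch: building a George script that assigns TVPaint layers to a
# color group, mirroring the tv_layercolor lines used by the creators
# below. Layer and group ids are hypothetical.
layers = [
    {"layer_id": 10, "group_id": 0},
    {"layer_id": 11, "group_id": 2},  # already in the target group
]
target_group_id = 2

george_lines = [
    'tv_layercolor "set" {} {}'.format(layer["layer_id"], target_group_id)
    for layer in layers
    if layer["group_id"] != target_group_id
]
george_script = "\n".join(george_lines)
assert george_script == 'tv_layercolor "set" 10 2'
```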
diff --git a/openpype/hosts/tvpaint/plugins/create/create_render.py b/openpype/hosts/tvpaint/plugins/create/create_render.py
deleted file mode 100644
index 667103432e..0000000000
--- a/openpype/hosts/tvpaint/plugins/create/create_render.py
+++ /dev/null
@@ -1,1186 +0,0 @@
-"""Render Layer and Passes creators.
-
-A Render Layer is the main part and is represented by a color group in
-TVPaint. All TVPaint layers marked with that group color are part of the
-render layer. To be more specific about some parts of the layer it is
-possible to create subsets of the layer, which are named passes. A Render
-Pass consists of layers in the same color group as the render layer but
-defines a more specific part.
-
-For example a render layer could be 'Bob', consisting of 5 TVPaint layers.
-- Bob has a 'head' consisting of 2 TVPaint layers -> Render pass 'head'
-- Bob has a 'body' consisting of 1 TVPaint layer -> Render pass 'body'
-- Bob has an 'arm' consisting of 1 TVPaint layer -> Render pass 'arm'
-- The last layer does not belong to any render pass at all
-
-Bob will be rendered as the 'beauty' of Bob (all visible layers in the
-group). His head will be rendered too, but without any other parts. The
-same goes for the body and arm.
-
-What is this good for? Compositing gets more control over how the renders
-are used. It can do transforms on each render pass without the need to
-modify and re-render them using TVPaint.
-
-The workflow may hit issues when blending modes other than the default
-'color' blend mode are used. In that case it is not recommended to use this
-workflow at all, as other blend modes may affect all layers in the clip,
-which cannot be reproduced per pass.
-
-There is a special case for simple publishing of the whole scene, called
-'render.scene'. That will use all visible layers and render them as one big
-sequence.
-
-Todos:
-    Add option to extract marked layers and passes as json output format for
-    AfterEffects.
-"""
-
-import collections
-from typing import Any, Optional, Union
-
-from openpype import AYON_SERVER_ENABLED
-from openpype.client import get_asset_by_name, get_asset_name_identifier
-from openpype.lib import (
-    prepare_template_data,
-    AbstractAttrDef,
-    UILabelDef,
-    UISeparatorDef,
-    EnumDef,
-    TextDef,
-    BoolDef,
-)
-from openpype.pipeline.create import (
-    CreatedInstance,
-    CreatorError,
-)
-from openpype.hosts.tvpaint.api.plugin import (
-    TVPaintCreator,
-    TVPaintAutoCreator,
-)
-from openpype.hosts.tvpaint.api.lib import (
-    get_layers_data,
-    get_groups_data,
-    execute_george_through_file,
-)
-
-RENDER_LAYER_DETAILED_DESCRIPTIONS = (
-    """Render Layer is "a group of TVPaint layers"
-
-Be aware a Render Layer is not a TVPaint layer.
-
-All TVPaint layers in the scene with the color group id are rendered in the
-beauty pass. To create sub passes use the Render Pass creator, which is
-dependent on the existence of a render layer instance.
-
-The group can represent an asset (tree) or a different part of the scene
-that consists of one or more TVPaint layers that can be used as a single
-item during compositing (for example).
-
-In some cases it may be needed to have sub parts of the layer. For example
-'Bob' could be a Render Layer which has 'Arm', 'Head' and 'Body' as Render
-Passes.
-"""
-)
-
-
-RENDER_PASS_DETAILED_DESCRIPTIONS = (
-    """Render Pass is a sub part of a Render Layer.
-
-A Render Pass can consist of one or more TVPaint layers. A Render Pass must
-belong to a Render Layer. Marked TVPaint layers will change their group
-color to match the group color of the Render Layer.
-"""
-)
-
-
-AUTODETECT_RENDER_DETAILED_DESCRIPTION = (
-    """Semi-automated Render Layer and Render Pass creation.
-
-Render Layers and Render Passes are created based on information in the
-TVPaint scene. All color groups used in the scene are used for Render Layer
-creation. The name of the group is used as the variant.
-
-All TVPaint layers under a color group are created as Render Passes, where
-the layer name is used as the variant.
-
-The plugin will use all used color groups and layers, or can skip those
-that are not visible.
-
-There is an option to auto-rename color groups before Render Layer
-creation. It is based on a settings template into which the index of the
-used group is filled, counted from bottom to top.
-"""
-)
-
-class CreateRenderlayer(TVPaintCreator):
-    """Mark layer group as Render layer instance.
-
-    All TVPaint layers in the scene with the color group id are rendered in
-    the beauty pass. To create sub passes use the Render Pass creator, which
-    is dependent on the existence of a render layer instance.
-    """
-
-    label = "Render Layer"
-    family = "render"
-    subset_template_family_filter = "renderLayer"
-    identifier = "render.layer"
-    icon = "fa5.images"
-
-    # George script to change color group
-    rename_script_template = (
-        "tv_layercolor \"setcolor\""
-        " {clip_id} {group_id} {r} {g} {b} \"{name}\""
-    )
-    # Order to be executed before Render Pass creator
-    order = 90
-    description = "Mark TVPaint color group as one Render Layer."
- detailed_description = RENDER_LAYER_DETAILED_DESCRIPTIONS - - # Settings - # - Default render pass name for beauty - default_pass_name = "beauty" - # - Mark by default instance for review - mark_for_review = True - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_render_layer"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.default_pass_name = plugin_settings["default_pass_name"] - self.mark_for_review = plugin_settings["mark_for_review"] - - def get_dynamic_data( - self, variant, task_name, asset_doc, project_name, host_name, instance - ): - dynamic_data = super().get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name, instance - ) - dynamic_data["renderpass"] = self.default_pass_name - dynamic_data["renderlayer"] = variant - return dynamic_data - - def _get_selected_group_ids(self): - return { - layer["group_id"] - for layer in get_layers_data() - if layer["selected"] - } - - def create(self, subset_name, instance_data, pre_create_data): - self.log.debug("Query data from workfile.") - - group_name = instance_data["variant"] - group_id = pre_create_data.get("group_id") - # This creator should run only on one group - if group_id is None or group_id == -1: - selected_groups = self._get_selected_group_ids() - selected_groups.discard(0) - if len(selected_groups) > 1: - raise CreatorError("You have selected more than one group") - - if len(selected_groups) == 0: - raise CreatorError("You don't have selected any group") - group_id = tuple(selected_groups)[0] - - self.log.debug("Querying groups data from workfile.") - groups_data = get_groups_data() - group_item = None - for group_data in groups_data: - if group_data["group_id"] == group_id: - group_item = group_data - - for instance in self.create_context.instances: - if ( - instance.creator_identifier == self.identifier - and instance["creator_attributes"]["group_id"] == group_id - ): - raise CreatorError(( - f"Group \"{group_item.get('name')}\" is already used" - f" by another render layer \"{instance['subset']}\"" - )) - - self.log.debug(f"Selected group id is \"{group_id}\".") - if "creator_attributes" not in instance_data: - instance_data["creator_attributes"] = {} - creator_attributes = instance_data["creator_attributes"] - mark_for_review = pre_create_data.get("mark_for_review") - if mark_for_review is None: - mark_for_review = self.mark_for_review - creator_attributes["group_id"] = group_id - creator_attributes["mark_for_review"] = mark_for_review - - self.log.info(f"Subset name is {subset_name}") - new_instance = CreatedInstance( - self.family, - subset_name, - instance_data, - self - ) - self._store_new_instance(new_instance) - - if not group_id or group_item["name"] == group_name: - return new_instance - - self.log.debug("Changing name of the group.") - # Rename TVPaint group (keep color same) - # - groups can't contain spaces - rename_script = self.rename_script_template.format( - clip_id=group_item["clip_id"], - group_id=group_item["group_id"], - r=group_item["red"], - g=group_item["green"], - b=group_item["blue"], - name=group_name - ) - execute_george_through_file(rename_script) - - self.log.info(( - f"Name of group with index {group_id}" - f" was changed to \"{group_name}\"." 
- )) - return new_instance - - def _get_groups_enum(self): - groups_enum = [] - empty_groups = [] - for group in get_groups_data(): - group_name = group["name"] - item = { - "label": group_name, - "value": group["group_id"] - } - # TVPaint have defined how many color groups is available, but - # the count is not consistent across versions. It is not possible - # to know how many groups there is. - # - if group_name and group_name != "0": - if empty_groups: - groups_enum.extend(empty_groups) - empty_groups = [] - groups_enum.append(item) - else: - empty_groups.append(item) - return groups_enum - - def get_pre_create_attr_defs(self): - groups_enum = self._get_groups_enum() - groups_enum.insert(0, {"label": "", "value": -1}) - - return [ - EnumDef( - "group_id", - label="Group", - items=groups_enum - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - - def get_instance_attr_defs(self): - groups_enum = self._get_groups_enum() - return [ - EnumDef( - "group_id", - label="Group", - items=groups_enum - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] - - def update_instances(self, update_list): - self._update_color_groups() - self._update_renderpass_groups() - - super().update_instances(update_list) - - def _update_color_groups(self): - render_layer_instances = [] - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - render_layer_instances.append(instance) - - if not render_layer_instances: - return - - groups_by_id = { - group["group_id"]: group - for group in get_groups_data() - } - grg_script_lines = [] - for instance in render_layer_instances: - group_id = instance["creator_attributes"]["group_id"] - variant = instance["variant"] - group = groups_by_id[group_id] - if group["name"] == variant: - continue - - grg_script_lines.append(self.rename_script_template.format( - clip_id=group["clip_id"], - group_id=group["group_id"], - r=group["red"], - g=group["green"], - b=group["blue"], - name=variant - )) - - if grg_script_lines: - execute_george_through_file("\n".join(grg_script_lines)) - - def _update_renderpass_groups(self): - render_layer_instances = {} - render_pass_instances = collections.defaultdict(list) - - for instance in self.create_context.instances: - if instance.creator_identifier == CreateRenderPass.identifier: - render_layer_id = ( - instance["creator_attributes"]["render_layer_instance_id"] - ) - render_pass_instances[render_layer_id].append(instance) - elif instance.creator_identifier == self.identifier: - render_layer_instances[instance.id] = instance - - if not render_pass_instances or not render_layer_instances: - return - - layers_data = get_layers_data() - layers_by_name = collections.defaultdict(list) - for layer in layers_data: - layers_by_name[layer["name"]].append(layer) - - george_lines = [] - for render_layer_id, instances in render_pass_instances.items(): - render_layer_inst = render_layer_instances.get(render_layer_id) - if render_layer_inst is None: - continue - group_id = render_layer_inst["creator_attributes"]["group_id"] - layer_names = set() - for instance in instances: - layer_names |= set(instance["layer_names"]) - - for layer_name in layer_names: - george_lines.extend( - f"tv_layercolor \"set\" {layer['layer_id']} {group_id}" - for layer in layers_by_name[layer_name] - if layer["group_id"] != group_id - ) - if george_lines: - execute_george_through_file("\n".join(george_lines)) - - -class CreateRenderPass(TVPaintCreator): - 
family = "render" - subset_template_family_filter = "renderPass" - identifier = "render.pass" - label = "Render Pass" - icon = "fa5.image" - description = "Mark selected TVPaint layers as pass of Render Layer." - detailed_description = RENDER_PASS_DETAILED_DESCRIPTIONS - - order = CreateRenderlayer.order + 10 - - # Settings - mark_for_review = True - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_render_pass"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.mark_for_review = plugin_settings["mark_for_review"] - - def collect_instances(self): - instances_by_identifier = self._cache_and_get_instances() - render_layers = { - instance_data["instance_id"]: { - "variant": instance_data["variant"], - "template_data": prepare_template_data({ - "renderlayer": instance_data["variant"] - }) - } - for instance_data in ( - instances_by_identifier[CreateRenderlayer.identifier] - ) - } - - for instance_data in instances_by_identifier[self.identifier]: - render_layer_instance_id = ( - instance_data - .get("creator_attributes", {}) - .get("render_layer_instance_id") - ) - render_layer_info = render_layers.get(render_layer_instance_id, {}) - self.update_instance_labels( - instance_data, - render_layer_info.get("variant"), - render_layer_info.get("template_data") - ) - instance = CreatedInstance.from_existing(instance_data, self) - self._add_instance_to_context(instance) - - def get_dynamic_data( - self, variant, task_name, asset_doc, project_name, host_name, instance - ): - dynamic_data = super().get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name, instance - ) - dynamic_data["renderpass"] = variant - dynamic_data["renderlayer"] = "{renderlayer}" - return dynamic_data - - def update_instance_labels( - self, instance, render_layer_variant, render_layer_data=None - ): - old_label = instance.get("label") - old_group = instance.get("group") - new_label = None - new_group = None - if render_layer_variant is not None: - if render_layer_data is None: - render_layer_data = prepare_template_data({ - "renderlayer": render_layer_variant - }) - try: - new_label = instance["subset"].format(**render_layer_data) - except (KeyError, ValueError): - pass - - new_group = f"{self.get_group_label()} ({render_layer_variant})" - - instance["label"] = new_label - instance["group"] = new_group - return old_group != new_group or old_label != new_label - - def create(self, subset_name, instance_data, pre_create_data): - render_layer_instance_id = pre_create_data.get( - "render_layer_instance_id" - ) - if not render_layer_instance_id: - raise CreatorError(( - "You cannot create a Render Pass without a Render Layer." 
- " Please select one first" - )) - - render_layer_instance = self.create_context.instances_by_id.get( - render_layer_instance_id - ) - if render_layer_instance is None: - raise CreatorError(( - "RenderLayer instance was not found" - f" by id \"{render_layer_instance_id}\"" - )) - - group_id = render_layer_instance["creator_attributes"]["group_id"] - self.log.debug("Query data from workfile.") - layers_data = get_layers_data() - - self.log.debug("Checking selection.") - # Get all selected layers and their group ids - marked_layer_names = pre_create_data.get("layer_names") - if marked_layer_names is not None: - layers_by_name = {layer["name"]: layer for layer in layers_data} - marked_layers = [] - for layer_name in marked_layer_names: - layer = layers_by_name.get(layer_name) - if layer is None: - raise CreatorError( - f"Layer with name \"{layer_name}\" was not found") - marked_layers.append(layer) - - else: - marked_layers = [ - layer - for layer in layers_data - if layer["selected"] - ] - - # Raise if nothing is selected - if not marked_layers: - raise CreatorError( - "Nothing is selected. Please select layers.") - - marked_layer_names = {layer["name"] for layer in marked_layers} - - marked_layer_names = set(marked_layer_names) - - instances_to_remove = [] - for instance in self.create_context.instances: - if instance.creator_identifier != self.identifier: - continue - cur_layer_names = set(instance["layer_names"]) - if not cur_layer_names.intersection(marked_layer_names): - continue - new_layer_names = cur_layer_names - marked_layer_names - if new_layer_names: - instance["layer_names"] = list(new_layer_names) - else: - instances_to_remove.append(instance) - - render_layer = render_layer_instance["variant"] - subset_name_fill_data = {"renderlayer": render_layer} - - # Format dynamic keys in subset name - label = subset_name - try: - label = label.format( - **prepare_template_data(subset_name_fill_data) - ) - except (KeyError, ValueError): - pass - - self.log.info(f"New subset name is \"{label}\".") - instance_data["label"] = label - instance_data["group"] = f"{self.get_group_label()} ({render_layer})" - instance_data["layer_names"] = list(marked_layer_names) - if "creator_attributes" not in instance_data: - instance_data["creator_attributes"] = {} - - creator_attributes = instance_data["creator_attributes"] - mark_for_review = pre_create_data.get("mark_for_review") - if mark_for_review is None: - mark_for_review = self.mark_for_review - creator_attributes["mark_for_review"] = mark_for_review - creator_attributes["render_layer_instance_id"] = ( - render_layer_instance_id - ) - - new_instance = CreatedInstance( - self.family, - subset_name, - instance_data, - self - ) - instances_data = self._remove_and_filter_instances( - instances_to_remove - ) - instances_data.append(new_instance.data_to_store()) - - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - self._change_layers_group(marked_layers, group_id) - - return new_instance - - def _change_layers_group(self, layers, group_id): - filtered_layers = [ - layer - for layer in layers - if layer["group_id"] != group_id - ] - if filtered_layers: - self.log.info(( - "Changing group of " - f"{','.join([l['name'] for l in filtered_layers])}" - f" to {group_id}" - )) - george_lines = [ - f"tv_layercolor \"set\" {layer['layer_id']} {group_id}" - for layer in filtered_layers - ] - execute_george_through_file("\n".join(george_lines)) - - def _remove_and_filter_instances(self, instances_to_remove): - instances_data 
= self.host.list_instances()
- if not instances_to_remove:
- return instances_data
-
- removed_ids = set()
- for instance in instances_to_remove:
- removed_ids.add(instance.id)
- self._remove_instance_from_context(instance)
-
- return [
- instance_data
- for instance_data in instances_data
- if instance_data.get("instance_id") not in removed_ids
- ]
-
- def get_pre_create_attr_defs(self):
- # Find available Render Layers
- # - instances are created after creators reset
- current_instances = self.host.list_instances()
- render_layers = [
- {
- "value": inst["instance_id"],
- "label": inst["subset"]
- }
- for inst in current_instances
- if inst.get("creator_identifier") == CreateRenderlayer.identifier
- ]
- if not render_layers:
- render_layers.append({"value": None, "label": "N/A"})
-
- return [
- EnumDef(
- "render_layer_instance_id",
- label="Render Layer",
- items=render_layers
- ),
- UILabelDef(
- "NOTE: Try to hit refresh if you don't see a Render Layer"
- ),
- BoolDef(
- "mark_for_review",
- label="Review",
- default=self.mark_for_review
- )
- ]
-
- def get_instance_attr_defs(self):
- # Find available Render Layers
- current_instances = self.create_context.instances
- render_layers = [
- {
- "value": instance.id,
- "label": instance.label
- }
- for instance in current_instances
- if instance.creator_identifier == CreateRenderlayer.identifier
- ]
- if not render_layers:
- render_layers.append({"value": None, "label": "N/A"})
-
- return [
- EnumDef(
- "render_layer_instance_id",
- label="Render Layer",
- items=render_layers
- ),
- UILabelDef(
- "NOTE: Try to hit refresh if you don't see a Render Layer"
- ),
- BoolDef(
- "mark_for_review",
- label="Review",
- default=self.mark_for_review
- )
- ]
-
-
-class TVPaintAutoDetectRenderCreator(TVPaintCreator):
- """Create Render Layer and Render Pass instances based on scene data.
-
- This is an auto-detection creator that the user can trigger to create
- instances from information in the scene. Each color group used in the
- scene becomes a Render Layer (the group name is used as the variant)
- and each TVPaint layer becomes a Render Pass (the layer name is used
- as the variant).
-
- This creator never keeps any instances of its own; all created
- instances belong to the other creators.
- """ - - family = "render" - label = "Render Layer/Passes" - identifier = "render.auto.detect.creator" - order = CreateRenderPass.order + 10 - description = ( - "Create Render Layers and Render Passes based on scene setup" - ) - detailed_description = AUTODETECT_RENDER_DETAILED_DESCRIPTION - - # Settings - enabled = False - allow_group_rename = True - group_name_template = "L{group_index}" - group_idx_offset = 10 - group_idx_padding = 3 - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings - ["tvpaint"] - ["create"] - ["auto_detect_render"] - ) - self.enabled = plugin_settings.get("enabled", False) - self.allow_group_rename = plugin_settings["allow_group_rename"] - self.group_name_template = plugin_settings["group_name_template"] - self.group_idx_offset = plugin_settings["group_idx_offset"] - self.group_idx_padding = plugin_settings["group_idx_padding"] - - def _rename_groups( - self, - groups_order: list[int], - scene_groups: list[dict[str, Any]] - ): - new_group_name_by_id: dict[int, str] = {} - groups_by_id: dict[int, dict[str, Any]] = { - group["group_id"]: group - for group in scene_groups - } - # Count only renamed groups - for idx, group_id in enumerate(groups_order): - group_index_value: str = ( - "{{:0>{}}}" - .format(self.group_idx_padding) - .format((idx + 1) * self.group_idx_offset) - ) - group_name_fill_values: dict[str, str] = { - "groupIdx": group_index_value, - "groupidx": group_index_value, - "group_idx": group_index_value, - "group_index": group_index_value, - } - - group_name: str = self.group_name_template.format( - **group_name_fill_values - ) - group: dict[str, Any] = groups_by_id[group_id] - if group["name"] != group_name: - new_group_name_by_id[group_id] = group_name - - grg_lines: list[str] = [] - for group_id, group_name in new_group_name_by_id.items(): - group: dict[str, Any] = groups_by_id[group_id] - grg_line: str = "tv_layercolor \"setcolor\" {} {} {} {} {}".format( - group["clip_id"], - group_id, - group["red"], - group["green"], - group["blue"], - group_name - ) - grg_lines.append(grg_line) - group["name"] = group_name - - if grg_lines: - execute_george_through_file("\n".join(grg_lines)) - - def _prepare_render_layer( - self, - project_name: str, - asset_doc: dict[str, Any], - task_name: str, - group_id: int, - groups: list[dict[str, Any]], - mark_for_review: bool, - existing_instance: Optional[CreatedInstance] = None, - ) -> Union[CreatedInstance, None]: - match_group: Union[dict[str, Any], None] = next( - ( - group - for group in groups - if group["group_id"] == group_id - ), - None - ) - if not match_group: - return None - - variant: str = match_group["name"] - creator: CreateRenderlayer = ( - self.create_context.creators[CreateRenderlayer.identifier] - ) - - subset_name: str = creator.get_subset_name( - variant, - task_name, - asset_doc, - project_name, - host_name=self.create_context.host_name, - ) - asset_name = get_asset_name_identifier(asset_doc) - if existing_instance is not None: - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - existing_instance["asset"] = asset_name - existing_instance["task"] = task_name - existing_instance["subset"] = subset_name - return existing_instance - - instance_data: dict[str, str] = { - "task": task_name, - "family": creator.family, - "variant": variant - } - if AYON_SERVER_ENABLED: - instance_data["folderPath"] = asset_name - else: - instance_data["asset"] = asset_name - pre_create_data: dict[str, str] = { - "group_id": group_id, - "mark_for_review": 
mark_for_review
- }
- return creator.create(subset_name, instance_data, pre_create_data)
-
- def _prepare_render_passes(
- self,
- project_name: str,
- asset_doc: dict[str, Any],
- task_name: str,
- render_layer_instance: CreatedInstance,
- layers: list[dict[str, Any]],
- mark_for_review: bool,
- existing_render_passes: list[CreatedInstance]
- ):
- creator: CreateRenderPass = (
- self.create_context.creators[CreateRenderPass.identifier]
- )
- render_pass_by_layer_name = {}
- for render_pass in existing_render_passes:
- for layer_name in render_pass["layer_names"]:
- render_pass_by_layer_name[layer_name] = render_pass
-
- asset_name = get_asset_name_identifier(asset_doc)
-
- for layer in layers:
- layer_name = layer["name"]
- variant = layer_name
- render_pass = render_pass_by_layer_name.get(layer_name)
- if render_pass is not None:
- if len(render_pass["layer_names"]) > 1:
- variant = render_pass["variant"]
-
- subset_name = creator.get_subset_name(
- variant,
- task_name,
- asset_doc,
- project_name,
- host_name=self.create_context.host_name,
- instance=render_pass
- )
-
- if render_pass is not None:
- if AYON_SERVER_ENABLED:
- render_pass["folderPath"] = asset_name
- else:
- render_pass["asset"] = asset_name
-
- render_pass["task"] = task_name
- render_pass["subset"] = subset_name
- continue
-
- instance_data: dict[str, str] = {
- "task": task_name,
- "family": creator.family,
- "variant": variant
- }
- if AYON_SERVER_ENABLED:
- instance_data["folderPath"] = asset_name
- else:
- instance_data["asset"] = asset_name
-
- pre_create_data: dict[str, Any] = {
- "render_layer_instance_id": render_layer_instance.id,
- "layer_names": [layer_name],
- "mark_for_review": mark_for_review
- }
- creator.create(subset_name, instance_data, pre_create_data)
-
- def _filter_groups(
- self,
- layers_by_group_id,
- groups_order,
- only_visible_groups
- ):
- new_groups_order = []
- for group_id in groups_order:
- layers: list[dict[str, Any]] = layers_by_group_id[group_id]
- if not layers:
- continue
-
- if (
- only_visible_groups
- and not any(
- layer
- for layer in layers
- if layer["visible"]
- )
- ):
- continue
- new_groups_order.append(group_id)
- return new_groups_order
-
- def create(self, subset_name, instance_data, pre_create_data):
- project_name: str = self.create_context.get_current_project_name()
- if AYON_SERVER_ENABLED:
- asset_name: str = instance_data["folderPath"]
- else:
- asset_name: str = instance_data["asset"]
- task_name: str = instance_data["task"]
- asset_doc: dict[str, Any] = get_asset_by_name(
- project_name, asset_name)
-
- render_layers_by_group_id: dict[int, CreatedInstance] = {}
- render_passes_by_render_layer_id: dict[int, list[CreatedInstance]] = (
- collections.defaultdict(list)
- )
- for instance in self.create_context.instances:
- if instance.creator_identifier == CreateRenderlayer.identifier:
- group_id = instance["creator_attributes"]["group_id"]
- render_layers_by_group_id[group_id] = instance
- elif instance.creator_identifier == CreateRenderPass.identifier:
- render_layer_id = (
- instance
- ["creator_attributes"]
- ["render_layer_instance_id"]
- )
- render_passes_by_render_layer_id[render_layer_id].append(
- instance
- )
-
- layers_by_group_id: dict[int, list[dict[str, Any]]] = (
- collections.defaultdict(list)
- )
- scene_layers: list[dict[str, Any]] = get_layers_data()
- scene_groups: list[dict[str, Any]] = get_groups_data()
- groups_order: list[int] = []
- for layer in scene_layers:
- group_id: int = layer["group_id"]
- # Skip 'default' group
- if group_id == 0:
-
continue - - layers_by_group_id[group_id].append(layer) - if group_id not in groups_order: - groups_order.append(group_id) - - groups_order.reverse() - - mark_layers_for_review = pre_create_data.get( - "mark_layers_for_review", False - ) - mark_passes_for_review = pre_create_data.get( - "mark_passes_for_review", False - ) - rename_groups = pre_create_data.get("rename_groups", False) - only_visible_groups = pre_create_data.get("only_visible_groups", False) - groups_order = self._filter_groups( - layers_by_group_id, - groups_order, - only_visible_groups - ) - if not groups_order: - return - - if rename_groups: - self._rename_groups(groups_order, scene_groups) - - # Make sure all render layers are created - for group_id in groups_order: - instance: Union[CreatedInstance, None] = ( - self._prepare_render_layer( - project_name, - asset_doc, - task_name, - group_id, - scene_groups, - mark_layers_for_review, - render_layers_by_group_id.get(group_id), - ) - ) - if instance is not None: - render_layers_by_group_id[group_id] = instance - - for group_id in groups_order: - layers: list[dict[str, Any]] = layers_by_group_id[group_id] - render_layer_instance: Union[CreatedInstance, None] = ( - render_layers_by_group_id.get(group_id) - ) - if not layers or render_layer_instance is None: - continue - - self._prepare_render_passes( - project_name, - asset_doc, - task_name, - render_layer_instance, - layers, - mark_passes_for_review, - render_passes_by_render_layer_id[render_layer_instance.id] - ) - - def get_pre_create_attr_defs(self) -> list[AbstractAttrDef]: - render_layer_creator: CreateRenderlayer = ( - self.create_context.creators[CreateRenderlayer.identifier] - ) - render_pass_creator: CreateRenderPass = ( - self.create_context.creators[CreateRenderPass.identifier] - ) - output = [] - if self.allow_group_rename: - output.extend([ - BoolDef( - "rename_groups", - label="Rename color groups", - tooltip="Will rename color groups using studio template", - default=True - ), - BoolDef( - "only_visible_groups", - label="Only visible color groups", - tooltip=( - "Render Layers and rename will happen only on color" - " groups with visible layers." 
- ), - default=True - ), - UISeparatorDef() - ]) - output.extend([ - BoolDef( - "mark_layers_for_review", - label="Mark RenderLayers for review", - default=render_layer_creator.mark_for_review - ), - BoolDef( - "mark_passes_for_review", - label="Mark RenderPasses for review", - default=render_pass_creator.mark_for_review - ) - ]) - return output - - -class TVPaintSceneRenderCreator(TVPaintAutoCreator): - family = "render" - subset_template_family_filter = "renderScene" - identifier = "render.scene" - label = "Scene Render" - icon = "fa.file-image-o" - - # Settings - default_pass_name = "beauty" - mark_for_review = True - active_on_create = False - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_render_scene"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.mark_for_review = plugin_settings["mark_for_review"] - self.active_on_create = plugin_settings["active_on_create"] - self.default_pass_name = plugin_settings["default_pass_name"] - - def get_dynamic_data(self, variant, *args, **kwargs): - dynamic_data = super().get_dynamic_data(variant, *args, **kwargs) - dynamic_data["renderpass"] = "{renderpass}" - dynamic_data["renderlayer"] = variant - return dynamic_data - - def _create_new_instance(self): - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - asset_name = create_context.get_current_asset_name() - task_name = create_context.get_current_task_name() - - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, - task_name, - asset_doc, - project_name, - host_name - ) - data = { - "task": task_name, - "variant": self.default_variant, - "creator_attributes": { - "render_pass_name": self.default_pass_name, - "mark_for_review": True - }, - "label": self._get_label( - subset_name, - self.default_pass_name - ) - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - if not self.active_on_create: - data["active"] = False - - new_instance = CreatedInstance( - self.family, subset_name, data, self - ) - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - return new_instance - - def create(self): - existing_instance = None - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - existing_instance = instance - break - - if existing_instance is None: - return self._create_new_instance() - - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - asset_name = create_context.get_current_asset_name() - task_name = create_context.get_current_task_name() - - existing_name = None - if AYON_SERVER_ENABLED: - existing_name = existing_instance.get("folderPath") - if existing_name is None: - existing_name = existing_instance["asset"] - - if ( - existing_name != asset_name - or existing_instance["task"] != task_name - ): - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - existing_instance["variant"], - task_name, - asset_doc, - project_name, - host_name, - existing_instance - ) - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - 
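# Legacy OpenPype mode keeps the folder under the older "asset" key
-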
existing_instance["asset"] = asset_name - existing_instance["task"] = task_name - existing_instance["subset"] = subset_name - - existing_instance["label"] = self._get_label( - existing_instance["subset"], - existing_instance["creator_attributes"]["render_pass_name"] - ) - - def _get_label(self, subset_name, render_pass_name): - try: - subset_name = subset_name.format(**prepare_template_data({ - "renderpass": render_pass_name - })) - except (KeyError, ValueError): - pass - - return subset_name - - def get_instance_attr_defs(self): - return [ - TextDef( - "render_pass_name", - label="Pass Name", - default=self.default_pass_name, - tooltip=( - "Value is calculated during publishing and UI will update" - " label after refresh." - ) - ), - BoolDef( - "mark_for_review", - label="Review", - default=self.mark_for_review - ) - ] diff --git a/openpype/hosts/tvpaint/plugins/create/create_review.py b/openpype/hosts/tvpaint/plugins/create/create_review.py deleted file mode 100644 index 5caf20f27d..0000000000 --- a/openpype/hosts/tvpaint/plugins/create/create_review.py +++ /dev/null @@ -1,91 +0,0 @@ -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_asset_by_name -from openpype.pipeline import CreatedInstance -from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator - - -class TVPaintReviewCreator(TVPaintAutoCreator): - family = "review" - identifier = "scene.review" - label = "Review" - icon = "ei.video" - - # Settings - active_on_create = True - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_review"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - self.active_on_create = plugin_settings["active_on_create"] - - def create(self): - existing_instance = None - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - existing_instance = instance - break - - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - asset_name = create_context.get_current_asset_name() - task_name = create_context.get_current_task_name() - - if existing_instance is None: - existing_asset_name = None - elif AYON_SERVER_ENABLED: - existing_asset_name = existing_instance["folderPath"] - else: - existing_asset_name = existing_instance["asset"] - - if existing_instance is None: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, - task_name, - asset_doc, - project_name, - host_name - ) - data = { - "task": task_name, - "variant": self.default_variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - - if not self.active_on_create: - data["active"] = False - - new_instance = CreatedInstance( - self.family, subset_name, data, self - ) - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - - elif ( - existing_asset_name != asset_name - or existing_instance["task"] != task_name - ): - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - existing_instance["variant"], - task_name, - asset_doc, - project_name, - host_name, - existing_instance - ) - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - 
existing_instance["asset"] = asset_name - existing_instance["task"] = task_name - existing_instance["subset"] = subset_name diff --git a/openpype/hosts/tvpaint/plugins/create/create_workfile.py b/openpype/hosts/tvpaint/plugins/create/create_workfile.py deleted file mode 100644 index 4ce5d7fc96..0000000000 --- a/openpype/hosts/tvpaint/plugins/create/create_workfile.py +++ /dev/null @@ -1,84 +0,0 @@ -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_asset_by_name -from openpype.pipeline import CreatedInstance -from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator - - -class TVPaintWorkfileCreator(TVPaintAutoCreator): - family = "workfile" - identifier = "workfile" - label = "Workfile" - icon = "fa.file-o" - - def apply_settings(self, project_settings): - plugin_settings = ( - project_settings["tvpaint"]["create"]["create_workfile"] - ) - self.default_variant = plugin_settings["default_variant"] - self.default_variants = plugin_settings["default_variants"] - - def create(self): - existing_instance = None - for instance in self.create_context.instances: - if instance.creator_identifier == self.identifier: - existing_instance = instance - break - - create_context = self.create_context - host_name = create_context.host_name - project_name = create_context.get_current_project_name() - asset_name = create_context.get_current_asset_name() - task_name = create_context.get_current_task_name() - - if existing_instance is None: - existing_asset_name = None - elif AYON_SERVER_ENABLED: - existing_asset_name = existing_instance["folderPath"] - else: - existing_asset_name = existing_instance["asset"] - - if existing_instance is None: - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - self.default_variant, - task_name, - asset_doc, - project_name, - host_name - ) - data = { - "task": task_name, - "variant": self.default_variant - } - if AYON_SERVER_ENABLED: - data["folderPath"] = asset_name - else: - data["asset"] = asset_name - - new_instance = CreatedInstance( - self.family, subset_name, data, self - ) - instances_data = self.host.list_instances() - instances_data.append(new_instance.data_to_store()) - self.host.write_instances(instances_data) - self._add_instance_to_context(new_instance) - - elif ( - existing_asset_name != asset_name - or existing_instance["task"] != task_name - ): - asset_doc = get_asset_by_name(project_name, asset_name) - subset_name = self.get_subset_name( - existing_instance["variant"], - task_name, - asset_doc, - project_name, - host_name, - existing_instance - ) - if AYON_SERVER_ENABLED: - existing_instance["folderPath"] = asset_name - else: - existing_instance["asset"] = asset_name - existing_instance["task"] = task_name - existing_instance["subset"] = subset_name diff --git a/openpype/hosts/tvpaint/plugins/load/load_image.py b/openpype/hosts/tvpaint/plugins/load/load_image.py deleted file mode 100644 index a400738019..0000000000 --- a/openpype/hosts/tvpaint/plugins/load/load_image.py +++ /dev/null @@ -1,86 +0,0 @@ -from openpype.lib.attribute_definitions import BoolDef -from openpype.hosts.tvpaint.api import plugin -from openpype.hosts.tvpaint.api.lib import execute_george_through_file - - -class ImportImage(plugin.Loader): - """Load image or image sequence to TVPaint as new layer.""" - - families = ["render", "image", "background", "plate", "review"] - representations = ["*"] - - label = "Import Image" - order = 1 - icon = "image" - color = "white" - - import_script = ( - "filepath = \"{}\"\n" - 
"layer_name = \"{}\"\n" - "tv_loadsequence filepath {}PARSE layer_id\n" - "tv_layerrename layer_id layer_name" - ) - - defaults = { - "stretch": True, - "timestretch": True, - "preload": True - } - - @classmethod - def get_options(cls, contexts): - return [ - BoolDef( - "stretch", - label="Stretch to project size", - default=cls.defaults["stretch"], - tooltip="Stretch loaded image/s to project resolution?" - ), - BoolDef( - "timestretch", - label="Stretch to timeline length", - default=cls.defaults["timestretch"], - tooltip="Clip loaded image/s to timeline length?" - ), - BoolDef( - "preload", - label="Preload loaded image/s", - default=cls.defaults["preload"], - tooltip="Preload image/s?" - ) - ] - - def load(self, context, name, namespace, options): - stretch = options.get("stretch", self.defaults["stretch"]) - timestretch = options.get("timestretch", self.defaults["timestretch"]) - preload = options.get("preload", self.defaults["preload"]) - - load_options = [] - if stretch: - load_options.append("\"STRETCH\"") - if timestretch: - load_options.append("\"TIMESTRETCH\"") - if preload: - load_options.append("\"PRELOAD\"") - - load_options_str = "" - for load_option in load_options: - load_options_str += (load_option + " ") - - # Prepare layer name - asset_name = context["asset"]["name"] - version_name = context["version"]["name"] - layer_name = "{}_{}_v{:0>3}".format( - asset_name, - name, - version_name - ) - # Fill import script with filename and layer name - # - filename mus not contain backwards slashes - path = self.filepath_from_context(context).replace("\\", "/") - george_script = self.import_script.format( - path, - layer_name, - load_options_str - ) - return execute_george_through_file(george_script) diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py deleted file mode 100644 index 169bfdcdd8..0000000000 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ /dev/null @@ -1,115 +0,0 @@ -import os - -from openpype.lib import StringTemplate -from openpype.pipeline import ( - registered_host, - get_current_context, - Anatomy, -) -from openpype.pipeline.workfile import ( - get_workfile_template_key_from_context, - get_last_workfile_with_version, -) -from openpype.pipeline.template_data import get_template_data_with_names -from openpype.hosts.tvpaint.api import plugin -from openpype.hosts.tvpaint.api.lib import ( - execute_george_through_file, -) -from openpype.hosts.tvpaint.api.pipeline import ( - get_current_workfile_context, -) -from openpype.pipeline.version_start import get_versioning_start - - -class LoadWorkfile(plugin.Loader): - """Load workfile.""" - - families = ["workfile"] - representations = ["tvpp"] - - label = "Load Workfile" - - def load(self, context, name, namespace, options): - # Load context of current workfile as first thing - # - which context and extension has - filepath = self.filepath_from_context(context) - filepath = filepath.replace("\\", "/") - - if not os.path.exists(filepath): - raise FileExistsError( - "The loaded file does not exist. Try downloading it first." - ) - - host = registered_host() - current_file = host.get_current_workfile() - work_context = get_current_workfile_context() - - george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( - filepath - ) - execute_george_through_file(george_script) - - # Save workfile. 
- host_name = "tvpaint" - project_name = work_context.get("project") - asset_name = work_context.get("asset") - task_name = work_context.get("task") - # Far cases when there is workfile without work_context - if not asset_name: - context = get_current_context() - project_name = context["project_name"] - asset_name = context["asset_name"] - task_name = context["task_name"] - - template_key = get_workfile_template_key_from_context( - asset_name, - task_name, - host_name, - project_name=project_name - ) - anatomy = Anatomy(project_name) - - data = get_template_data_with_names( - project_name, asset_name, task_name, host_name - ) - data["root"] = anatomy.roots - - file_template = anatomy.templates[template_key]["file"] - - # Define saving file extension - extensions = host.get_workfile_extensions() - if current_file: - # Match the extension of current file - _, extension = os.path.splitext(current_file) - else: - # Fall back to the first extension supported for this host. - extension = extensions[0] - - data["ext"] = extension - - folder_template = anatomy.templates[template_key]["folder"] - work_root = StringTemplate.format_strict_template( - folder_template, data - ) - version = get_last_workfile_with_version( - work_root, file_template, data, extensions - )[1] - - if version is None: - version = get_versioning_start( - project_name, - "tvpaint", - task_name=task_name, - task_type=data["task"]["type"], - family="workfile" - ) - else: - version += 1 - - data["version"] = version - - filename = StringTemplate.format_strict_template( - file_template, data - ) - path = os.path.join(work_root, filename) - host.save_workfile(path) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py deleted file mode 100644 index 577e6e30e2..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py +++ /dev/null @@ -1,114 +0,0 @@ -import copy -import pyblish.api -from openpype.lib import prepare_template_data - - -class CollectRenderInstances(pyblish.api.InstancePlugin): - label = "Collect Render Instances" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["tvpaint"] - families = ["render", "review"] - - ignore_render_pass_transparency = False - - def process(self, instance): - context = instance.context - creator_identifier = instance.data["creator_identifier"] - if creator_identifier == "render.layer": - self._collect_data_for_render_layer(instance) - - elif creator_identifier == "render.pass": - self._collect_data_for_render_pass(instance) - - elif creator_identifier == "render.scene": - self._collect_data_for_render_scene(instance) - - else: - if creator_identifier == "scene.review": - self._collect_data_for_review(instance) - return - - subset_name = instance.data["subset"] - instance.data["name"] = subset_name - instance.data["label"] = "{} [{}-{}]".format( - subset_name, - context.data["sceneMarkIn"] + 1, - context.data["sceneMarkOut"] + 1 - ) - - def _collect_data_for_render_layer(self, instance): - instance.data["families"].append("renderLayer") - creator_attributes = instance.data["creator_attributes"] - group_id = creator_attributes["group_id"] - if creator_attributes["mark_for_review"]: - instance.data["families"].append("review") - - layers_data = instance.context.data["layersData"] - instance.data["layers"] = [ - copy.deepcopy(layer) - for layer in layers_data - if layer["group_id"] == group_id - ] - - def _collect_data_for_render_pass(self, instance): - 
instance.data["families"].append("renderPass") - - layer_names = set(instance.data["layer_names"]) - layers_data = instance.context.data["layersData"] - - creator_attributes = instance.data["creator_attributes"] - if creator_attributes["mark_for_review"]: - instance.data["families"].append("review") - - instance.data["layers"] = [ - copy.deepcopy(layer) - for layer in layers_data - if layer["name"] in layer_names - ] - instance.data["ignoreLayersTransparency"] = ( - self.ignore_render_pass_transparency - ) - - render_layer_data = None - render_layer_id = creator_attributes["render_layer_instance_id"] - for in_data in instance.context.data["workfileInstances"]: - if ( - in_data.get("creator_identifier") == "render.layer" - and in_data["instance_id"] == render_layer_id - ): - render_layer_data = in_data - break - - instance.data["renderLayerData"] = copy.deepcopy(render_layer_data) - # Invalid state - if render_layer_data is None: - return - render_layer_name = render_layer_data["variant"] - subset_name = instance.data["subset"] - instance.data["subset"] = subset_name.format( - **prepare_template_data({"renderlayer": render_layer_name}) - ) - - def _collect_data_for_render_scene(self, instance): - instance.data["families"].append("renderScene") - - creator_attributes = instance.data["creator_attributes"] - if creator_attributes["mark_for_review"]: - instance.data["families"].append("review") - - instance.data["layers"] = copy.deepcopy( - instance.context.data["layersData"] - ) - - render_pass_name = ( - instance.data["creator_attributes"]["render_pass_name"] - ) - subset_name = instance.data["subset"] - instance.data["subset"] = subset_name.format( - **prepare_template_data({"renderpass": render_pass_name}) - ) - - def _collect_data_for_review(self, instance): - instance.data["layers"] = copy.deepcopy( - instance.context.data["layersData"] - ) diff --git a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py b/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py deleted file mode 100644 index a85caf2557..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py +++ /dev/null @@ -1,23 +0,0 @@ -import pyblish.api - -from openpype.lib import version_up -from openpype.pipeline import registered_host - - -class IncrementWorkfileVersion(pyblish.api.ContextPlugin): - """Increment current workfile version.""" - - order = pyblish.api.IntegratorOrder + 1 - label = "Increment Workfile Version" - optional = True - hosts = ["tvpaint"] - - def process(self, context): - - assert all(result["success"] for result in context.data["results"]), ( - "Publishing not successful so version is not increased.") - - host = registered_host() - path = context.data["currentFile"] - host.save_workfile(version_up(path)) - self.log.info('Incrementing workfile version') diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py deleted file mode 100644 index 0ab8e811f5..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py +++ /dev/null @@ -1,58 +0,0 @@ -import json - -import pyblish.api -from openpype.pipeline import ( - PublishXmlValidationError, - OptionalPyblishPluginMixin, -) - - -# TODO @iLliCiTiT add fix action for fps -class ValidateProjectSettings( - OptionalPyblishPluginMixin, - pyblish.api.ContextPlugin -): - """Validate scene settings against database.""" - - label = "Validate Scene Settings" - order = pyblish.api.ValidatorOrder - optional 
= True
-
- def process(self, context):
- if not self.is_active(context.data):
- return
-
- expected_data = context.data["assetEntity"]["data"]
- scene_data = {
- "fps": context.data.get("sceneFps"),
- "resolutionWidth": context.data.get("sceneWidth"),
- "resolutionHeight": context.data.get("sceneHeight"),
- "pixelAspect": context.data.get("scenePixelAspect")
- }
- invalid = {}
- for k in scene_data.keys():
- expected_value = expected_data[k]
- if scene_data[k] != expected_value:
- invalid[k] = {
- "current": scene_data[k], "expected": expected_value
- }
-
- if not invalid:
- return
-
- raise PublishXmlValidationError(
- self,
- "Scene settings do not match the database:\n{}".format(
- json.dumps(invalid, sort_keys=True, indent=4)
- ),
- formatting_data={
- "expected_fps": expected_data["fps"],
- "current_fps": scene_data["fps"],
- "expected_width": expected_data["resolutionWidth"],
- "expected_height": expected_data["resolutionHeight"],
- "current_width": scene_data["resolutionWidth"],
- "current_height": scene_data["resolutionHeight"],
- "expected_pixel_ratio": expected_data["pixelAspect"],
- "current_pixel_ratio": scene_data["pixelAspect"]
- }
- )
diff --git a/openpype/hosts/unreal/addon.py b/openpype/hosts/unreal/addon.py
deleted file mode 100644
index fcc5d98ab6..0000000000
--- a/openpype/hosts/unreal/addon.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import os
-import re
-from openpype.modules import IHostAddon, OpenPypeModule
-
-UNREAL_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-class UnrealAddon(OpenPypeModule, IHostAddon):
- name = "unreal"
- host_name = "unreal"
-
- def initialize(self, module_settings):
- self.enabled = True
-
- def get_global_environments(self):
- return {
- "AYON_UNREAL_ROOT": UNREAL_ROOT_DIR,
- }
-
- def add_implementation_envs(self, env, app):
- """Modify environments to contain all required for implementation."""
- # Set AYON_UNREAL_PLUGIN required for Unreal implementation
- # Imports are in this method for Python 2 compatibility of an addon
- from pathlib import Path
-
- from .lib import get_compatible_integration
-
- from openpype.widgets.message_window import Window
-
- pattern = re.compile(r'^\d+-\d+$')
-
- if not pattern.match(app.name):
- msg = (
- "Unreal application key in the settings must be in format"
- " '5-0' or '5-1'"
- )
- Window(
- parent=None,
- title="Unreal application name format",
- message=msg,
- level="critical")
- raise ValueError(msg)
-
- ue_version = app.name.replace("-", ".")
- unreal_plugin_path = os.path.join(
- UNREAL_ROOT_DIR, "integration", "UE_{}".format(ue_version), "Ayon"
- )
- if not Path(unreal_plugin_path).exists():
- compatible_versions = get_compatible_integration(
- ue_version, Path(UNREAL_ROOT_DIR) / "integration"
- )
- if compatible_versions:
- unreal_plugin_path = compatible_versions[-1] / "Ayon"
- unreal_plugin_path = unreal_plugin_path.as_posix()
-
- if not env.get("AYON_UNREAL_PLUGIN") or \
- env.get("AYON_UNREAL_PLUGIN") != unreal_plugin_path:
- env["AYON_UNREAL_PLUGIN"] = unreal_plugin_path
-
- # Set default environments if they are not set via settings
- defaults = {
- "OPENPYPE_LOG_NO_COLORS": "True",
- "UE_PYTHONPATH": os.environ.get("PYTHONPATH", ""),
- }
- for key, value in defaults.items():
- if not env.get(key):
- env[key] = value
-
- def get_launch_hook_paths(self, app):
- if app.host_name != self.host_name:
- return []
- return [
- os.path.join(UNREAL_ROOT_DIR, "hooks")
- ]
-
- def get_workfile_extensions(self):
- return [".uproject"]
diff --git a/openpype/hosts/unreal/api/pipeline.py
b/openpype/hosts/unreal/api/pipeline.py
deleted file mode 100644
index f2d7b5f73e..0000000000
--- a/openpype/hosts/unreal/api/pipeline.py
+++ /dev/null
@@ -1,802 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-import json
-import logging
-from typing import List
-from contextlib import contextmanager
-import semver
-import time
-
-import pyblish.api
-
-from openpype.client import get_asset_by_name, get_assets
-from openpype.pipeline import (
- register_loader_plugin_path,
- register_creator_plugin_path,
- register_inventory_action_path,
- deregister_loader_plugin_path,
- deregister_creator_plugin_path,
- deregister_inventory_action_path,
- AYON_CONTAINER_ID,
- legacy_io,
-)
-from openpype.tools.utils import host_tools
-import openpype.hosts.unreal
-from openpype.host import HostBase, ILoadHost, IPublishHost
-
-import unreal # noqa
-
-# Rename to Ayon once parent module renames
-logger = logging.getLogger("openpype.hosts.unreal")
-
-AYON_CONTAINERS = "AyonContainers"
-AYON_ASSET_DIR = "/Game/Ayon/Assets"
-CONTEXT_CONTAINER = "Ayon/context.json"
-UNREAL_VERSION = semver.VersionInfo(
- *os.getenv("AYON_UNREAL_VERSION").split(".")
-)
-
-HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.unreal.__file__))
-PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
-PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
-LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
-CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
-
-
-class UnrealHost(HostBase, ILoadHost, IPublishHost):
- """Unreal host implementation.
-
- For some time this class will re-use functions from module based
- implementation for backwards compatibility of older unreal projects.
- """
-
- name = "unreal"
-
- def install(self):
- install()
-
- def get_containers(self):
- return ls()
-
- @staticmethod
- def show_tools_popup():
- """Show tools popup with actions leading to show other tools."""
- show_tools_popup()
-
- @staticmethod
- def show_tools_dialog():
- """Show tools dialog with actions leading to show other tools."""
- show_tools_dialog()
-
- def update_context_data(self, data, changes):
- content_path = unreal.Paths.project_content_dir()
- op_ctx = content_path + CONTEXT_CONTAINER
- attempts = 3
- for i in range(attempts):
- try:
- with open(op_ctx, "w+") as f:
- json.dump(data, f)
- break
- except IOError as e:
- if i == attempts - 1:
- raise Exception(
- "Failed to write context data. Aborting.") from e
- unreal.log_warning("Failed to write context data. Retrying...")
- time.sleep(3)
- continue
-
- def get_context_data(self):
- content_path = unreal.Paths.project_content_dir()
- op_ctx = content_path + CONTEXT_CONTAINER
- if not os.path.isfile(op_ctx):
- return {}
- with open(op_ctx, "r") as fp:
- data = json.load(fp)
- return data
-
-
-def install():
- """Install Unreal configuration for OpenPype."""
- print("-=" * 40)
- logo = '''.
-.
- ·
- │
- ·∙/
- ·-∙•∙-·
- / \\ /∙· / \\
- ∙ \\ │ / ∙
- \\ \\ · / /
- \\\\ ∙ ∙ //
- \\\\/ \\//
- ___
- │ │
- │ │
- │ │
- │___│
-
-·
- ·-─═─-∙ A Y O N ∙-─═─-·
- by YNPUT
-.
-'''
- print(logo)
- print("installing Ayon for Unreal ...")
- print("-=" * 40)
- logger.info("installing Ayon for Unreal")
- pyblish.api.register_host("unreal")
- pyblish.api.register_plugin_path(str(PUBLISH_PATH))
- register_loader_plugin_path(str(LOAD_PATH))
- register_creator_plugin_path(str(CREATE_PATH))
- register_inventory_action_path(str(INVENTORY_PATH))
- _register_callbacks()
- _register_events()
-
-
-def uninstall():
- """Uninstall Unreal configuration for Ayon."""
- pyblish.api.deregister_plugin_path(str(PUBLISH_PATH))
- deregister_loader_plugin_path(str(LOAD_PATH))
- deregister_creator_plugin_path(str(CREATE_PATH))
- deregister_inventory_action_path(str(INVENTORY_PATH))
-
-
-def _register_callbacks():
- """
- TODO: Implement callbacks if supported by UE
- """
- pass
-
-
-def _register_events():
- """
- TODO: Implement callbacks if supported by UE
- """
- pass
-
-
-def ls():
- """List all containers.
-
- List all containers found in Unreal's *Content Manager* and return
- their metadata, adding `objectName` to each.
-
- """
- ar = unreal.AssetRegistryHelpers.get_asset_registry()
- # UE 5.1 changed how class name is specified
- class_name = ["/Script/Ayon", "AyonAssetContainer"] if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0 else "AyonAssetContainer" # noqa
- ayon_containers = ar.get_assets_by_class(class_name, True)
-
- # get_asset_by_class returns AssetData. To get all metadata we need to
- # load the asset. get_tag_values() works only on metadata registered in
- # Asset Registry Project settings (and there is no way to set it with
- # python short of editing the ini configuration file).
- for asset_data in ayon_containers:
- asset = asset_data.get_asset()
- data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
- data["objectName"] = asset_data.asset_name
- yield cast_map_to_str_dict(data)
-
-
-def ls_inst():
- ar = unreal.AssetRegistryHelpers.get_asset_registry()
- # UE 5.1 changed how class name is specified
- class_name = [
- "/Script/Ayon",
- "AyonPublishInstance"
- ] if (
- UNREAL_VERSION.major == 5
- and UNREAL_VERSION.minor > 0
- ) else "AyonPublishInstance" # noqa
- instances = ar.get_assets_by_class(class_name, True)
-
- # get_asset_by_class returns AssetData. To get all metadata we need to
- # load the asset. get_tag_values() works only on metadata registered in
- # Asset Registry Project settings (and there is no way to set it with
- # python short of editing the ini configuration file).
- for asset_data in instances:
- asset = asset_data.get_asset()
- data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
- data["objectName"] = asset_data.asset_name
- yield cast_map_to_str_dict(data)
-
-
-def parse_container(container):
- """To get data from container, AyonAssetContainer must be loaded.
-
- Args:
- container(str): path to container
-
- Returns:
- dict: metadata stored on container
- """
- asset = unreal.EditorAssetLibrary.load_asset(container)
- data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
- data["objectName"] = asset.get_name()
- data = cast_map_to_str_dict(data)
-
- return data
-
-
-def publish():
- """Shorthand to publish from within host."""
- import pyblish.util
-
- return pyblish.util.publish()
-
-
-def containerise(name, namespace, nodes, context, loader=None, suffix="_CON"):
- """Bundle *nodes* (assets) into a *container* and add metadata to it.
-
- Unreal doesn't support *groups* of assets that you can add metadata to.
- But it does support folders that help to organize assets.
Unfortunately
- those folders are just that - you cannot add any additional information
- to them. The Ayon Integration Plugin provides a way out by implementing
- the `AssetContainer` Blueprint class. When added to a folder, this class
- can handle metadata on it using the standard
- :func:`unreal.EditorAssetLibrary.set_metadata_tag()` and
- :func:`unreal.EditorAssetLibrary.get_metadata_tag_values()`. It also
- stores and monitors all changes to assets in the path where it resides.
- The list of those assets is available as the `assets` property.
-
- This is a list of strings starting with the asset type and ending with
- its path:
- `Material /Game/Ayon/Test/TestMaterial.TestMaterial`
-
- """
- # 1 - create directory for container
- root = "/Game"
- container_name = f"{name}{suffix}"
- new_name = move_assets_to_path(root, container_name, nodes)
-
- # 2 - create Asset Container there
- path = f"{root}/{new_name}"
- create_container(container=container_name, path=path)
-
- namespace = path
-
- data = {
- "schema": "ayon:container-2.0",
- "id": AYON_CONTAINER_ID,
- "name": new_name,
- "namespace": namespace,
- "loader": str(loader),
- "representation": context["representation"]["_id"],
- }
- # 3 - imprint data
- imprint(f"{path}/{container_name}", data)
- return path
-
-
-def instantiate(root, name, data, assets=None, suffix="_INS"):
- """Bundle *nodes* into a *container*.
-
- Marks it with metadata as a publishable instance. If assets are provided,
- they are moved to a new path where an `AyonPublishInstance` class asset is
- created and imprinted with metadata.
-
- This can then be collected for publishing by Pyblish for example.
-
- Args:
- root (str): root path where to create instance container
- name (str): name of the container
- data (dict): data to imprint on container
- assets (list of str): list of asset paths to include in publish
- instance
- suffix (str): suffix string to append to instance name
-
- """
- container_name = f"{name}{suffix}"
-
- # if we specify assets, create new folder and move them there. If not,
- # just create empty folder
- if assets:
- new_name = move_assets_to_path(root, container_name, assets)
- else:
- new_name = create_folder(root, name)
-
- path = f"{root}/{new_name}"
- create_publish_instance(instance=container_name, path=path)
-
- imprint(f"{path}/{container_name}", data)
-
-
-def imprint(node, data):
- loaded_asset = unreal.EditorAssetLibrary.load_asset(node)
- for key, value in data.items():
- # Support values evaluated at imprint
- if callable(value):
- value = value()
- # Unreal doesn't support NoneType in metadata values
- if value is None:
- value = ""
- unreal.EditorAssetLibrary.set_metadata_tag(
- loaded_asset, key, str(value)
- )
-
- with unreal.ScopedEditorTransaction("Ayon containerising"):
- unreal.EditorAssetLibrary.save_asset(node)
-
-
-def show_tools_popup():
- """Show popup with tools.
-
- Popup will disappear on click or losing focus.
- """
- from openpype.hosts.unreal.api import tools_ui
-
- tools_ui.show_tools_popup()
-
-
-def show_tools_dialog():
- """Show dialog with tools.
-
- Dialog will stay visible.
- """ - from openpype.hosts.unreal.api import tools_ui - - tools_ui.show_tools_dialog() - - -def show_creator(): - host_tools.show_creator() - - -def show_loader(): - host_tools.show_loader(use_context=True) - - -def show_publisher(): - host_tools.show_publish() - - -def show_manager(): - host_tools.show_scene_inventory() - - -def show_experimental_tools(): - host_tools.show_experimental_tools_dialog() - - -def create_folder(root: str, name: str) -> str: - """Create new folder. - - If folder exists, append number at the end and try again, incrementing - if needed. - - Args: - root (str): path root - name (str): folder name - - Returns: - str: folder name - - Example: - >>> create_folder("/Game/Foo") - /Game/Foo - >>> create_folder("/Game/Foo") - /Game/Foo1 - - """ - eal = unreal.EditorAssetLibrary - index = 1 - while True: - if eal.does_directory_exist(f"{root}/{name}"): - name = f"{name}{index}" - index += 1 - else: - eal.make_directory(f"{root}/{name}") - break - - return name - - -def move_assets_to_path(root: str, name: str, assets: List[str]) -> str: - """Moving (renaming) list of asset paths to new destination. - - Args: - root (str): root of the path (eg. `/Game`) - name (str): name of destination directory (eg. `Foo` ) - assets (list of str): list of asset paths - - Returns: - str: folder name - - Example: - This will get paths of all assets under `/Game/Test` and move them - to `/Game/NewTest`. If `/Game/NewTest` already exists, then resulting - path will be `/Game/NewTest1` - - >>> assets = unreal.EditorAssetLibrary.list_assets("/Game/Test") - >>> move_assets_to_path("/Game", "NewTest", assets) - NewTest - - """ - eal = unreal.EditorAssetLibrary - name = create_folder(root, name) - - unreal.log(assets) - for asset in assets: - loaded = eal.load_asset(asset) - eal.rename_asset(asset, f"{root}/{name}/{loaded.get_name()}") - - return name - - -def create_container(container: str, path: str) -> unreal.Object: - """Helper function to create Asset Container class on given path. - - This Asset Class helps to mark given path as Container - and enable asset version control on it. - - Args: - container (str): Asset Container name - path (str): Path where to create Asset Container. This path should - point into container folder - - Returns: - :class:`unreal.Object`: instance of created asset - - Example: - - create_container( - "/Game/modelingFooCharacter_CON", - "modelingFooCharacter_CON" - ) - - """ - factory = unreal.AyonAssetContainerFactory() - tools = unreal.AssetToolsHelpers().get_asset_tools() - - return tools.create_asset(container, path, None, factory) - - -def create_publish_instance(instance: str, path: str) -> unreal.Object: - """Helper function to create Ayon Publish Instance on given path. - - This behaves similarly as :func:`create_ayon_container`. - - Args: - path (str): Path where to create Publish Instance. - This path should point into container folder - instance (str): Publish Instance name - - Returns: - :class:`unreal.Object`: instance of created asset - - Example: - - create_publish_instance( - "/Game/modelingFooCharacter_INST", - "modelingFooCharacter_INST" - ) - - """ - factory = unreal.AyonPublishInstanceFactory() - tools = unreal.AssetToolsHelpers().get_asset_tools() - return tools.create_asset(instance, path, None, factory) - - -def cast_map_to_str_dict(umap) -> dict: - """Cast Unreal Map to dict. - - Helper function to cast Unreal Map object to plain old python - dict. This will also cast values and keys to str. Useful for - metadata dicts. 
- - Args: - umap: Unreal Map object - - Returns: - dict - - """ - return {str(key): str(value) for (key, value) in umap.items()} - - -def get_subsequences(sequence: unreal.LevelSequence): - """Get list of subsequences from sequence. - - Args: - sequence (unreal.LevelSequence): Sequence - - Returns: - list(unreal.LevelSequence): List of subsequences - - """ - tracks = sequence.get_master_tracks() - subscene_track = next( - ( - t - for t in tracks - if t.get_class() == unreal.MovieSceneSubTrack.static_class() - ), - None, - ) - if subscene_track is not None and subscene_track.get_sections(): - return subscene_track.get_sections() - return [] - - -def set_sequence_hierarchy( - seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths -): - # Get existing sequencer tracks or create them if they don't exist - tracks = seq_i.get_master_tracks() - subscene_track = None - visibility_track = None - for t in tracks: - if t.get_class() == unreal.MovieSceneSubTrack.static_class(): - subscene_track = t - if (t.get_class() == - unreal.MovieSceneLevelVisibilityTrack.static_class()): - visibility_track = t - if not subscene_track: - subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack) - if not visibility_track: - visibility_track = seq_i.add_master_track( - unreal.MovieSceneLevelVisibilityTrack) - - # Create the sub-scene section - subscenes = subscene_track.get_sections() - subscene = None - for s in subscenes: - if s.get_editor_property('sub_sequence') == seq_j: - subscene = s - break - if not subscene: - subscene = subscene_track.add_section() - subscene.set_row_index(len(subscene_track.get_sections())) - subscene.set_editor_property('sub_sequence', seq_j) - subscene.set_range( - min_frame_j, - max_frame_j + 1) - - # Create the visibility section - ar = unreal.AssetRegistryHelpers.get_asset_registry() - maps = [] - for m in map_paths: - # Unreal requires to load the level to get the map name - unreal.EditorLevelLibrary.save_all_dirty_levels() - unreal.EditorLevelLibrary.load_level(m) - maps.append(str(ar.get_asset_by_object_path(m).asset_name)) - - vis_section = visibility_track.add_section() - index = len(visibility_track.get_sections()) - - vis_section.set_range( - min_frame_j, - max_frame_j + 1) - vis_section.set_visibility(unreal.LevelVisibility.VISIBLE) - vis_section.set_row_index(index) - vis_section.set_level_names(maps) - - if min_frame_j > 1: - hid_section = visibility_track.add_section() - hid_section.set_range( - 1, - min_frame_j) - hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) - hid_section.set_row_index(index) - hid_section.set_level_names(maps) - if max_frame_j < max_frame_i: - hid_section = visibility_track.add_section() - hid_section.set_range( - max_frame_j + 1, - max_frame_i + 1) - hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) - hid_section.set_row_index(index) - hid_section.set_level_names(maps) - - -def generate_sequence(h, h_dir): - tools = unreal.AssetToolsHelpers().get_asset_tools() - - sequence = tools.create_asset( - asset_name=h, - package_path=h_dir, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() - ) - - project_name = legacy_io.active_project() - asset_data = get_asset_by_name( - project_name, - h_dir.split('/')[-1], - fields=["_id", "data.fps"] - ) - - start_frames = [] - end_frames = [] - - elements = list(get_assets( - project_name, - parent_ids=[asset_data["_id"]], - fields=["_id", "data.clipIn", "data.clipOut"] - )) - for e in elements: - start_frames.append(e.get('data').get('clipIn')) - 
end_frames.append(e.get('data').get('clipOut')) - - elements.extend(get_assets( - project_name, - parent_ids=[e["_id"]], - fields=["_id", "data.clipIn", "data.clipOut"] - )) - - min_frame = min(start_frames) - max_frame = max(end_frames) - - fps = asset_data.get('data').get("fps") - - sequence.set_display_rate( - unreal.FrameRate(fps, 1.0)) - sequence.set_playback_start(min_frame) - sequence.set_playback_end(max_frame) - - sequence.set_work_range_start(min_frame / fps) - sequence.set_work_range_end(max_frame / fps) - sequence.set_view_range_start(min_frame / fps) - sequence.set_view_range_end(max_frame / fps) - - tracks = sequence.get_master_tracks() - track = None - for t in tracks: - if (t.get_class() == - unreal.MovieSceneCameraCutTrack.static_class()): - track = t - break - if not track: - track = sequence.add_master_track( - unreal.MovieSceneCameraCutTrack) - - return sequence, (min_frame, max_frame) - - -def _get_comps_and_assets( - component_class, asset_class, old_assets, new_assets, selected -): - eas = unreal.get_editor_subsystem(unreal.EditorActorSubsystem) - - components = [] - if selected: - sel_actors = eas.get_selected_level_actors() - for actor in sel_actors: - comps = actor.get_components_by_class(component_class) - components.extend(comps) - else: - comps = eas.get_all_level_actors_components() - components = [ - c for c in comps if isinstance(c, component_class) - ] - - # Get all the static meshes among the old assets in a dictionary with - # the name as key - selected_old_assets = {} - for a in old_assets: - asset = unreal.EditorAssetLibrary.load_asset(a) - if isinstance(asset, asset_class): - selected_old_assets[asset.get_name()] = asset - - # Get all the static meshes among the new assets in a dictionary with - # the name as key - selected_new_assets = {} - for a in new_assets: - asset = unreal.EditorAssetLibrary.load_asset(a) - if isinstance(asset, asset_class): - selected_new_assets[asset.get_name()] = asset - - return components, selected_old_assets, selected_new_assets - - -def replace_static_mesh_actors(old_assets, new_assets, selected): - smes = unreal.get_editor_subsystem(unreal.StaticMeshEditorSubsystem) - - static_mesh_comps, old_meshes, new_meshes = _get_comps_and_assets( - unreal.StaticMeshComponent, - unreal.StaticMesh, - old_assets, - new_assets, - selected - ) - - for old_name, old_mesh in old_meshes.items(): - new_mesh = new_meshes.get(old_name) - - if not new_mesh: - continue - - smes.replace_mesh_components_meshes( - static_mesh_comps, old_mesh, new_mesh) - - -def replace_skeletal_mesh_actors(old_assets, new_assets, selected): - skeletal_mesh_comps, old_meshes, new_meshes = _get_comps_and_assets( - unreal.SkeletalMeshComponent, - unreal.SkeletalMesh, - old_assets, - new_assets, - selected - ) - - for old_name, old_mesh in old_meshes.items(): - new_mesh = new_meshes.get(old_name) - - if not new_mesh: - continue - - for comp in skeletal_mesh_comps: - if comp.get_skeletal_mesh_asset() == old_mesh: - comp.set_skeletal_mesh_asset(new_mesh) - - -def replace_geometry_cache_actors(old_assets, new_assets, selected): - geometry_cache_comps, old_caches, new_caches = _get_comps_and_assets( - unreal.GeometryCacheComponent, - unreal.GeometryCache, - old_assets, - new_assets, - selected - ) - - for old_name, old_mesh in old_caches.items(): - new_mesh = new_caches.get(old_name) - - if not new_mesh: - continue - - for comp in geometry_cache_comps: - if comp.get_editor_property("geometry_cache") == old_mesh: - comp.set_geometry_cache(new_mesh) - - -def 
delete_asset_if_unused(container, asset_content): - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - references = set() - - for asset_path in asset_content: - asset = ar.get_asset_by_object_path(asset_path) - refs = ar.get_referencers( - asset.package_name, - unreal.AssetRegistryDependencyOptions( - include_soft_package_references=False, - include_hard_package_references=True, - include_searchable_names=False, - include_soft_management_references=False, - include_hard_management_references=False - )) - if not refs: - continue - references = references.union(set(refs)) - - # Filter out references that are in the Temp folder - cleaned_references = { - ref for ref in references if not str(ref).startswith("/Temp/")} - - # Check which of the references are Levels - for ref in cleaned_references: - loaded_asset = unreal.EditorAssetLibrary.load_asset(ref) - if isinstance(loaded_asset, unreal.World): - # If there is at least a level, we can stop, we don't want to - # delete the container - return - - unreal.log("Previous version unused, deleting...") - - # No levels, delete the asset - unreal.EditorAssetLibrary.delete_directory(container["namespace"]) - - -@contextmanager -def maintained_selection(): - """Stub to be either implemented or replaced. - - This is needed for old publisher implementation, but - it is not supported (yet) in UE. - """ - try: - yield - finally: - pass diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py deleted file mode 100644 index 26ef69af86..0000000000 --- a/openpype/hosts/unreal/api/plugin.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- coding: utf-8 -*- -import ast -import collections -import sys -import six -from abc import ( - ABC, - ABCMeta, -) - -import unreal - -from .pipeline import ( - create_publish_instance, - imprint, - ls_inst, - UNREAL_VERSION -) -from openpype.lib import ( - BoolDef, - UILabelDef -) -from openpype.pipeline import ( - Creator, - LoaderPlugin, - CreatorError, - CreatedInstance -) - - -@six.add_metaclass(ABCMeta) -class UnrealBaseCreator(Creator): - """Base class for Unreal creator plugins.""" - root = "/Game/Ayon/AyonPublishInstances" - suffix = "_INS" - - @staticmethod - def cache_subsets(shared_data): - """Cache instances for Creators to shared data. - - Create `unreal_cached_subsets` key when needed in shared data and - fill it with all collected instances from the scene under its - respective creator identifiers. - - If legacy instances are detected in the scene, create - `unreal_cached_legacy_subsets` there and fill it with - all legacy subsets under family as a key. - - Args: - Dict[str, Any]: Shared data. - - Return: - Dict[str, Any]: Shared data dictionary. 
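-
-        Example:
-            Hypothetical shape of the cache after a call; the creator
-            identifier and instance values are illustrative, not real
-            scene content:
-
-            shared_data = {}
-            UnrealBaseCreator.cache_subsets(shared_data)
-            # shared_data["unreal_cached_subsets"] ==
-            #     {"io.ayon.creators.unreal.render": [<instance>, ...]}
-            # shared_data["unreal_cached_legacy_subsets"] ==
-            #     {"render": [<legacy instance>, ...]}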
- - """ - if shared_data.get("unreal_cached_subsets") is None: - unreal_cached_subsets = collections.defaultdict(list) - unreal_cached_legacy_subsets = collections.defaultdict(list) - for instance in ls_inst(): - creator_id = instance.get("creator_identifier") - if creator_id: - unreal_cached_subsets[creator_id].append(instance) - else: - family = instance.get("family") - unreal_cached_legacy_subsets[family].append(instance) - - shared_data["unreal_cached_subsets"] = unreal_cached_subsets - shared_data["unreal_cached_legacy_subsets"] = ( - unreal_cached_legacy_subsets - ) - return shared_data - - def create(self, subset_name, instance_data, pre_create_data): - try: - instance_name = f"{subset_name}{self.suffix}" - pub_instance = create_publish_instance(instance_name, self.root) - - instance_data["subset"] = subset_name - instance_data["instance_path"] = f"{self.root}/{instance_name}" - - instance = CreatedInstance( - self.family, - subset_name, - instance_data, - self) - self._add_instance_to_context(instance) - - pub_instance.set_editor_property('add_external_assets', True) - assets = pub_instance.get_editor_property('asset_data_external') - - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - for member in pre_create_data.get("members", []): - obj = ar.get_asset_by_object_path(member).get_asset() - assets.add(obj) - - imprint(f"{self.root}/{instance_name}", instance.data_to_store()) - - return instance - - except Exception as er: - six.reraise( - CreatorError, - CreatorError(f"Creator error: {er}"), - sys.exc_info()[2]) - - def collect_instances(self): - # cache instances if missing - self.cache_subsets(self.collection_shared_data) - for instance in self.collection_shared_data[ - "unreal_cached_subsets"].get(self.identifier, []): - # Unreal saves metadata as string, so we need to convert it back - instance['creator_attributes'] = ast.literal_eval( - instance.get('creator_attributes', '{}')) - instance['publish_attributes'] = ast.literal_eval( - instance.get('publish_attributes', '{}')) - created_instance = CreatedInstance.from_existing(instance, self) - self._add_instance_to_context(created_instance) - - def update_instances(self, update_list): - for created_inst, changes in update_list: - instance_node = created_inst.get("instance_path", "") - - if not instance_node: - unreal.log_warning( - f"Instance node not found for {created_inst}") - continue - - new_values = { - key: changes[key].new_value - for key in changes.changed_keys - } - imprint( - instance_node, - new_values - ) - - def remove_instances(self, instances): - for instance in instances: - instance_node = instance.data.get("instance_path", "") - if instance_node: - unreal.EditorAssetLibrary.delete_asset(instance_node) - - self._remove_instance_from_context(instance) - - -@six.add_metaclass(ABCMeta) -class UnrealAssetCreator(UnrealBaseCreator): - """Base class for Unreal creator plugins based on assets.""" - - def create(self, subset_name, instance_data, pre_create_data): - """Create instance of the asset. - - Args: - subset_name (str): Name of the subset. - instance_data (dict): Data for the instance. - pre_create_data (dict): Data for the instance. - - Returns: - CreatedInstance: Created instance. - """ - try: - # Check if instance data has members, filled by the plugin. - # If not, use selection. 
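-            # Illustrative result of the branch below, assuming a single
-            # selected asset (a hypothetical path in the form returned
-            # by unreal.Object.get_path_name()):
-            #     pre_create_data["members"] = ["/Game/Assets/Hero.Hero"]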
- if not pre_create_data.get("members"): - pre_create_data["members"] = [] - - if pre_create_data.get("use_selection"): - utilib = unreal.EditorUtilityLibrary - sel_objects = utilib.get_selected_assets() - pre_create_data["members"] = [ - a.get_path_name() for a in sel_objects] - - super(UnrealAssetCreator, self).create( - subset_name, - instance_data, - pre_create_data) - - except Exception as er: - six.reraise( - CreatorError, - CreatorError(f"Creator error: {er}"), - sys.exc_info()[2]) - - def get_pre_create_attr_defs(self): - return [ - BoolDef("use_selection", label="Use selection", default=True) - ] - - -@six.add_metaclass(ABCMeta) -class UnrealActorCreator(UnrealBaseCreator): - """Base class for Unreal creator plugins based on actors.""" - - def create(self, subset_name, instance_data, pre_create_data): - """Create instance of the asset. - - Args: - subset_name (str): Name of the subset. - instance_data (dict): Data for the instance. - pre_create_data (dict): Data for the instance. - - Returns: - CreatedInstance: Created instance. - """ - try: - if UNREAL_VERSION.major == 5: - world = unreal.UnrealEditorSubsystem().get_editor_world() - else: - world = unreal.EditorLevelLibrary.get_editor_world() - - # Check if the level is saved - if world.get_path_name().startswith("/Temp/"): - raise CreatorError( - "Level must be saved before creating instances.") - - # Check if instance data has members, filled by the plugin. - # If not, use selection. - if not instance_data.get("members"): - actor_subsystem = unreal.EditorActorSubsystem() - sel_actors = actor_subsystem.get_selected_level_actors() - selection = [a.get_path_name() for a in sel_actors] - - instance_data["members"] = selection - - instance_data["level"] = world.get_path_name() - - super(UnrealActorCreator, self).create( - subset_name, - instance_data, - pre_create_data) - - except Exception as er: - six.reraise( - CreatorError, - CreatorError(f"Creator error: {er}"), - sys.exc_info()[2]) - - def get_pre_create_attr_defs(self): - return [ - UILabelDef("Select actors to create instance from them.") - ] - - -class Loader(LoaderPlugin, ABC): - """This serves as skeleton for future Ayon specific functionality""" - pass diff --git a/openpype/hosts/unreal/integration b/openpype/hosts/unreal/integration deleted file mode 160000 index 63266607ce..0000000000 --- a/openpype/hosts/unreal/integration +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 63266607ceb972a61484f046634ddfc9eb0b5757 diff --git a/openpype/hosts/unreal/lib.py b/openpype/hosts/unreal/lib.py deleted file mode 100644 index 6d544f65b2..0000000000 --- a/openpype/hosts/unreal/lib.py +++ /dev/null @@ -1,539 +0,0 @@ -# -*- coding: utf-8 -*- -"""Unreal launching and project tools.""" - -import json -import os -import platform -import re -import subprocess -from collections import OrderedDict -from distutils import dir_util -from pathlib import Path -from typing import List - -from openpype.settings import get_project_settings - - -def get_engine_versions(env=None): - """Detect Unreal Engine versions. - - This will try to detect location and versions of installed Unreal Engine. - Location can be overridden by `UNREAL_ENGINE_LOCATION` environment - variable. - - .. deprecated:: 3.15.4 - - Args: - env (dict, optional): Environment to use. - - Returns: - OrderedDict: dictionary with version as a key and dir as value. - so the highest version is first. 
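-
-    Note:
-        A hypothetical call with an explicit environment override (the
-        path and version are illustrative):
-
-        >>> get_engine_versions({"UNREAL_ENGINE_LOCATION": "/opt/ue"})
-        OrderedDict([('4.26', '/opt/ue/UE_4.26')])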
- - Example: - >>> get_engine_versions() - { - "4.23": "C:/Epic Games/UE_4.23", - "4.24": "C:/Epic Games/UE_4.24" - } - - """ - env = env or os.environ - engine_locations = {} - try: - root, dirs, _ = next(os.walk(env["UNREAL_ENGINE_LOCATION"])) - - for directory in dirs: - if directory.startswith("UE"): - try: - ver = re.split(r"[-_]", directory)[1] - except IndexError: - continue - engine_locations[ver] = os.path.join(root, directory) - except KeyError: - # environment variable not set - pass - except OSError: - # specified directory doesn't exist - pass - except StopIteration: - # specified directory doesn't exist - pass - - # if we've got something, terminate auto-detection process - if engine_locations: - return OrderedDict(sorted(engine_locations.items())) - - # else kick in platform specific detection - if platform.system().lower() == "windows": - return OrderedDict(sorted(_win_get_engine_versions().items())) - if platform.system().lower() == "linux": - # on linux, there is no installation and getting Unreal Engine involves - # git clone. So we'll probably depend on `UNREAL_ENGINE_LOCATION`. - pass - if platform.system().lower() == "darwin": - return OrderedDict(sorted(_darwin_get_engine_version().items())) - - return OrderedDict() - - -def get_editor_exe_path(engine_path: Path, engine_version: str) -> Path: - """Get UE Editor executable path.""" - ue_path = engine_path / "Engine/Binaries" - if platform.system().lower() == "windows": - if engine_version.split(".")[0] == "4": - ue_path /= "Win64/UE4Editor.exe" - elif engine_version.split(".")[0] == "5": - ue_path /= "Win64/UnrealEditor.exe" - - elif platform.system().lower() == "linux": - ue_path /= "Linux/UE4Editor" - - elif platform.system().lower() == "darwin": - ue_path /= "Mac/UE4Editor" - - return ue_path - - -def _win_get_engine_versions(): - """Get Unreal Engine versions on Windows. - - If engines are installed via Epic Games Launcher then there is: - `%PROGRAMDATA%/Epic/UnrealEngineLauncher/LauncherInstalled.dat` - This file is JSON file listing installed stuff, Unreal engines - are marked with `"AppName" = "UE_X.XX"`` like `UE_4.24` - - .. deprecated:: 3.15.4 - - Returns: - dict: version as a key and path as a value. - - """ - install_json_path = os.path.join( - os.getenv("PROGRAMDATA"), - "Epic", - "UnrealEngineLauncher", - "LauncherInstalled.dat", - ) - - return _parse_launcher_locations(install_json_path) - - -def _darwin_get_engine_version() -> dict: - """Get Unreal Engine versions on MacOS. - - It works the same as on Windows, just JSON file location is different. - - .. deprecated:: 3.15.4 - - Returns: - dict: version as a key and path as a value. - - See Also: - :func:`_win_get_engine_versions`. - - """ - install_json_path = os.path.join( - os.getenv("HOME"), - "Library", - "Application Support", - "Epic", - "UnrealEngineLauncher", - "LauncherInstalled.dat", - ) - - return _parse_launcher_locations(install_json_path) - - -def _parse_launcher_locations(install_json_path: str) -> dict: - """This will parse locations from json file. - - .. deprecated:: 3.15.4 - - Args: - install_json_path (str): Path to `LauncherInstalled.dat`. - - Returns: - dict: with unreal engine versions as keys and - paths to those engine installations as value. - - """ - engine_locations = {} - if os.path.isfile(install_json_path): - with open(install_json_path, "r") as ilf: - try: - install_data = json.load(ilf) - except json.JSONDecodeError as e: - raise Exception( - "Invalid `LauncherInstalled.dat file. 
`" - "Cannot determine Unreal Engine location." - ) from e - - for installation in install_data.get("InstallationList", []): - if installation.get("AppName").startswith("UE_"): - ver = installation.get("AppName").split("_")[1] - engine_locations[ver] = installation.get("InstallLocation") - - return engine_locations - - -def create_unreal_project(project_name: str, - unreal_project_name: str, - ue_version: str, - pr_dir: Path, - engine_path: Path, - dev_mode: bool = False, - env: dict = None) -> None: - """This will create `.uproject` file at specified location. - - As there is no way I know to create a project via command line, this is - easiest option. Unreal project file is basically a JSON file. If we find - the `AYON_UNREAL_PLUGIN` environment variable we assume this is the - location of the Integration Plugin and we copy its content to the project - folder and enable this plugin. - - Args: - project_name (str): Name of the project in AYON. - unreal_project_name (str): Name of the project in Unreal. - ue_version (str): Unreal engine version (like 4.23). - pr_dir (Path): Path to directory where project will be created. - engine_path (Path): Path to Unreal Engine installation. - dev_mode (bool, optional): Flag to trigger C++ style Unreal project - needing Visual Studio and other tools to compile plugins from - sources. This will trigger automatically if `Binaries` - directory is not found in plugin folders as this indicates - this is only source distribution of the plugin. Dev mode - is also set in Settings. - env (dict, optional): Environment to use. If not set, `os.environ`. - - Throws: - NotImplementedError: For unsupported platforms. - - Returns: - None - - Deprecated: - since 3.16.0 - - """ - env = env or os.environ - - preset = get_project_settings(project_name)["unreal"]["project_setup"] - ue_id = ".".join(ue_version.split(".")[:2]) - # get unreal engine identifier - # ------------------------------------------------------------------------- - # FIXME (antirotor): As of 4.26 this is problem with UE4 built from - # sources. In that case Engine ID is calculated per machine/user and not - # from Engine files as this code then reads. This then prevents UE4 - # to directly open project as it will complain about project being - # created in different UE4 version. When user convert such project - # to his UE4 version, Engine ID is replaced in uproject file. If some - # other user tries to open it, it will present him with similar error. - - # engine_path should be the location of UE_X.X folder - - ue_editor_exe: Path = get_editor_exe_path(engine_path, ue_version) - cmdlet_project: Path = get_path_to_cmdlet_project(ue_version) - - project_file = pr_dir / f"{unreal_project_name}.uproject" - - print("--- Generating a new project ...") - commandlet_cmd = [f'{ue_editor_exe.as_posix()}', - f'{cmdlet_project.as_posix()}', - f'-run=AyonGenerateProject', - f'{project_file.resolve().as_posix()}'] - - if dev_mode or preset["dev_mode"]: - commandlet_cmd.append('-GenerateCode') - - gen_process = subprocess.Popen(commandlet_cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - for line in gen_process.stdout: - print(line.decode(), end='') - gen_process.stdout.close() - return_code = gen_process.wait() - - if return_code and return_code != 0: - raise RuntimeError( - (f"Failed to generate '{unreal_project_name}' project! 
" - f"Exited with return code {return_code}")) - - print("--- Project has been generated successfully.") - - with open(project_file.as_posix(), mode="r+") as pf: - pf_json = json.load(pf) - pf_json["EngineAssociation"] = get_build_id(engine_path, ue_version) - pf.seek(0) - json.dump(pf_json, pf, indent=4) - pf.truncate() - print(f'--- Engine ID has been written into the project file') - - if dev_mode or preset["dev_mode"]: - u_build_tool = get_path_to_ubt(engine_path, ue_version) - - arch = "Win64" - if platform.system().lower() == "windows": - arch = "Win64" - elif platform.system().lower() == "linux": - arch = "Linux" - elif platform.system().lower() == "darwin": - # we need to test this out - arch = "Mac" - - command1 = [u_build_tool.as_posix(), "-projectfiles", - f"-project={project_file}", "-progress"] - - subprocess.run(command1) - - command2 = [u_build_tool.as_posix(), - f"-ModuleWithSuffix={unreal_project_name},3555", arch, - "Development", "-TargetType=Editor", - f'-Project={project_file}', - f'{project_file}', - "-IgnoreJunk"] - - subprocess.run(command2) - - # ensure we have PySide2 installed in engine - python_path = None - if platform.system().lower() == "windows": - python_path = engine_path / ("Engine/Binaries/ThirdParty/" - "Python3/Win64/python.exe") - - if platform.system().lower() == "linux": - python_path = engine_path / ("Engine/Binaries/ThirdParty/" - "Python3/Linux/bin/python3") - - if platform.system().lower() == "darwin": - python_path = engine_path / ("Engine/Binaries/ThirdParty/" - "Python3/Mac/bin/python3") - - if not python_path: - raise NotImplementedError("Unsupported platform") - if not python_path.exists(): - raise RuntimeError(f"Unreal Python not found at {python_path}") - subprocess.check_call( - [python_path.as_posix(), "-m", "pip", "install", "pyside2"]) - - -def get_path_to_uat(engine_path: Path) -> Path: - if platform.system().lower() == "windows": - return engine_path / "Engine/Build/BatchFiles/RunUAT.bat" - - if platform.system().lower() in ["linux", "darwin"]: - return engine_path / "Engine/Build/BatchFiles/RunUAT.sh" - - -def get_compatible_integration( - ue_version: str, integration_root: Path) -> List[Path]: - """Get path to compatible version of integration plugin. - - This will try to get the closest compatible versions to the one - specified in sorted list. - - Args: - ue_version (str): version of the current Unreal Engine. - integration_root (Path): path to built-in integration plugins. - - Returns: - list of Path: Sorted list of paths closest to the specified - version. - - """ - major, minor = ue_version.split(".") - integration_paths = [p for p in integration_root.iterdir() - if p.is_dir()] - - compatible_versions = [] - for i in integration_paths: - # parse version from path - try: - i_major, i_minor = re.search( - r"(?P\d+).(?P\d+)$", i.name).groups() - except AttributeError: - # in case there is no match, just skip to next - continue - - # consider versions with different major so different that they - # are incompatible - if int(major) != int(i_major): - continue - - compatible_versions.append(i) - - sorted(set(compatible_versions)) - return compatible_versions - - -def get_path_to_cmdlet_project(ue_version: str) -> Path: - cmd_project = Path( - os.path.dirname(os.path.abspath(__file__))) - - # For now, only tested on Windows (For Linux and Mac - # it has to be implemented) - cmd_project /= f"integration/UE_{ue_version}" - - # if the integration doesn't exist for current engine version - # try to find the closest to it. 
- if cmd_project.exists(): - return cmd_project / "CommandletProject/CommandletProject.uproject" - - if compatible_versions := get_compatible_integration( - ue_version, cmd_project.parent - ): - return compatible_versions[-1] / "CommandletProject/CommandletProject.uproject" # noqa: E501 - else: - raise RuntimeError( - ("There are no compatible versions of Unreal " - "integration plugin compatible with running version " - f"of Unreal Engine {ue_version}")) - - -def get_path_to_ubt(engine_path: Path, ue_version: str) -> Path: - u_build_tool_path = engine_path / "Engine/Binaries/DotNET" - - if ue_version.split(".")[0] == "4": - u_build_tool_path /= "UnrealBuildTool.exe" - elif ue_version.split(".")[0] == "5": - u_build_tool_path /= "UnrealBuildTool/UnrealBuildTool.exe" - - return Path(u_build_tool_path) - - -def get_build_id(engine_path: Path, ue_version: str) -> str: - ue_modules = Path() - if platform.system().lower() == "windows": - ue_modules_path = engine_path / "Engine/Binaries/Win64" - if ue_version.split(".")[0] == "4": - ue_modules_path /= "UE4Editor.modules" - elif ue_version.split(".")[0] == "5": - ue_modules_path /= "UnrealEditor.modules" - ue_modules = Path(ue_modules_path) - - if platform.system().lower() == "linux": - ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", - "Linux", "UE4Editor.modules")) - - if platform.system().lower() == "darwin": - ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", - "Mac", "UE4Editor.modules")) - - if ue_modules.exists(): - print("--- Loading Engine ID from modules file ...") - with open(ue_modules, "r") as mp: - loaded_modules = json.load(mp) - - if loaded_modules.get("BuildId"): - return "{" + loaded_modules.get("BuildId") + "}" - - -def check_built_plugin_existance(plugin_path) -> bool: - if not plugin_path: - return False - - integration_plugin_path = Path(plugin_path) - - if not integration_plugin_path.is_dir(): - raise RuntimeError("Path to the integration plugin is null!") - - if not (integration_plugin_path / "Binaries").is_dir() \ - or not (integration_plugin_path / "Intermediate").is_dir(): - return False - - return True - - -def copy_built_plugin(engine_path: Path, plugin_path: Path) -> None: - ayon_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" - - if not ayon_plugin_path.is_dir(): - ayon_plugin_path.mkdir(parents=True, exist_ok=True) - - engine_plugin_config_path: Path = ayon_plugin_path / "Config" - engine_plugin_config_path.mkdir(exist_ok=True) - - dir_util._path_created = {} - - dir_util.copy_tree(plugin_path.as_posix(), ayon_plugin_path.as_posix()) - - -def check_plugin_existence(engine_path: Path, env: dict = None) -> bool: - env = env or os.environ - integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) - - if not os.path.isdir(integration_plugin_path): - raise RuntimeError("Path to the integration plugin is null!") - - # Create a path to the plugin in the engine - op_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" - - if not op_plugin_path.is_dir(): - return False - - if not (op_plugin_path / "Binaries").is_dir() \ - or not (op_plugin_path / "Intermediate").is_dir(): - return False - - return True - - -def try_installing_plugin(engine_path: Path, env: dict = None) -> None: - env = env or os.environ - - integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) - - if not os.path.isdir(integration_plugin_path): - raise RuntimeError("Path to the integration plugin is null!") - - # Create a path to the plugin in the engine - 
op_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" - - if not op_plugin_path.is_dir(): - op_plugin_path.mkdir(parents=True, exist_ok=True) - - engine_plugin_config_path: Path = op_plugin_path / "Config" - engine_plugin_config_path.mkdir(exist_ok=True) - - dir_util._path_created = {} - - if not (op_plugin_path / "Binaries").is_dir() \ - or not (op_plugin_path / "Intermediate").is_dir(): - _build_and_move_plugin(engine_path, op_plugin_path, env) - - -def _build_and_move_plugin(engine_path: Path, - plugin_build_path: Path, - env: dict = None) -> None: - uat_path: Path = get_path_to_uat(engine_path) - - env = env or os.environ - integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) - - if uat_path.is_file(): - temp_dir: Path = integration_plugin_path.parent / "Temp" - temp_dir.mkdir(exist_ok=True) - uplugin_path: Path = integration_plugin_path / "Ayon.uplugin" - - # in order to successfully build the plugin, - # It must be built outside the Engine directory and then moved - build_plugin_cmd: List[str] = [f'{uat_path.as_posix()}', - 'BuildPlugin', - f'-Plugin={uplugin_path.as_posix()}', - f'-Package={temp_dir.as_posix()}'] - subprocess.run(build_plugin_cmd) - - # Copy the contents of the 'Temp' dir into the - # 'Ayon' directory in the engine - dir_util.copy_tree(temp_dir.as_posix(), plugin_build_path.as_posix()) - - # We need to also copy the config folder. - # The UAT doesn't include the Config folder in the build - plugin_install_config_path: Path = plugin_build_path / "Config" - integration_plugin_config_path = integration_plugin_path / "Config" - - dir_util.copy_tree(integration_plugin_config_path.as_posix(), - plugin_install_config_path.as_posix()) - - dir_util.remove_tree(temp_dir.as_posix()) diff --git a/openpype/hosts/unreal/plugins/create/create_camera.py b/openpype/hosts/unreal/plugins/create/create_camera.py deleted file mode 100644 index 73afb6cefd..0000000000 --- a/openpype/hosts/unreal/plugins/create/create_camera.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -import unreal - -from openpype.pipeline import CreatorError -from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION -from openpype.hosts.unreal.api.plugin import ( - UnrealAssetCreator, -) - - -class CreateCamera(UnrealAssetCreator): - """Create Camera.""" - - identifier = "io.ayon.creators.unreal.camera" - label = "Camera" - family = "camera" - icon = "fa.camera" - - def create(self, subset_name, instance_data, pre_create_data): - if pre_create_data.get("use_selection"): - sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() - selection = [a.get_path_name() for a in sel_objects] - - if len(selection) != 1: - raise CreatorError("Please select only one object.") - - # Add the current level path to the metadata - if UNREAL_VERSION.major == 5: - world = unreal.UnrealEditorSubsystem().get_editor_world() - else: - world = unreal.EditorLevelLibrary.get_editor_world() - - instance_data["level"] = world.get_path_name() - - super(CreateCamera, self).create( - subset_name, - instance_data, - pre_create_data) diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py deleted file mode 100644 index e5c7b8ee19..0000000000 --- a/openpype/hosts/unreal/plugins/create/create_layout.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -from openpype.hosts.unreal.api.plugin import ( - UnrealActorCreator, -) - - -class CreateLayout(UnrealActorCreator): - """Layout output for character rigs.""" - - identifier 
= "io.ayon.creators.unreal.layout" - label = "Layout" - family = "layout" - icon = "cubes" diff --git a/openpype/hosts/unreal/plugins/create/create_look.py b/openpype/hosts/unreal/plugins/create/create_look.py deleted file mode 100644 index e15b57b2ee..0000000000 --- a/openpype/hosts/unreal/plugins/create/create_look.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -import unreal - -from openpype.pipeline import CreatorError -from openpype.hosts.unreal.api.pipeline import ( - create_folder -) -from openpype.hosts.unreal.api.plugin import ( - UnrealAssetCreator -) -from openpype.lib import UILabelDef - - -class CreateLook(UnrealAssetCreator): - """Shader connections defining shape look.""" - - identifier = "io.ayon.creators.unreal.look" - label = "Look" - family = "look" - icon = "paint-brush" - - def create(self, subset_name, instance_data, pre_create_data): - # We need to set this to True for the parent class to work - pre_create_data["use_selection"] = True - sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() - selection = [a.get_path_name() for a in sel_objects] - - if len(selection) != 1: - raise CreatorError("Please select only one asset.") - - selected_asset = selection[0] - - look_directory = "/Game/Ayon/Looks" - - # Create the folder - folder_name = create_folder(look_directory, subset_name) - path = f"{look_directory}/{folder_name}" - - instance_data["look"] = path - - # Create a new cube static mesh - ar = unreal.AssetRegistryHelpers.get_asset_registry() - cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube") - - # Get the mesh of the selected object - original_mesh = ar.get_asset_by_object_path(selected_asset).get_asset() - materials = original_mesh.get_editor_property('static_materials') - - pre_create_data["members"] = [] - - # Add the materials to the cube - for material in materials: - mat_name = material.get_editor_property('material_slot_name') - object_path = f"{path}/{mat_name}.{mat_name}" - unreal_object = unreal.EditorAssetLibrary.duplicate_loaded_asset( - cube.get_asset(), object_path - ) - - # Remove the default material of the cube object - unreal_object.get_editor_property('static_materials').pop() - - unreal_object.add_material( - material.get_editor_property('material_interface')) - - pre_create_data["members"].append(object_path) - - unreal.EditorAssetLibrary.save_asset(object_path) - - super(CreateLook, self).create( - subset_name, - instance_data, - pre_create_data) - - def get_pre_create_attr_defs(self): - return [ - UILabelDef("Select the asset from which to create the look.") - ] diff --git a/openpype/hosts/unreal/plugins/create/create_render.py b/openpype/hosts/unreal/plugins/create/create_render.py deleted file mode 100644 index 5f561e68ad..0000000000 --- a/openpype/hosts/unreal/plugins/create/create_render.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- -from pathlib import Path - -import unreal - -from openpype.hosts.unreal.api.pipeline import ( - UNREAL_VERSION, - create_folder, - get_subsequences, -) -from openpype.hosts.unreal.api.plugin import ( - UnrealAssetCreator -) -from openpype.lib import ( - UILabelDef, - UISeparatorDef, - BoolDef, - NumberDef -) - - -class CreateRender(UnrealAssetCreator): - """Create instance for sequence for rendering""" - - identifier = "io.ayon.creators.unreal.render" - label = "Render" - family = "render" - icon = "eye" - - def create_instance( - self, instance_data, subset_name, pre_create_data, - selected_asset_path, master_seq, master_lvl, seq_data - ): - 
instance_data["members"] = [selected_asset_path] - instance_data["sequence"] = selected_asset_path - instance_data["master_sequence"] = master_seq - instance_data["master_level"] = master_lvl - instance_data["output"] = seq_data.get('output') - instance_data["frameStart"] = seq_data.get('frame_range')[0] - instance_data["frameEnd"] = seq_data.get('frame_range')[1] - - super(CreateRender, self).create( - subset_name, - instance_data, - pre_create_data) - - def create_with_new_sequence( - self, subset_name, instance_data, pre_create_data - ): - # If the option to create a new level sequence is selected, - # create a new level sequence and a master level. - - root = f"/Game/Ayon/Sequences" - - # Create a new folder for the sequence in root - sequence_dir_name = create_folder(root, subset_name) - sequence_dir = f"{root}/{sequence_dir_name}" - - unreal.log_warning(f"sequence_dir: {sequence_dir}") - - # Create the level sequence - asset_tools = unreal.AssetToolsHelpers.get_asset_tools() - seq = asset_tools.create_asset( - asset_name=subset_name, - package_path=sequence_dir, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew()) - - seq.set_playback_start(pre_create_data.get("start_frame")) - seq.set_playback_end(pre_create_data.get("end_frame")) - - pre_create_data["members"] = [seq.get_path_name()] - - unreal.EditorAssetLibrary.save_asset(seq.get_path_name()) - - # Create the master level - if UNREAL_VERSION.major >= 5: - curr_level = unreal.LevelEditorSubsystem().get_current_level() - else: - world = unreal.EditorLevelLibrary.get_editor_world() - levels = unreal.EditorLevelUtils.get_levels(world) - curr_level = levels[0] if len(levels) else None - if not curr_level: - raise RuntimeError("No level loaded.") - curr_level_path = curr_level.get_outer().get_path_name() - - # If the level path does not start with "/Game/", the current - # level is a temporary, unsaved level. - if curr_level_path.startswith("/Game/"): - if UNREAL_VERSION.major >= 5: - unreal.LevelEditorSubsystem().save_current_level() - else: - unreal.EditorLevelLibrary.save_current_level() - - ml_path = f"{sequence_dir}/{subset_name}_MasterLevel" - - if UNREAL_VERSION.major >= 5: - unreal.LevelEditorSubsystem().new_level(ml_path) - else: - unreal.EditorLevelLibrary.new_level(ml_path) - - seq_data = { - "sequence": seq, - "output": f"{seq.get_name()}", - "frame_range": ( - seq.get_playback_start(), - seq.get_playback_end())} - - self.create_instance( - instance_data, subset_name, pre_create_data, - seq.get_path_name(), seq.get_path_name(), ml_path, seq_data) - - def create_from_existing_sequence( - self, subset_name, instance_data, pre_create_data - ): - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() - selection = [ - a.get_path_name() for a in sel_objects - if a.get_class().get_name() == "LevelSequence"] - - if len(selection) == 0: - raise RuntimeError("Please select at least one Level Sequence.") - - seq_data = None - - for sel in selection: - selected_asset = ar.get_asset_by_object_path(sel).get_asset() - selected_asset_path = selected_asset.get_path_name() - - # Check if the selected asset is a level sequence asset. - if selected_asset.get_class().get_name() != "LevelSequence": - unreal.log_warning( - f"Skipping {selected_asset.get_name()}. It isn't a Level " - "Sequence.") - - if pre_create_data.get("use_hierarchy"): - # The asset name is the the third element of the path which - # contains the map. 
-                # To take the asset name, we remove from the path the
-                # prefix "/Game/Ayon/" and then we split the path by "/".
-                sel_path = selected_asset_path
-                asset_name = sel_path.replace(
-                    "/Game/Ayon/", "").split("/")[0]
-
-                search_path = f"/Game/Ayon/{asset_name}"
-            else:
-                search_path = Path(selected_asset_path).parent.as_posix()
-
-            # Get the master sequence and the master level.
-            # There should be only one sequence and one level in the directory.
-            try:
-                ar_filter = unreal.ARFilter(
-                    class_names=["LevelSequence"],
-                    package_paths=[search_path],
-                    recursive_paths=False)
-                sequences = ar.get_assets(ar_filter)
-                master_seq = sequences[0].get_asset().get_path_name()
-                master_seq_obj = sequences[0].get_asset()
-                ar_filter = unreal.ARFilter(
-                    class_names=["World"],
-                    package_paths=[search_path],
-                    recursive_paths=False)
-                levels = ar.get_assets(ar_filter)
-                master_lvl = levels[0].get_asset().get_path_name()
-            except IndexError:
-                raise RuntimeError(
-                    "Could not find the hierarchy for the selected sequence.")
-
-            # If the selected asset is the master sequence, we get its data
-            # and then we create the instance for the master sequence.
-            # Otherwise, we cycle from the master sequence to find the selected
-            # sequence and we get its data. This data will be used to create
-            # the instance for the selected sequence. In particular,
-            # we get the frame range of the selected sequence and its final
-            # output path.
-            master_seq_data = {
-                "sequence": master_seq_obj,
-                "output": f"{master_seq_obj.get_name()}",
-                "frame_range": (
-                    master_seq_obj.get_playback_start(),
-                    master_seq_obj.get_playback_end())}
-
-            if (selected_asset_path == master_seq or
-                    pre_create_data.get("use_hierarchy")):
-                seq_data = master_seq_data
-            else:
-                seq_data_list = [master_seq_data]
-
-                for seq in seq_data_list:
-                    subscenes = get_subsequences(seq.get('sequence'))
-
-                    for sub_seq in subscenes:
-                        sub_seq_obj = sub_seq.get_sequence()
-                        curr_data = {
-                            "sequence": sub_seq_obj,
-                            "output": (f"{seq.get('output')}/"
                                       f"{sub_seq_obj.get_name()}"),
-                            "frame_range": (
-                                sub_seq.get_start_frame(),
-                                sub_seq.get_end_frame() - 1)}
-
-                        # If the selected asset is the current sub-sequence,
-                        # we get its data and we break the loop.
-                        # Otherwise, we add the current sub-sequence data to
-                        # the list of sequences to check.
-                        if sub_seq_obj.get_path_name() == selected_asset_path:
-                            seq_data = curr_data
-                            break
-
-                        seq_data_list.append(curr_data)
-
-                    # If we found the selected asset, we break the loop.
-                    if seq_data is not None:
-                        break
-
-            # If we didn't find the selected asset, we don't create the
-            # instance.
-            if not seq_data:
-                unreal.log_warning(
-                    f"Skipping {selected_asset.get_name()}. It isn't a "
-                    "sub-sequence of the master sequence.")
-                continue
-
-            self.create_instance(
-                instance_data, subset_name, pre_create_data,
-                selected_asset_path, master_seq, master_lvl, seq_data)
-
-    def create(self, subset_name, instance_data, pre_create_data):
-        if pre_create_data.get("create_seq"):
-            self.create_with_new_sequence(
-                subset_name, instance_data, pre_create_data)
-        else:
-            self.create_from_existing_sequence(
-                subset_name, instance_data, pre_create_data)
-
-    def get_pre_create_attr_defs(self):
-        return [
-            UILabelDef(
-                "Select a Level Sequence to render or create a new one."
-            ),
-            BoolDef(
-                "create_seq",
-                label="Create a new Level Sequence",
-                default=False
-            ),
-            UILabelDef(
-                "WARNING: If you create a new Level Sequence, the current\n"
-                "level will be saved and a new Master Level will be created."
-            ),
-            NumberDef(
-                "start_frame",
-                label="Start Frame",
-                default=0,
-                minimum=-999999,
-                maximum=999999
-            ),
-            NumberDef(
-                "end_frame",
-                label="End Frame",
-                default=150,
-                minimum=-999999,
-                maximum=999999
-            ),
-            UISeparatorDef(),
-            UILabelDef(
-                "The following settings are valid only if you are not\n"
-                "creating a new sequence."
-            ),
-            BoolDef(
-                "use_hierarchy",
-                label="Use Hierarchy",
-                default=False
-            ),
-        ]
diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py
deleted file mode 100644
index 7ed85ee411..0000000000
--- a/openpype/hosts/unreal/plugins/load/load_animation.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Load FBX with animations."""
-import os
-import json
-
-import unreal
-from unreal import EditorAssetLibrary
-from unreal import MovieSceneSkeletalAnimationTrack
-from unreal import MovieSceneSkeletalAnimationSection
-
-from openpype.pipeline.context_tools import get_current_project_asset
-from openpype.pipeline import (
-    get_representation_path,
-    AYON_CONTAINER_ID
-)
-from openpype.hosts.unreal.api import plugin
-from openpype.hosts.unreal.api import pipeline as unreal_pipeline
-
-
-class AnimationFBXLoader(plugin.Loader):
-    """Load Unreal SkeletalMesh from FBX."""
-
-    families = ["animation"]
-    label = "Import FBX Animation"
-    representations = ["fbx"]
-    icon = "cube"
-    color = "orange"
-
-    def _process(self, path, asset_dir, asset_name, instance_name):
-        automated = False
-        actor = None
-
-        task = unreal.AssetImportTask()
-        task.options = unreal.FbxImportUI()
-
-        if instance_name:
-            automated = True
-            # Old method to get the actor
-            # actor_name = 'PersistentLevel.' + instance_name
-            # actor = unreal.EditorLevelLibrary.get_actor_reference(actor_name)
-            actors = unreal.EditorLevelLibrary.get_all_level_actors()
-            for a in actors:
-                if a.get_class().get_name() != "SkeletalMeshActor":
-                    continue
-                if a.get_actor_label() == instance_name:
-                    actor = a
-                    break
-            if not actor:
-                raise Exception(f"Could not find actor {instance_name}")
-            skeleton = actor.skeletal_mesh_component.skeletal_mesh.skeleton
-            task.options.set_editor_property('skeleton', skeleton)
-
-        if not actor:
-            return None
-
-        asset_doc = get_current_project_asset(fields=["data.fps"])
-
-        task.set_editor_property('filename', path)
-        task.set_editor_property('destination_path', asset_dir)
-        task.set_editor_property('destination_name', asset_name)
-        task.set_editor_property('replace_existing', False)
-        task.set_editor_property('automated', automated)
-        task.set_editor_property('save', False)
-
-        # set import options here
-        task.options.set_editor_property(
-            'automated_import_should_detect_type', False)
-        task.options.set_editor_property(
-            'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH)
-        task.options.set_editor_property(
-            'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION)
-        task.options.set_editor_property('import_mesh', False)
-        task.options.set_editor_property('import_animations', True)
-        task.options.set_editor_property('override_full_name', True)
-
-        task.options.anim_sequence_import_data.set_editor_property(
-            'animation_length',
-            unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME
-        )
-        task.options.anim_sequence_import_data.set_editor_property(
-            'import_meshes_in_bone_hierarchy', False)
-        task.options.anim_sequence_import_data.set_editor_property(
-            'use_default_sample_rate', False)
-        task.options.anim_sequence_import_data.set_editor_property(
-            'custom_sample_rate', 
asset_doc.get("data", {}).get("fps")) - task.options.anim_sequence_import_data.set_editor_property( - 'import_custom_attribute', True) - task.options.anim_sequence_import_data.set_editor_property( - 'import_bone_tracks', True) - task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', False) - task.options.anim_sequence_import_data.set_editor_property( - 'convert_scene', True) - - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) - - asset_content = EditorAssetLibrary.list_assets( - asset_dir, recursive=True, include_folder=True - ) - - animation = None - - for a in asset_content: - imported_asset_data = EditorAssetLibrary.find_asset_data(a) - imported_asset = unreal.AssetRegistryHelpers.get_asset( - imported_asset_data) - if imported_asset.__class__ == unreal.AnimSequence: - animation = imported_asset - break - - if animation: - animation.set_editor_property('enable_root_motion', True) - actor.skeletal_mesh_component.set_editor_property( - 'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE) - actor.skeletal_mesh_component.animation_data.set_editor_property( - 'anim_to_play', animation) - - return animation - - def load(self, context, name, namespace, options=None): - """ - Load and containerise representation into Content Browser. - - This is two step process. First, import FBX to temporary path and - then call `containerise()` on it - this moves all content to new - directory and then it will create AssetContainer there and imprint it - with metadata. This will mark this path as container. - - Args: - context (dict): application context - name (str): subset name - namespace (str): in Unreal this is basically path to container. - This is not passed here, so namespace is set - by `containerise()` because only then we know - real path. - data (dict): Those would be data to be imprinted. This is not used - now, data are imprinted by `containerise()`. 
- - Returns: - list(str): list of container content - """ - # Create directory for asset and Ayon container - hierarchy = context.get('asset').get('data').get('parents') - root = "/Game/Ayon" - asset = context.get('asset').get('name') - suffix = "_CON" - asset_name = f"{asset}_{name}" if asset else f"{name}" - tools = unreal.AssetToolsHelpers().get_asset_tools() - asset_dir, container_name = tools.create_unique_asset_name( - f"{root}/Animations/{asset}/{name}", suffix="") - - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - _filter = unreal.ARFilter( - class_names=["World"], - package_paths=[f"{root}/{hierarchy[0]}"], - recursive_paths=False) - levels = ar.get_assets(_filter) - master_level = levels[0].get_asset().get_path_name() - - hierarchy_dir = root - for h in hierarchy: - hierarchy_dir = f"{hierarchy_dir}/{h}" - hierarchy_dir = f"{hierarchy_dir}/{asset}" - - _filter = unreal.ARFilter( - class_names=["World"], - package_paths=[f"{hierarchy_dir}/"], - recursive_paths=True) - levels = ar.get_assets(_filter) - level = levels[0].get_asset().get_path_name() - - unreal.EditorLevelLibrary.save_all_dirty_levels() - unreal.EditorLevelLibrary.load_level(level) - - container_name += suffix - - EditorAssetLibrary.make_directory(asset_dir) - - path = self.filepath_from_context(context) - libpath = path.replace(".fbx", ".json") - - with open(libpath, "r") as fp: - data = json.load(fp) - - instance_name = data.get("instance_name") - - animation = self._process(path, asset_dir, asset_name, instance_name) - - asset_content = EditorAssetLibrary.list_assets( - hierarchy_dir, recursive=True, include_folder=False) - - # Get the sequence for the layout, excluding the camera one. - sequences = [a for a in asset_content - if (EditorAssetLibrary.find_asset_data(a).get_class() == - unreal.LevelSequence.static_class() and - "_camera" not in a.split("/")[-1])] - - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - for s in sequences: - sequence = ar.get_asset_by_object_path(s).get_asset() - possessables = [ - p for p in sequence.get_possessables() - if p.get_display_name() == instance_name] - - for p in possessables: - tracks = [ - t for t in p.get_tracks() - if (t.get_class() == - MovieSceneSkeletalAnimationTrack.static_class())] - - for t in tracks: - sections = [ - s for s in t.get_sections() - if (s.get_class() == - MovieSceneSkeletalAnimationSection.static_class())] - - for s in sections: - s.params.set_editor_property('animation', animation) - - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) - - data = { - "schema": "ayon:container-2.0", - "id": AYON_CONTAINER_ID, - "asset": asset, - "namespace": asset_dir, - "container_name": container_name, - "asset_name": asset_name, - "loader": str(self.__class__.__name__), - "representation": context["representation"]["_id"], - "parent": context["representation"]["parent"], - "family": context["representation"]["context"]["family"] - } - unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data) - - imported_content = EditorAssetLibrary.list_assets( - asset_dir, recursive=True, include_folder=False) - - for a in imported_content: - EditorAssetLibrary.save_asset(a) - - unreal.EditorLevelLibrary.save_current_level() - unreal.EditorLevelLibrary.load_level(master_level) - - def update(self, container, representation): - name = container["asset_name"] - source_path = get_representation_path(representation) - asset_doc = get_current_project_asset(fields=["data.fps"]) - destination_path = 
container["namespace"] - - task = unreal.AssetImportTask() - task.options = unreal.FbxImportUI() - - task.set_editor_property('filename', source_path) - task.set_editor_property('destination_path', destination_path) - # strip suffix - task.set_editor_property('destination_name', name) - task.set_editor_property('replace_existing', True) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - task.options.set_editor_property( - 'automated_import_should_detect_type', False) - task.options.set_editor_property( - 'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH) - task.options.set_editor_property( - 'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION) - task.options.set_editor_property('import_mesh', False) - task.options.set_editor_property('import_animations', True) - task.options.set_editor_property('override_full_name', True) - - task.options.anim_sequence_import_data.set_editor_property( - 'animation_length', - unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME - ) - task.options.anim_sequence_import_data.set_editor_property( - 'import_meshes_in_bone_hierarchy', False) - task.options.anim_sequence_import_data.set_editor_property( - 'use_default_sample_rate', False) - task.options.anim_sequence_import_data.set_editor_property( - 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) - task.options.anim_sequence_import_data.set_editor_property( - 'import_custom_attribute', True) - task.options.anim_sequence_import_data.set_editor_property( - 'import_bone_tracks', True) - task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', False) - task.options.anim_sequence_import_data.set_editor_property( - 'convert_scene', True) - - skeletal_mesh = EditorAssetLibrary.load_asset( - container.get('namespace') + "/" + container.get('asset_name')) - skeleton = skeletal_mesh.get_editor_property('skeleton') - task.options.set_editor_property('skeleton', skeleton) - - # do import fbx and replace existing data - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) - container_path = f'{container["namespace"]}/{container["objectName"]}' - # update metadata - unreal_pipeline.imprint( - container_path, - { - "representation": str(representation["_id"]), - "parent": str(representation["parent"]) - }) - - asset_content = EditorAssetLibrary.list_assets( - destination_path, recursive=True, include_folder=True - ) - - for a in asset_content: - EditorAssetLibrary.save_asset(a) - - def remove(self, container): - path = container["namespace"] - parent_path = os.path.dirname(path) - - EditorAssetLibrary.delete_directory(path) - - asset_content = EditorAssetLibrary.list_assets( - parent_path, recursive=False, include_folder=True - ) - - if len(asset_content) == 0: - EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py deleted file mode 100644 index d663ce20ea..0000000000 --- a/openpype/hosts/unreal/plugins/load/load_camera.py +++ /dev/null @@ -1,567 +0,0 @@ -# -*- coding: utf-8 -*- -"""Load camera from FBX.""" -from pathlib import Path - -import unreal -from unreal import ( - EditorAssetLibrary, - EditorLevelLibrary, - EditorLevelUtils, - LevelSequenceEditorBlueprintLibrary as LevelSequenceLib, -) -from openpype.client import get_asset_by_name -from openpype.pipeline import ( - AYON_CONTAINER_ID, - get_current_project_name, -) -from openpype.hosts.unreal.api import 
plugin -from openpype.hosts.unreal.api.pipeline import ( - generate_sequence, - set_sequence_hierarchy, - create_container, - imprint, -) - - -class CameraLoader(plugin.Loader): - """Load Unreal StaticMesh from FBX""" - - families = ["camera"] - label = "Load Camera" - representations = ["fbx"] - icon = "cube" - color = "orange" - - def _import_camera( - self, world, sequence, bindings, import_fbx_settings, import_filename - ): - ue_version = unreal.SystemLibrary.get_engine_version().split('.') - ue_major = int(ue_version[0]) - ue_minor = int(ue_version[1]) - - if ue_major == 4 and ue_minor <= 26: - unreal.SequencerTools.import_fbx( - world, - sequence, - bindings, - import_fbx_settings, - import_filename - ) - elif (ue_major == 4 and ue_minor >= 27) or ue_major == 5: - unreal.SequencerTools.import_level_sequence_fbx( - world, - sequence, - bindings, - import_fbx_settings, - import_filename - ) - else: - raise NotImplementedError( - f"Unreal version {ue_major} not supported") - - def load(self, context, name, namespace, data): - """ - Load and containerise representation into Content Browser. - - This is two step process. First, import FBX to temporary path and - then call `containerise()` on it - this moves all content to new - directory and then it will create AssetContainer there and imprint it - with metadata. This will mark this path as container. - - Args: - context (dict): application context - name (str): subset name - namespace (str): in Unreal this is basically path to container. - This is not passed here, so namespace is set - by `containerise()` because only then we know - real path. - data (dict): Those would be data to be imprinted. This is not used - now, data are imprinted by `containerise()`. - - Returns: - list(str): list of container content - """ - - # Create directory for asset and Ayon container - hierarchy = context.get('asset').get('data').get('parents') - root = "/Game/Ayon" - hierarchy_dir = root - hierarchy_dir_list = [] - for h in hierarchy: - hierarchy_dir = f"{hierarchy_dir}/{h}" - hierarchy_dir_list.append(hierarchy_dir) - asset = context.get('asset').get('name') - suffix = "_CON" - asset_name = f"{asset}_{name}" if asset else f"{name}" - - tools = unreal.AssetToolsHelpers().get_asset_tools() - - # Create a unique name for the camera directory - unique_number = 1 - if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"): - asset_content = EditorAssetLibrary.list_assets( - f"{root}/{asset}", recursive=False, include_folder=True - ) - - # Get highest number to make a unique name - folders = [a for a in asset_content - if a[-1] == "/" and f"{name}_" in a] - # Get number from folder name. Splits the string by "_" and - # removes the last element (which is a "/"). - f_numbers = [int(f.split("_")[-1][:-1]) for f in folders] - f_numbers.sort() - unique_number = f_numbers[-1] + 1 if f_numbers else 1 - - asset_dir, container_name = tools.create_unique_asset_name( - f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="") - - container_name += suffix - - EditorAssetLibrary.make_directory(asset_dir) - - # Create map for the shot, and create hierarchy of map. If the maps - # already exist, we will use them. 
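-        # Illustrative content layout for hierarchy ["ep01", "sq01"],
-        # asset "sh010" and subset "cameraMain" (names hypothetical):
-        #     /Game/Ayon/ep01/ep01_map                      master level
-        #     /Game/Ayon/ep01/sq01/sh010/cameraMain_01/
-        #         sh010_map_camera                          shot level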
- h_dir = hierarchy_dir_list[0] - h_asset = hierarchy[0] - master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" - if not EditorAssetLibrary.does_asset_exist(master_level): - EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") - - level = f"{asset_dir}/{asset}_map_camera.{asset}_map_camera" - if not EditorAssetLibrary.does_asset_exist(level): - EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map_camera") - - EditorLevelLibrary.load_level(master_level) - EditorLevelUtils.add_level_to_world( - EditorLevelLibrary.get_editor_world(), - level, - unreal.LevelStreamingDynamic - ) - EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(level) - - # Get all the sequences in the hierarchy. It will create them, if - # they don't exist. - frame_ranges = [] - sequences = [] - for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): - root_content = EditorAssetLibrary.list_assets( - h_dir, recursive=False, include_folder=False) - - existing_sequences = [ - EditorAssetLibrary.find_asset_data(asset) - for asset in root_content - if EditorAssetLibrary.find_asset_data( - asset).get_class().get_name() == 'LevelSequence' - ] - - if existing_sequences: - for seq in existing_sequences: - sequences.append(seq.get_asset()) - frame_ranges.append(( - seq.get_asset().get_playback_start(), - seq.get_asset().get_playback_end())) - else: - sequence, frame_range = generate_sequence(h, h_dir) - - sequences.append(sequence) - frame_ranges.append(frame_range) - - EditorAssetLibrary.make_directory(asset_dir) - - cam_seq = tools.create_asset( - asset_name=f"{asset}_camera", - package_path=asset_dir, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() - ) - - # Add sequences data to hierarchy - for i in range(len(sequences) - 1): - set_sequence_hierarchy( - sequences[i], sequences[i + 1], - frame_ranges[i][1], - frame_ranges[i + 1][0], frame_ranges[i + 1][1], - [level]) - - project_name = get_current_project_name() - data = get_asset_by_name(project_name, asset)["data"] - cam_seq.set_display_rate( - unreal.FrameRate(data.get("fps"), 1.0)) - cam_seq.set_playback_start(data.get('clipIn')) - cam_seq.set_playback_end(data.get('clipOut') + 1) - set_sequence_hierarchy( - sequences[-1], cam_seq, - frame_ranges[-1][1], - data.get('clipIn'), data.get('clipOut'), - [level]) - - settings = unreal.MovieSceneUserImportFBXSettings() - settings.set_editor_property('reduce_keys', False) - - if cam_seq: - path = self.filepath_from_context(context) - self._import_camera( - EditorLevelLibrary.get_editor_world(), - cam_seq, - cam_seq.get_bindings(), - settings, - path - ) - - # Set range of all sections - # Changing the range of the section is not enough. We need to change - # the frame of all the keys in the section. 
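# A standalone editor's sketch (not part of the deleted file): the key retime
# applied below, shown in isolation; the frame numbers are hypothetical.
def retime_key(old_key_frame, clip_in, frame_start):
    # Keys keep their relative spacing; only the origin shifts to clipIn.
    return old_key_frame + (clip_in - frame_start)

assert retime_key(25, clip_in=1001, frame_start=1) == 1025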
- for possessable in cam_seq.get_possessables(): - for tracks in possessable.get_tracks(): - for section in tracks.get_sections(): - section.set_range( - data.get('clipIn'), - data.get('clipOut') + 1) - for channel in section.get_all_channels(): - for key in channel.get_keys(): - old_time = key.get_time().get_editor_property( - 'frame_number') - old_time_value = old_time.get_editor_property( - 'value') - new_time = old_time_value + ( - data.get('clipIn') - data.get('frameStart') - ) - key.set_time(unreal.FrameNumber(value=new_time)) - - # Create Asset Container - create_container( - container=container_name, path=asset_dir) - - data = { - "schema": "ayon:container-2.0", - "id": AYON_CONTAINER_ID, - "asset": asset, - "namespace": asset_dir, - "container_name": container_name, - "asset_name": asset_name, - "loader": str(self.__class__.__name__), - "representation": context["representation"]["_id"], - "parent": context["representation"]["parent"], - "family": context["representation"]["context"]["family"] - } - imprint(f"{asset_dir}/{container_name}", data) - - EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(master_level) - - # Save all assets in the hierarchy - asset_content = EditorAssetLibrary.list_assets( - hierarchy_dir_list[0], recursive=True, include_folder=False - ) - - for a in asset_content: - EditorAssetLibrary.save_asset(a) - - return asset_content - - def update(self, container, representation): - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - curr_level_sequence = LevelSequenceLib.get_current_level_sequence() - curr_time = LevelSequenceLib.get_current_time() - is_cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport() - - editor_subsystem = unreal.UnrealEditorSubsystem() - vp_loc, vp_rot = editor_subsystem.get_level_viewport_camera_info() - - asset_dir = container.get('namespace') - - EditorLevelLibrary.save_current_level() - - _filter = unreal.ARFilter( - class_names=["LevelSequence"], - package_paths=[asset_dir], - recursive_paths=False) - sequences = ar.get_assets(_filter) - _filter = unreal.ARFilter( - class_names=["World"], - package_paths=[asset_dir], - recursive_paths=True) - maps = ar.get_assets(_filter) - - # There should be only one map in the list - EditorLevelLibrary.load_level(maps[0].get_asset().get_path_name()) - - level_sequence = sequences[0].get_asset() - - display_rate = level_sequence.get_display_rate() - playback_start = level_sequence.get_playback_start() - playback_end = level_sequence.get_playback_end() - - sequence_name = f"{container.get('asset')}_camera" - - # Get the actors in the level sequence. - objs = unreal.SequencerTools.get_bound_objects( - unreal.EditorLevelLibrary.get_editor_world(), - level_sequence, - level_sequence.get_bindings(), - unreal.SequencerScriptingRange( - has_start_value=True, - has_end_value=True, - inclusive_start=level_sequence.get_playback_start(), - exclusive_end=level_sequence.get_playback_end() - ) - ) - - # Delete actors from the map - for o in objs: - if o.bound_objects[0].get_class().get_name() == "CineCameraActor": - actor_path = o.bound_objects[0].get_path_name().split(":")[-1] - actor = EditorLevelLibrary.get_actor_reference(actor_path) - EditorLevelLibrary.destroy_actor(actor) - - # Remove the Level Sequence from the parent. - # We need to traverse the hierarchy from the master sequence to find - # the level sequence. 
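# A standalone editor's sketch (not part of the deleted file): the hierarchy
# traversal below, reduced to its core idea; `get_sub_sections` is a
# hypothetical stand-in for reading a sequence's MovieSceneSubTrack sections.
def find_parent_sequence(master_sequence, target_name, get_sub_sections):
    queue = [master_sequence]
    while queue:
        sequence = queue.pop(0)
        for section in get_sub_sections(sequence):
            child = section.get_sequence()
            if child.get_name() == target_name:
                return sequence, section
            queue.append(child)
    return None, None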
- root = "/Game/Ayon" - namespace = container.get('namespace').replace(f"{root}/", "") - ms_asset = namespace.split('/')[0] - _filter = unreal.ARFilter( - class_names=["LevelSequence"], - package_paths=[f"{root}/{ms_asset}"], - recursive_paths=False) - sequences = ar.get_assets(_filter) - master_sequence = sequences[0].get_asset() - _filter = unreal.ARFilter( - class_names=["World"], - package_paths=[f"{root}/{ms_asset}"], - recursive_paths=False) - levels = ar.get_assets(_filter) - master_level = levels[0].get_asset().get_path_name() - - sequences = [master_sequence] - - parent = None - sub_scene = None - for s in sequences: - tracks = s.get_master_tracks() - subscene_track = None - for t in tracks: - if t.get_class() == unreal.MovieSceneSubTrack.static_class(): - subscene_track = t - if subscene_track: - sections = subscene_track.get_sections() - for ss in sections: - if ss.get_sequence().get_name() == sequence_name: - parent = s - sub_scene = ss - break - sequences.append(ss.get_sequence()) - for i, ss in enumerate(sections): - ss.set_row_index(i) - if parent: - break - - assert parent, "Could not find the parent sequence" - - EditorAssetLibrary.delete_asset(level_sequence.get_path_name()) - - settings = unreal.MovieSceneUserImportFBXSettings() - settings.set_editor_property('reduce_keys', False) - - tools = unreal.AssetToolsHelpers().get_asset_tools() - new_sequence = tools.create_asset( - asset_name=sequence_name, - package_path=asset_dir, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() - ) - - new_sequence.set_display_rate(display_rate) - new_sequence.set_playback_start(playback_start) - new_sequence.set_playback_end(playback_end) - - sub_scene.set_sequence(new_sequence) - - self._import_camera( - EditorLevelLibrary.get_editor_world(), - new_sequence, - new_sequence.get_bindings(), - settings, - str(representation["data"]["path"]) - ) - - # Set range of all sections - # Changing the range of the section is not enough. We need to change - # the frame of all the keys in the section. 
- project_name = get_current_project_name() - asset = container.get('asset') - data = get_asset_by_name(project_name, asset)["data"] - - for possessable in new_sequence.get_possessables(): - for tracks in possessable.get_tracks(): - for section in tracks.get_sections(): - section.set_range( - data.get('clipIn'), - data.get('clipOut') + 1) - for channel in section.get_all_channels(): - for key in channel.get_keys(): - old_time = key.get_time().get_editor_property( - 'frame_number') - old_time_value = old_time.get_editor_property( - 'value') - new_time = old_time_value + ( - data.get('clipIn') - data.get('frameStart') - ) - key.set_time(unreal.FrameNumber(value=new_time)) - - data = { - "representation": str(representation["_id"]), - "parent": str(representation["parent"]) - } - imprint(f"{asset_dir}/{container.get('container_name')}", data) - - EditorLevelLibrary.save_current_level() - - asset_content = EditorAssetLibrary.list_assets( - f"{root}/{ms_asset}", recursive=True, include_folder=False) - - for a in asset_content: - EditorAssetLibrary.save_asset(a) - - EditorLevelLibrary.load_level(master_level) - - if curr_level_sequence: - LevelSequenceLib.open_level_sequence(curr_level_sequence) - LevelSequenceLib.set_current_time(curr_time) - LevelSequenceLib.set_lock_camera_cut_to_viewport(is_cam_lock) - - editor_subsystem.set_level_viewport_camera_info(vp_loc, vp_rot) - - def remove(self, container): - asset_dir = container.get('namespace') - path = Path(asset_dir) - - ar = unreal.AssetRegistryHelpers.get_asset_registry() - _filter = unreal.ARFilter( - class_names=["LevelSequence"], - package_paths=[asset_dir], - recursive_paths=False) - sequences = ar.get_assets(_filter) - - if not sequences: - raise Exception("Could not find sequence.") - - world = ar.get_asset_by_object_path( - EditorLevelLibrary.get_editor_world().get_path_name()) - - _filter = unreal.ARFilter( - class_names=["World"], - package_paths=[asset_dir], - recursive_paths=True) - maps = ar.get_assets(_filter) - - # There should be only one map in the list - if not maps: - raise Exception("Could not find map.") - - map = maps[0] - - EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(map.get_asset().get_path_name()) - - # Remove the camera from the level. - actors = EditorLevelLibrary.get_all_level_actors() - - for a in actors: - if a.__class__ == unreal.CineCameraActor: - EditorLevelLibrary.destroy_actor(a) - - EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(world.get_asset().get_path_name()) - - # There should be only one sequence in the path. - sequence_name = sequences[0].asset_name - - # Remove the Level Sequence from the parent. - # We need to traverse the hierarchy from the master sequence to find - # the level sequence. 
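# A standalone editor's sketch (not part of the deleted file): after a section
# is removed below, row indexes are re-packed so that consecutive visibility
# sections sharing the same level names keep sharing a row.
def repack_rows(level_names_per_section):
    rows, row, prev = [], -1, None
    for names in level_names_per_section:
        if names != prev:
            row += 1
        rows.append(row)
        prev = names
    return rows

assert repack_rows([["sh010_map"], ["sh010_map"], ["sh020_map"]]) == [0, 0, 1]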
- root = "/Game/Ayon" - namespace = container.get('namespace').replace(f"{root}/", "") - ms_asset = namespace.split('/')[0] - _filter = unreal.ARFilter( - class_names=["LevelSequence"], - package_paths=[f"{root}/{ms_asset}"], - recursive_paths=False) - sequences = ar.get_assets(_filter) - master_sequence = sequences[0].get_asset() - _filter = unreal.ARFilter( - class_names=["World"], - package_paths=[f"{root}/{ms_asset}"], - recursive_paths=False) - levels = ar.get_assets(_filter) - master_level = levels[0].get_full_name() - - sequences = [master_sequence] - - parent = None - for s in sequences: - tracks = s.get_master_tracks() - subscene_track = None - visibility_track = None - for t in tracks: - if t.get_class() == unreal.MovieSceneSubTrack.static_class(): - subscene_track = t - if (t.get_class() == - unreal.MovieSceneLevelVisibilityTrack.static_class()): - visibility_track = t - if subscene_track: - sections = subscene_track.get_sections() - for ss in sections: - if ss.get_sequence().get_name() == sequence_name: - parent = s - subscene_track.remove_section(ss) - break - sequences.append(ss.get_sequence()) - # Update subscenes indexes. - for i, ss in enumerate(sections): - ss.set_row_index(i) - - if visibility_track: - sections = visibility_track.get_sections() - for ss in sections: - if (unreal.Name(f"{container.get('asset')}_map_camera") - in ss.get_level_names()): - visibility_track.remove_section(ss) - # Update visibility sections indexes. - i = -1 - prev_name = [] - for ss in sections: - if prev_name != ss.get_level_names(): - i += 1 - ss.set_row_index(i) - prev_name = ss.get_level_names() - if parent: - break - - assert parent, "Could not find the parent sequence" - - # Create a temporary level to delete the layout level. - EditorLevelLibrary.save_all_dirty_levels() - EditorAssetLibrary.make_directory(f"{root}/tmp") - tmp_level = f"{root}/tmp/temp_map" - if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"): - EditorLevelLibrary.new_level(tmp_level) - else: - EditorLevelLibrary.load_level(tmp_level) - - # Delete the layout directory. - EditorAssetLibrary.delete_directory(asset_dir) - - EditorLevelLibrary.load_level(master_level) - EditorAssetLibrary.delete_directory(f"{root}/tmp") - - # Check if there isn't any more assets in the parent folder, and - # delete it if not. - asset_content = EditorAssetLibrary.list_assets( - path.parent.as_posix(), recursive=False, include_folder=True - ) - - if len(asset_content) == 0: - EditorAssetLibrary.delete_directory(path.parent.as_posix()) diff --git a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py deleted file mode 100644 index dad0310dfc..0000000000 --- a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py +++ /dev/null @@ -1,114 +0,0 @@ -import os -from pathlib import Path - -import unreal - -from openpype.pipeline import get_current_project_name -from openpype.pipeline import Anatomy -from openpype.hosts.unreal.api import pipeline -import pyblish.api - - -class CollectRenderInstances(pyblish.api.InstancePlugin): - """ This collector will try to find all the rendered frames. 
- - """ - order = pyblish.api.CollectorOrder - hosts = ["unreal"] - families = ["render"] - label = "Collect Render Instances" - - def process(self, instance): - self.log.debug("Preparing Rendering Instances") - - context = instance.context - - data = instance.data - data['remove'] = True - - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - sequence = ar.get_asset_by_object_path( - data.get('sequence')).get_asset() - - sequences = [{ - "sequence": sequence, - "output": data.get('output'), - "frame_range": ( - data.get('frameStart'), data.get('frameEnd')) - }] - - for s in sequences: - self.log.debug(f"Processing: {s.get('sequence').get_name()}") - subscenes = pipeline.get_subsequences(s.get('sequence')) - - if subscenes: - for ss in subscenes: - sequences.append({ - "sequence": ss.get_sequence(), - "output": (f"{s.get('output')}/" - f"{ss.get_sequence().get_name()}"), - "frame_range": ( - ss.get_start_frame(), ss.get_end_frame() - 1) - }) - else: - # Avoid creating instances for camera sequences - if "_camera" not in s.get('sequence').get_name(): - seq = s.get('sequence') - seq_name = seq.get_name() - - new_instance = context.create_instance( - f"{data.get('subset')}_" - f"{seq_name}") - new_instance[:] = seq_name - - new_data = new_instance.data - - new_data["asset"] = seq_name - new_data["setMembers"] = seq_name - new_data["family"] = "render" - new_data["families"] = ["render", "review"] - new_data["parent"] = data.get("parent") - new_data["subset"] = f"{data.get('subset')}_{seq_name}" - new_data["level"] = data.get("level") - new_data["output"] = s.get('output') - new_data["fps"] = seq.get_display_rate().numerator - new_data["frameStart"] = int(s.get('frame_range')[0]) - new_data["frameEnd"] = int(s.get('frame_range')[1]) - new_data["sequence"] = seq.get_path_name() - new_data["master_sequence"] = data["master_sequence"] - new_data["master_level"] = data["master_level"] - - self.log.debug(f"new instance data: {new_data}") - - try: - project = get_current_project_name() - anatomy = Anatomy(project) - root = anatomy.roots['renders'] - except Exception as e: - raise Exception(( - "Could not find render root " - "in anatomy settings.")) from e - - render_dir = f"{root}/{project}/{s.get('output')}" - render_path = Path(render_dir) - - frames = [] - - for x in render_path.iterdir(): - if x.is_file() and x.suffix == '.png': - frames.append(str(x.name)) - - if "representations" not in new_instance.data: - new_instance.data["representations"] = [] - - repr = { - 'frameStart': instance.data["frameStart"], - 'frameEnd': instance.data["frameEnd"], - 'name': 'png', - 'ext': 'png', - 'files': frames, - 'stagingDir': render_dir, - 'tags': ['review'] - } - new_instance.data["representations"].append(repr) diff --git a/openpype/hosts/unreal/plugins/publish/extract_camera.py b/openpype/hosts/unreal/plugins/publish/extract_camera.py deleted file mode 100644 index 16e365ca96..0000000000 --- a/openpype/hosts/unreal/plugins/publish/extract_camera.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -"""Extract camera from Unreal.""" -import os - -import unreal - -from openpype.pipeline import publish -from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION - - -class ExtractCamera(publish.Extractor): - """Extract a camera.""" - - label = "Extract Camera" - hosts = ["unreal"] - families = ["camera"] - optional = True - - def process(self, instance): - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - # Define extract output file path - staging_dir = self.staging_dir(instance) - 
fbx_filename = "{}.fbx".format(instance.name) - - # Perform extraction - self.log.info("Performing extraction..") - - # Check if the loaded level is the same of the instance - if UNREAL_VERSION.major == 5: - world = unreal.UnrealEditorSubsystem().get_editor_world() - else: - world = unreal.EditorLevelLibrary.get_editor_world() - current_level = world.get_path_name() - assert current_level == instance.data.get("level"), \ - "Wrong level loaded" - - for member in instance.data.get('members'): - data = ar.get_asset_by_object_path(member) - if UNREAL_VERSION.major == 5: - is_level_sequence = ( - data.asset_class_path.asset_name == "LevelSequence") - else: - is_level_sequence = (data.asset_class == "LevelSequence") - - if is_level_sequence: - sequence = data.get_asset() - if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor >= 1: - params = unreal.SequencerExportFBXParams( - world=world, - root_sequence=sequence, - sequence=sequence, - bindings=sequence.get_bindings(), - master_tracks=sequence.get_master_tracks(), - fbx_file_name=os.path.join(staging_dir, fbx_filename) - ) - unreal.SequencerTools.export_level_sequence_fbx(params) - elif UNREAL_VERSION.major == 4 and UNREAL_VERSION.minor == 26: - unreal.SequencerTools.export_fbx( - world, - sequence, - sequence.get_bindings(), - unreal.FbxExportOption(), - os.path.join(staging_dir, fbx_filename) - ) - else: - # Unreal 5.0 or 4.27 - unreal.SequencerTools.export_level_sequence_fbx( - world, - sequence, - sequence.get_bindings(), - unreal.FbxExportOption(), - os.path.join(staging_dir, fbx_filename) - ) - - if not os.path.isfile(os.path.join(staging_dir, fbx_filename)): - raise RuntimeError("Failed to extract camera") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - fbx_representation = { - 'name': 'fbx', - 'ext': 'fbx', - 'files': fbx_filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(fbx_representation) diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py deleted file mode 100644 index d30d04551d..0000000000 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import json -import math - -import unreal -from unreal import EditorLevelLibrary as ell -from unreal import EditorAssetLibrary as eal - -from openpype.client import get_representation_by_name -from openpype.pipeline import publish - - -class ExtractLayout(publish.Extractor): - """Extract a layout.""" - - label = "Extract Layout" - hosts = ["unreal"] - families = ["layout"] - optional = True - - def process(self, instance): - # Define extract output file path - staging_dir = self.staging_dir(instance) - - # Perform extraction - self.log.info("Performing extraction..") - - # Check if the loaded level is the same of the instance - current_level = ell.get_editor_world().get_path_name() - assert current_level == instance.data.get("level"), \ - "Wrong level loaded" - - json_data = [] - project_name = instance.context.data["projectName"] - - for member in instance[:]: - actor = ell.get_actor_reference(member) - mesh = None - - # Check type the type of mesh - if actor.get_class().get_name() == 'SkeletalMeshActor': - mesh = actor.skeletal_mesh_component.skeletal_mesh - elif actor.get_class().get_name() == 'StaticMeshActor': - mesh = actor.static_mesh_component.static_mesh - - if mesh: - # Search the reference to the Asset Container for the object - path = 
unreal.Paths.get_path(mesh.get_path_name()) - filter = unreal.ARFilter( - class_names=["AyonAssetContainer"], package_paths=[path]) - ar = unreal.AssetRegistryHelpers.get_asset_registry() - try: - asset_container = ar.get_assets(filter)[0].get_asset() - except IndexError: - self.log.error("AssetContainer not found.") - return - - parent_id = eal.get_metadata_tag(asset_container, "parent") - family = eal.get_metadata_tag(asset_container, "family") - - self.log.info("Parent: {}".format(parent_id)) - blend = get_representation_by_name( - project_name, "blend", parent_id, fields=["_id"] - ) - blend_id = blend["_id"] - - json_element = {} - json_element["reference"] = str(blend_id) - json_element["family"] = family - json_element["instance_name"] = actor.get_name() - json_element["asset_name"] = mesh.get_name() - import_data = mesh.get_editor_property("asset_import_data") - json_element["file_path"] = import_data.get_first_filename() - transform = actor.get_actor_transform() - - json_element["transform"] = { - "translation": { - "x": -transform.translation.x, - "y": transform.translation.y, - "z": transform.translation.z - }, - "rotation": { - "x": math.radians(transform.rotation.euler().x), - "y": math.radians(transform.rotation.euler().y), - "z": math.radians(180.0 - transform.rotation.euler().z) - }, - "scale": { - "x": transform.scale3d.x, - "y": transform.scale3d.y, - "z": transform.scale3d.z - } - } - json_data.append(json_element) - - json_filename = "{}.json".format(instance.name) - json_path = os.path.join(staging_dir, json_filename) - - with open(json_path, "w+") as file: - json.dump(json_data, fp=file, indent=2) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - json_representation = { - 'name': 'json', - 'ext': 'json', - 'files': json_filename, - "stagingDir": staging_dir, - } - instance.data["representations"].append(json_representation) diff --git a/openpype/hosts/unreal/plugins/publish/extract_look.py b/openpype/hosts/unreal/plugins/publish/extract_look.py deleted file mode 100644 index 4b32b4eb95..0000000000 --- a/openpype/hosts/unreal/plugins/publish/extract_look.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -import json -import os - -import unreal -from unreal import MaterialEditingLibrary as mat_lib - -from openpype.pipeline import publish - - -class ExtractLook(publish.Extractor): - """Extract look.""" - - label = "Extract Look" - hosts = ["unreal"] - families = ["look"] - optional = True - - def process(self, instance): - # Define extract output file path - staging_dir = self.staging_dir(instance) - resources_dir = instance.data["resourcesDir"] - - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - transfers = [] - - json_data = [] - - for member in instance: - asset = ar.get_asset_by_object_path(member) - obj = asset.get_asset() - - name = asset.get_editor_property('asset_name') - - json_element = {'material': str(name)} - - material_obj = obj.get_editor_property('static_materials')[0] - material = material_obj.material_interface - - base_color = mat_lib.get_material_property_input_node( - material, unreal.MaterialProperty.MP_BASE_COLOR) - - base_color_name = base_color.get_editor_property('parameter_name') - - texture = mat_lib.get_material_default_texture_parameter_value( - material, base_color_name) - - if texture: - # Export Texture - tga_filename = f"{instance.name}_{name}_texture.tga" - - tga_exporter = unreal.TextureExporterTGA() - - tga_export_task = unreal.AssetExportTask() - - 
tga_export_task.set_editor_property('exporter', tga_exporter)
-                tga_export_task.set_editor_property('automated', True)
-                tga_export_task.set_editor_property('object', texture)
-                tga_export_task.set_editor_property(
-                    'filename', f"{staging_dir}/{tga_filename}")
-                tga_export_task.set_editor_property('prompt', False)
-                tga_export_task.set_editor_property('selected', False)
-
-                unreal.Exporter.run_asset_export_task(tga_export_task)
-
-                json_element['tga_filename'] = tga_filename
-
-                transfers.append((
-                    f"{staging_dir}/{tga_filename}",
-                    f"{resources_dir}/{tga_filename}"))
-
-            fbx_filename = f"{instance.name}_{name}.fbx"
-
-            fbx_exporter = unreal.StaticMeshExporterFBX()
-            fbx_exporter.set_editor_property('text', False)
-
-            options = unreal.FbxExportOption()
-            options.set_editor_property('ascii', False)
-            options.set_editor_property('collision', False)
-
-            task = unreal.AssetExportTask()
-            task.set_editor_property('exporter', fbx_exporter)
-            task.set_editor_property('options', options)
-            task.set_editor_property('automated', True)
-            task.set_editor_property('object', obj)
-            task.set_editor_property(
-                'filename', f"{staging_dir}/{fbx_filename}")
-            task.set_editor_property('prompt', False)
-            task.set_editor_property('selected', False)
-
-            unreal.Exporter.run_asset_export_task(task)
-
-            json_element['fbx_filename'] = fbx_filename
-
-            transfers.append((
-                f"{staging_dir}/{fbx_filename}",
-                f"{resources_dir}/{fbx_filename}"))
-
-            json_data.append(json_element)
-
-        json_filename = f"{instance.name}.json"
-        json_path = os.path.join(staging_dir, json_filename)
-
-        with open(json_path, "w+") as file:
-            json.dump(json_data, fp=file, indent=2)
-
-        if "transfers" not in instance.data:
-            instance.data["transfers"] = []
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        json_representation = {
-            'name': 'json',
-            'ext': 'json',
-            'files': json_filename,
-            "stagingDir": staging_dir,
-        }
-
-        instance.data["representations"].append(json_representation)
-        instance.data["transfers"].extend(transfers)
diff --git a/openpype/hosts/webpublisher/README.md b/openpype/hosts/webpublisher/README.md
deleted file mode 100644
index 07a957fa7f..0000000000
--- a/openpype/hosts/webpublisher/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Webpublisher
--------------
-
-Plugins meant for Webpublisher processing.
-
-Gets triggered by calling `openpype_console modules webpublisher publish` with appropriate arguments.
diff --git a/openpype/hosts/webpublisher/__init__.py b/openpype/hosts/webpublisher/__init__.py
deleted file mode 100644
index 4e918c5d7d..0000000000
--- a/openpype/hosts/webpublisher/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from .addon import (
-    WebpublisherAddon,
-    WEBPUBLISHER_ROOT_DIR,
-)
-
-
-__all__ = (
-    "WebpublisherAddon",
-    "WEBPUBLISHER_ROOT_DIR",
-)
diff --git a/openpype/hosts/webpublisher/addon.py b/openpype/hosts/webpublisher/addon.py
deleted file mode 100644
index 810d9aa6c3..0000000000
--- a/openpype/hosts/webpublisher/addon.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import os
-
-from openpype.modules import click_wrap, OpenPypeModule, IHostAddon
-
-WEBPUBLISHER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-
-
-class WebpublisherAddon(OpenPypeModule, IHostAddon):
-    name = "webpublisher"
-    host_name = "webpublisher"
-
-    def initialize(self, module_settings):
-        self.enabled = True
-
-    def headless_publish(self, log, close_plugin_name=None, is_test=False):
-        """Runs publish in an opened host with a context.
-
-        Closes the Python process at the end.
-        """
-
-        from .lib import get_webpublish_conn, publish_and_log, publish_in_test
-
-        if is_test:
-            publish_in_test(log, close_plugin_name)
-            return
-
-        dbcon = get_webpublish_conn()
-        _id = os.environ.get("BATCH_LOG_ID")
-        if not _id:
-            log.warning("Unable to store log records, "
-                        "batch will be unfinished!")
-            return
-
-        publish_and_log(
-            dbcon, _id, log, close_plugin_name=close_plugin_name
-        )
-
-    def cli(self, click_group):
-        click_group.add_command(cli_main.to_click_obj())
-
-
-@click_wrap.group(
-    WebpublisherAddon.name,
-    help="Webpublisher related commands.")
-def cli_main():
-    pass
-
-
-@cli_main.command()
-@click_wrap.argument("path")
-@click_wrap.option("-u", "--user", help="User email address")
-@click_wrap.option("-p", "--project", help="Project")
-@click_wrap.option("-t", "--targets", help="Targets", default=None,
-                   multiple=True)
-def publish(project, path, user=None, targets=None):
-    """Start publishing (Inner command).
-
-    Publish collects json files from the paths provided as arguments.
-    More than one path is allowed.
-    """
-
-    from .publish_functions import cli_publish
-
-    cli_publish(project, path, user, targets)
-
-
-@cli_main.command()
-@click_wrap.argument("path")
-@click_wrap.option("-p", "--project", help="Project")
-@click_wrap.option("-h", "--host", help="Host")
-@click_wrap.option("-u", "--user", help="User email address")
-@click_wrap.option("-t", "--targets", help="Targets", default=None,
-                   multiple=True)
-def publishfromapp(project, path, host, user=None, targets=None):
    """Start publishing through an application (Inner command).
-
-    Publish collects json files from the paths provided as arguments.
-    More than one path is allowed.
-    """
-
-    from .publish_functions import cli_publish_from_app
-
-    cli_publish_from_app(project, path, host, user, targets)
-
-
-@cli_main.command()
-@click_wrap.option("-e", "--executable", help="Executable")
-@click_wrap.option("-u", "--upload_dir", help="Upload dir")
-@click_wrap.option("-h", "--host", help="Host", default=None)
-@click_wrap.option("-p", "--port", help="Port", default=None)
-def webserver(executable, upload_dir, host=None, port=None):
-    """Start the service for communication with the Webpublish front end.
-
-    OP must be configured on the machine, e.g. OPENPYPE_MONGO filled AND
-    FTRACK_BOT_API_KEY provided with an API key from Ftrack.
-
-    Expects a "pype.club" user created on Ftrack.
- """ - - from .webserver_service import run_webserver - - run_webserver(executable, upload_dir, host, port) diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py deleted file mode 100644 index afea838e2c..0000000000 --- a/openpype/hosts/webpublisher/api/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import logging - -import pyblish.api - -from openpype.host import HostBase -from openpype.hosts.webpublisher import WEBPUBLISHER_ROOT_DIR - -log = logging.getLogger("openpype.hosts.webpublisher") - - -class WebpublisherHost(HostBase): - name = "webpublisher" - - def install(self): - print("Installing Pype config...") - pyblish.api.register_host(self.name) - - publish_plugin_dir = os.path.join( - WEBPUBLISHER_ROOT_DIR, "plugins", "publish" - ) - pyblish.api.register_plugin_path(publish_plugin_dir) - self.log.info(publish_plugin_dir) diff --git a/openpype/hosts/webpublisher/lib.py b/openpype/hosts/webpublisher/lib.py deleted file mode 100644 index ecd28d2432..0000000000 --- a/openpype/hosts/webpublisher/lib.py +++ /dev/null @@ -1,308 +0,0 @@ -import os -from datetime import datetime -import collections -import json - -from bson.objectid import ObjectId - -import pyblish.util -import pyblish.api - -from openpype.client.mongo import OpenPypeMongoConnection -from openpype.settings import get_project_settings -from openpype.lib import Logger -from openpype.lib.profiles_filtering import filter_profiles - -ERROR_STATUS = "error" -IN_PROGRESS_STATUS = "in_progress" -REPROCESS_STATUS = "reprocess" -SENT_REPROCESSING_STATUS = "sent_for_reprocessing" -FINISHED_REPROCESS_STATUS = "republishing_finished" -FINISHED_OK_STATUS = "finished_ok" - -log = Logger.get_logger(__name__) - - -def parse_json(path): - """Parses json file at 'path' location - - Returns: - (dict) or None if unparsable - Raises: - AssertionError if 'path' doesn't exist - """ - path = path.strip('\"') - assert os.path.isfile(path), ( - "Path to json file doesn't exist. \"{}\"".format(path) - ) - data = None - with open(path, "r") as json_file: - try: - data = json.load(json_file) - except Exception as exc: - log.error( - "Error loading json: {} - Exception: {}".format(path, exc) - ) - return data - - -def get_batch_asset_task_info(ctx): - """Parses context data from webpublisher's batch metadata - - Returns: - (tuple): asset, task_name (Optional), task_type - """ - task_type = "default_task_type" - task_name = None - asset = None - - if ctx["type"] == "task": - items = ctx["path"].split('/') - asset = items[-2] - task_name = ctx["name"] - task_type = ctx["attributes"]["type"] - else: - asset = ctx["name"] - - return asset, task_name, task_type - - -def find_close_plugin(close_plugin_name, log): - if close_plugin_name: - plugins = pyblish.api.discover() - for plugin in plugins: - if plugin.__name__ == close_plugin_name: - return plugin - - log.debug("Close plugin not found, app might not close.") - - -def publish_in_test(log, close_plugin_name=None): - """Loops through all plugins, logs to console. Used for tests. - - Args: - log (Logger) - close_plugin_name (Optional[str]): Name of plugin with responsibility - to close application. - """ - - # Error exit as soon as any error occurs. - error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" - - close_plugin = find_close_plugin(close_plugin_name, log) - - for result in pyblish.util.publish_iter(): - for record in result["records"]: - # Why do we log again? pyblish logger is logging to stdout... 
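# A standalone editor's sketch (not part of the deleted file): the loops in
# this module rely on the shape of one pyblish.util.publish_iter() result;
# this helper just flattens such a result into printable lines.
def result_to_lines(result):
    lines = ["{}: {}".format(result["plugin"].label, record.msg)
             for record in result["records"]]
    if result["error"]:
        lines.append("Failed {}: {}".format(
            result["plugin"].__name__, result["error"]))
    return lines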
- log.info("{}: {}".format(result["plugin"].label, record.msg)) - - if not result["error"]: - continue - - # QUESTION We don't break on error? - error_message = error_format.format(**result) - log.error(error_message) - if close_plugin: # close host app explicitly after error - context = pyblish.api.Context() - close_plugin().process(context) - - -def get_webpublish_conn(): - """Get connection to OP 'webpublishes' collection.""" - mongo_client = OpenPypeMongoConnection.get_mongo_client() - database_name = os.environ["OPENPYPE_DATABASE_NAME"] - return mongo_client[database_name]["webpublishes"] - - -def start_webpublish_log(dbcon, batch_id, user): - """Start new log record for 'batch_id' - - Args: - dbcon (OpenPypeMongoConnection) - batch_id (str) - user (str) - Returns - (ObjectId) from DB - """ - return dbcon.insert_one({ - "batch_id": batch_id, - "start_date": datetime.now(), - "user": user, - "status": IN_PROGRESS_STATUS, - "progress": 0 # integer 0-100, percentage - }).inserted_id - - -def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): - """Loops through all plugins, logs ok and fails into OP DB. - - Args: - dbcon (OpenPypeMongoConnection) - _id (str) - id of current job in DB - log (openpype.lib.Logger) - batch_id (str) - id sent from frontend - close_plugin_name (str): name of plugin with responsibility to - close host app - """ - # Error exit as soon as any error occurs. - error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}\n" - error_format += "-" * 80 + "\n" - - close_plugin = find_close_plugin(close_plugin_name, log) - - if isinstance(_id, str): - _id = ObjectId(_id) - - log_lines = [] - processed = 0 - log_every = 5 - for result in pyblish.util.publish_iter(): - for record in result["records"]: - log_lines.append("{}: {}".format( - result["plugin"].label, record.msg)) - processed += 1 - - if result["error"]: - log.error(error_format.format(**result)) - log_lines = [error_format.format(**result)] + log_lines - dbcon.update_one( - {"_id": _id}, - {"$set": - { - "finish_date": datetime.now(), - "status": ERROR_STATUS, - "log": os.linesep.join(log_lines) - - }} - ) - if close_plugin: # close host app explicitly after error - context = pyblish.api.Context() - close_plugin().process(context) - return - elif processed % log_every == 0: - # pyblish returns progress in 0.0 - 2.0 - progress = min(round(result["progress"] / 2 * 100), 99) - dbcon.update_one( - {"_id": _id}, - {"$set": - { - "progress": progress, - "log": os.linesep.join(log_lines) - }} - ) - - # final update - if batch_id: - dbcon.update_many( - {"batch_id": batch_id, "status": SENT_REPROCESSING_STATUS}, - { - "$set": - { - "finish_date": datetime.now(), - "status": FINISHED_REPROCESS_STATUS, - } - } - ) - - dbcon.update_one( - {"_id": _id}, - { - "$set": - { - "finish_date": datetime.now(), - "status": FINISHED_OK_STATUS, - "progress": 100, - "log": os.linesep.join(log_lines) - } - } - ) - - -def fail_batch(_id, dbcon, msg): - """Set current batch as failed as there is some problem. 
- - Raises: - ValueError - """ - dbcon.update_one( - {"_id": _id}, - {"$set": - { - "finish_date": datetime.now(), - "status": ERROR_STATUS, - "log": msg - - }} - ) - raise ValueError(msg) - - -def find_variant_key(application_manager, host): - """Searches for latest installed variant for 'host' - - Args: - application_manager (ApplicationManager) - host (str) - Returns - (string) (optional) - Raises: - (ValueError) if no variant found - """ - app_group = application_manager.app_groups.get(host) - if not app_group or not app_group.enabled: - raise ValueError("No application {} configured".format(host)) - - found_variant_key = None - # finds most up-to-date variant if any installed - sorted_variants = collections.OrderedDict( - sorted(app_group.variants.items())) - for variant_key, variant in sorted_variants.items(): - for executable in variant.executables: - if executable.exists(): - found_variant_key = variant_key - - if not found_variant_key: - raise ValueError("No executable for {} found".format(host)) - - return found_variant_key - - -def get_task_data(batch_dir): - """Return parsed data from first task manifest.json - - Used for `publishfromapp` command where batch contains only - single task with publishable workfile. - - Returns: - (dict) - Throws: - (ValueError) if batch or task manifest not found or broken - """ - batch_data = parse_json(os.path.join(batch_dir, "manifest.json")) - if not batch_data: - raise ValueError( - "Cannot parse batch meta in {} folder".format(batch_dir)) - task_dir_name = batch_data["tasks"][0] - task_data = parse_json(os.path.join(batch_dir, task_dir_name, - "manifest.json")) - if not task_data: - raise ValueError( - "Cannot parse batch meta in {} folder".format(task_data)) - - return task_data - - -def get_timeout(project_name, host_name, task_type): - """Returns timeout(seconds) from Setting profile.""" - filter_data = { - "task_types": task_type, - "hosts": host_name - } - timeout_profiles = (get_project_settings(project_name)["webpublisher"] - ["timeout_profiles"]) - matching_item = filter_profiles(timeout_profiles, filter_data) - timeout = 3600 - if matching_item: - timeout = matching_item["timeout"] - - return timeout diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py deleted file mode 100644 index eb2737b276..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Parses batch context from json and continues in publish process. - -Provides: - context -> Loaded batch file. - - asset - - task (task name) - - taskType - - project_name - - variant -""" - -import os - -import pyblish.api - -from openpype.pipeline import legacy_io -from openpype_modules.webpublisher.lib import ( - parse_json, - get_batch_asset_task_info, - get_webpublish_conn, - IN_PROGRESS_STATUS -) - - -class CollectBatchData(pyblish.api.ContextPlugin): - """Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir. - - The directory must contain 'manifest.json' file where batch data should be - stored. 
- """ - # must be really early, context values are only in json file - order = pyblish.api.CollectorOrder - 0.495 - label = "Collect batch data" - hosts = ["webpublisher"] - - def process(self, context): - batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") - - assert batch_dir, ( - "Missing `OPENPYPE_PUBLISH_DATA`") - - assert os.path.exists(batch_dir), \ - "Folder {} doesn't exist".format(batch_dir) - - project_name = os.environ.get("AVALON_PROJECT") - if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` was not found." - "Could not set project `root` which may cause issues." - ) - - batch_data = parse_json(os.path.join(batch_dir, "manifest.json")) - - context.data["batchDir"] = batch_dir - context.data["batchData"] = batch_data - - asset_name, task_name, task_type = get_batch_asset_task_info( - batch_data["context"] - ) - - os.environ["AVALON_ASSET"] = asset_name - legacy_io.Session["AVALON_ASSET"] = asset_name - os.environ["AVALON_TASK"] = task_name - legacy_io.Session["AVALON_TASK"] = task_name - - context.data["asset"] = asset_name - context.data["task"] = task_name - context.data["taskType"] = task_type - context.data["project_name"] = project_name - context.data["variant"] = batch_data["variant"] - - self._set_ctx_path(batch_data) - - def _set_ctx_path(self, batch_data): - dbcon = get_webpublish_conn() - - batch_id = batch_data["batch"] - ctx_path = batch_data["context"]["path"] - self.log.info("ctx_path: {}".format(ctx_path)) - self.log.info("batch_id: {}".format(batch_id)) - if ctx_path and batch_id: - self.log.info("Updating log record") - dbcon.update_one( - { - "batch_id": batch_id, - "status": IN_PROGRESS_STATUS - }, - { - "$set": { - "path": ctx_path - } - } - ) diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_fps.py b/openpype/hosts/webpublisher/plugins/publish/collect_fps.py deleted file mode 100644 index b5e665c761..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/collect_fps.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -Requires: - Nothing - -Provides: - Instance -""" - -import pyblish.api -from pprint import pformat - - -class CollectFPS(pyblish.api.InstancePlugin): - """ - Adds fps from context to instance because of ExtractReview - """ - - label = "Collect fps" - order = pyblish.api.CollectorOrder + 0.49 - hosts = ["webpublisher"] - - def process(self, instance): - instance_fps = instance.data.get("fps") - if instance_fps is None: - instance.data["fps"] = instance.context.data["fps"] - - self.log.debug(f"instance.data: {pformat(instance.data)}") diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py deleted file mode 100644 index 6bb67ef260..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ /dev/null @@ -1,351 +0,0 @@ -"""Create instances from batch data and continues in publish process. - -Requires: - CollectBatchData - -Provides: - context, instances -> All data from previous publishing process. 
-""" - -import os -import clique -import tempfile -import math - -import pyblish.api - -from openpype.client import ( - get_asset_by_name, - get_last_version_by_subset_name -) -from openpype.lib import ( - prepare_template_data, - get_ffprobe_streams, - convert_ffprobe_fps_value, -) -from openpype.pipeline.create import get_subset_name -from openpype_modules.webpublisher.lib import parse_json -from openpype.pipeline.version_start import get_versioning_start - - -class CollectPublishedFiles(pyblish.api.ContextPlugin): - """ - This collector will try to find json files in provided - `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context. - - This covers 'basic' webpublishes, eg artists uses Standalone Publisher to - publish rendered frames or assets. - - This is not applicable for 'studio' processing where host application is - called to process uploaded workfile and render frames itself. - - For each task configure what properties should resulting instance have - based on uploaded files: - - uploading sequence of 'png' >> create instance of 'render' family, - by adding 'review' to 'Families' and 'Create review' to Tags it will - produce review. - - There might be difference between single(>>image) and sequence(>>render) - uploaded files. - """ - # must be really early, context values are only in json file - order = pyblish.api.CollectorOrder - 0.490 - label = "Collect rendered frames" - hosts = ["webpublisher"] - targets = ["filespublish"] - - # from Settings - task_type_to_family = [] - sync_next_version = False # find max version to be published, use for all - - def process(self, context): - batch_dir = context.data["batchDir"] - task_subfolders = [] - for folder_name in os.listdir(batch_dir): - full_path = os.path.join(batch_dir, folder_name) - if os.path.isdir(full_path): - task_subfolders.append(full_path) - - self.log.info("task_sub:: {}".format(task_subfolders)) - - project_name = context.data["project_name"] - asset_name = context.data["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - task_name = context.data["task"] - task_type = context.data["taskType"] - project_name = context.data["project_name"] - variant = context.data["variant"] - - next_versions = [] - instances = [] - for task_dir in task_subfolders: - task_data = parse_json(os.path.join(task_dir, - "manifest.json")) - self.log.info("task_data:: {}".format(task_data)) - - is_sequence = len(task_data["files"]) > 1 - first_file = task_data["files"][0] - - _, extension = os.path.splitext(first_file) - extension = extension.lower() - family, families, tags = self._get_family( - self.task_type_to_family, - task_type, - is_sequence, - extension.replace(".", '')) - - subset_name = get_subset_name( - family, - variant, - task_name, - asset_doc, - project_name=project_name, - host_name="webpublisher", - project_settings=context.data["project_settings"] - ) - version = self._get_next_version( - project_name, - asset_doc, - task_name, - task_type, - family, - subset_name, - context - ) - next_versions.append(version) - - instance = context.create_instance(subset_name) - instance.data["asset"] = asset_name - instance.data["subset"] = subset_name - # set configurable result family - instance.data["family"] = family - # set configurable additional families - instance.data["families"] = families - instance.data["version"] = version - instance.data["stagingDir"] = tempfile.mkdtemp() - instance.data["source"] = "webpublisher" - - # to convert from email provided into Ftrack username - instance.data["user_email"] = 
task_data["user"] - - if is_sequence: - instance.data["representations"] = self._process_sequence( - task_data["files"], task_dir, tags - ) - instance.data["frameStart"] = \ - instance.data["representations"][0]["frameStart"] - instance.data["frameEnd"] = \ - instance.data["representations"][0]["frameEnd"] - else: - frame_start = asset_doc["data"]["frameStart"] - instance.data["frameStart"] = frame_start - instance.data["frameEnd"] = asset_doc["data"]["frameEnd"] - instance.data["representations"] = self._get_single_repre( - task_dir, task_data["files"], tags - ) - if family != 'workfile': - file_url = os.path.join(task_dir, task_data["files"][0]) - try: - no_of_frames = self._get_number_of_frames(file_url) - if no_of_frames: - frame_end = ( - int(frame_start) + math.ceil(no_of_frames) - ) - frame_end = math.ceil(frame_end) - 1 - instance.data["frameEnd"] = frame_end - self.log.debug("frameEnd:: {}".format( - instance.data["frameEnd"])) - except Exception: - self.log.warning("Unable to count frames duration.") - - instance.data["handleStart"] = asset_doc["data"]["handleStart"] - instance.data["handleEnd"] = asset_doc["data"]["handleEnd"] - - if "review" in tags: - first_file_path = os.path.join(task_dir, first_file) - instance.data["thumbnailSource"] = first_file_path - - instances.append(instance) - self.log.info("instance.data:: {}".format(instance.data)) - - if not self.sync_next_version: - return - - # overwrite specific version with same version for all - max_next_version = max(next_versions) - for inst in instances: - inst.data["version"] = max_next_version - self.log.debug("overwritten version:: {}".format(max_next_version)) - - def _get_subset_name(self, family, subset_template, task_name, variant): - fill_pairs = { - "variant": variant, - "family": family, - "task": task_name - } - subset = subset_template.format(**prepare_template_data(fill_pairs)) - return subset - - def _get_single_repre(self, task_dir, files, tags): - _, ext = os.path.splitext(files[0]) - ext = ext.lower() - repre_data = { - "name": ext[1:], - "ext": ext[1:], - "files": files[0], - "stagingDir": task_dir, - "tags": tags - } - self.log.info("single file repre_data.data:: {}".format(repre_data)) - return [repre_data] - - def _process_sequence(self, files, task_dir, tags): - """Prepare representation for sequence of files.""" - collections, remainder = clique.assemble(files) - assert len(collections) == 1, \ - "Too many collections in {}".format(files) - - frame_start = list(collections[0].indexes)[0] - frame_end = list(collections[0].indexes)[-1] - ext = collections[0].tail - ext = ext.lower() - repre_data = { - "frameStart": frame_start, - "frameEnd": frame_end, - "name": ext[1:], - "ext": ext[1:], - "files": files, - "stagingDir": task_dir, - "tags": tags # configurable tags from Settings - } - self.log.info("sequences repre_data.data:: {}".format(repre_data)) - return [repre_data] - - def _get_family(self, settings, task_type, is_sequence, extension): - """Guess family based on input data. - - Args: - settings (dict): configuration per task_type - task_type (str): Animation|Art etc - is_sequence (bool): single file or sequence - extension (str): without '.' 
-
-        Returns:
-            (family, [families], tags) tuple
-
-        Raises:
-            AssertionError: if no matching family is found
-        """
-        task_type = task_type.lower()
-        lower_cased_task_types = {}
-        for t_type, task in settings.items():
-            lower_cased_task_types[t_type.lower()] = task
-        task_obj = lower_cased_task_types.get(task_type)
-        assert task_obj, "No family configuration for '{}'".format(task_type)
-
-        found_family = None
-        families_config = []
-        # backward compatibility, should be removed pretty soon
-        if isinstance(task_obj, dict):
-            for family, config in task_obj.items():
-                config["result_family"] = family
-                families_config.append(config)
-        else:
-            families_config = task_obj
-
-        for config in families_config:
-            if is_sequence != config["is_sequence"]:
-                continue
-            extensions = config.get("extensions") or []
-            lower_extensions = set()
-            for ext in extensions:
-                if ext:
-                    ext = ext.lower()
-                    if ext.startswith("."):
-                        ext = ext[1:]
-                    lower_extensions.add(ext)
-
-            # all extensions setting
-            if not lower_extensions or extension in lower_extensions:
-                found_family = config["result_family"]
-                break
-
-        msg = "No family found for combination of " +\
-              "task_type: {}, is_sequence:{}, extension: {}".format(
-                  task_type, is_sequence, extension)
-        assert found_family, msg
-
-        return (found_family,
-                config["families"],
-                config["tags"])
-
-    def _get_next_version(
-        self,
-        project_name,
-        asset_doc,
-        task_name,
-        task_type,
-        family,
-        subset_name,
-        context
-    ):
-        """Returns version number or 1 for 'asset' and 'subset'"""
-
-        version_doc = get_last_version_by_subset_name(
-            project_name,
-            subset_name,
-            asset_doc["_id"],
-            fields=["name"]
-        )
-        if version_doc:
-            version = int(version_doc["name"]) + 1
-        else:
-            version = get_versioning_start(
-                project_name,
-                "webpublisher",
-                task_name=task_name,
-                task_type=task_type,
-                family=family,
-                subset=subset_name,
-                project_settings=context.data["project_settings"]
-            )
-
-        return version
-
-    def _get_number_of_frames(self, file_url):
-        """Return duration in frames"""
-        try:
-            streams = get_ffprobe_streams(file_url, self.log)
-        except Exception as exc:
-            raise AssertionError((
-                "FFprobe couldn't read information about input file: \"{}\"."
- " Error message: {}" - ).format(file_url, str(exc))) - - first_video_stream = None - for stream in streams: - if "width" in stream and "height" in stream: - first_video_stream = stream - break - - if first_video_stream: - nb_frames = stream.get("nb_frames") - if nb_frames: - try: - return int(nb_frames) - except ValueError: - self.log.warning( - "nb_frames {} not convertible".format(nb_frames)) - - duration = stream.get("duration") - frame_rate = convert_ffprobe_fps_value( - stream.get("r_frame_rate", '0/0') - ) - self.log.debug("duration:: {} frame_rate:: {}".format( - duration, frame_rate)) - try: - return float(duration) * float(frame_rate) - except ValueError: - self.log.warning( - "{} or {} cannot be converted".format(duration, - frame_rate)) - - self.log.warning("Cannot get number of frames") diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py deleted file mode 100644 index 948e86c23e..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py +++ /dev/null @@ -1,259 +0,0 @@ -""" -Requires: - CollectTVPaintWorkfileData - -Provides: - Instances -""" -import os -import re -import copy -import pyblish.api - -from openpype.pipeline.create import get_subset_name - - -class CollectTVPaintInstances(pyblish.api.ContextPlugin): - label = "Collect TVPaint Instances" - order = pyblish.api.CollectorOrder + 0.2 - hosts = ["webpublisher"] - targets = ["tvpaint_worker"] - - workfile_family = "workfile" - workfile_variant = "" - review_family = "review" - review_variant = "Main" - render_pass_family = "renderPass" - render_layer_family = "renderLayer" - render_layer_pass_name = "beauty" - - # Set by settings - # Regex must contain 'layer' and 'variant' groups which are extracted from - # name when instances are created - layer_name_regex = r"(?PL[0-9]{3}_\w+)_(?P.+)" - - def process(self, context): - # Prepare compiled regex - layer_name_regex = re.compile(self.layer_name_regex) - - layers_data = context.data["layersData"] - - host_name = "tvpaint" - task_name = context.data.get("task") - asset_doc = context.data["assetEntity"] - project_doc = context.data["projectEntity"] - project_name = project_doc["name"] - - new_instances = [] - - # Workfile instance - workfile_subset_name = get_subset_name( - self.workfile_family, - self.workfile_variant, - task_name, - asset_doc, - project_name, - host_name, - project_settings=context.data["project_settings"] - ) - workfile_instance = self._create_workfile_instance( - context, workfile_subset_name - ) - new_instances.append(workfile_instance) - - # Review instance - review_subset_name = get_subset_name( - self.review_family, - self.review_variant, - task_name, - asset_doc, - project_name, - host_name, - project_settings=context.data["project_settings"] - ) - review_instance = self._create_review_instance( - context, review_subset_name - ) - new_instances.append(review_instance) - - # Get render layers and passes from TVPaint layers - # - it's based on regex extraction - layers_by_layer_and_pass = {} - for layer in layers_data: - # Filter only visible layers - if not layer["visible"]: - continue - - result = layer_name_regex.search(layer["name"]) - # Layer name not matching layer name regex - # should raise an exception? 
- if result is None: - continue - render_layer = result.group("layer") - render_pass = result.group("pass") - - render_pass_maping = layers_by_layer_and_pass.get( - render_layer - ) - if render_pass_maping is None: - render_pass_maping = {} - layers_by_layer_and_pass[render_layer] = render_pass_maping - - if render_pass not in render_pass_maping: - render_pass_maping[render_pass] = [] - render_pass_maping[render_pass].append(copy.deepcopy(layer)) - - layers_by_render_layer = {} - for render_layer, render_passes in layers_by_layer_and_pass.items(): - render_layer_layers = [] - layers_by_render_layer[render_layer] = render_layer_layers - for render_pass, layers in render_passes.items(): - render_layer_layers.extend(copy.deepcopy(layers)) - dynamic_data = { - "render_pass": render_pass, - "render_layer": render_layer, - # Override family for subset name - "family": "render" - } - - subset_name = get_subset_name( - self.render_pass_family, - render_pass, - task_name, - asset_doc, - project_name, - host_name, - dynamic_data=dynamic_data, - project_settings=context.data["project_settings"] - ) - - instance = self._create_render_pass_instance( - context, layers, subset_name - ) - new_instances.append(instance) - - for render_layer, layers in layers_by_render_layer.items(): - variant = render_layer - dynamic_data = { - "render_pass": self.render_layer_pass_name, - "render_layer": render_layer, - # Override family for subset name - "family": "render" - } - subset_name = get_subset_name( - self.render_layer_family, - variant, - task_name, - asset_doc, - project_name, - host_name, - dynamic_data=dynamic_data, - project_settings=context.data["project_settings"] - ) - instance = self._create_render_layer_instance( - context, layers, subset_name - ) - new_instances.append(instance) - - # Set data same for all instances - frame_start = context.data.get("frameStart") - frame_end = context.data.get("frameEnd") - - for instance in new_instances: - if ( - instance.data.get("frameStart") is None - or instance.data.get("frameEnd") is None - ): - instance.data["frameStart"] = frame_start - instance.data["frameEnd"] = frame_end - - if instance.data.get("asset") is None: - instance.data["asset"] = asset_doc["name"] - - if instance.data.get("task") is None: - instance.data["task"] = task_name - - if "representations" not in instance.data: - instance.data["representations"] = [] - - if "source" not in instance.data: - instance.data["source"] = "webpublisher" - - def _create_workfile_instance(self, context, subset_name): - workfile_path = context.data["workfilePath"] - staging_dir = os.path.dirname(workfile_path) - filename = os.path.basename(workfile_path) - ext = os.path.splitext(filename)[-1] - - return context.create_instance(**{ - "name": subset_name, - "label": subset_name, - "subset": subset_name, - "family": self.workfile_family, - "families": [], - "stagingDir": staging_dir, - "representations": [{ - "name": ext.lstrip("."), - "ext": ext.lstrip("."), - "files": filename, - "stagingDir": staging_dir - }] - }) - - def _create_review_instance(self, context, subset_name): - staging_dir = self._create_staging_dir(context, subset_name) - layers_data = context.data["layersData"] - # Filter hidden layers - filtered_layers_data = [ - copy.deepcopy(layer) - for layer in layers_data - if layer["visible"] - ] - return context.create_instance(**{ - "name": subset_name, - "label": subset_name, - "subset": subset_name, - "family": self.review_family, - "families": [], - "layers": filtered_layers_data, - "stagingDir": 
staging_dir - }) - - def _create_render_pass_instance(self, context, layers, subset_name): - staging_dir = self._create_staging_dir(context, subset_name) - # Global instance data modifications - # Fill families - return context.create_instance(**{ - "name": subset_name, - "subset": subset_name, - "label": subset_name, - "family": "render", - # Add `review` family for thumbnail integration - "families": [self.render_pass_family, "review"], - "representations": [], - "layers": layers, - "stagingDir": staging_dir - }) - - def _create_render_layer_instance(self, context, layers, subset_name): - staging_dir = self._create_staging_dir(context, subset_name) - # Global instance data modifications - # Fill families - return context.create_instance(**{ - "name": subset_name, - "subset": subset_name, - "label": subset_name, - "family": "render", - # Add `review` family for thumbnail integration - "families": [self.render_layer_family, "review"], - "representations": [], - "layers": layers, - "stagingDir": staging_dir - }) - - def _create_staging_dir(self, context, subset_name): - context_staging_dir = context.data["contextStagingDir"] - staging_dir = os.path.join(context_staging_dir, subset_name) - if not os.path.exists(staging_dir): - os.makedirs(staging_dir) - return staging_dir diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py deleted file mode 100644 index b5f8ed9c8f..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -Requires: - CollectPublishedFiles - CollectModules - -Provides: - workfilePath - Path to tvpaint workfile - sceneData - Scene data loaded from the workfile - groupsData - - layersData - layersExposureFrames - layersPrePostBehavior -""" -import os -import uuid -import json -import shutil -import pyblish.api -from openpype.hosts.tvpaint.worker import ( - SenderTVPaintCommands, - CollectSceneData -) -from openpype_modules.webpublisher.lib import parse_json - - -class CollectTVPaintWorkfileData(pyblish.api.ContextPlugin): - label = "Collect TVPaint Workfile data" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["webpublisher"] - targets = ["tvpaint_worker"] - - def process(self, context): - # Get JobQueue module - modules = context.data["openPypeModules"] - job_queue_module = modules["job_queue"] - jobs_root = job_queue_module.get_jobs_root() - if not jobs_root: - raise ValueError("Job Queue root is not set.") - - context.data["jobsRoot"] = jobs_root - - context_staging_dir = self._create_context_staging_dir(jobs_root) - workfile_path = self._extract_workfile_path( - context, context_staging_dir - ) - context.data["contextStagingDir"] = context_staging_dir - context.data["workfilePath"] = workfile_path - - # Prepare tvpaint command - collect_scene_data_command = CollectSceneData() - # Create TVPaint sender commands - commands = SenderTVPaintCommands(workfile_path, job_queue_module) - commands.add_command(collect_scene_data_command) - - # Send job and wait for answer - commands.send_job_and_wait() - - collected_data = collect_scene_data_command.result() - layers_data = collected_data["layers_data"] - groups_data = collected_data["groups_data"] - scene_data = collected_data["scene_data"] - exposure_frames_by_layer_id = ( - collected_data["exposure_frames_by_layer_id"] - ) - pre_post_beh_by_layer_id = ( - collected_data["pre_post_beh_by_layer_id"] - ) - - # Store results - # scene data 
store the same way as the TVPaint collector does
-        scene_data = {
-            "sceneWidth": scene_data["width"],
-            "sceneHeight": scene_data["height"],
-            "scenePixelAspect": scene_data["pixel_aspect"],
-            "sceneFps": scene_data["fps"],
-            "sceneFieldOrder": scene_data["field_order"],
-            "sceneMarkIn": scene_data["mark_in"],
-            # scene_data["mark_in_state"],
-            "sceneMarkInState": scene_data["mark_in_set"],
-            "sceneMarkOut": scene_data["mark_out"],
-            # scene_data["mark_out_state"],
-            "sceneMarkOutState": scene_data["mark_out_set"],
-            "sceneStartFrame": scene_data["start_frame"],
-            "sceneBgColor": scene_data["bg_color"]
-        }
-        context.data["sceneData"] = scene_data
-        # Store only raw data
-        context.data["groupsData"] = groups_data
-        context.data["layersData"] = layers_data
-        context.data["layersExposureFrames"] = exposure_frames_by_layer_id
-        context.data["layersPrePostBehavior"] = pre_post_beh_by_layer_id
-
-        self.log.debug(
-            (
-                "Collected data"
-                "\nScene data: {}"
-                "\nLayers data: {}"
-                "\nExposure frames: {}"
-                "\nPre/Post behavior: {}"
-            ).format(
-                json.dumps(scene_data, indent=4),
-                json.dumps(layers_data, indent=4),
-                json.dumps(exposure_frames_by_layer_id, indent=4),
-                json.dumps(pre_post_beh_by_layer_id, indent=4)
-            )
-        )
-
-    def _create_context_staging_dir(self, jobs_root):
-        if not os.path.exists(jobs_root):
-            os.makedirs(jobs_root)
-
-        random_folder_name = str(uuid.uuid4())
-        full_path = os.path.join(jobs_root, random_folder_name)
-        if not os.path.exists(full_path):
-            os.makedirs(full_path)
-        return full_path
-
-    def _extract_workfile_path(self, context, context_staging_dir):
-        """Find the first TVPaint file in the batch tasks and use it."""
-        batch_dir = context.data["batchDir"]
-        batch_data = context.data["batchData"]
-        src_workfile_path = None
-        for task_id in batch_data["tasks"]:
-            if src_workfile_path is not None:
-                break
-            task_dir = os.path.join(batch_dir, task_id)
-            task_manifest_path = os.path.join(task_dir, "manifest.json")
-            task_data = parse_json(task_manifest_path)
-            task_files = task_data["files"]
-            for filename in task_files:
-                _, ext = os.path.splitext(filename)
-                if ext.lower() == ".tvpp":
-                    src_workfile_path = os.path.join(task_dir, filename)
-                    break
-
-        # Copy workfile to job queue work root
-        new_workfile_path = os.path.join(
-            context_staging_dir, os.path.basename(src_workfile_path)
-        )
-        shutil.copy(src_workfile_path, new_workfile_path)
-
-        return new_workfile_path
diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py b/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py
deleted file mode 100644
index 2142d740a5..0000000000
--- a/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py
+++ /dev/null
@@ -1,535 +0,0 @@
-import os
-import copy
-
-from openpype.hosts.tvpaint.worker import (
-    SenderTVPaintCommands,
-    ExecuteSimpleGeorgeScript,
-    ExecuteGeorgeScript
-)
-
-import pyblish.api
-from openpype.hosts.tvpaint.lib import (
-    calculate_layers_extraction_data,
-    get_frame_filename_template,
-    fill_reference_frames,
-    composite_rendered_layers,
-    rename_filepaths_by_frame_start
-)
-from PIL import Image
-
-
-class ExtractTVPaintSequences(pyblish.api.Extractor):
-    label = "Extract TVPaint Sequences"
-    hosts = ["webpublisher"]
-    targets = ["tvpaint_worker"]
-
-    # Context plugin does not have families filtering
-    families_filter = ["review", "renderPass", "renderLayer"]
-
-    job_queue_root_key = "jobs_root"
-
-    # Modifiable with settings
-    review_bg = [255, 255, 255, 255]
-
-    def process(self, context):
-        # Get workfile path
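For orientation, the scene data this extractor reads below is the remapped
dictionary stored on the context by CollectTVPaintWorkfileData above; with
purely illustrative values it looks roughly like this:

    context.data["sceneData"] = {
        "sceneWidth": 1920,
        "sceneHeight": 1080,
        "scenePixelAspect": 1.0,
        "sceneFps": 25.0,
        "sceneFieldOrder": "none",
        "sceneMarkIn": 0,
        "sceneMarkInState": True,
        "sceneMarkOut": 99,
        "sceneMarkOutState": True,
        "sceneStartFrame": 0,
        "sceneBgColor": ["color", 255, 255, 255],
    }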
workfile_path = context.data["workfilePath"] - jobs_root = context.data["jobsRoot"] - jobs_root_slashed = jobs_root.replace("\\", "/") - - # Prepare scene data - scene_data = context.data["sceneData"] - scene_mark_in = scene_data["sceneMarkIn"] - scene_mark_out = scene_data["sceneMarkOut"] - scene_start_frame = scene_data["sceneStartFrame"] - scene_bg_color = scene_data["sceneBgColor"] - - # Prepare layers behavior - behavior_by_layer_id = context.data["layersPrePostBehavior"] - exposure_frames_by_layer_id = context.data["layersExposureFrames"] - - # Handles are not stored per instance but on Context - handle_start = context.data["handleStart"] - handle_end = context.data["handleEnd"] - - # Get JobQueue module - modules = context.data["openPypeModules"] - job_queue_module = modules["job_queue"] - - tvpaint_commands = SenderTVPaintCommands( - workfile_path, job_queue_module - ) - - # Change scene Start Frame to 0 to prevent frame index issues - # - issue is that TVPaint versions deal with frame indexes in a - # different way when Start Frame is not `0` - # NOTE It will be set back after rendering - tvpaint_commands.add_command( - ExecuteSimpleGeorgeScript("tv_startframe 0") - ) - - root_key_replacement = "{" + self.job_queue_root_key + "}" - after_render_instances = [] - for instance in context: - instance_families = set(instance.data.get("families", [])) - instance_families.add(instance.data["family"]) - valid = False - for family in instance_families: - if family in self.families_filter: - valid = True - break - - if not valid: - continue - - self.log.info("* Preparing commands for instance \"{}\"".format( - instance.data["label"] - )) - # Get all layers and filter out not visible - layers = instance.data["layers"] - filtered_layers = [layer for layer in layers if layer["visible"]] - if not filtered_layers: - self.log.info( - "None of the layers from the instance" - " are visible. Extraction skipped." - ) - continue - - joined_layer_names = ", ".join([ - "\"{}\"".format(str(layer["name"])) - for layer in filtered_layers - ]) - self.log.debug( - "Instance has {} layers with names: {}".format( - len(filtered_layers), joined_layer_names - ) - ) - - # Staging dir must be created during collection - staging_dir = instance.data["stagingDir"].replace("\\", "/") - - job_root_template = staging_dir.replace( - jobs_root_slashed, root_key_replacement - ) - - # Frame start/end may be stored as float - frame_start = int(instance.data["frameStart"]) - frame_end = int(instance.data["frameEnd"]) - - # Prepare output frames - output_frame_start = frame_start - handle_start - output_frame_end = frame_end + handle_end - - # Change output frame start to 0 if handles cause it's negative - # number - if output_frame_start < 0: - self.log.warning(( - "Frame start with handles has negative value." - " Changed to \"0\". 
Frames start: {}, Handle Start: {}" - ).format(frame_start, handle_start)) - output_frame_start = 0 - - # Create copy of scene Mark In/Out - mark_in, mark_out = scene_mark_in, scene_mark_out - - # Fix possible changes of output frame - mark_out, output_frame_end = self._fix_range_changes( - mark_in, mark_out, output_frame_start, output_frame_end - ) - filename_template = get_frame_filename_template( - max(scene_mark_out, output_frame_end) - ) - - # ----------------------------------------------------------------- - self.log.debug( - "Files will be rendered to folder: {}".format(staging_dir) - ) - - output_filepaths_by_frame_idx = {} - for frame_idx in range(mark_in, mark_out + 1): - filename = filename_template.format(frame=frame_idx) - filepath = os.path.join(staging_dir, filename) - output_filepaths_by_frame_idx[frame_idx] = filepath - - # Prepare data for post render processing - post_render_data = { - "output_dir": staging_dir, - "layers": filtered_layers, - "output_filepaths_by_frame_idx": output_filepaths_by_frame_idx, - "instance": instance, - "is_layers_render": False, - "output_frame_start": output_frame_start, - "output_frame_end": output_frame_end - } - # Store them to list - after_render_instances.append(post_render_data) - - # Review rendering - if instance.data["family"] == "review": - self.add_render_review_command( - tvpaint_commands, mark_in, mark_out, scene_bg_color, - job_root_template, filename_template - ) - continue - - # Layers rendering - extraction_data_by_layer_id = calculate_layers_extraction_data( - filtered_layers, - exposure_frames_by_layer_id, - behavior_by_layer_id, - mark_in, - mark_out - ) - filepaths_by_layer_id = self.add_render_command( - tvpaint_commands, - job_root_template, - staging_dir, - filtered_layers, - extraction_data_by_layer_id - ) - # Add more data to post render processing - post_render_data.update({ - "is_layers_render": True, - "extraction_data_by_layer_id": extraction_data_by_layer_id, - "filepaths_by_layer_id": filepaths_by_layer_id - }) - - # Change scene frame Start back to previous value - tvpaint_commands.add_command( - ExecuteSimpleGeorgeScript( - "tv_startframe {}".format(scene_start_frame) - ) - ) - self.log.info("Sending the job and waiting for response...") - tvpaint_commands.send_job_and_wait() - self.log.info("Render job finished") - - for post_render_data in after_render_instances: - self._post_render_processing(post_render_data, mark_in, mark_out) - - def _fix_range_changes( - self, mark_in, mark_out, output_frame_start, output_frame_end - ): - # Check Marks range and output range - output_range = output_frame_end - output_frame_start - marks_range = mark_out - mark_in - - # Lower Mark Out if mark range is bigger than output - # - do not rendered not used frames - if output_range < marks_range: - new_mark_out = mark_out - (marks_range - output_range) - self.log.warning(( - "Lowering render range to {} frames. Changed Mark Out {} -> {}" - ).format(marks_range + 1, mark_out, new_mark_out)) - # Assign new mark out to variable - mark_out = new_mark_out - - # Lower output frame end so representation has right `frameEnd` value - elif output_range > marks_range: - new_output_frame_end = ( - output_frame_end - (output_range - marks_range) - ) - self.log.warning(( - "Lowering representation range to {} frames." 
- " Changed frame end {} -> {}" - ).format(output_range + 1, mark_out, new_output_frame_end)) - output_frame_end = new_output_frame_end - return mark_out, output_frame_end - - def _post_render_processing(self, post_render_data, mark_in, mark_out): - # Unpack values - instance = post_render_data["instance"] - output_filepaths_by_frame_idx = ( - post_render_data["output_filepaths_by_frame_idx"] - ) - is_layers_render = post_render_data["is_layers_render"] - output_dir = post_render_data["output_dir"] - layers = post_render_data["layers"] - output_frame_start = post_render_data["output_frame_start"] - output_frame_end = post_render_data["output_frame_end"] - - # Trigger post processing of layers rendering - # - only few frames were rendered this will complete the sequence - # - multiple layers can be in single instance they must be composite - # over each other - if is_layers_render: - self._finish_layer_render( - layers, - post_render_data["extraction_data_by_layer_id"], - post_render_data["filepaths_by_layer_id"], - mark_in, - mark_out, - output_filepaths_by_frame_idx - ) - - # Create thumbnail - thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") - thumbnail_src_path = output_filepaths_by_frame_idx[mark_in] - self._create_thumbnail(thumbnail_src_path, thumbnail_filepath) - - # Rename filepaths to final frames - repre_files = self._rename_output_files( - output_filepaths_by_frame_idx, - mark_in, - mark_out, - output_frame_start - ) - - # Fill tags and new families - family_lowered = instance.data["family"].lower() - tags = [] - if family_lowered in ("review", "renderlayer"): - tags.append("review") - - # Sequence of one frame - single_file = len(repre_files) == 1 - if single_file: - repre_files = repre_files[0] - - # Extension is hardcoded - # - changing extension would require change code - new_repre = { - "name": "png", - "ext": "png", - "files": repre_files, - "stagingDir": output_dir, - "tags": tags - } - - if not single_file: - new_repre["frameStart"] = output_frame_start - new_repre["frameEnd"] = output_frame_end - - self.log.debug("Creating new representation: {}".format(new_repre)) - - instance.data["representations"].append(new_repre) - - if family_lowered in ("renderpass", "renderlayer"): - # Change family to render - instance.data["family"] = "render" - - thumbnail_ext = os.path.splitext(thumbnail_filepath)[1] - # Create thumbnail representation - thumbnail_repre = { - "name": "thumbnail", - "ext": thumbnail_ext.replace(".", ""), - "outputName": "thumb", - "files": os.path.basename(thumbnail_filepath), - "stagingDir": output_dir, - "tags": ["thumbnail"] - } - instance.data["representations"].append(thumbnail_repre) - - def _rename_output_files( - self, filepaths_by_frame, mark_in, mark_out, output_frame_start - ): - new_filepaths_by_frame = rename_filepaths_by_frame_start( - filepaths_by_frame, mark_in, mark_out, output_frame_start - ) - - repre_filenames = [] - for filepath in new_filepaths_by_frame.values(): - repre_filenames.append(os.path.basename(filepath)) - - if mark_in < output_frame_start: - repre_filenames = list(reversed(repre_filenames)) - - return repre_filenames - - def add_render_review_command( - self, - tvpaint_commands, - mark_in, - mark_out, - scene_bg_color, - job_root_template, - filename_template - ): - """ Export images from TVPaint using `tv_savesequence` command. - - Args: - output_dir (str): Directory where files will be stored. - mark_in (int): Starting frame index from which export will begin. 
- mark_out (int): On which frame index export will end. - scene_bg_color (list): Bg color set in scene. Result of george - script command `tv_background`. - """ - self.log.debug("Preparing data for rendering.") - bg_color = self._get_review_bg_color() - first_frame_filepath = "/".join([ - job_root_template, - filename_template.format(frame=mark_in) - ]) - - george_script_lines = [ - # Change bg color to color from settings - "tv_background \"color\" {} {} {}".format(*bg_color), - "tv_SaveMode \"PNG\"", - "export_path = \"{}\"".format( - first_frame_filepath.replace("\\", "/") - ), - "tv_savesequence '\"'export_path'\"' {} {}".format( - mark_in, mark_out - ) - ] - if scene_bg_color: - # Change bg color back to previous scene bg color - _scene_bg_color = copy.deepcopy(scene_bg_color) - bg_type = _scene_bg_color.pop(0) - orig_color_command = [ - "tv_background", - "\"{}\"".format(bg_type) - ] - orig_color_command.extend(_scene_bg_color) - - george_script_lines.append(" ".join(orig_color_command)) - - tvpaint_commands.add_command( - ExecuteGeorgeScript( - george_script_lines, - root_dir_key=self.job_queue_root_key - ) - ) - - def add_render_command( - self, - tvpaint_commands, - job_root_template, - staging_dir, - layers, - extraction_data_by_layer_id - ): - """ Export images from TVPaint. - - Args: - output_dir (str): Directory where files will be stored. - mark_in (int): Starting frame index from which export will begin. - mark_out (int): On which frame index export will end. - layers (list): List of layers to be exported. - - Returns: - tuple: With 2 items first is list of filenames second is path to - thumbnail. - """ - # Map layers by position - layers_by_id = { - layer["layer_id"]: layer - for layer in layers - } - - # Render layers - filepaths_by_layer_id = {} - for layer_id, render_data in extraction_data_by_layer_id.items(): - layer = layers_by_id[layer_id] - frame_references = render_data["frame_references"] - filenames_by_frame_index = render_data["filenames_by_frame_index"] - - filepaths_by_frame = {} - command_filepath_by_frame = {} - for frame_idx, ref_idx in frame_references.items(): - # None reference is skipped because does not have source - if ref_idx is None: - filepaths_by_frame[frame_idx] = None - continue - filename = filenames_by_frame_index[frame_idx] - - filepaths_by_frame[frame_idx] = os.path.join( - staging_dir, filename - ) - if frame_idx == ref_idx: - command_filepath_by_frame[frame_idx] = "/".join( - [job_root_template, filename] - ) - - self._add_render_layer_command( - tvpaint_commands, layer, command_filepath_by_frame - ) - filepaths_by_layer_id[layer_id] = filepaths_by_frame - - return filepaths_by_layer_id - - def _add_render_layer_command( - self, tvpaint_commands, layer, filepaths_by_frame - ): - george_script_lines = [ - # Set current layer by position - "tv_layergetid {}".format(layer["position"]), - "layer_id = result", - "tv_layerset layer_id", - "tv_SaveMode \"PNG\"" - ] - - for frame_idx, filepath in filepaths_by_frame.items(): - if filepath is None: - continue - - # Go to frame - george_script_lines.append("tv_layerImage {}".format(frame_idx)) - # Store image to output - george_script_lines.append( - "tv_saveimage \"{}\"".format(filepath.replace("\\", "/")) - ) - - tvpaint_commands.add_command( - ExecuteGeorgeScript( - george_script_lines, - root_dir_key=self.job_queue_root_key - ) - ) - - def _finish_layer_render( - self, - layers, - extraction_data_by_layer_id, - filepaths_by_layer_id, - mark_in, - mark_out, - output_filepaths_by_frame_idx - ): - # 
Fill frames between `frame_start_index` and `frame_end_index`
-        self.log.debug("Filling frames that were not rendered.")
-        for layer_id, render_data in extraction_data_by_layer_id.items():
-            frame_references = render_data["frame_references"]
-            filepaths_by_frame = filepaths_by_layer_id[layer_id]
-            fill_reference_frames(frame_references, filepaths_by_frame)
-
-        # Prepare final filepaths where compositing should store result
-        self.log.info("Started compositing of layer frames.")
-        composite_rendered_layers(
-            layers, filepaths_by_layer_id,
-            mark_in, mark_out,
-            output_filepaths_by_frame_idx
-        )
-
-    def _create_thumbnail(self, thumbnail_src_path, thumbnail_filepath):
-        if not os.path.exists(thumbnail_src_path):
-            return
-
-        source_img = Image.open(thumbnail_src_path)
-
-        # Composite background only on rgba images
-        # - just making sure
-        if source_img.mode.lower() == "rgba":
-            bg_color = self._get_review_bg_color()
-            self.log.debug("Adding thumbnail background color {}.".format(
-                " ".join([str(val) for val in bg_color])
-            ))
-            bg_image = Image.new("RGBA", source_img.size, bg_color)
-            thumbnail_obj = Image.alpha_composite(bg_image, source_img)
-            thumbnail_obj.convert("RGB").save(thumbnail_filepath)
-
-        else:
-            self.log.info((
-                "Source for thumbnail has mode \"{}\" (Expected: RGBA)."
-                " Can't use thumbnail background color."
-            ).format(source_img.mode))
-            source_img.save(thumbnail_filepath)
-
-    def _get_review_bg_color(self):
-        red = green = blue = 255
-        if self.review_bg:
-            if len(self.review_bg) == 4:
-                red, green, blue, _ = self.review_bg
-            elif len(self.review_bg) == 3:
-                red, green, blue = self.review_bg
-        return (red, green, blue)
diff --git a/openpype/hosts/webpublisher/plugins/publish/others_cleanup_job_root.py b/openpype/hosts/webpublisher/plugins/publish/others_cleanup_job_root.py
deleted file mode 100644
index fc5cd1ea9a..0000000000
--- a/openpype/hosts/webpublisher/plugins/publish/others_cleanup_job_root.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Cleanup leftover files from publish."""
-import os
-import shutil
-import pyblish.api
-
-
-class CleanUpJobRoot(pyblish.api.ContextPlugin):
-    """Cleans up the job root directory after a successful publish.
-
-    Removes all files in the job root, as all of them should have been
-    published.
-    """
-
-    order = pyblish.api.IntegratorOrder + 1
-    label = "Clean Up Job Root"
-    optional = True
-    active = True
-
-    def process(self, context):
-        context_staging_dir = context.data.get("contextStagingDir")
-        if not context_staging_dir:
-            self.log.info("Key 'contextStagingDir' is empty.")
-
-        elif not os.path.exists(context_staging_dir):
-            self.log.info((
-                "Job root directory for this publish does not"
-                " exist anymore \"{}\"."
-            ).format(context_staging_dir))
-        else:
-            self.log.info("Deleting job root with all files.")
-            shutil.rmtree(context_staging_dir)
diff --git a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py
deleted file mode 100644
index d8b7bb9078..0000000000
--- a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pyblish.api
-
-
-class ValidateWorkfileData(pyblish.api.ContextPlugin):
-    """Validate that mark in and out are enabled and check their duration.
-
-    Mark In/Out does not have to match frameStart and frameEnd, but the
-    duration is important.
- """ - - label = "Validate Workfile Data" - order = pyblish.api.ValidatorOrder - targets = ["tvpaint_worker"] - - def process(self, context): - # Data collected in `CollectContextEntities` - frame_start = context.data["frameStart"] - frame_end = context.data["frameEnd"] - handle_start = context.data["handleStart"] - handle_end = context.data["handleEnd"] - - scene_data = context.data["sceneData"] - scene_mark_in = scene_data["sceneMarkIn"] - scene_mark_out = scene_data["sceneMarkOut"] - - expected_range = ( - (frame_end - frame_start + 1) - + handle_start - + handle_end - ) - marks_range = scene_mark_out - scene_mark_in + 1 - if expected_range != marks_range: - raise AssertionError(( - "Wrong Mark In/Out range." - " Expected range is {} frames got {} frames" - ).format(expected_range, marks_range)) diff --git a/openpype/hosts/webpublisher/publish_functions.py b/openpype/hosts/webpublisher/publish_functions.py deleted file mode 100644 index f5dc88f54d..0000000000 --- a/openpype/hosts/webpublisher/publish_functions.py +++ /dev/null @@ -1,206 +0,0 @@ -import os -import time -import pyblish.api -import pyblish.util - -from openpype.lib import Logger -from openpype.lib.applications import ( - ApplicationManager, - LaunchTypes, -) -from openpype.pipeline import install_host -from openpype.hosts.webpublisher.api import WebpublisherHost - -from .lib import ( - get_batch_asset_task_info, - get_webpublish_conn, - start_webpublish_log, - publish_and_log, - fail_batch, - find_variant_key, - get_task_data, - get_timeout, - IN_PROGRESS_STATUS -) - - -def cli_publish(project_name, batch_path, user_email, targets): - """Start headless publishing. - - Used to publish rendered assets, workfiles etc via Webpublisher. - Eventually should be yanked out to Webpublisher cli. - - Publish use json from passed paths argument. - - Args: - project_name (str): project to publish (only single context is - expected per call of 'publish') - batch_path (str): Path batch folder. Contains subfolders with - resources (workfile, another subfolder 'renders' etc.) - user_email (string): email address for webpublisher - used to - find Ftrack user with same email - targets (list): Pyblish targets - (to choose validator for example) - - Raises: - RuntimeError: When there is no path to process. - """ - - if not batch_path: - raise RuntimeError("No publish paths specified") - - log = Logger.get_logger("Webpublish") - log.info("Webpublish command") - - # Register target and host - webpublisher_host = WebpublisherHost() - - os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path - os.environ["AVALON_PROJECT"] = project_name - os.environ["AVALON_APP"] = webpublisher_host.name - os.environ["USER_EMAIL"] = user_email - os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib - - if targets: - if isinstance(targets, str): - targets = [targets] - for target in targets: - pyblish.api.register_target(target) - - install_host(webpublisher_host) - - log.info("Running publish ...") - - _, batch_id = os.path.split(batch_path) - dbcon = get_webpublish_conn() - _id = start_webpublish_log(dbcon, batch_id, user_email) - - task_data = get_task_data(batch_path) - if not task_data["context"]: - msg = "Batch manifest must contain context data" - msg += "Create new batch and set context properly." 
- fail_batch(_id, dbcon, msg) - - publish_and_log(dbcon, _id, log, batch_id=batch_id) - - log.info("Publish finished.") - - -def cli_publish_from_app( - project_name, batch_path, host_name, user_email, targets -): - """Opens installed variant of 'host' and run remote publish there. - - Eventually should be yanked out to Webpublisher cli. - - Currently implemented and tested for Photoshop where customer - wants to process uploaded .psd file and publish collected layers - from there. Triggered by Webpublisher. - - Checks if no other batches are running (status =='in_progress). If - so, it sleeps for SLEEP (this is separate process), - waits for WAIT_FOR seconds altogether. - - Requires installed host application on the machine. - - Runs publish process as user would, in automatic fashion. - - Args: - project_name (str): project to publish (only single context is - expected per call of publish - batch_path (str): Path batch folder. Contains subfolders with - resources (workfile, another subfolder 'renders' etc.) - host_name (str): 'photoshop' - user_email (string): email address for webpublisher - used to - find Ftrack user with same email - targets (list): Pyblish targets - (to choose validator for example) - """ - - log = Logger.get_logger("PublishFromApp") - - log.info("Webpublish photoshop command") - - task_data = get_task_data(batch_path) - - workfile_path = os.path.join(batch_path, - task_data["task"], - task_data["files"][0]) - - print("workfile_path {}".format(workfile_path)) - - batch_id = task_data["batch"] - dbcon = get_webpublish_conn() - # safer to start logging here, launch might be broken altogether - _id = start_webpublish_log(dbcon, batch_id, user_email) - - batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS})) - if len(batches_in_progress) > 1: - running_batches = [str(batch["_id"]) - for batch in batches_in_progress - if batch["_id"] != _id] - msg = "There are still running batches {}\n". \ - format("\n".join(running_batches)) - msg += "Ask admin to check them and reprocess current batch" - fail_batch(_id, dbcon, msg) - - if not task_data["context"]: - msg = "Batch manifest must contain context data" - msg += "Create new batch and set context properly." - fail_batch(_id, dbcon, msg) - - asset_name, task_name, task_type = get_batch_asset_task_info( - task_data["context"]) - - application_manager = ApplicationManager() - found_variant_key = find_variant_key(application_manager, host_name) - app_name = "{}/{}".format(host_name, found_variant_key) - - data = { - "last_workfile_path": workfile_path, - "start_last_workfile": True, - "project_name": project_name, - "asset_name": asset_name, - "task_name": task_name, - "launch_type": LaunchTypes.automated, - } - launch_context = application_manager.create_launch_context( - app_name, **data) - launch_context.run_prelaunch_hooks() - - # must have for proper launch of app - env = launch_context.env - print("env:: {}".format(env)) - env["OPENPYPE_PUBLISH_DATA"] = batch_path - # must pass identifier to update log lines for a batch - env["BATCH_LOG_ID"] = str(_id) - env["HEADLESS_PUBLISH"] = 'true' # to use in app lib - env["USER_EMAIL"] = user_email - - os.environ.update(env) - - # Why is this here? Registered host in this process does not affect - # regitered host in launched process. 
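The launched process reads its Pyblish targets from the PYBLISH_TARGETS
environment variable, so the lines just below merge the extra targets into
it. A minimal sketch of that merge, with made-up values:

    import os

    os.environ["PYBLISH_TARGETS"] = "default"  # assumed pre-existing value
    targets = ["automated", "webpublish"]

    current = os.environ.get("PYBLISH_TARGETS", "").split(os.pathsep)
    current.extend(targets)
    os.environ["PYBLISH_TARGETS"] = os.pathsep.join(set(current))
    # e.g. "default:automated:webpublish" on Linux (set order is arbitrary)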
- pyblish.api.register_host(host_name) - if targets: - if isinstance(targets, str): - targets = [targets] - current_targets = os.environ.get("PYBLISH_TARGETS", "").split( - os.pathsep) - for target in targets: - current_targets.append(target) - - os.environ["PYBLISH_TARGETS"] = os.pathsep.join( - set(current_targets)) - - launched_app = application_manager.launch_with_context(launch_context) - - timeout = get_timeout(project_name, host_name, task_type) - - time_start = time.time() - while launched_app.poll() is None: - time.sleep(0.5) - if time.time() - time_start > timeout: - launched_app.terminate() - msg = "Timeout reached" - fail_batch(_id, dbcon, msg) diff --git a/openpype/hosts/webpublisher/webserver_service/__init__.py b/openpype/hosts/webpublisher/webserver_service/__init__.py deleted file mode 100644 index 73111d286e..0000000000 --- a/openpype/hosts/webpublisher/webserver_service/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .webserver import run_webserver - - -__all__ = ( - "run_webserver", -) diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py deleted file mode 100644 index 20d585e906..0000000000 --- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py +++ /dev/null @@ -1,429 +0,0 @@ -"""Routes and etc. for webpublisher API.""" -import os -import json -import datetime -import collections -import subprocess -from bson.objectid import ObjectId -from aiohttp.web_response import Response - -from openpype.client import ( - get_projects, - get_assets, -) -from openpype.lib import Logger -from openpype.settings import get_project_settings -from openpype_modules.webserver.base_routes import RestApiEndpoint -from openpype_modules.webpublisher import WebpublisherAddon -from openpype_modules.webpublisher.lib import ( - get_webpublish_conn, - get_task_data, - ERROR_STATUS, - REPROCESS_STATUS -) - -log = Logger.get_logger("WebpublishRoutes") - - -class ResourceRestApiEndpoint(RestApiEndpoint): - def __init__(self, resource): - self.resource = resource - super(ResourceRestApiEndpoint, self).__init__() - - -class WebpublishApiEndpoint(ResourceRestApiEndpoint): - @property - def dbcon(self): - return self.resource.dbcon - - -class JsonApiResource: - """Resource for json manipulation. 
-
-    All resources handling sending output to REST should inherit from
-    this class.
-    """
-    @staticmethod
-    def json_dump_handler(value):
-        if isinstance(value, datetime.datetime):
-            return value.isoformat()
-        if isinstance(value, ObjectId):
-            return str(value)
-        if isinstance(value, set):
-            return list(value)
-        raise TypeError(value)
-
-    @classmethod
-    def encode(cls, data):
-        return json.dumps(
-            data,
-            indent=4,
-            default=cls.json_dump_handler
-        ).encode("utf-8")
-
-
-class RestApiResource(JsonApiResource):
-    """Resource carrying needed info and Avalon DB connection for publish."""
-    def __init__(self, server_manager, executable, upload_dir,
-                 studio_task_queue=None):
-        self.server_manager = server_manager
-        self.upload_dir = upload_dir
-        self.executable = executable
-
-        if studio_task_queue is None:
-            studio_task_queue = collections.deque()
-        self.studio_task_queue = studio_task_queue
-
-
-class WebpublishRestApiResource(JsonApiResource):
-    """Resource carrying OP DB connection for storing batch info into DB."""
-
-    def __init__(self):
-        self.dbcon = get_webpublish_conn()
-
-
-class ProjectsEndpoint(ResourceRestApiEndpoint):
-    """Returns a list of dicts with project info (id, name)."""
-    async def get(self) -> Response:
-        output = []
-        for project_doc in get_projects():
-            ret_val = {
-                "id": project_doc["_id"],
-                "name": project_doc["name"]
-            }
-            output.append(ret_val)
-        return Response(
-            status=200,
-            body=self.resource.encode(output),
-            content_type="application/json"
-        )
-
-
-class HiearchyEndpoint(ResourceRestApiEndpoint):
-    """Returns dictionary with context tree from assets."""
-    async def get(self, project_name) -> Response:
-        query_projection = {
-            "_id": 1,
-            "data.tasks": 1,
-            "data.visualParent": 1,
-            "data.entityType": 1,
-            "name": 1,
-            "type": 1,
-        }
-
-        asset_docs = get_assets(project_name, fields=query_projection.keys())
-        asset_docs_by_id = {
-            asset_doc["_id"]: asset_doc
-            for asset_doc in asset_docs
-        }
-
-        asset_docs_by_parent_id = collections.defaultdict(list)
-        for asset_doc in asset_docs_by_id.values():
-            parent_id = asset_doc["data"].get("visualParent")
-            asset_docs_by_parent_id[parent_id].append(asset_doc)
-
-        assets = collections.defaultdict(list)
-
-        for parent_id, children in asset_docs_by_parent_id.items():
-            for child in children:
-                node = assets.get(child["_id"])
-                if not node:
-                    node = Node(child["_id"],
-                                child["data"].get("entityType", "Folder"),
-                                child["name"])
-                    assets[child["_id"]] = node
-
-                tasks = child["data"].get("tasks", {})
-                for t_name, t_con in tasks.items():
-                    task_node = TaskNode("task", t_name)
-                    task_node["attributes"]["type"] = t_con.get("type")
-
-                    task_node.parent = node
-
-                parent_node = assets.get(parent_id)
-                if not parent_node:
-                    asset_doc = asset_docs_by_id.get(parent_id)
-                    if asset_doc:  # regular node
-                        parent_node = Node(parent_id,
-                                           asset_doc["data"].get("entityType",
                                                                  "Folder"),
-                                           asset_doc["name"])
-                    else:  # root
-                        parent_node = Node(parent_id,
-                                           "project",
-                                           project_name)
-                    assets[parent_id] = parent_node
-                node.parent = parent_node
-
-        roots = [x for x in assets.values() if x.parent is None]
-
-        return Response(
-            status=200,
-            body=self.resource.encode(roots[0]),
-            content_type="application/json"
-        )
-
-
-class Node(dict):
-    """Node element in context tree."""
-
-    def __init__(self, uid, node_type, name):
-        self._parent = None  # pointer to parent Node
-        self["type"] = node_type
-        self["name"] = name
-        self['id'] = uid  # keep reference to id
-        self['children'] = []  # collection of pointers to child Nodes
-
-    @property
-    def
parent(self): - return self._parent # simply return the object at the _parent pointer - - @parent.setter - def parent(self, node): - self._parent = node - # add this node to parent's list of children - node['children'].append(self) - - -class TaskNode(Node): - """Special node type only for Tasks.""" - - def __init__(self, node_type, name): - self._parent = None - self["type"] = node_type - self["name"] = name - self["attributes"] = {} - - -class BatchPublishEndpoint(WebpublishApiEndpoint): - """Triggers headless publishing of batch.""" - async def post(self, request) -> Response: - # Validate existence of openpype executable - openpype_app = self.resource.executable - if not openpype_app or not os.path.exists(openpype_app): - msg = "Non existent OpenPype executable {}".format(openpype_app) - raise RuntimeError(msg) - - log.info("BatchPublishEndpoint called") - content = await request.json() - - # Each filter have extensions which are checked on first task item - # - first filter with extensions that are on first task is used - # - filter defines command and can extend arguments dictionary - # This is used only if 'studio_processing' is enabled on batch - studio_processing_filters = [ - # TVPaint filter - { - "extensions": [".tvpp"], - "command": "publish", - "arguments": { - "targets": ["tvpaint_worker", "webpublish"] - }, - "add_to_queue": False - }, - # Photoshop filter - { - "extensions": [".psd", ".psb"], - "command": "publishfromapp", - "arguments": { - # Command 'publishfromapp' requires --host argument - "host": "photoshop", - # Make sure targets are set to None for cases that default - # would change - # - targets argument is not used in 'publishfromapp' - "targets": ["automated", "webpublish"] - }, - # does publish need to be handled by a queue, eg. only - # single process running concurrently? 
- "add_to_queue": True - } - ] - - batch_dir = os.path.join(self.resource.upload_dir, content["batch"]) - - # Default command and arguments - command = "publish" - add_args = { - # All commands need 'project' and 'user' - "project": content["project_name"], - "user": content["user"], - - "targets": ["filespublish", "webpublish"] - } - - add_to_queue = False - if content.get("studio_processing"): - log.info("Post processing called for {}".format(batch_dir)) - - task_data = get_task_data(batch_dir) - - for process_filter in studio_processing_filters: - filter_extensions = process_filter.get("extensions") or [] - for file_name in task_data["files"]: - file_ext = os.path.splitext(file_name)[-1].lower() - if file_ext in filter_extensions: - # Change command - command = process_filter["command"] - # Update arguments - add_args.update( - process_filter.get("arguments") or {} - ) - add_to_queue = process_filter["add_to_queue"] - break - - args = [ - openpype_app, - "module", - WebpublisherAddon.name, - command, - batch_dir - ] - - for key, value in add_args.items(): - # Skip key values where value is None - if value is None: - continue - arg_key = "--{}".format(key) - if not isinstance(value, (tuple, list)): - value = [value] - - for item in value: - args += [arg_key, item] - - log.info("args:: {}".format(args)) - if add_to_queue: - log.debug("Adding to queue") - self.resource.studio_task_queue.append(args) - else: - subprocess.Popen(args) - - return Response( - status=200, - content_type="application/json" - ) - - -class TaskPublishEndpoint(WebpublishApiEndpoint): - """Prepared endpoint triggered after each task - for future development.""" - async def post(self, request) -> Response: - return Response( - status=200, - body=self.resource.encode([]), - content_type="application/json" - ) - - -class BatchStatusEndpoint(WebpublishApiEndpoint): - """Returns dict with info for batch_id. - - Uses 'WebpublishRestApiResource'. - """ - - async def get(self, batch_id) -> Response: - output = self.dbcon.find_one({"batch_id": batch_id}) - - if output: - status = 200 - else: - output = {"msg": "Batch id {} not found".format(batch_id), - "status": "queued", - "progress": 0} - status = 404 - body = self.resource.encode(output) - return Response( - status=status, - body=body, - content_type="application/json" - ) - - -class UserReportEndpoint(WebpublishApiEndpoint): - """Returns list of dict with batch info for user (email address). - - Uses 'WebpublishRestApiResource'. - """ - - async def get(self, user) -> Response: - output = list(self.dbcon.find({"user": user}, - projection={"log": False})) - - if output: - status = 200 - else: - output = {"msg": "User {} not found".format(user)} - status = 404 - body = self.resource.encode(output) - - return Response( - status=status, - body=body, - content_type="application/json" - ) - - -class ConfiguredExtensionsEndpoint(WebpublishApiEndpoint): - """Returns dict of extensions which have mapping to family. 
- - Returns: - { - "file_exts": [], - "sequence_exts": [] - } - """ - async def get(self, project_name=None) -> Response: - sett = get_project_settings(project_name) - - configured = { - "file_exts": set(), - "sequence_exts": set(), - # workfiles that could have "Studio Processing" hardcoded for now - "studio_exts": set(["psd", "psb", "tvpp", "tvp"]) - } - collect_conf = sett["webpublisher"]["publish"]["CollectPublishedFiles"] - configs = collect_conf.get("task_type_to_family", []) - mappings = [] - for _, conf_mappings in configs.items(): - if isinstance(conf_mappings, dict): - conf_mappings = conf_mappings.values() - for conf_mapping in conf_mappings: - mappings.append(conf_mapping) - - for mapping in mappings: - if mapping["is_sequence"]: - configured["sequence_exts"].update(mapping["extensions"]) - else: - configured["file_exts"].update(mapping["extensions"]) - - return Response( - status=200, - body=self.resource.encode(dict(configured)), - content_type="application/json" - ) - - -class BatchReprocessEndpoint(WebpublishApiEndpoint): - """Marks latest 'batch_id' for reprocessing, returns 404 if not found. - - Uses 'WebpublishRestApiResource'. - """ - - async def post(self, batch_id) -> Response: - batches = self.dbcon.find({"batch_id": batch_id, - "status": ERROR_STATUS}).sort("_id", -1) - - if batches: - self.dbcon.update_one( - {"_id": batches[0]["_id"]}, - {"$set": {"status": REPROCESS_STATUS}} - ) - output = [{"msg": "Batch id {} set to reprocess".format(batch_id)}] - status = 200 - else: - output = [{"msg": "Batch id {} not found".format(batch_id)}] - status = 404 - body = self.resource.encode(output) - - return Response( - status=status, - body=body, - content_type="application/json" - ) diff --git a/openpype/hosts/webpublisher/webserver_service/webserver.py b/openpype/hosts/webpublisher/webserver_service/webserver.py deleted file mode 100644 index d7c2ea01b9..0000000000 --- a/openpype/hosts/webpublisher/webserver_service/webserver.py +++ /dev/null @@ -1,182 +0,0 @@ -import collections -import time -import os -from datetime import datetime -import requests -import json -import subprocess - -from openpype.client import OpenPypeMongoConnection -from openpype.modules import ModulesManager -from openpype.lib import Logger - -from openpype_modules.webpublisher.lib import ( - ERROR_STATUS, - REPROCESS_STATUS, - SENT_REPROCESSING_STATUS -) - -from .webpublish_routes import ( - RestApiResource, - WebpublishRestApiResource, - HiearchyEndpoint, - ProjectsEndpoint, - ConfiguredExtensionsEndpoint, - BatchPublishEndpoint, - BatchReprocessEndpoint, - BatchStatusEndpoint, - TaskPublishEndpoint, - UserReportEndpoint -) - -log = Logger.get_logger("webserver_gui") - - -def run_webserver(executable, upload_dir, host=None, port=None): - """Runs webserver in command line, adds routes.""" - - if not host: - host = "localhost" - if not port: - port = 8079 - - manager = ModulesManager() - webserver_module = manager.modules_by_name["webserver"] - - server_manager = webserver_module.create_new_server_manager(port, host) - webserver_url = server_manager.url - # queue for publishfromapp tasks - studio_task_queue = collections.deque() - - resource = RestApiResource(server_manager, - upload_dir=upload_dir, - executable=executable, - studio_task_queue=studio_task_queue) - projects_endpoint = ProjectsEndpoint(resource) - server_manager.add_route( - "GET", - "/api/projects", - projects_endpoint.dispatch - ) - - hiearchy_endpoint = HiearchyEndpoint(resource) - server_manager.add_route( - "GET", - 
"/api/hierarchy/{project_name}", - hiearchy_endpoint.dispatch - ) - - configured_ext_endpoint = ConfiguredExtensionsEndpoint(resource) - server_manager.add_route( - "GET", - "/api/webpublish/configured_ext/{project_name}", - configured_ext_endpoint.dispatch - ) - - # triggers publish - webpublisher_task_publish_endpoint = BatchPublishEndpoint(resource) - server_manager.add_route( - "POST", - "/api/webpublish/batch", - webpublisher_task_publish_endpoint.dispatch - ) - - webpublisher_batch_publish_endpoint = TaskPublishEndpoint(resource) - server_manager.add_route( - "POST", - "/api/webpublish/task", - webpublisher_batch_publish_endpoint.dispatch - ) - - # reporting - webpublish_resource = WebpublishRestApiResource() - batch_status_endpoint = BatchStatusEndpoint(webpublish_resource) - server_manager.add_route( - "GET", - "/api/batch_status/{batch_id}", - batch_status_endpoint.dispatch - ) - - user_status_endpoint = UserReportEndpoint(webpublish_resource) - server_manager.add_route( - "GET", - "/api/publishes/{user}", - user_status_endpoint.dispatch - ) - - batch_reprocess_endpoint = BatchReprocessEndpoint(webpublish_resource) - server_manager.add_route( - "POST", - "/api/webpublish/reprocess/{batch_id}", - batch_reprocess_endpoint.dispatch - ) - - server_manager.start_server() - last_reprocessed = time.time() - while True: - if time.time() - last_reprocessed > 20: - reprocess_failed(upload_dir, webserver_url) - last_reprocessed = time.time() - if studio_task_queue: - args = studio_task_queue.popleft() - subprocess.call(args) # blocking call - - time.sleep(1.0) - - -def reprocess_failed(upload_dir, webserver_url): - # log.info("check_reprocesable_records") - mongo_client = OpenPypeMongoConnection.get_mongo_client() - database_name = os.environ["OPENPYPE_DATABASE_NAME"] - dbcon = mongo_client[database_name]["webpublishes"] - - results = dbcon.find({"status": REPROCESS_STATUS}) - reprocessed_batches = set() - for batch in results: - if batch["batch_id"] in reprocessed_batches: - continue - - batch_url = os.path.join(upload_dir, - batch["batch_id"], - "manifest.json") - log.info("batch:: {} {}".format(os.path.exists(batch_url), batch_url)) - if not os.path.exists(batch_url): - msg = "Manifest {} not found".format(batch_url) - print(msg) - dbcon.update_one( - {"_id": batch["_id"]}, - {"$set": - { - "finish_date": datetime.now(), - "status": ERROR_STATUS, - "progress": 100, - "log": batch.get("log") + msg - }} - ) - continue - server_url = "{}/api/webpublish/batch".format(webserver_url) - - with open(batch_url) as f: - data = json.loads(f.read()) - - dbcon.update_many( - { - "batch_id": batch["batch_id"], - "status": {"$in": [ERROR_STATUS, REPROCESS_STATUS]} - }, - { - "$set": { - "finish_date": datetime.now(), - "status": SENT_REPROCESSING_STATUS, - "progress": 100 - } - } - ) - - try: - r = requests.post(server_url, json=data) - log.info("response{}".format(r)) - except Exception: - log.info("exception", exc_info=True) - - reprocessed_batches.add(batch["batch_id"]) diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py deleted file mode 100644 index b3b12ac250..0000000000 --- a/openpype/lib/__init__.py +++ /dev/null @@ -1,297 +0,0 @@ -# -*- coding: utf-8 -*- -# flake8: noqa E402 -"""OpenPype lib functions.""" -# add vendor to sys path based on Python version -import sys -import os -import site -from openpype import PACKAGE_DIR - -# Add Python version specific vendor folder -python_version_dir = os.path.join( - PACKAGE_DIR, "vendor", "python", "python_{}".format(sys.version[0]) -) -# 
Prepend path in sys paths -sys.path.insert(0, python_version_dir) -site.addsitedir(python_version_dir) - - -from .events import ( - emit_event, - register_event_callback -) - -from .vendor_bin_utils import ( - ToolNotFoundError, - find_executable, - get_vendor_bin_path, - get_oiio_tools_path, - get_oiio_tool_args, - get_ffmpeg_tool_path, - get_ffmpeg_tool_args, - is_oiio_supported, -) - -from .attribute_definitions import ( - AbstractAttrDef, - - UIDef, - UISeparatorDef, - UILabelDef, - - UnknownDef, - NumberDef, - TextDef, - EnumDef, - BoolDef, - FileDef, - FileDefItem, -) - -from .env_tools import ( - env_value_to_bool, - get_paths_from_environ, -) - -from .terminal import Terminal -from .execute import ( - get_ayon_launcher_args, - get_openpype_execute_args, - get_linux_launcher_args, - execute, - run_subprocess, - run_detached_process, - run_ayon_launcher_process, - run_openpype_process, - clean_envs_for_openpype_process, - path_to_subprocess_arg, - CREATE_NO_WINDOW -) -from .log import ( - Logger, -) - -from .path_templates import ( - merge_dict, - TemplateMissingKey, - TemplateUnsolved, - StringTemplate, - TemplatesDict, - FormatObject, -) - -from .dateutils import ( - get_datetime_data, - get_timestamp, - get_formatted_current_time -) - -from .python_module_tools import ( - import_filepath, - modules_from_path, - recursive_bases_from_class, - classes_from_module, - import_module_from_dirpath, - is_func_signature_supported, -) - -from .profiles_filtering import ( - compile_list_of_regexes, - filter_profiles -) - -from .transcoding import ( - get_transcode_temp_directory, - should_convert_for_ffmpeg, - convert_for_ffmpeg, - convert_input_paths_for_ffmpeg, - get_ffprobe_data, - get_ffprobe_streams, - get_ffmpeg_codec_args, - get_ffmpeg_format_args, - convert_ffprobe_fps_value, - convert_ffprobe_fps_to_float, - get_rescaled_command_arguments, -) - -from .local_settings import ( - IniSettingRegistry, - JSONSettingRegistry, - OpenPypeSecureRegistry, - OpenPypeSettingsRegistry, - get_local_site_id, - change_openpype_mongo_url, - get_openpype_username, - is_admin_password_required -) - -from .applications import ( - ApplicationLaunchFailed, - ApplictionExecutableNotFound, - ApplicationNotFound, - ApplicationManager, - - PreLaunchHook, - PostLaunchHook, - - EnvironmentPrepData, - prepare_app_environments, - prepare_context_environments, - get_app_environments_for_context, - apply_project_environments_value -) - -from .plugin_tools import ( - prepare_template_data, - source_hash, -) - -from .path_tools import ( - format_file_size, - collect_frames, - create_hard_link, - version_up, - get_version_from_path, - get_last_version_from_path, -) - -from .openpype_version import ( - op_version_control_available, - get_openpype_version, - get_build_version, - get_expected_version, - is_running_from_build, - is_running_staging, - is_current_version_studio_latest, - is_current_version_higher_than_expected -) - - -from .connections import ( - requests_get, - requests_post -) - -terminal = Terminal - -__all__ = [ - "emit_event", - "register_event_callback", - - "get_ayon_launcher_args", - "get_openpype_execute_args", - "get_linux_launcher_args", - "execute", - "run_subprocess", - "run_detached_process", - "run_ayon_launcher_process", - "run_openpype_process", - "clean_envs_for_openpype_process", - "path_to_subprocess_arg", - "CREATE_NO_WINDOW", - - "env_value_to_bool", - "get_paths_from_environ", - - "ToolNotFoundError", - "find_executable", - "get_vendor_bin_path", - "get_oiio_tools_path", - 
"get_oiio_tool_args", - "get_ffmpeg_tool_path", - "get_ffmpeg_tool_args", - "is_oiio_supported", - - "AbstractAttrDef", - - "UIDef", - "UISeparatorDef", - "UILabelDef", - - "UnknownDef", - "NumberDef", - "TextDef", - "EnumDef", - "BoolDef", - "FileDef", - "FileDefItem", - - "import_filepath", - "modules_from_path", - "recursive_bases_from_class", - "classes_from_module", - "import_module_from_dirpath", - "is_func_signature_supported", - - "get_transcode_temp_directory", - "should_convert_for_ffmpeg", - "convert_for_ffmpeg", - "convert_input_paths_for_ffmpeg", - "get_ffprobe_data", - "get_ffprobe_streams", - "get_ffmpeg_codec_args", - "get_ffmpeg_format_args", - "convert_ffprobe_fps_value", - "convert_ffprobe_fps_to_float", - "get_rescaled_command_arguments", - - "IniSettingRegistry", - "JSONSettingRegistry", - "OpenPypeSecureRegistry", - "OpenPypeSettingsRegistry", - "get_local_site_id", - "change_openpype_mongo_url", - "get_openpype_username", - "is_admin_password_required", - - "ApplicationLaunchFailed", - "ApplictionExecutableNotFound", - "ApplicationNotFound", - "ApplicationManager", - "PreLaunchHook", - "PostLaunchHook", - "EnvironmentPrepData", - "prepare_app_environments", - "prepare_context_environments", - "get_app_environments_for_context", - "apply_project_environments_value", - - "compile_list_of_regexes", - - "filter_profiles", - - "prepare_template_data", - "source_hash", - - "format_file_size", - "collect_frames", - "create_hard_link", - "version_up", - "get_version_from_path", - "get_last_version_from_path", - - "merge_dict", - "TemplateMissingKey", - "TemplateUnsolved", - "StringTemplate", - "TemplatesDict", - "FormatObject", - - "terminal", - - "get_datetime_data", - "get_formatted_current_time", - - "Logger", - - "op_version_control_available", - "get_openpype_version", - "get_build_version", - "get_expected_version", - "is_running_from_build", - "is_running_staging", - "is_current_version_studio_latest", - - "requests_get", - "requests_post" -] diff --git a/openpype/lib/log.py b/openpype/lib/log.py deleted file mode 100644 index 72071063ec..0000000000 --- a/openpype/lib/log.py +++ /dev/null @@ -1,494 +0,0 @@ -""" -Logging to console and to mongo. For mongo logging, you need to set either -``OPENPYPE_LOG_MONGO_URL`` to something like: - -.. example:: - mongo://user:password@hostname:port/database/collection?authSource=avalon - -or set ``OPENPYPE_LOG_MONGO_HOST`` and other variables. -See :func:`_mongo_settings` - -Best place for it is in ``repos/pype-config/environments/global.json`` -""" - - -import datetime -import getpass -import logging -import os -import platform -import socket -import sys -import time -import traceback -import threading -import copy - -from openpype import AYON_SERVER_ENABLED -from openpype.client.mongo import ( - MongoEnvNotSet, - get_default_components, - OpenPypeMongoConnection, -) -from . import Terminal - -try: - import log4mongo - from log4mongo.handlers import MongoHandler -except ImportError: - log4mongo = None - MongoHandler = type("NOT_SET", (), {}) - -# Check for `unicode` in builtins -USE_UNICODE = hasattr(__builtins__, "unicode") - - -class LogStreamHandler(logging.StreamHandler): - """ StreamHandler class designed to handle utf errors in python 2.x hosts. 
- - """ - - def __init__(self, stream=None): - super(LogStreamHandler, self).__init__(stream) - self.enabled = True - - def enable(self): - """ Enable StreamHandler - - Used to silence output - """ - self.enabled = True - - def disable(self): - """ Disable StreamHandler - - Make StreamHandler output again - """ - self.enabled = False - - def emit(self, record): - if not self.enable: - return - try: - msg = self.format(record) - msg = Terminal.log(msg) - stream = self.stream - if stream is None: - return - fs = "%s\n" - # if no unicode support... - if not USE_UNICODE: - stream.write(fs % msg) - else: - try: - if (isinstance(msg, unicode) and # noqa: F821 - getattr(stream, 'encoding', None)): - ufs = u'%s\n' - try: - stream.write(ufs % msg) - except UnicodeEncodeError: - stream.write((ufs % msg).encode(stream.encoding)) - else: - if (getattr(stream, 'encoding', 'utf-8')): - ufs = u'%s\n' - stream.write(ufs % unicode(msg)) # noqa: F821 - else: - stream.write(fs % msg) - except UnicodeError: - stream.write(fs % msg.encode("UTF-8")) - self.flush() - except (KeyboardInterrupt, SystemExit): - raise - - except OSError: - self.handleError(record) - - except Exception: - print(repr(record)) - self.handleError(record) - - -class LogFormatter(logging.Formatter): - - DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ]' - default_formatter = logging.Formatter(DFT) - - def __init__(self, formats): - super(LogFormatter, self).__init__() - self.formatters = {} - for loglevel in formats: - self.formatters[loglevel] = logging.Formatter(formats[loglevel]) - - def format(self, record): - formatter = self.formatters.get(record.levelno, self.default_formatter) - - _exc_info = record.exc_info - record.exc_info = None - - out = formatter.format(record) - record.exc_info = _exc_info - - if record.exc_info is not None: - line_len = len(str(record.exc_info[1])) - if line_len > 30: - line_len = 30 - out = "{}\n{}\n{}\n{}\n{}".format( - out, - line_len * "=", - str(record.exc_info[1]), - line_len * "=", - self.formatException(record.exc_info) - ) - return out - - -class MongoFormatter(logging.Formatter): - - DEFAULT_PROPERTIES = logging.LogRecord( - '', '', '', '', '', '', '', '').__dict__.keys() - - def format(self, record): - """Formats LogRecord into python dictionary.""" - # Standard document - document = { - 'timestamp': datetime.datetime.now(), - 'level': record.levelname, - 'thread': record.thread, - 'threadName': record.threadName, - 'message': record.getMessage(), - 'loggerName': record.name, - 'fileName': record.pathname, - 'module': record.module, - 'method': record.funcName, - 'lineNumber': record.lineno - } - document.update(Logger.get_process_data()) - - # Standard document decorated with exception info - if record.exc_info is not None: - document['exception'] = { - 'message': str(record.exc_info[1]), - 'code': 0, - 'stackTrace': self.formatException(record.exc_info) - } - - # Standard document decorated with extra contextual information - if len(self.DEFAULT_PROPERTIES) != len(record.__dict__): - contextual_extra = set(record.__dict__).difference( - set(self.DEFAULT_PROPERTIES)) - if contextual_extra: - for key in contextual_extra: - document[key] = record.__dict__[key] - return document - - -class Logger: - DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] ' - DBG = " - { %(name)s }: [ %(message)s ] " - INF = ">>> [ %(message)s ] " - WRN = "*** WRN: >>> { %(name)s }: [ %(message)s ] " - ERR = "!!! ERR: %(asctime)s >>> { %(name)s }: [ %(message)s ] " - CRI = "!!! 
CRI: %(asctime)s >>> { %(name)s }: [ %(message)s ] " - - FORMAT_FILE = { - logging.INFO: INF, - logging.DEBUG: DBG, - logging.WARNING: WRN, - logging.ERROR: ERR, - logging.CRITICAL: CRI, - } - - # Is static class initialized - bootstraped = False - initialized = False - _init_lock = threading.Lock() - - # Defines if mongo logging should be used - use_mongo_logging = None - mongo_process_id = None - - # Backwards compatibility - was used in start.py - # TODO remove when all old builds are replaced with new one - # not using 'log_mongo_url_components' - log_mongo_url_components = None - - # Database name in Mongo - log_database_name = os.environ.get("OPENPYPE_DATABASE_NAME") - # Collection name under database in Mongo - log_collection_name = "logs" - - # Logging level - OPENPYPE_LOG_LEVEL - log_level = None - - # Data same for all record documents - process_data = None - # Cached process name or ability to set different process name - _process_name = None - - @classmethod - def get_logger(cls, name=None, _host=None): - if not cls.initialized: - cls.initialize() - - logger = logging.getLogger(name or "__main__") - - logger.setLevel(cls.log_level) - - add_mongo_handler = cls.use_mongo_logging - add_console_handler = True - - for handler in logger.handlers: - if isinstance(handler, MongoHandler): - add_mongo_handler = False - elif isinstance(handler, LogStreamHandler): - add_console_handler = False - - if add_console_handler: - logger.addHandler(cls._get_console_handler()) - - if add_mongo_handler: - try: - handler = cls._get_mongo_handler() - if handler: - logger.addHandler(handler) - - except MongoEnvNotSet: - # Skip if mongo environments are not set yet - cls.use_mongo_logging = False - - except Exception: - lines = traceback.format_exception(*sys.exc_info()) - for line in lines: - if line.endswith("\n"): - line = line[:-1] - Terminal.echo(line) - cls.use_mongo_logging = False - - # Do not propagate logs to root logger - logger.propagate = False - - if _host is not None: - # Warn about deprecated argument - # TODO remove backwards compatibility of host argument which is - # not used for more than a year - logger.warning( - "Logger \"{}\" is using argument `host` on `get_logger`" - " which is deprecated. Please remove as backwards" - " compatibility will be removed soon." 
- ) - return logger - - @classmethod - def _get_mongo_handler(cls): - cls.bootstrap_mongo_log() - - if not cls.use_mongo_logging: - return - - components = get_default_components() - kwargs = { - "host": components["host"], - "database_name": cls.log_database_name, - "collection": cls.log_collection_name, - "username": components["username"], - "password": components["password"], - "capped": True, - "formatter": MongoFormatter() - } - if components["port"] is not None: - kwargs["port"] = int(components["port"]) - if components["auth_db"]: - kwargs["authentication_db"] = components["auth_db"] - - return MongoHandler(**kwargs) - - @classmethod - def _get_console_handler(cls): - formatter = LogFormatter(cls.FORMAT_FILE) - console_handler = LogStreamHandler() - - console_handler.set_name("LogStreamHandler") - console_handler.setFormatter(formatter) - return console_handler - - @classmethod - def initialize(cls): - # TODO update already created loggers on re-initialization - if not cls._init_lock.locked(): - with cls._init_lock: - cls._initialize() - else: - # If lock is locked wait until is finished - while cls._init_lock.locked(): - time.sleep(0.1) - - @classmethod - def _initialize(cls): - # Change initialization state to prevent runtime changes - # if is executed during runtime - cls.initialized = False - if not AYON_SERVER_ENABLED: - cls.log_mongo_url_components = get_default_components() - - # Define if should logging to mongo be used - if AYON_SERVER_ENABLED: - use_mongo_logging = False - else: - use_mongo_logging = ( - log4mongo is not None - and os.environ.get("OPENPYPE_LOG_TO_SERVER") == "1" - ) - - # Set mongo id for process (ONLY ONCE) - if use_mongo_logging and cls.mongo_process_id is None: - try: - from bson.objectid import ObjectId - except Exception: - use_mongo_logging = False - - # Check if mongo id was passed with environments and pop it - # - This is for subprocesses that are part of another process - # like Ftrack event server has 3 other subprocesses that should - # use same mongo id - if use_mongo_logging: - mongo_id = os.environ.pop("OPENPYPE_PROCESS_MONGO_ID", None) - if not mongo_id: - # Create new object id - mongo_id = ObjectId() - else: - # Convert string to ObjectId object - mongo_id = ObjectId(mongo_id) - cls.mongo_process_id = mongo_id - - # Store result to class definition - cls.use_mongo_logging = use_mongo_logging - - # Define what is logging level - log_level = os.getenv("OPENPYPE_LOG_LEVEL") - if not log_level: - # Check OPENPYPE_DEBUG for backwards compatibility - op_debug = os.getenv("OPENPYPE_DEBUG") - if op_debug and int(op_debug) > 0: - log_level = 10 - else: - log_level = 20 - cls.log_level = int(log_level) - - if not os.environ.get("OPENPYPE_MONGO"): - cls.use_mongo_logging = False - - # Mark as initialized - cls.initialized = True - - @classmethod - def get_process_data(cls): - """Data about current process which should be same for all records. - - Process data are used for each record sent to mongo database. 
-        """
-        if cls.process_data is not None:
-            return copy.deepcopy(cls.process_data)
-
-        if not cls.initialized:
-            cls.initialize()
-
-        host_name = socket.gethostname()
-        try:
-            host_ip = socket.gethostbyname(host_name)
-        except socket.gaierror:
-            host_ip = "127.0.0.1"
-
-        process_name = cls.get_process_name()
-
-        cls.process_data = {
-            "process_id": cls.mongo_process_id,
-            "hostname": host_name,
-            "hostip": host_ip,
-            "username": getpass.getuser(),
-            "system_name": platform.system(),
-            "process_name": process_name
-        }
-        return copy.deepcopy(cls.process_data)
-
-    @classmethod
-    def set_process_name(cls, process_name):
-        """Set process name for mongo logs."""
-        # Just change the attribute
-        cls._process_name = process_name
-        # Update process data if they are already set
-        if cls.process_data is not None:
-            cls.process_data["process_name"] = process_name
-
-    @classmethod
-    def get_process_name(cls):
-        """Process name that acts like a "label" of a process.
-
-        OpenPype's logging can be used from OpenPype itself or from hosts.
-        Even in an OpenPype process it's good to know if logs are from tray
-        or from other cli commands. This should help to identify that
-        information.
-        """
-        if cls._process_name is not None:
-            return cls._process_name
-
-        # Get process name
-        process_name = os.environ.get("AVALON_APP_NAME")
-        if not process_name:
-            try:
-                import psutil
-                process = psutil.Process(os.getpid())
-                process_name = process.name()
-
-            except ImportError:
-                pass
-
-        if not process_name:
-            process_name = os.path.basename(sys.executable)
-
-        cls._process_name = process_name
-        return cls._process_name
-
-    @classmethod
-    def bootstrap_mongo_log(cls):
-        """Prepare mongo logging."""
-        if cls.bootstraped:
-            return
-
-        if not cls.initialized:
-            cls.initialize()
-
-        if not cls.use_mongo_logging:
-            return
-
-        if not cls.log_database_name:
-            raise ValueError("Database name for logs is not set")
-
-        client = log4mongo.handlers._connection
-        if not client:
-            client = cls.get_log_mongo_connection()
-            # Set the client inside log4mongo handlers to not create another
-            #   mongo db connection.
-            log4mongo.handlers._connection = client
-
-        logdb = client[cls.log_database_name]
-
-        collist = logdb.list_collection_names()
-        if cls.log_collection_name not in collist:
-            logdb.create_collection(
-                cls.log_collection_name,
-                capped=True,
-                max=5000,
-                size=1073741824
-            )
-        cls.bootstraped = True
-
-    @classmethod
-    def get_log_mongo_connection(cls):
-        """Mongo connection that allows access to the log collection.
-
-        This is implemented to prevent multiple connections to mongo from
-        the same process.
-        """
-        if not cls.initialized:
-            cls.initialize()
-
-        return OpenPypeMongoConnection.get_mongo_client()
diff --git a/openpype/lib/openpype_version.py b/openpype/lib/openpype_version.py
deleted file mode 100644
index 5618eb0c2e..0000000000
--- a/openpype/lib/openpype_version.py
+++ /dev/null
@@ -1,302 +0,0 @@
-"""Lib access to OpenPypeVersion from igniter.
-
-Access to logic from igniter is available only for OpenPype processes.
-It is meant to be able to check OpenPype versions for the studio. The logic
-depends on igniter's inner logic of versions.
-
-Keep in mind that all functions except 'get_installed_version' do not return
-the OpenPype version located in the build, but versions available in the
-remote versions repository or locally available.
-"""
-
-import os
-import sys
-
-import openpype.version
-from openpype import AYON_SERVER_ENABLED
-
-from .python_module_tools import import_filepath
-
-
-# ----------------------------------------
-# Functions independent of OpenPypeVersion
-# ----------------------------------------
-def get_openpype_version():
-    """Version of pype that is currently used."""
-    return openpype.version.__version__
-
-
-def get_ayon_launcher_version():
-    version_filepath = os.path.join(
-        os.environ["AYON_ROOT"],
-        "version.py"
-    )
-    if not os.path.exists(version_filepath):
-        return None
-    content = {}
-    with open(version_filepath, "r") as stream:
-        exec(stream.read(), content)
-    return content["__version__"]
-
-
-def get_build_version():
-    """OpenPype version of build."""
-
-    if AYON_SERVER_ENABLED:
-        return get_ayon_launcher_version()
-
-    # Return OpenPype version if is running from code
-    if not is_running_from_build():
-        return get_openpype_version()
-
-    # Import `version.py` from build directory
-    version_filepath = os.path.join(
-        os.environ["OPENPYPE_ROOT"],
-        "openpype",
-        "version.py"
-    )
-    if not os.path.exists(version_filepath):
-        return None
-
-    module = import_filepath(version_filepath, "openpype_build_version")
-    return getattr(module, "__version__", None)
-
-
-def is_running_from_build():
-    """Determine if current process is running from build or code.
-
-    Returns:
-        bool: True if running from build.
-    """
-
-    if AYON_SERVER_ENABLED:
-        executable_path = os.environ["AYON_EXECUTABLE"]
-    else:
-        executable_path = os.environ["OPENPYPE_EXECUTABLE"]
-    executable_filename = os.path.basename(executable_path)
-    if "python" in executable_filename.lower():
-        return False
-    return True
-
-
-def is_staging_enabled():
-    if AYON_SERVER_ENABLED:
-        return os.getenv("AYON_USE_STAGING") == "1"
-    return os.environ.get("OPENPYPE_USE_STAGING") == "1"
-
-
-def is_running_staging():
-    """Currently used OpenPype is a staging version.
-
-    This function is not a 100% proper check of a staging version. It is
-    possible to have usage of a staging version enabled but be in a
-    different one.
-
-    The function is based on 4 factors:
-    - env 'OPENPYPE_IS_STAGING' is set
-    - current production version
-    - current staging version
-    - use staging is enabled
-
-    First it checks for the 'OPENPYPE_IS_STAGING' environment variable which
-    can be set to '1'. The value should be set only when a process without
-    access to OpenPypeVersion is launched (e.g. in DCCs). If the current
-    version is the same as the production version it is expected that it is
-    not staging, and it doesn't matter what 'is_staging_enabled' would
-    return. If the current version is the same as the staging version it is
-    expected we're in staging. In all other cases 'is_staging_enabled' is
-    used as the source of the output value.
-
-    The function is used to decide which icon is used. To check e.g. updates
-    the output should be combined with other functions from this file.
-
-    Returns:
-        bool: Using staging version or not.
- """ - - if AYON_SERVER_ENABLED: - return is_staging_enabled() - - if os.environ.get("OPENPYPE_IS_STAGING") == "1": - return True - - if not op_version_control_available(): - return False - - from openpype.settings import get_global_settings - - global_settings = get_global_settings() - production_version = global_settings["production_version"] - latest_version = None - if not production_version or production_version == "latest": - latest_version = get_latest_version(local=False, remote=True) - production_version = latest_version - - current_version = get_openpype_version() - if current_version == production_version: - return False - - staging_version = global_settings["staging_version"] - if not staging_version or staging_version == "latest": - if latest_version is None: - latest_version = get_latest_version(local=False, remote=True) - staging_version = latest_version - - if current_version == staging_version: - return True - - return is_staging_enabled() - - -# ---------------------------------------- -# Functions dependent on OpenPypeVersion -# - Make sense to call only in OpenPype process -# ---------------------------------------- -def get_OpenPypeVersion(): - """Access to OpenPypeVersion class stored in sys modules.""" - return sys.modules.get("OpenPypeVersion") - - -def op_version_control_available(): - """Check if current process has access to OpenPypeVersion.""" - if get_OpenPypeVersion() is None: - return False - return True - - -def get_installed_version(): - """Get OpenPype version inside build. - - This version is not returned by any other functions here. - """ - if op_version_control_available(): - return get_OpenPypeVersion().get_installed_version() - return None - - -def get_available_versions(*args, **kwargs): - """Get list of available versions.""" - if op_version_control_available(): - return get_OpenPypeVersion().get_available_versions( - *args, **kwargs - ) - return None - - -def openpype_path_is_set(): - """OpenPype repository path is set in settings.""" - if op_version_control_available(): - return get_OpenPypeVersion().openpype_path_is_set() - return None - - -def openpype_path_is_accessible(): - """OpenPype version repository path can be accessed.""" - if op_version_control_available(): - return get_OpenPypeVersion().openpype_path_is_accessible() - return None - - -def get_local_versions(*args, **kwargs): - """OpenPype versions available on this workstation.""" - if op_version_control_available(): - return get_OpenPypeVersion().get_local_versions(*args, **kwargs) - return None - - -def get_remote_versions(*args, **kwargs): - """OpenPype versions in repository path.""" - if op_version_control_available(): - return get_OpenPypeVersion().get_remote_versions(*args, **kwargs) - return None - - -def get_latest_version(local=None, remote=None): - """Get latest version from repository path.""" - - if op_version_control_available(): - return get_OpenPypeVersion().get_latest_version( - local=local, - remote=remote - ) - return None - - -def get_expected_studio_version(staging=None): - """Expected production or staging version in studio.""" - if op_version_control_available(): - if staging is None: - staging = is_staging_enabled() - return get_OpenPypeVersion().get_expected_studio_version(staging) - return None - - -def get_expected_version(staging=None): - expected_version = get_expected_studio_version(staging) - if expected_version is None: - # Look for latest if expected version is not set in settings - expected_version = get_latest_version( - local=False, - remote=True - 
)
-    return expected_version
-
-
-def is_current_version_studio_latest():
-    """Is the currently running OpenPype version the one defined by studio.
-
-    It is not recommended to ask in each process as there may be situations
-    when an older OpenPype should be used. For example on farm. But it does
-    make sense in processes that can run for a long time.
-
-    Returns:
-        None: Can't determine. e.g. when running from code or the build is
-            too old.
-        bool: True when the studio-defined version is used.
-    """
-    output = None
-    # Skip if is not running from build or build does not support version
-    #   control or path to folder with zip files is not accessible
-    if (
-        not is_running_from_build()
-        or not op_version_control_available()
-        or not openpype_path_is_accessible()
-    ):
-        return output
-
-    # Get OpenPypeVersion class
-    OpenPypeVersion = get_OpenPypeVersion()
-    # Convert current version to OpenPypeVersion object
-    current_version = OpenPypeVersion(version=get_openpype_version())
-
-    # Get expected version (from settings)
-    expected_version = get_expected_version()
-    # Check if current version is expected version
-    return current_version == expected_version
-
-
-def is_current_version_higher_than_expected():
-    """Is the current OpenPype version higher than the studio-defined one.
-
-    Returns:
-        None: Can't determine. e.g. when running from code or the build is
-            too old.
-        bool: True when it is higher than the studio version.
-    """
-    output = None
-    # Skip if is not running from build or build does not support version
-    #   control or path to folder with zip files is not accessible
-    if (
-        not is_running_from_build()
-        or not op_version_control_available()
-        or not openpype_path_is_accessible()
-    ):
-        return output
-
-    # Get OpenPypeVersion class
-    OpenPypeVersion = get_OpenPypeVersion()
-    # Convert current version to OpenPypeVersion object
-    current_version = OpenPypeVersion(version=get_openpype_version())
-
-    # Get expected version (from settings)
-    expected_version = get_expected_version()
-    # Check if current version is higher than expected version
-    return current_version > expected_version
diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py
deleted file mode 100644
index 91a5b76e35..0000000000
--- a/openpype/lib/project_backpack.py
+++ /dev/null
@@ -1,325 +0,0 @@
-"""These lib functions are for development purposes.
-
-WARNING:
-    This is not meant for production data. Please don't write code which is
-    dependent on functionality here.
-
-The goal is to be able to create a package of the current state of a project,
-with related documents from mongo and files from disk, into a zip file, and
-then be able to recreate the project based on the zip.
-
-This gives the ability to create a project where changes and tests can be
-done.
-
-Keep in mind that creating a package of a project has a few requirements.
-Possible requirements should be listed in the 'pack_project' function.
-""" - -import os -import json -import platform -import tempfile -import shutil -import datetime - -import zipfile -from openpype.client.mongo import ( - load_json_file, - get_project_connection, - replace_project_documents, - store_project_documents, -) - -DOCUMENTS_FILE_NAME = "database" -METADATA_FILE_NAME = "metadata" -PROJECT_FILES_DIR = "project_files" - - -def add_timestamp(filepath): - """Add timestamp string to a file.""" - base, ext = os.path.splitext(filepath) - timestamp = datetime.datetime.now().strftime("%y%m%d_%H%M%S") - new_base = "{}_{}".format(base, timestamp) - return new_base + ext - - -def get_project_document(project_name, database_name=None): - """Query project document. - - Function 'get_project' from client api cannot be used as it does not allow - to change which 'database_name' is used. - - Args: - project_name (str): Name of project. - database_name (Optional[str]): Name of mongo database where to look for - project. - - Returns: - Union[dict[str, Any], None]: Project document or None. - """ - - col = get_project_connection(project_name, database_name) - return col.find_one({"type": "project"}) - - -def _pack_files_to_zip(zip_stream, source_path, root_path): - """Pack files to a zip stream. - - Args: - zip_stream (zipfile.ZipFile): Stream to a zipfile. - source_path (str): Path to a directory where files are. - root_path (str): Path to a directory which is used for calculation - of relative path. - """ - - for root, _, filenames in os.walk(source_path): - for filename in filenames: - filepath = os.path.join(root, filename) - # TODO add one more folder - archive_name = os.path.join( - PROJECT_FILES_DIR, - os.path.relpath(filepath, root_path) - ) - zip_stream.write(filepath, archive_name) - - -def pack_project( - project_name, - destination_dir=None, - only_documents=False, - database_name=None -): - """Make a package of a project with mongo documents and files. - - This function has few restrictions: - - project must have only one root - - project must have all templates starting with - "{root[...]}/{project[name]}" - - Args: - project_name (str): Project that should be packaged. - destination_dir (Optional[str]): Optional path where zip will be - stored. Project's root is used if not passed. - only_documents (Optional[bool]): Pack only Mongo documents and skip - files. - database_name (Optional[str]): Custom database name from which is - project queried. - """ - - print("Creating package of project \"{}\"".format(project_name)) - # Validate existence of project - project_doc = get_project_document(project_name, database_name) - if not project_doc: - raise ValueError("Project \"{}\" was not found in database".format( - project_name - )) - - if only_documents and not destination_dir: - raise ValueError(( - "Destination directory must be defined" - " when only documents should be packed." 
- )) - - root_path = None - source_root = {} - project_source_path = None - if not only_documents: - roots = project_doc["config"]["roots"] - # Determine root directory of project - source_root = None - source_root_name = None - for root_name, root_value in roots.items(): - if source_root is not None: - raise ValueError( - "Packaging is supported only for single root projects" - ) - source_root = root_value - source_root_name = root_name - - root_path = source_root[platform.system().lower()] - print("Using root \"{}\" with path \"{}\"".format( - source_root_name, root_path - )) - - project_source_path = os.path.join(root_path, project_name) - if not os.path.exists(project_source_path): - raise ValueError("Didn't find source of project files") - - # Determine zip filepath where data will be stored - if not destination_dir: - destination_dir = root_path - - if not destination_dir: - raise ValueError( - "Project {} does not have any roots.".format(project_name) - ) - - destination_dir = os.path.normpath(destination_dir) - if not os.path.exists(destination_dir): - os.makedirs(destination_dir) - - zip_path = os.path.join(destination_dir, project_name + ".zip") - - print("Project will be packaged into \"{}\"".format(zip_path)) - # Rename already existing zip - if os.path.exists(zip_path): - dst_filepath = add_timestamp(zip_path) - os.rename(zip_path, dst_filepath) - - # We can add more data - metadata = { - "project_name": project_name, - "root": source_root, - "version": 1 - } - # Create temp json file where metadata are stored - with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as s: - temp_metadata_json = s.name - - with open(temp_metadata_json, "w") as stream: - json.dump(metadata, stream) - - # Create temp json file where database documents are stored - with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as s: - temp_docs_json = s.name - - # Query all project documents and store them to temp json - store_project_documents(project_name, temp_docs_json, database_name) - - print("Packing files into zip") - # Write all to zip file - with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zip_stream: - # Add metadata file - zip_stream.write(temp_metadata_json, METADATA_FILE_NAME + ".json") - # Add database documents - zip_stream.write(temp_docs_json, DOCUMENTS_FILE_NAME + ".json") - - # Add project files to zip - if not only_documents: - _pack_files_to_zip(zip_stream, project_source_path, root_path) - - print("Cleaning up") - # Cleanup - os.remove(temp_docs_json) - os.remove(temp_metadata_json) - - print("*** Packing finished ***") - - -def _unpack_project_files(unzip_dir, root_path, project_name): - """Move project files from unarchived temp folder to new root. - - Unpack is skipped if source files are not available in the zip. That can - happen if nothing was published yet or only documents were stored to - package. - - Args: - unzip_dir (str): Location where zip was unzipped. - root_path (str): Path to new root. - project_name (str): Name of project. - """ - - src_project_files_dir = os.path.join( - unzip_dir, PROJECT_FILES_DIR, project_name - ) - # Skip if files are not in the zip - if not os.path.exists(src_project_files_dir): - return - - # Make sure root path exists - if not os.path.exists(root_path): - os.makedirs(root_path) - - dst_project_files_dir = os.path.normpath( - os.path.join(root_path, project_name) - ) - if os.path.exists(dst_project_files_dir): - new_path = add_timestamp(dst_project_files_dir) - print("Project folder already exists. 
Renamed \"{}\" -> \"{}\"".format( - dst_project_files_dir, new_path - )) - os.rename(dst_project_files_dir, new_path) - - print("Moving project files from temp \"{}\" -> \"{}\"".format( - src_project_files_dir, dst_project_files_dir - )) - shutil.move(src_project_files_dir, dst_project_files_dir) - - -def unpack_project( - path_to_zip, new_root=None, database_only=None, database_name=None -): - """Unpack project zip file to recreate project. - - Args: - path_to_zip (str): Path to zip which was created using 'pack_project' - function. - new_root (str): Optional way how to set different root path for - unpacked project. - database_only (Optional[bool]): Unpack only database from zip. - database_name (str): Name of database where project will be recreated. - """ - - if database_only is None: - database_only = False - - print("Unpacking project from zip {}".format(path_to_zip)) - if not os.path.exists(path_to_zip): - print("Zip file does not exists: {}".format(path_to_zip)) - return - - tmp_dir = tempfile.mkdtemp(prefix="unpack_") - print("Zip is extracted to temp: {}".format(tmp_dir)) - with zipfile.ZipFile(path_to_zip, "r") as zip_stream: - if database_only: - for filename in ( - "{}.json".format(METADATA_FILE_NAME), - "{}.json".format(DOCUMENTS_FILE_NAME), - ): - zip_stream.extract(filename, tmp_dir) - else: - zip_stream.extractall(tmp_dir) - - metadata_json_path = os.path.join(tmp_dir, METADATA_FILE_NAME + ".json") - with open(metadata_json_path, "r") as stream: - metadata = json.load(stream) - - docs_json_path = os.path.join(tmp_dir, DOCUMENTS_FILE_NAME + ".json") - docs = load_json_file(docs_json_path) - - low_platform = platform.system().lower() - project_name = metadata["project_name"] - root_path = metadata["root"].get(low_platform) - - # Drop existing collection - replace_project_documents(project_name, docs, database_name) - print("Creating project documents ({})".format(len(docs))) - - # Skip change of root if is the same as the one stored in metadata - if ( - new_root - and (os.path.normpath(new_root) == os.path.normpath(root_path)) - ): - new_root = None - - if new_root: - print("Using different root path {}".format(new_root)) - root_path = new_root - - project_doc = get_project_document(project_name) - roots = project_doc["config"]["roots"] - key = tuple(roots.keys())[0] - update_key = "config.roots.{}.{}".format(key, low_platform) - collection = get_project_connection(project_name, database_name) - collection.update_one( - {"_id": project_doc["_id"]}, - {"$set": { - update_key: new_root - }} - ) - - _unpack_project_files(tmp_dir, root_path, project_name) - - # CLeanup - print("Cleaning up") - shutil.rmtree(tmp_dir) - print("*** Unpack finished ***") diff --git a/openpype/lib/pype_info.py b/openpype/lib/pype_info.py deleted file mode 100644 index 2f57d76850..0000000000 --- a/openpype/lib/pype_info.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import json -import datetime -import platform -import getpass -import socket - -from openpype import AYON_SERVER_ENABLED -from openpype.settings.lib import get_local_settings -from .execute import get_openpype_execute_args -from .local_settings import get_local_site_id -from .openpype_version import ( - is_running_from_build, - get_openpype_version, - get_build_version -) - - -def get_openpype_info(): - """Information about currently used Pype process.""" - executable_args = get_openpype_execute_args() - if is_running_from_build(): - version_type = "build" - else: - version_type = "code" - - return { - "build_verison": get_build_version(), 
-        "version": get_openpype_version(),
-        "version_type": version_type,
-        "executable": executable_args[-1],
-        "pype_root": os.environ["OPENPYPE_REPOS_ROOT"],
-        "mongo_url": os.environ["OPENPYPE_MONGO"]
-    }
-
-
-def get_ayon_info():
-    executable_args = get_openpype_execute_args()
-    if is_running_from_build():
-        version_type = "build"
-    else:
-        version_type = "code"
-    return {
-        "build_verison": get_build_version(),
-        "version_type": version_type,
-        "executable": executable_args[-1],
-        "ayon_root": os.environ["AYON_ROOT"],
-        "server_url": os.environ["AYON_SERVER_URL"]
-    }
-
-
-def get_workstation_info():
-    """Basic information about workstation."""
-    host_name = socket.gethostname()
-    try:
-        host_ip = socket.gethostbyname(host_name)
-    except socket.gaierror:
-        host_ip = "127.0.0.1"
-
-    return {
-        "hostname": host_name,
-        "hostip": host_ip,
-        "username": getpass.getuser(),
-        "system_name": platform.system(),
-        "local_id": get_local_site_id()
-    }
-
-
-def get_all_current_info():
-    """All information about current process in one dictionary."""
-
-    output = {
-        "workstation": get_workstation_info(),
-        "env": os.environ.copy(),
-        "local_settings": get_local_settings()
-    }
-    if AYON_SERVER_ENABLED:
-        output["ayon"] = get_ayon_info()
-    else:
-        output["openpype"] = get_openpype_info()
-    return output
-
-
-def extract_pype_info_to_file(dirpath):
-    """Extract all current info to a file.
-
-    It is possible to define only the directory path. The filename is
-    concatenated from the pype version, workstation site id and timestamp.
-
-    Args:
-        dirpath (str): Path to directory where file will be stored.
-
-    Returns:
-        filepath (str): Full path to file where data were extracted.
-    """
-    filename = "{}_{}_{}.json".format(
-        get_openpype_version(),
-        get_local_site_id(),
-        datetime.datetime.now().strftime("%y%m%d%H%M%S")
-    )
-    filepath = os.path.join(dirpath, filename)
-    data = get_all_current_info()
-    if not os.path.exists(dirpath):
-        os.makedirs(dirpath)
-
-    with open(filepath, "w") as file_stream:
-        json.dump(data, file_stream, indent=4)
-    return filepath
diff --git a/openpype/modules/README.md b/openpype/modules/README.md
deleted file mode 100644
index ce3f99b338..0000000000
--- a/openpype/modules/README.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# OpenPype modules/addons
-OpenPype modules should contain separated logic for a specific kind of implementation, such as the Ftrack connection and its usage code or Deadline farm rendering, or they may contain only special plugins. Addons currently work the same way; there is no difference between module and addon functionality.
-
-## Modules concept
-- modules and addons are dynamically imported into the virtual python module `openpype_modules`, from which it is possible to import them no matter where the module is located
-- modules or addons should never be imported directly, even if you know the possible full import path
-  - this is because all of their content must be imported in a specific order and should not be imported without the defined functions, as that may break a few implementation parts
-
-### TODOs
-- add module/addon manifest
-  - definition of module (not 100% defined content e.g. minimum required OpenPype version etc.)
-  - defining a folder as the content of a module or an addon
-
-## Base class `OpenPypeModule`
-- abstract class used as a base for each module
-- the implementation should contain the module's api without GUI parts
-- may implement the `get_global_environments` method, which should return a dictionary of environments that are globally applicable and whose values are the same for the whole studio when launched at any workstation (except os specific paths)
-- abstract parts:
-  - `name` attribute - name of a module
-  - `initialize` method - method for a module's own initialization (should not override `__init__`)
-  - `connect_with_modules` method - where a module may look for implementations of its interfaces or check for other modules
-- `__init__` should not be overridden, and `initialize` should not do time-consuming work but only prepare base data about the module
-  - also keep in mind that modules may be initialized in headless mode
-- connection with other modules is made with the help of interfaces
-- `cli` method - adds cli commands specific to the module
-  - command line arguments are handled using the `click` python module
-  - the `cli` method should expect a single argument, which is a click group on which any group specific methods can be called (e.g. `add_command` to add another click group as children, see `ExampleAddon`)
-  - it is then possible to trigger cli commands using `./openpype_console module *args`
-
-## Addon class `OpenPypeAddOn`
-- inherits from `OpenPypeModule` but is enabled by default and doesn't have to implement the `initialize` and `connect_with_modules` methods
-  - that is because it is expected that addons don't need to have system settings and an `enabled` value on them (but it is possible...)
-
-## How to add addons/modules
-- in System settings go to `modules/addon_paths` (`Modules/OpenPype AddOn Paths`) where you have to add a path to the addon root folder
-- for openpype example addons use `{OPENPYPE_REPOS_ROOT}/openpype/modules/example_addons`
-
-## Addon/module settings
-- addons/modules may have custom settings definitions defined with default values
-- it is based on the settings type `dynamic_schema`, which has a `name`
-  - that item defines that it can be replaced dynamically with any schemas from a module or an addon, which won't be saved to openpype core defaults
  - they can't be added to any schema hierarchy
-  - the item must not be in a settings group (under overrides) or in a dynamic item (e.g. a `list` of `dict-modifiable`)
-  - addons may define their own dynamic schema items
-- they can be defined with a class which inherits from `BaseModuleSettingsDef`
-  - it is recommended to use the pre-implemented `JsonFilesSettingsDef`, which defines the structure and uses json files to define dynamic schemas, schemas and default values
-  - check its docstring and look for `example_addon` in the example addons
-- the settings definition returns schemas by dynamic schema names
-
-# Interfaces
-- an interface is a class that has abstract methods to implement and may contain pre-implemented helper methods
-- a module that inherits from an interface must implement those abstract methods, otherwise it won't be initialized
-- it is easy to find out which module object inherited from which interfaces, with a 100% chance that they have implemented the required methods
-- interfaces can be defined in `interfaces.py` inside the module directory
-  - the file can't use relative imports or import anything from other parts
-    of the module itself at the header of the file
-  - this is one of the reasons why modules/addons can't be imported directly without using the defined functions in the OpenPype modules implementation
-
-## Base class `OpenPypeInterface`
-- has nothing implemented
-- has ABCMeta as metaclass
-- is defined to be able to find out classes which inherit from this base, to
-  be able to tell this is an Interface
-
-## Global interfaces
-- a few interfaces are implemented for global usage
-
-### IPluginPaths
-- the module wants to add directory path/s to avalon or publish plugins
-- the module must implement `get_plugin_paths`, which must return a dictionary with the possible keys `"publish"`, `"load"`, `"create"` or `"actions"`
-  - each key may contain a list or a string with a path to a directory with plugins
-
-### ITrayModule
-- the module has more logic when used in a tray
-  - it is possible that a module can be used only in the tray
-- abstract methods
-  - `tray_init` - initialization triggered after `initialize` when used in `TrayModulesManager` and before `connect_with_modules`
-  - `tray_menu` - add actions to the tray widget's menu that represent the module
-  - `tray_start` - start of the module's logic in tray
-    - module is initialized and connected with other modules
-  - `tray_exit` - the module's cleanup, like stopping and joining threads etc.
-  - the order of calling is based on the implementation; this order is how it works with `TrayModulesManager`
-  - it is recommended to import and use GUI implementations only in these methods
-- has attribute `tray_initialized` (bool), which is set to False by default and is set by `TrayModulesManager` to True after `tray_init`
-  - if a module has logic only in tray, or for both, it should check the `tray_initialized` attribute to decide how to handle each situation
-
-### ITrayService
-- inherits from `ITrayModule` and implements the `tray_menu` method for you
-  - adds an action to the submenu "Services" in the tray widget menu with an icon and label
-- abstract attribute `label`
-  - label shown in menu
-- the interface has pre-implemented methods to change the icon color
-  - `set_service_running` - green icon
-  - `set_service_failed` - red icon
-  - `set_service_idle` - orange icon
-  - these states must be set by the module itself; `set_service_running` is the default state on initialization
-
-### ITrayAction
-- inherits from `ITrayModule` and implements the `tray_menu` method for you
-  - adds an action to the tray widget menu with a label
-- abstract attribute `label`
-  - label shown in menu
-- abstract method `on_action_trigger`
-  - what should happen when an action is triggered
-- NOTE: It is a good idea to implement the logic of `on_action_trigger` in an api method and trigger that method from callbacks. This gives the ability to trigger that method outside the tray
-
-## Modules interfaces
-- modules may have their own interfaces defined to be able to recognize other modules that want to use their features
-
-### Example:
-- the Ftrack module has `IFtrackEventHandlerPaths`, which helps to tell the Ftrack module which other modules want to add paths to server/user event handlers
-  - the Clockify module uses `IFtrackEventHandlerPaths` and returns paths to clockify ftrack synchronizers
-
-- Clockify inherits from more interfaces. Its class definition looks like:
-```
-class ClockifyModule(
-    OpenPypeModule,  # Says it's a Pype module, so ModulesManager will try to initialize it.
-    ITrayModule,  # Says it has a special implementation when used in tray.
-    IPluginPaths,  # Says it has plugin paths that it wants to register (paths to clockify actions for launcher).
-    IFtrackEventHandlerPaths,  # Says it has Ftrack actions/events for user/server.
-    ITimersManager  # Listens to other modules with a timer and can trigger changes in other module timers through the `TimerManager` module.
-): -``` - -### ModulesManager -- collects module classes and tries to initialize them -- important attributes - - `modules` - list of available attributes - - `modules_by_id` - dictionary of modules mapped by their ids - - `modules_by_name` - dictionary of modules mapped by their names - - all these attributes contain all found modules even if are not enabled -- helper methods - - `collect_global_environments` to collect all global environments from enabled modules with calling `get_global_environments` on each of them - - `collect_plugin_paths` collects plugin paths from all enabled modules - - output is always dictionary with all keys and values as an list - ``` - { - "publish": [], - "create": [], - "load": [], - "actions": [], - "inventory": [] - } - ``` - -### TrayModulesManager -- inherits from `ModulesManager` -- has specific implementation for Pype Tray tool and handle `ITrayModule` methods diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py deleted file mode 100644 index 87f3233afc..0000000000 --- a/openpype/modules/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -from . import click_wrap -from .interfaces import ( - ILaunchHookPaths, - IPluginPaths, - ITrayModule, - ITrayAction, - ITrayService, - ISettingsChangeListener, - IHostAddon, -) - -from .base import ( - AYONAddon, - OpenPypeModule, - OpenPypeAddOn, - - load_modules, - - ModulesManager, - TrayModulesManager, - - BaseModuleSettingsDef, - ModuleSettingsDef, - JsonFilesSettingsDef, - - get_module_settings_defs -) - - -__all__ = ( - "click_wrap", - - "ILaunchHookPaths", - "IPluginPaths", - "ITrayModule", - "ITrayAction", - "ITrayService", - "ISettingsChangeListener", - "IHostAddon", - - "AYONAddon", - "OpenPypeModule", - "OpenPypeAddOn", - - "load_modules", - - "ModulesManager", - "TrayModulesManager", - - "BaseModuleSettingsDef", - "ModuleSettingsDef", - "JsonFilesSettingsDef", - - "get_module_settings_defs" -) diff --git a/openpype/modules/asset_reporter/__init__.py b/openpype/modules/asset_reporter/__init__.py deleted file mode 100644 index 6267b4824b..0000000000 --- a/openpype/modules/asset_reporter/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .module import ( - AssetReporterAction -) - - -__all__ = ( - "AssetReporterAction", -) diff --git a/openpype/modules/asset_reporter/module.py b/openpype/modules/asset_reporter/module.py deleted file mode 100644 index 8c754cc3c0..0000000000 --- a/openpype/modules/asset_reporter/module.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path - -from openpype import AYON_SERVER_ENABLED -from openpype.modules import OpenPypeModule, ITrayAction -from openpype.lib import run_detached_process, get_openpype_execute_args - - -class AssetReporterAction(OpenPypeModule, ITrayAction): - - label = "Asset Usage Report" - name = "asset_reporter" - - def tray_init(self): - pass - - def initialize(self, modules_settings): - self.enabled = not AYON_SERVER_ENABLED - - def on_action_trigger(self): - args = get_openpype_execute_args() - args += ["run", - os.path.join( - os.path.dirname(__file__), - "window.py")] - - print(" ".join(args)) - run_detached_process(args) diff --git a/openpype/modules/asset_reporter/window.py b/openpype/modules/asset_reporter/window.py deleted file mode 100644 index ed3bc298e1..0000000000 --- a/openpype/modules/asset_reporter/window.py +++ /dev/null @@ -1,418 +0,0 @@ -"""Tool for generating asset usage report. - -This tool is used to generate asset usage report for a project. 
-It is using links between published version to find out where -the asset is used. - -""" - -import csv -import time - -import appdirs -import qtawesome -from pymongo.collection import Collection -from qtpy import QtCore, QtWidgets -from qtpy.QtGui import QClipboard, QColor - -from openpype import style -from openpype.client import OpenPypeMongoConnection -from openpype.lib import JSONSettingRegistry -from openpype.tools.utils import PlaceholderLineEdit, get_openpype_qt_app -from openpype.tools.utils.constants import PROJECT_NAME_ROLE -from openpype.tools.utils.models import ProjectModel, ProjectSortFilterProxy - - -class AssetReporterRegistry(JSONSettingRegistry): - """Class handling OpenPype general settings registry. - - This is used to store last selected project. - - Attributes: - vendor (str): Name used for path construction. - product (str): Additional name used for path construction. - - """ - - def __init__(self): - self.vendor = "ynput" - self.product = "openpype" - name = "asset_usage_reporter" - path = appdirs.user_data_dir(self.product, self.vendor) - super(AssetReporterRegistry, self).__init__(name, path) - - -class OverlayWidget(QtWidgets.QFrame): - """Overlay widget for choosing project. - - This code is taken from the Tray Publisher tool. - """ - project_selected = QtCore.Signal(str) - - def __init__(self, publisher_window): - super(OverlayWidget, self).__init__(publisher_window) - self.setObjectName("OverlayFrame") - - middle_frame = QtWidgets.QFrame(self) - middle_frame.setObjectName("ChooseProjectFrame") - - content_widget = QtWidgets.QWidget(middle_frame) - - header_label = QtWidgets.QLabel("Choose project", content_widget) - header_label.setObjectName("ChooseProjectLabel") - # Create project models and view - projects_model = ProjectModel() - projects_proxy = ProjectSortFilterProxy() - projects_proxy.setSourceModel(projects_model) - projects_proxy.setFilterKeyColumn(0) - - projects_view = QtWidgets.QListView(content_widget) - projects_view.setObjectName("ChooseProjectView") - projects_view.setModel(projects_proxy) - projects_view.setEditTriggers( - QtWidgets.QAbstractItemView.NoEditTriggers - ) - - confirm_btn = QtWidgets.QPushButton("Confirm", content_widget) - cancel_btn = QtWidgets.QPushButton("Cancel", content_widget) - cancel_btn.setVisible(False) - btns_layout = QtWidgets.QHBoxLayout() - btns_layout.addStretch(1) - btns_layout.addWidget(cancel_btn, 0) - btns_layout.addWidget(confirm_btn, 0) - - txt_filter = PlaceholderLineEdit(content_widget) - txt_filter.setPlaceholderText("Quick filter projects..") - txt_filter.setClearButtonEnabled(True) - txt_filter.addAction(qtawesome.icon("fa.filter", color="gray"), - QtWidgets.QLineEdit.LeadingPosition) - - content_layout = QtWidgets.QVBoxLayout(content_widget) - content_layout.setContentsMargins(0, 0, 0, 0) - content_layout.setSpacing(20) - content_layout.addWidget(header_label, 0) - content_layout.addWidget(txt_filter, 0) - content_layout.addWidget(projects_view, 1) - content_layout.addLayout(btns_layout, 0) - - middle_layout = QtWidgets.QHBoxLayout(middle_frame) - middle_layout.setContentsMargins(30, 30, 10, 10) - middle_layout.addWidget(content_widget) - - main_layout = QtWidgets.QHBoxLayout(self) - main_layout.setContentsMargins(10, 10, 10, 10) - main_layout.addStretch(1) - main_layout.addWidget(middle_frame, 2) - main_layout.addStretch(1) - - projects_view.doubleClicked.connect(self._on_double_click) - confirm_btn.clicked.connect(self._on_confirm_click) - cancel_btn.clicked.connect(self._on_cancel_click) - 
txt_filter.textChanged.connect(self._on_text_changed) - - self._projects_view = projects_view - self._projects_model = projects_model - self._projects_proxy = projects_proxy - self._cancel_btn = cancel_btn - self._confirm_btn = confirm_btn - self._txt_filter = txt_filter - - self._publisher_window = publisher_window - self._project_name = None - - def showEvent(self, event): - self._projects_model.refresh() - # Sort projects after refresh - self._projects_proxy.sort(0) - - setting_registry = AssetReporterRegistry() - try: - project_name = str(setting_registry.get_item("project_name")) - except ValueError: - project_name = None - - if project_name: - index = None - src_index = self._projects_model.find_project(project_name) - if src_index is not None: - index = self._projects_proxy.mapFromSource(src_index) - - if index is not None: - selection_model = self._projects_view.selectionModel() - selection_model.select( - index, - QtCore.QItemSelectionModel.SelectCurrent - ) - self._projects_view.setCurrentIndex(index) - - self._cancel_btn.setVisible(self._project_name is not None) - super(OverlayWidget, self).showEvent(event) - - def _on_double_click(self): - self.set_selected_project() - - def _on_confirm_click(self): - self.set_selected_project() - - def _on_cancel_click(self): - self._set_project(self._project_name) - - def _on_text_changed(self): - self._projects_proxy.setFilterRegularExpression( - self._txt_filter.text()) - - def set_selected_project(self): - index = self._projects_view.currentIndex() - - if project_name := index.data(PROJECT_NAME_ROLE): - self._set_project(project_name) - - def _set_project(self, project_name): - self._project_name = project_name - self.setVisible(False) - self.project_selected.emit(project_name) - - setting_registry = AssetReporterRegistry() - setting_registry.set_item("project_name", project_name) - - -class AssetReporterWindow(QtWidgets.QDialog): - default_width = 1000 - default_height = 800 - _content = None - - def __init__(self, parent=None, controller=None, reset_on_show=None): - super(AssetReporterWindow, self).__init__(parent) - - self._result = {} - self.setObjectName("AssetReporterWindow") - - self.setWindowTitle("Asset Usage Reporter") - - if parent is None: - on_top_flag = QtCore.Qt.WindowStaysOnTopHint - else: - on_top_flag = QtCore.Qt.Dialog - - self.setWindowFlags( - QtCore.Qt.WindowTitleHint - | QtCore.Qt.WindowMaximizeButtonHint - | QtCore.Qt.WindowMinimizeButtonHint - | QtCore.Qt.WindowCloseButtonHint - | on_top_flag - ) - self.table = QtWidgets.QTableWidget(self) - self.table.setColumnCount(3) - self.table.setColumnWidth(0, 400) - self.table.setColumnWidth(1, 300) - self.table.setHorizontalHeaderLabels(["Subset", "Used in", "Version"]) - - # self.text_area = QtWidgets.QTextEdit(self) - self.copy_button = QtWidgets.QPushButton('Copy to Clipboard', self) - self.save_button = QtWidgets.QPushButton('Save to CSV File', self) - - self.copy_button.clicked.connect(self.copy_to_clipboard) - self.save_button.clicked.connect(self.save_to_file) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(self.table) - # layout.addWidget(self.text_area) - layout.addWidget(self.copy_button) - layout.addWidget(self.save_button) - - self.resize(self.default_width, self.default_height) - self.setStyleSheet(style.load_stylesheet()) - - overlay_widget = OverlayWidget(self) - overlay_widget.project_selected.connect(self._on_project_select) - self._overlay_widget = overlay_widget - - def _on_project_select(self, project_name: str): - """Generate table when 
project is selected. - - This will generate the table and fill it with data. - Source data are held in memory in `_result` attribute that - is used to transform them into clipboard or csv file. - """ - self._project_name = project_name - self.process() - if not self._result: - self.set_content("no result generated") - return - - rows = sum(len(value) for key, value in self._result.items()) - self.table.setRowCount(rows) - - row = 0 - content = [] - for key, value in self._result.items(): - item = QtWidgets.QTableWidgetItem(key) - # this doesn't work as it is probably overriden by stylesheet? - # item.setBackground(QColor(32, 32, 32)) - self.table.setItem(row, 0, item) - for source in value: - self.table.setItem( - row, 1, QtWidgets.QTableWidgetItem(source["name"])) - self.table.setItem( - row, 2, QtWidgets.QTableWidgetItem( - str(source["version"]))) - row += 1 - - # generate clipboard content - content.append(key) - content.extend( - f"\t{source['name']} (v{source['version']})" for source in value # noqa: E501 - ) - self.set_content("\n".join(content)) - - def copy_to_clipboard(self): - clipboard = QtWidgets.QApplication.clipboard() - clipboard.setText(self._content, QClipboard.Clipboard) - - def save_to_file(self): - file_name, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File') - if file_name: - self._write_csv(file_name) - - def set_content(self, content): - self._content = content - - def get_content(self): - return self._content - - def _resize_overlay(self): - self._overlay_widget.resize( - self.width(), - self.height() - ) - - def resizeEvent(self, event): - super(AssetReporterWindow, self).resizeEvent(event) - self._resize_overlay() - - def _get_subset(self, version_id, project: Collection): - pipeline = [ - { - "$match": { - "_id": version_id - }, - }, { - "$lookup": { - "from": project.name, - "localField": "parent", - "foreignField": "_id", - "as": "parents" - } - } - ] - - result = project.aggregate(pipeline) - doc = next(result) - # print(doc) - return { - "name": f'{"/".join(doc["parents"][0]["data"]["parents"])}/{doc["parents"][0]["name"]}/{doc["name"]}', # noqa: E501 - "family": doc["data"].get("family") or doc["data"].get("families")[0] # noqa: E501 - } - - def process(self): - """Generate asset usage report data. - - This is the main method of the tool. It is using MongoDB - aggregation pipeline to find all published versions that - are used as input for other published versions. Then it - generates a map of assets and their usage. - - """ - start = time.perf_counter() - project = self._project_name - - # get all versions of published workfiles that has non-empty - # inputLinks and connect it with their respective documents - # using ID. 
- pipeline = [ - { - "$match": { - "data.inputLinks": { - "$exists": True, - "$ne": [] - }, - "data.families": {"$in": ["workfile"]} - } - }, { - "$lookup": { - "from": project, - "localField": "data.inputLinks.id", - "foreignField": "_id", - "as": "linked_docs" - } - } - ] - - client = OpenPypeMongoConnection.get_mongo_client() - db = client["avalon"] - - result = db[project].aggregate(pipeline) - - asset_map = [] - # this is creating the map - for every workfile and its linked - # documents, create a dictionary with "source" and "refs" keys - # and resolve the subset name and version from the document - for doc in result: - source = { - "source": self._get_subset(doc["parent"], db[project]), - } - source["source"].update({"version": doc["name"]}) - refs = [] - version = '' - for linked in doc["linked_docs"]: - try: - version = f'v{linked["name"]}' - except KeyError: - if linked["type"] == "hero_version": - version = "hero" - finally: - refs.append({ - "subset": self._get_subset( - linked["parent"], db[project]), - "version": version - }) - - source["refs"] = refs - asset_map.append(source) - - grouped = {} - - # this will group the assets by subset name and version - for asset in asset_map: - for ref in asset["refs"]: - key = f'{ref["subset"]["name"]} ({ref["version"]})' - if key in grouped: - grouped[key].append(asset["source"]) - else: - grouped[key] = [asset["source"]] - self._result = grouped - - end = time.perf_counter() - - print(f"Finished in {end - start:0.4f} seconds", 2) - - def _write_csv(self, file_name: str) -> None: - """Write CSV file with results.""" - with open(file_name, "w", newline="") as csvfile: - writer = csv.writer(csvfile, delimiter=";") - writer.writerow(["Subset", "Used in", "Version"]) - for key, value in self._result.items(): - writer.writerow([key, "", ""]) - for source in value: - writer.writerow(["", source["name"], source["version"]]) - - -def main(): - app_instance = get_openpype_qt_app() - window = AssetReporterWindow() - window.show() - app_instance.exec_() - - -if __name__ == "__main__": - main() diff --git a/openpype/modules/avalon_apps/__init__.py b/openpype/modules/avalon_apps/__init__.py deleted file mode 100644 index baa21cc803..0000000000 --- a/openpype/modules/avalon_apps/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .avalon_app import AvalonModule - - -__all__ = ( - "AvalonModule", -) diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/avalon_apps/avalon_app.py deleted file mode 100644 index 57754793c4..0000000000 --- a/openpype/modules/avalon_apps/avalon_app.py +++ /dev/null @@ -1,122 +0,0 @@ -import os - -from openpype import AYON_SERVER_ENABLED -from openpype.modules import OpenPypeModule, ITrayModule - - -class AvalonModule(OpenPypeModule, ITrayModule): - name = "avalon" - - def initialize(self, modules_settings): - # This module is always enabled - self.enabled = True - - avalon_settings = modules_settings[self.name] - - thumbnail_root = os.environ.get("AVALON_THUMBNAIL_ROOT") - if not thumbnail_root: - thumbnail_root = avalon_settings["AVALON_THUMBNAIL_ROOT"] - - # Mongo timeout - avalon_mongo_timeout = os.environ.get("AVALON_TIMEOUT") - if not avalon_mongo_timeout: - avalon_mongo_timeout = avalon_settings["AVALON_TIMEOUT"] - - self.thumbnail_root = thumbnail_root - self.avalon_mongo_timeout = avalon_mongo_timeout - - # Tray attributes - self._library_loader_imported = None - self._library_loader_window = None - self.rest_api_obj = None - - def get_global_environments(self): - """Avalon global environments for 
pype implementation.""" - return { - # TODO thumbnails root should be multiplafrom - # - thumbnails root - "AVALON_THUMBNAIL_ROOT": self.thumbnail_root, - # - mongo timeout in ms - "AVALON_TIMEOUT": str(self.avalon_mongo_timeout), - } - - def tray_init(self): - # Add library tool - self._library_loader_imported = False - try: - from openpype.tools.libraryloader import LibraryLoaderWindow - - self._library_loader_imported = True - except Exception: - self.log.warning( - "Couldn't load Library loader tool for tray.", - exc_info=True - ) - - # Definition of Tray menu - def tray_menu(self, tray_menu): - if not self._library_loader_imported: - return - - from qtpy import QtWidgets - # Actions - action_library_loader = QtWidgets.QAction( - "Loader", tray_menu - ) - - action_library_loader.triggered.connect(self.show_library_loader) - - tray_menu.addAction(action_library_loader) - - def tray_start(self, *_a, **_kw): - return - - def tray_exit(self, *_a, **_kw): - return - - def show_library_loader(self): - if self._library_loader_window is None: - from openpype.pipeline import install_openpype_plugins - if AYON_SERVER_ENABLED: - self._init_ayon_loader() - else: - self._init_library_loader() - - install_openpype_plugins() - - self._library_loader_window.show() - - # Raise and activate the window - # for MacOS - self._library_loader_window.raise_() - # for Windows - self._library_loader_window.activateWindow() - - # Webserver module implementation - def webserver_initialization(self, server_manager): - """Add routes for webserver.""" - if self.tray_initialized: - from .rest_api import AvalonRestApiResource - self.rest_api_obj = AvalonRestApiResource(self, server_manager) - - def _init_library_loader(self): - from qtpy import QtCore - from openpype.tools.libraryloader import LibraryLoaderWindow - - libraryloader = LibraryLoaderWindow( - show_projects=True, - show_libraries=True - ) - # Remove always on top flag for tray - window_flags = libraryloader.windowFlags() - if window_flags | QtCore.Qt.WindowStaysOnTopHint: - window_flags ^= QtCore.Qt.WindowStaysOnTopHint - libraryloader.setWindowFlags(window_flags) - self._library_loader_window = libraryloader - - def _init_ayon_loader(self): - from openpype.tools.ayon_loader.ui import LoaderWindow - - libraryloader = LoaderWindow() - - self._library_loader_window = libraryloader diff --git a/openpype/modules/avalon_apps/rest_api.py b/openpype/modules/avalon_apps/rest_api.py deleted file mode 100644 index a52ce1b6df..0000000000 --- a/openpype/modules/avalon_apps/rest_api.py +++ /dev/null @@ -1,131 +0,0 @@ -import json -import datetime - -from bson.objectid import ObjectId - -from aiohttp.web_response import Response - -from openpype.client import ( - get_projects, - get_project, - get_assets, - get_asset_by_name, -) -from openpype_modules.webserver.base_routes import RestApiEndpoint - - -class _RestApiEndpoint(RestApiEndpoint): - def __init__(self, resource): - self.resource = resource - super(_RestApiEndpoint, self).__init__() - - -class AvalonProjectsEndpoint(_RestApiEndpoint): - async def get(self) -> Response: - output = [ - project_doc - for project_doc in get_projects() - ] - return Response( - status=200, - body=self.resource.encode(output), - content_type="application/json" - ) - - -class AvalonProjectEndpoint(_RestApiEndpoint): - async def get(self, project_name) -> Response: - project_doc = get_project(project_name) - if project_doc: - return Response( - status=200, - body=self.resource.encode(project_doc), - content_type="application/json" - ) - 
return Response( - status=404, - reason="Project name {} not found".format(project_name) - ) - - -class AvalonAssetsEndpoint(_RestApiEndpoint): - async def get(self, project_name) -> Response: - asset_docs = list(get_assets(project_name)) - return Response( - status=200, - body=self.resource.encode(asset_docs), - content_type="application/json" - ) - - -class AvalonAssetEndpoint(_RestApiEndpoint): - async def get(self, project_name, asset_name) -> Response: - asset_doc = get_asset_by_name(project_name, asset_name) - if asset_doc: - return Response( - status=200, - body=self.resource.encode(asset_doc), - content_type="application/json" - ) - return Response( - status=404, - reason="Asset name {} not found in project {}".format( - asset_name, project_name - ) - ) - - -class AvalonRestApiResource: - def __init__(self, avalon_module, server_manager): - self.module = avalon_module - self.server_manager = server_manager - - self.prefix = "/avalon" - - self.endpoint_defs = ( - ( - "GET", - "/projects", - AvalonProjectsEndpoint(self) - ), - ( - "GET", - "/projects/{project_name}", - AvalonProjectEndpoint(self) - ), - ( - "GET", - "/projects/{project_name}/assets", - AvalonAssetsEndpoint(self) - ), - ( - "GET", - "/projects/{project_name}/assets/{asset_name}", - AvalonAssetEndpoint(self) - ) - ) - - self.register() - - def register(self): - for methods, url, endpoint in self.endpoint_defs: - final_url = self.prefix + url - self.server_manager.add_route( - methods, final_url, endpoint.dispatch - ) - - @staticmethod - def json_dump_handler(value): - if isinstance(value, datetime.datetime): - return value.isoformat() - if isinstance(value, ObjectId): - return str(value) - raise TypeError(value) - - @classmethod - def encode(cls, data): - return json.dumps( - data, - indent=4, - default=cls.json_dump_handler - ).encode("utf-8") diff --git a/openpype/modules/base.py b/openpype/modules/base.py deleted file mode 100644 index cb64816cc9..0000000000 --- a/openpype/modules/base.py +++ /dev/null @@ -1,1929 +0,0 @@ -# -*- coding: utf-8 -*- -"""Base class for AYON addons.""" -import copy -import os -import sys -import json -import time -import inspect -import logging -import platform -import threading -import collections -import traceback - -from uuid import uuid4 -from abc import ABCMeta, abstractmethod - -import six -import appdirs - -from openpype import AYON_SERVER_ENABLED -from openpype.client import get_ayon_server_api_connection -from openpype.settings import ( - get_system_settings, - SYSTEM_SETTINGS_KEY, - PROJECT_SETTINGS_KEY, - SCHEMA_KEY_SYSTEM_SETTINGS, - SCHEMA_KEY_PROJECT_SETTINGS -) - -from openpype.settings.lib import ( - get_studio_system_settings_overrides, - load_json_file, -) -from openpype.settings.ayon_settings import ( - is_dev_mode_enabled, - get_ayon_settings, -) - -from openpype.lib import ( - Logger, - import_filepath, - import_module_from_dirpath, -) - -from .interfaces import ( - OpenPypeInterface, - IPluginPaths, - IHostAddon, - ITrayModule, - ITrayService -) - -# Files that will be always ignored on addons import -IGNORED_FILENAMES = ( - "__pycache__", -) -# Files ignored on addons import from "./openpype/modules" -IGNORED_DEFAULT_FILENAMES = ( - "__init__.py", - "base.py", - "interfaces.py", - "example_addons", - "default_modules", -) -# Addons that won't be loaded in AYON mode from "./openpype/modules" -# - the same addons are ignored in "./server_addon/create_ayon_addons.py" -IGNORED_FILENAMES_IN_AYON = { - "ftrack", - "shotgrid", - "sync_server", - "slack", - "kitsu", -} 
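For context on what the removed `rest_api.py` provided: the resource registered four read-only routes under the `/avalon` prefix. Below is a minimal sketch of how a client could query them, assuming the tray webserver is running; the port is hypothetical and depends on the webserver module configuration.

```python
import json
import urllib.request

# Hypothetical base URL - the real port is defined by the webserver module.
BASE_URL = "http://localhost:8079/avalon"


def get_json(url):
    # Fetch a route and decode the JSON payload.
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read().decode("utf-8"))


# List all projects, then count the assets of the first one.
projects = get_json(BASE_URL + "/projects")
if projects:
    project_name = projects[0]["name"]
    assets = get_json("{}/projects/{}/assets".format(BASE_URL, project_name))
    print(project_name, len(assets))
```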
-IGNORED_HOSTS_IN_AYON = {
-    "flame",
-    "harmony",
-}
-
-
-# Inherit from `object` for Python 2 hosts
-class _ModuleClass(object):
-    """Fake module class for storing OpenPype modules.
-
-    Object of this class can be stored to `sys.modules` and used for storing
-    dynamically imported modules.
-    """
-
-    def __init__(self, name):
-        # Call setattr on super class
-        super(_ModuleClass, self).__setattr__("name", name)
-        super(_ModuleClass, self).__setattr__("__name__", name)
-
-        # Where modules and interfaces are stored
-        super(_ModuleClass, self).__setattr__("__attributes__", dict())
-        super(_ModuleClass, self).__setattr__("__defaults__", set())
-
-        super(_ModuleClass, self).__setattr__("_log", None)
-
-    def __getattr__(self, attr_name):
-        if attr_name not in self.__attributes__:
-            if attr_name in ("__path__", "__file__"):
-                return None
-            raise AttributeError("'{}' has no attribute '{}'".format(
-                self.name, attr_name
-            ))
-        return self.__attributes__[attr_name]
-
-    def __iter__(self):
-        for module in self.values():
-            yield module
-
-    def __setattr__(self, attr_name, value):
-        if attr_name in self.__attributes__:
-            self.log.warning(
-                "Duplicated name \"{}\" in {}. Overriding.".format(
-                    attr_name, self.name
-                )
-            )
-        self.__attributes__[attr_name] = value
-
-    def __setitem__(self, key, value):
-        self.__setattr__(key, value)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    @property
-    def log(self):
-        if self._log is None:
-            super(_ModuleClass, self).__setattr__(
-                "_log", Logger.get_logger(self.name)
-            )
-        return self._log
-
-    def get(self, key, default=None):
-        return self.__attributes__.get(key, default)
-
-    def keys(self):
-        return self.__attributes__.keys()
-
-    def values(self):
-        return self.__attributes__.values()
-
-    def items(self):
-        return self.__attributes__.items()
-
-
-class _InterfacesClass(_ModuleClass):
-    """Fake module class for storing OpenPype interfaces.
-
-    A MissingInterface object is returned if an interface does not exist
-    - this is because interfaces must be available even if their
-    implementation is missing.
-    """
-
-    def __getattr__(self, attr_name):
-        if attr_name not in self.__attributes__:
-            if attr_name in ("__path__", "__file__"):
-                return None
-
-            raise AttributeError((
-                "cannot import name '{}' from 'openpype_interfaces'"
-            ).format(attr_name))
-
-        if _LoadCache.interfaces_loaded and attr_name != "log":
-            stack = list(traceback.extract_stack())
-            stack.pop(-1)
-            self.log.warning((
-                "Using deprecated import of \"{}\" from 'openpype_interfaces'."
-                " Please switch to importing"
-                " from 'openpype.modules.interfaces'"
-                " (will be removed after 3.16.x).{}"
-            ).format(attr_name, "".join(traceback.format_list(stack))))
-        return self.__attributes__[attr_name]
-
-
-class _LoadCache:
-    interfaces_lock = threading.Lock()
-    modules_lock = threading.Lock()
-    interfaces_loaded = False
-    modules_loaded = False
-
-
-def get_default_modules_dir():
-    """Path to default OpenPype modules."""
-
-    current_dir = os.path.dirname(os.path.abspath(__file__))
-
-    output = []
-    for folder_name in ("default_modules", ):
-        path = os.path.join(current_dir, folder_name)
-        if os.path.exists(path) and os.path.isdir(path):
-            output.append(path)
-
-    return output
-
-
-def get_dynamic_modules_dirs():
-    """Possible paths to OpenPype Addons or Modules.
-
-    Paths are loaded from studio settings under:
-        `modules -> addon_paths -> {platform name}`
-
-    Paths may contain environment variables as formatting strings.
-
-    They are not validated, nor is their existence checked.
-
-    Returns:
-        list: Paths loaded from studio overrides.
-    """
-
-    output = []
-    if AYON_SERVER_ENABLED:
-        return output
-
-    value = get_studio_system_settings_overrides()
-    for key in ("modules", "addon_paths", platform.system().lower()):
-        if key not in value:
-            return output
-        value = value[key]
-
-    for path in value:
-        if not path:
-            continue
-
-        try:
-            path = path.format(**os.environ)
-        except Exception:
-            pass
-        output.append(path)
-    return output
-
-
-def get_module_dirs():
-    """List of paths where OpenPype modules can be found."""
-    _dirpaths = []
-    _dirpaths.extend(get_default_modules_dir())
-    _dirpaths.extend(get_dynamic_modules_dirs())
-
-    dirpaths = []
-    for path in _dirpaths:
-        if not path:
-            continue
-        normalized = os.path.normpath(path)
-        if normalized not in dirpaths:
-            dirpaths.append(normalized)
-    return dirpaths
-
-
-def load_interfaces(force=False):
-    """Load interfaces from modules into `openpype_interfaces`.
-
-    Only classes which inherit from `OpenPypeInterface` are loaded and stored.
-
-    Args:
-        force(bool): Force loading of interfaces even if they are already
-            loaded. This won't update already loaded and used (cached)
-            interfaces.
-    """
-
-    if _LoadCache.interfaces_loaded and not force:
-        return
-
-    if not _LoadCache.interfaces_lock.locked():
-        with _LoadCache.interfaces_lock:
-            _load_interfaces()
-            _LoadCache.interfaces_loaded = True
-    else:
-        # If the lock is locked, wait until it is released
-        while _LoadCache.interfaces_lock.locked():
-            time.sleep(0.1)
-
-
-def _load_interfaces():
-    # Key under which modules will be imported in `sys.modules`
-    modules_key = "openpype_interfaces"
-
-    sys.modules[modules_key] = openpype_interfaces = (
-        _InterfacesClass(modules_key)
-    )
-
-    from . import interfaces
-
-    for attr_name in dir(interfaces):
-        attr = getattr(interfaces, attr_name)
-        if (
-            not inspect.isclass(attr)
-            or attr is OpenPypeInterface
-            or not issubclass(attr, OpenPypeInterface)
-        ):
-            continue
-        setattr(openpype_interfaces, attr_name, attr)
-
-
-def load_modules(force=False):
-    """Load OpenPype modules as python modules.
-
-    Module loading does not load only classes (like interface loading does),
-    because it must be possible to use a module's inner code and to import it
-    from one defined place.
-
-    With this it is possible to import a module's content from a predefined
-    module.
-
-    The function makes sure that `load_interfaces` was triggered. Module
-    imports have a specific order which can't be changed.
-
-    Args:
-        force(bool): Force loading of modules even if they are already
-            loaded. This won't update already loaded and used (cached)
-            modules.
-    """
-
-    if _LoadCache.modules_loaded and not force:
-        return
-
-    # First load interfaces
-    # - modules must not be imported before interfaces
-    load_interfaces(force)
-
-    if not _LoadCache.modules_lock.locked():
-        with _LoadCache.modules_lock:
-            _load_modules()
-            _LoadCache.modules_loaded = True
-    else:
-        # If the lock is locked, wait until it is released
-        while _LoadCache.modules_lock.locked():
-            time.sleep(0.1)
-
-
-def _get_ayon_bundle_data():
-    con = get_ayon_server_api_connection()
-    bundles = con.get_bundles()["bundles"]
-
-    bundle_name = os.getenv("AYON_BUNDLE_NAME")
-
-    return next(
-        (
-            bundle
-            for bundle in bundles
-            if bundle["name"] == bundle_name
-        ),
-        None
-    )
-
-
-def _get_ayon_addons_information(bundle_info):
-    """Receive information about addons to use from the server.
-
-    Todos:
-        Actually ask the server for the information.
-        Allow project name as an optional argument to be able to query
-        information about used addons for a specific project.
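The path expansion in `get_dynamic_modules_dirs` above is deliberately forgiving: a template referencing a missing environment variable is kept verbatim rather than dropped. A standalone sketch of that expansion together with the deduplication done by `get_module_dirs` (the paths are illustrative, not real studio settings):

```python
import os

# Illustrative values as they could appear in studio settings.
raw_paths = [
    "{OPENPYPE_ROOT}/custom_modules",  # expanded only if the env var exists
    "/studio/shared/addons",
    "/studio/shared/addons/",  # normalizes to the same path as the previous
]

dirpaths = []
for path in raw_paths:
    try:
        # Same logic as 'get_dynamic_modules_dirs': failures keep the raw path.
        path = path.format(**os.environ)
    except Exception:
        pass
    normalized = os.path.normpath(path)
    if normalized not in dirpaths:
        dirpaths.append(normalized)

print(dirpaths)
```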
- - Returns: - List[Dict[str, Any]]: List of addon information to use. - """ - - output = [] - bundle_addons = bundle_info["addons"] - con = get_ayon_server_api_connection() - addons = con.get_addons_info()["addons"] - for addon in addons: - name = addon["name"] - versions = addon.get("versions") - addon_version = bundle_addons.get(name) - if addon_version is None or not versions: - continue - version = versions.get(addon_version) - if version: - version = copy.deepcopy(version) - version["name"] = name - version["version"] = addon_version - output.append(version) - return output - - -def _load_ayon_addons(openpype_modules, modules_key, log): - """Load AYON addons based on information from server. - - This function should not trigger downloading of any addons but only use - what is already available on the machine (at least in first stages of - development). - - Args: - openpype_modules (_ModuleClass): Module object where modules are - stored. - log (logging.Logger): Logger object. - - Returns: - List[str]: List of v3 addons to skip to load because v4 alternative is - imported. - """ - - v3_addons_to_skip = [] - - bundle_info = _get_ayon_bundle_data() - addons_info = _get_ayon_addons_information(bundle_info) - if not addons_info: - return v3_addons_to_skip - - addons_dir = os.environ.get("AYON_ADDONS_DIR") - if not addons_dir: - addons_dir = os.path.join( - appdirs.user_data_dir("AYON", "Ynput"), - "addons" - ) - - dev_mode_enabled = is_dev_mode_enabled() - dev_addons_info = {} - if dev_mode_enabled: - # Get dev addons info only when dev mode is enabled - dev_addons_info = bundle_info.get("addonDevelopment", dev_addons_info) - - addons_dir_exists = os.path.exists(addons_dir) - if not addons_dir_exists: - log.warning("Addons directory does not exists. Path \"{}\"".format( - addons_dir - )) - - for addon_info in addons_info: - addon_name = addon_info["name"] - addon_version = addon_info["version"] - - # OpenPype addon does not have any addon object - if addon_name == "openpype": - continue - - dev_addon_info = dev_addons_info.get(addon_name, {}) - use_dev_path = dev_addon_info.get("enabled", False) - - addon_dir = None - if use_dev_path: - addon_dir = dev_addon_info["path"] - if not addon_dir or not os.path.exists(addon_dir): - log.warning(( - "Dev addon {} {} path does not exists. Path \"{}\"" - ).format(addon_name, addon_version, addon_dir)) - continue - - elif addons_dir_exists: - folder_name = "{}_{}".format(addon_name, addon_version) - addon_dir = os.path.join(addons_dir, folder_name) - if not os.path.exists(addon_dir): - log.debug(( - "No localized client code found for addon {} {}." - ).format(addon_name, addon_version)) - continue - - if not addon_dir: - continue - - sys.path.insert(0, addon_dir) - imported_modules = [] - for name in os.listdir(addon_dir): - # Ignore of files is implemented to be able to run code from code - # where usually is more files than just the addon - # Ignore start and setup scripts - if name in ("setup.py", "start.py", "__pycache__"): - continue - - path = os.path.join(addon_dir, name) - basename, ext = os.path.splitext(name) - # Ignore folders/files with dot in name - # - dot names cannot be imported in Python - if "." 
in basename: - continue - is_dir = os.path.isdir(path) - is_py_file = ext.lower() == ".py" - if not is_py_file and not is_dir: - continue - - try: - mod = __import__(basename, fromlist=("",)) - for attr_name in dir(mod): - attr = getattr(mod, attr_name) - if ( - inspect.isclass(attr) - and issubclass(attr, AYONAddon) - ): - imported_modules.append(mod) - break - - except BaseException: - log.warning( - "Failed to import \"{}\"".format(basename), - exc_info=True - ) - - if not imported_modules: - log.warning("Addon {} {} has no content to import".format( - addon_name, addon_version - )) - continue - - if len(imported_modules) > 1: - log.warning(( - "Skipping addon '{}'." - " Multiple modules were found ({}) in dir {}." - ).format( - addon_name, - ", ".join([m.__name__ for m in imported_modules]), - addon_dir, - )) - continue - - mod = imported_modules[0] - addon_alias = getattr(mod, "V3_ALIAS", None) - if not addon_alias: - addon_alias = addon_name - v3_addons_to_skip.append(addon_alias) - new_import_str = "{}.{}".format(modules_key, addon_alias) - - sys.modules[new_import_str] = mod - setattr(openpype_modules, addon_alias, mod) - - return v3_addons_to_skip - - -def _load_modules(): - # Key under which will be modules imported in `sys.modules` - modules_key = "openpype_modules" - - # Change `sys.modules` - sys.modules[modules_key] = openpype_modules = _ModuleClass(modules_key) - - log = Logger.get_logger("ModulesLoader") - - ignore_addon_names = [] - if AYON_SERVER_ENABLED: - ignore_addon_names = _load_ayon_addons( - openpype_modules, modules_key, log - ) - - # Look for OpenPype modules in paths defined with `get_module_dirs` - # - dynamically imported OpenPype modules and addons - module_dirs = get_module_dirs() - - # Add current directory at first place - # - has small differences in import logic - current_dir = os.path.abspath(os.path.dirname(__file__)) - hosts_dir = os.path.join(os.path.dirname(current_dir), "hosts") - module_dirs.insert(0, hosts_dir) - module_dirs.insert(0, current_dir) - - addons_dir = os.path.join(os.path.dirname(current_dir), "addons") - if os.path.exists(addons_dir): - module_dirs.append(addons_dir) - - ignored_host_names = set(IGNORED_HOSTS_IN_AYON) - ignored_current_dir_filenames = set(IGNORED_DEFAULT_FILENAMES) - if AYON_SERVER_ENABLED: - ignored_current_dir_filenames |= IGNORED_FILENAMES_IN_AYON - - processed_paths = set() - for dirpath in frozenset(module_dirs): - # Skip already processed paths - if dirpath in processed_paths: - continue - processed_paths.add(dirpath) - - if not os.path.exists(dirpath): - log.warning(( - "Could not find path when loading OpenPype modules \"{}\"" - ).format(dirpath)) - continue - - is_in_current_dir = dirpath == current_dir - is_in_host_dir = dirpath == hosts_dir - - for filename in os.listdir(dirpath): - # Ignore filenames - if filename in IGNORED_FILENAMES: - continue - - if ( - is_in_current_dir - and filename in ignored_current_dir_filenames - ): - continue - - if ( - is_in_host_dir - and filename in ignored_host_names - ): - continue - - fullpath = os.path.join(dirpath, filename) - basename, ext = os.path.splitext(filename) - - if basename in ignore_addon_names: - continue - - # Validations - if os.path.isdir(fullpath): - # Check existence of init file - init_path = os.path.join(fullpath, "__init__.py") - if not os.path.exists(init_path): - log.debug(( - "Module directory does not contain __init__.py" - " file {}" - ).format(fullpath)) - continue - - elif ext not in (".py", ): - continue - - # TODO add more logic how to 
define if folder is module or not - # - check manifest and content of manifest - try: - # Don't import dynamically current directory modules - if is_in_current_dir: - import_str = "openpype.modules.{}".format(basename) - new_import_str = "{}.{}".format(modules_key, basename) - default_module = __import__(import_str, fromlist=("", )) - sys.modules[new_import_str] = default_module - setattr(openpype_modules, basename, default_module) - - elif is_in_host_dir: - import_str = "openpype.hosts.{}".format(basename) - new_import_str = "{}.{}".format(modules_key, basename) - # Until all hosts are converted to be able use them as - # modules is this error check needed - try: - default_module = __import__( - import_str, fromlist=("", ) - ) - sys.modules[new_import_str] = default_module - setattr(openpype_modules, basename, default_module) - - except Exception: - log.warning( - "Failed to import host folder {}".format(basename), - exc_info=True - ) - - elif os.path.isdir(fullpath): - import_module_from_dirpath(dirpath, filename, modules_key) - - else: - module = import_filepath(fullpath) - setattr(openpype_modules, basename, module) - - except Exception: - if is_in_current_dir: - msg = "Failed to import default module '{}'.".format( - basename - ) - else: - msg = "Failed to import module '{}'.".format(fullpath) - log.error(msg, exc_info=True) - - -@six.add_metaclass(ABCMeta) -class AYONAddon(object): - """Base class of AYON addon. - - Attributes: - id (UUID): Addon object id. - enabled (bool): Is addon enabled. - name (str): Addon name. - - Args: - manager (ModulesManager): Manager object who discovered addon. - settings (dict[str, Any]): AYON settings. - """ - - enabled = True - _id = None - - def __init__(self, manager, settings): - self.manager = manager - - self.log = Logger.get_logger(self.name) - - self.initialize(settings) - - @property - def id(self): - """Random id of addon object. - - Returns: - str: Object id. - """ - - if self._id is None: - self._id = uuid4() - return self._id - - @property - @abstractmethod - def name(self): - """Addon name. - - Returns: - str: Addon name. - """ - - pass - - def initialize(self, settings): - """Initialization of module attributes. - - It is not recommended to override __init__ that's why specific method - was implemented. - - Args: - settings (dict[str, Any]): Settings. - """ - - pass - - def connect_with_modules(self, enabled_addons): - """Connect with other enabled addons. - - Args: - enabled_addons (list[AYONAddon]): Addons that are enabled. - """ - - pass - - def get_global_environments(self): - """Get global environments values of module. - - Environment variables that can be get only from system settings. - - Returns: - dict[str, str]: Environment variables. - """ - - return {} - - def modify_application_launch_arguments(self, application, env): - """Give option to modify launch environments before application launch. - - Implementation is optional. To change environments modify passed - dictionary of environments. - - Args: - application (Application): Application that is launched. - env (dict[str, str]): Current environment variables. - """ - - pass - - def on_host_install(self, host, host_name, project_name): - """Host was installed which gives option to handle in-host logic. - - It is a good option to register in-host event callbacks which are - specific for the module. The module is kept in memory for rest of - the process. - - Arguments may change in future. E.g. 'host_name' should be possible - to receive from 'host' object. 
-
-    Args:
-        host (Union[ModuleType, HostBase]): Access to installed/registered
-            host object.
-        host_name (str): Name of host.
-        project_name (str): Project name which is main part of host
-            context.
-    """
-
-        pass
-
-    def cli(self, module_click_group):
-        """Add commands to click group.
-
-        The best practice is to create a click group for the whole module,
-        which is used to separate commands.
-
-        Example:
-            class MyPlugin(AYONAddon):
-                ...
-                def cli(self, module_click_group):
-                    module_click_group.add_command(cli_main)
-
-
-            @click.group(help="")
-            def cli_main():
-                pass
-
-            @cli_main.command()
-            def mycommand():
-                print("my_command")
-
-        Args:
-            module_click_group (click.Group): Group to which commands can be
-                added.
-        """
-
-        pass
-
-
-class OpenPypeModule(AYONAddon):
-    """Base class of OpenPype module.
-
-    Unlike 'AYONAddon', module settings are passed in instead of AYON
-    settings.
-
-    Args:
-        manager (ModulesManager): Manager object who discovered addon.
-        settings (dict[str, Any]): OpenPype settings.
-    """
-
-    # Disable by default
-    enabled = False
-
-
-class OpenPypeAddOn(OpenPypeModule):
-    # Enable Addon by default
-    enabled = True
-
-
-class ModulesManager:
-    """Manager of Pype modules which helps to load and prepare them for work.
-
-    Args:
-        system_settings (Optional[dict[str, Any]]): OpenPype system settings.
-        ayon_settings (Optional[dict[str, Any]]): AYON studio settings.
-    """
-
-    # Helper attributes for report
-    _report_total_key = "Total"
-    _system_settings = None
-    _ayon_settings = None
-
-    def __init__(self, system_settings=None, ayon_settings=None):
-        self.log = logging.getLogger(self.__class__.__name__)
-
-        self._system_settings = system_settings
-        self._ayon_settings = ayon_settings
-
-        self.modules = []
-        self.modules_by_id = {}
-        self.modules_by_name = {}
-        # For report of time consumption
-        self._report = {}
-
-        self.initialize_modules()
-        self.connect_modules()
-
-    def __getitem__(self, module_name):
-        return self.modules_by_name[module_name]
-
-    def get(self, module_name, default=None):
-        """Access module by name.
-
-        Args:
-            module_name (str): Name of module which should be returned.
-            default (Any): Default output if module is not available.
-
-        Returns:
-            Union[AYONAddon, None]: Module found by name or None.
-        """
-
-        return self.modules_by_name.get(module_name, default)
-
-    def get_enabled_module(self, module_name, default=None):
-        """Fast access to an enabled module.
-
-        If the module is available but not enabled, the default value is
-        returned.
-
-        Args:
-            module_name (str): Name of module which should be returned.
-            default (Any): Default output if module is not available or is
-                not enabled.
-
-        Returns:
-            Union[AYONAddon, None]: Enabled module found by name or None.
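Because the discovery in `initialize_modules` below only instantiates non-abstract subclasses found in the imported module namespaces, here is a minimal sketch of a module the manager would pick up; the class name and settings keys are illustrative, not an existing addon:

```python
from openpype.modules import OpenPypeModule


class ExampleModule(OpenPypeModule):
    """Illustrative module, not part of the codebase."""

    name = "example"

    def initialize(self, modules_settings):
        # OpenPype modules are disabled by default; honor the settings toggle.
        settings = modules_settings.get(self.name) or {}
        self.enabled = settings.get("enabled", False)
```

Once initialized, `manager.get_enabled_module("example")` would return the instance while the module is enabled, and the passed default otherwise.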
- """ - - module = self.get(module_name) - if module is not None and module.enabled: - return module - return default - - def initialize_modules(self): - """Import and initialize modules.""" - # Make sure modules are loaded - load_modules() - - import openpype_modules - - self.log.debug("*** {} initialization.".format( - "AYON addons" - if AYON_SERVER_ENABLED - else "OpenPype modules" - )) - # Prepare settings for modules - system_settings = self._system_settings - if system_settings is None: - system_settings = get_system_settings() - - ayon_settings = self._ayon_settings - if AYON_SERVER_ENABLED and ayon_settings is None: - ayon_settings = get_ayon_settings() - - modules_settings = system_settings["modules"] - - report = {} - time_start = time.time() - prev_start_time = time_start - - module_classes = [] - for module in openpype_modules: - # Go through globals in `pype.modules` - for name in dir(module): - modules_item = getattr(module, name, None) - # Filter globals that are not classes which inherit from - # AYONAddon - if ( - not inspect.isclass(modules_item) - or modules_item is AYONAddon - or modules_item is OpenPypeModule - or modules_item is OpenPypeAddOn - or not issubclass(modules_item, AYONAddon) - ): - continue - - # Check if class is abstract (Developing purpose) - if inspect.isabstract(modules_item): - # Find abstract attributes by convention on `abc` module - not_implemented = [] - for attr_name in dir(modules_item): - attr = getattr(modules_item, attr_name, None) - abs_method = getattr( - attr, "__isabstractmethod__", None - ) - if attr and abs_method: - not_implemented.append(attr_name) - - # Log missing implementations - self.log.warning(( - "Skipping abstract Class: {}." - " Missing implementations: {}" - ).format(name, ", ".join(not_implemented))) - continue - module_classes.append(modules_item) - - for modules_item in module_classes: - is_openpype_module = issubclass(modules_item, OpenPypeModule) - settings = ( - modules_settings if is_openpype_module else ayon_settings - ) - name = modules_item.__name__ - try: - # Try initialize module - module = modules_item(self, settings) - # Store initialized object - self.modules.append(module) - self.modules_by_id[module.id] = module - self.modules_by_name[module.name] = module - enabled_str = "X" - if not module.enabled: - enabled_str = " " - self.log.debug("[{}] {}".format(enabled_str, name)) - - now = time.time() - report[module.__class__.__name__] = now - prev_start_time - prev_start_time = now - - except Exception: - self.log.warning( - "Initialization of module {} failed.".format(name), - exc_info=True - ) - - if self._report is not None: - report[self._report_total_key] = time.time() - time_start - self._report["Initialization"] = report - - def connect_modules(self): - """Trigger connection with other enabled modules. - - Modules should handle their interfaces in `connect_with_modules`. 
- """ - report = {} - time_start = time.time() - prev_start_time = time_start - enabled_modules = self.get_enabled_modules() - self.log.debug("Has {} enabled modules.".format(len(enabled_modules))) - for module in enabled_modules: - try: - module.connect_with_modules(enabled_modules) - except Exception: - self.log.error( - "BUG: Module failed on connection with other modules.", - exc_info=True - ) - - now = time.time() - report[module.__class__.__name__] = now - prev_start_time - prev_start_time = now - - if self._report is not None: - report[self._report_total_key] = time.time() - time_start - self._report["Connect modules"] = report - - def get_enabled_modules(self): - """Enabled modules initialized by the manager. - - Returns: - list[AYONAddon]: Initialized and enabled modules. - """ - - return [ - module - for module in self.modules - if module.enabled - ] - - def collect_global_environments(self): - """Helper to collect global environment variabled from modules. - - Returns: - dict: Global environment variables from enabled modules. - - Raises: - AssertionError: Global environment variables must be unique for - all modules. - """ - module_envs = {} - for module in self.get_enabled_modules(): - # Collect global module's global environments - _envs = module.get_global_environments() - for key, value in _envs.items(): - if key in module_envs: - # TODO better error message - raise AssertionError( - "Duplicated environment key {}".format(key) - ) - module_envs[key] = value - return module_envs - - def collect_plugin_paths(self): - """Helper to collect all plugins from modules inherited IPluginPaths. - - Unknown keys are logged out. - - Returns: - dict: Output is dictionary with keys "publish", "create", "load", - "actions" and "inventory" each containing list of paths. - """ - # Output structure - output = { - "publish": [], - "create": [], - "load": [], - "actions": [], - "inventory": [] - } - unknown_keys_by_module = {} - for module in self.get_enabled_modules(): - # Skip module that do not inherit from `IPluginPaths` - if not isinstance(module, IPluginPaths): - continue - plugin_paths = module.get_plugin_paths() - for key, value in plugin_paths.items(): - # Filter unknown keys - if key not in output: - if module.name not in unknown_keys_by_module: - unknown_keys_by_module[module.name] = [] - unknown_keys_by_module[module.name].append(key) - continue - - # Skip if value is empty - if not value: - continue - - # Convert to list if value is not list - if not isinstance(value, (list, tuple, set)): - value = [value] - output[key].extend(value) - - # Report unknown keys (Developing purposes) - if unknown_keys_by_module: - expected_keys = ", ".join([ - "\"{}\"".format(key) for key in output.keys() - ]) - msg_template = "Module: \"{}\" - got key {}" - msg_items = [] - for module_name, keys in unknown_keys_by_module.items(): - joined_keys = ", ".join([ - "\"{}\"".format(key) for key in keys - ]) - msg_items.append(msg_template.format(module_name, joined_keys)) - self.log.warning(( - "Expected keys from `get_plugin_paths` are {}. 
{}" - ).format(expected_keys, " | ".join(msg_items))) - return output - - def _collect_plugin_paths(self, method_name, *args, **kwargs): - output = [] - for module in self.get_enabled_modules(): - # Skip module that do not inherit from `IPluginPaths` - if not isinstance(module, IPluginPaths): - continue - - method = getattr(module, method_name) - try: - paths = method(*args, **kwargs) - except Exception: - self.log.warning( - ( - "Failed to get plugin paths from module" - " '{}' using '{}'." - ).format(module.__class__.__name__, method_name), - exc_info=True - ) - continue - - if paths: - # Convert to list if value is not list - if not isinstance(paths, (list, tuple, set)): - paths = [paths] - output.extend(paths) - return output - - def collect_create_plugin_paths(self, host_name): - """Helper to collect creator plugin paths from modules. - - Args: - host_name (str): For which host are creators meant. - - Returns: - list: List of creator plugin paths. - """ - - return self._collect_plugin_paths( - "get_create_plugin_paths", - host_name - ) - - collect_creator_plugin_paths = collect_create_plugin_paths - - def collect_load_plugin_paths(self, host_name): - """Helper to collect load plugin paths from modules. - - Args: - host_name (str): For which host are load plugins meant. - - Returns: - list: List of load plugin paths. - """ - - return self._collect_plugin_paths( - "get_load_plugin_paths", - host_name - ) - - def collect_publish_plugin_paths(self, host_name): - """Helper to collect load plugin paths from modules. - - Args: - host_name (str): For which host are load plugins meant. - - Returns: - list: List of pyblish plugin paths. - """ - - return self._collect_plugin_paths( - "get_publish_plugin_paths", - host_name - ) - - def collect_inventory_action_paths(self, host_name): - """Helper to collect load plugin paths from modules. - - Args: - host_name (str): For which host are load plugins meant. - - Returns: - list: List of pyblish plugin paths. - """ - - return self._collect_plugin_paths( - "get_inventory_action_paths", - host_name - ) - - def get_host_module(self, host_name): - """Find host module by host name. - - Args: - host_name (str): Host name for which is found host module. - - Returns: - AYONAddon: Found host module by name. - None: There was not found module inheriting IHostAddon which has - host name set to passed 'host_name'. - """ - - for module in self.get_enabled_modules(): - if ( - isinstance(module, IHostAddon) - and module.host_name == host_name - ): - return module - return None - - def get_host_names(self): - """List of available host names based on host modules. - - Returns: - Iterable[str]: All available host names based on enabled modules - inheriting 'IHostAddon'. - """ - - return { - module.host_name - for module in self.get_enabled_modules() - if isinstance(module, IHostAddon) - } - - def print_report(self): - """Print out report of time spent on modules initialization parts. - - Reporting is not automated must be implemented for each initialization - part separatelly. Reports must be stored to `_report` attribute. - Print is skipped if `_report` is empty. - - Attribute `_report` is dictionary where key is "label" describing - the processed part and value is dictionary where key is module's - class name and value is time delta of it's processing. - - It is good idea to add total time delta on processed part under key - which is defined in attribute `_report_total_key`. By default has value - `"Total"` but use the attribute please. 
- - ```javascript - { - "Initialization": { - "FtrackModule": 0.003, - ... - "Total": 1.003, - }, - ... - } - ``` - """ - if not self._report: - return - - available_col_names = set() - for module_names in self._report.values(): - available_col_names |= set(module_names.keys()) - - # Prepare ordered dictionary for columns - cols = collections.OrderedDict() - # Add module names to first columnt - cols["Module name"] = list(sorted( - module.__class__.__name__ - for module in self.modules - if module.__class__.__name__ in available_col_names - )) - # Add total key (as last module) - cols["Module name"].append(self._report_total_key) - - # Add columns from report - for label in self._report.keys(): - cols[label] = [] - - total_module_times = {} - for module_name in cols["Module name"]: - total_module_times[module_name] = 0 - - for label, reported in self._report.items(): - for module_name in cols["Module name"]: - col_time = reported.get(module_name) - if col_time is None: - cols[label].append("N/A") - continue - cols[label].append("{:.3f}".format(col_time)) - total_module_times[module_name] += col_time - - # Add to also total column that should sum the row - cols[self._report_total_key] = [] - for module_name in cols["Module name"]: - cols[self._report_total_key].append( - "{:.3f}".format(total_module_times[module_name]) - ) - - # Prepare column widths and total row count - # - column width is by - col_widths = {} - total_rows = None - for key, values in cols.items(): - if total_rows is None: - total_rows = 1 + len(values) - max_width = len(key) - for value in values: - value_length = len(value) - if value_length > max_width: - max_width = value_length - col_widths[key] = max_width - - rows = [] - for _idx in range(total_rows): - rows.append([]) - - for key, values in cols.items(): - width = col_widths[key] - idx = 0 - rows[idx].append(key.ljust(width)) - for value in values: - idx += 1 - rows[idx].append(value.ljust(width)) - - filler_parts = [] - for width in col_widths.values(): - filler_parts.append(width * "-") - filler = "+".join(filler_parts) - - formatted_rows = [filler] - last_row_idx = len(rows) - 1 - for idx, row in enumerate(rows): - # Add filler before last row - if idx == last_row_idx: - formatted_rows.append(filler) - - formatted_rows.append("|".join(row)) - - # Add filler after first row - if idx == 0: - formatted_rows.append(filler) - - # Join rows with newline char and add new line at the end - output = "\n".join(formatted_rows) + "\n" - print(output) - - -class TrayModulesManager(ModulesManager): - # Define order of modules in menu - modules_menu_order = ( - "user", - "ftrack", - "kitsu", - "launcher_tool", - "avalon", - "clockify", - "standalonepublish_tool", - "traypublish_tool", - "log_viewer", - "local_settings", - "settings" - ) - - def __init__(self): - self.log = Logger.get_logger(self.__class__.__name__) - - self.modules = [] - self.modules_by_id = {} - self.modules_by_name = {} - self._report = {} - - self.tray_manager = None - - self.doubleclick_callbacks = {} - self.doubleclick_callback = None - - def add_doubleclick_callback(self, module, callback): - """Register doubleclick callbacks on tray icon. - - Currently there is no way how to determine which is launched. Name of - callback can be defined with `doubleclick_callback` attribute. - - Missing feature how to define default callback. - - Args: - addon (AYONAddon): Addon object. - callback (FunctionType): Function callback. 
- """ - callback_name = "_".join([module.name, callback.__name__]) - if callback_name not in self.doubleclick_callbacks: - self.doubleclick_callbacks[callback_name] = callback - if self.doubleclick_callback is None: - self.doubleclick_callback = callback_name - return - - self.log.warning(( - "Callback with name \"{}\" is already registered." - ).format(callback_name)) - - def initialize(self, tray_manager, tray_menu): - self.tray_manager = tray_manager - self.initialize_modules() - self.tray_init() - self.connect_modules() - self.tray_menu(tray_menu) - - def get_enabled_tray_modules(self): - """Enabled tray modules. - - Returns: - list[AYONAddon]: Enabled addons that inherit from tray interface. - """ - - return [ - module - for module in self.modules - if module.enabled and isinstance(module, ITrayModule) - ] - - def restart_tray(self): - if self.tray_manager: - self.tray_manager.restart() - - def tray_init(self): - report = {} - time_start = time.time() - prev_start_time = time_start - for module in self.get_enabled_tray_modules(): - try: - module._tray_manager = self.tray_manager - module.tray_init() - module.tray_initialized = True - except Exception: - self.log.warning( - "Module \"{}\" crashed on `tray_init`.".format( - module.name - ), - exc_info=True - ) - - now = time.time() - report[module.__class__.__name__] = now - prev_start_time - prev_start_time = now - - if self._report is not None: - report[self._report_total_key] = time.time() - time_start - self._report["Tray init"] = report - - def tray_menu(self, tray_menu): - ordered_modules = [] - enabled_by_name = { - module.name: module - for module in self.get_enabled_tray_modules() - } - - for name in self.modules_menu_order: - module_by_name = enabled_by_name.pop(name, None) - if module_by_name: - ordered_modules.append(module_by_name) - ordered_modules.extend(enabled_by_name.values()) - - report = {} - time_start = time.time() - prev_start_time = time_start - for module in ordered_modules: - if not module.tray_initialized: - continue - - try: - module.tray_menu(tray_menu) - except Exception: - # Unset initialized mark - module.tray_initialized = False - self.log.warning( - "Module \"{}\" crashed on `tray_menu`.".format( - module.name - ), - exc_info=True - ) - now = time.time() - report[module.__class__.__name__] = now - prev_start_time - prev_start_time = now - - if self._report is not None: - report[self._report_total_key] = time.time() - time_start - self._report["Tray menu"] = report - - def start_modules(self): - report = {} - time_start = time.time() - prev_start_time = time_start - for module in self.get_enabled_tray_modules(): - if not module.tray_initialized: - if isinstance(module, ITrayService): - module.set_service_failed_icon() - continue - - try: - module.tray_start() - except Exception: - self.log.warning( - "Module \"{}\" crashed on `tray_start`.".format( - module.name - ), - exc_info=True - ) - now = time.time() - report[module.__class__.__name__] = now - prev_start_time - prev_start_time = now - - if self._report is not None: - report[self._report_total_key] = time.time() - time_start - self._report["Modules start"] = report - - def on_exit(self): - for module in self.get_enabled_tray_modules(): - if module.tray_initialized: - try: - module.tray_exit() - except Exception: - self.log.warning( - "Module \"{}\" crashed on `tray_exit`.".format( - module.name - ), - exc_info=True - ) - - -def get_module_settings_defs(): - """Check loaded addons/modules for existence of their settings definition. 
-
-    Check if an OpenPype addon/module, as a python module, has a class that
-    inherits from `ModuleSettingsDef` among its python module variables
-    (imported in `__init__.py`).
-
-    Returns:
-        list: All valid and not abstract settings definitions from imported
-            openpype addons and modules.
-    """
-    # Make sure modules are loaded
-    load_modules()
-
-    import openpype_modules
-
-    settings_defs = []
-
-    log = Logger.get_logger("ModuleSettingsLoad")
-
-    for raw_module in openpype_modules:
-        for attr_name in dir(raw_module):
-            attr = getattr(raw_module, attr_name)
-            if (
-                not inspect.isclass(attr)
-                or attr is ModuleSettingsDef
-                or not issubclass(attr, ModuleSettingsDef)
-            ):
-                continue
-
-            if inspect.isabstract(attr):
-                # Find missing implementations by convention on `abc` module
-                # - use separate loop names so the outer 'attr'/'attr_name'
-                #   are not overwritten by this inner loop
-                not_implemented = []
-                for item_name in dir(attr):
-                    item = getattr(attr, item_name, None)
-                    abs_method = getattr(
-                        item, "__isabstractmethod__", None
-                    )
-                    if item and abs_method:
-                        not_implemented.append(item_name)
-
-                # Log missing implementations
-                log.warning((
-                    "Skipping abstract Class: {} in module {}."
-                    " Missing implementations: {}"
-                ).format(
-                    attr_name, raw_module.__name__, ", ".join(not_implemented)
-                ))
-                continue
-
-            settings_defs.append(attr)
-
-    return settings_defs
-
-
-@six.add_metaclass(ABCMeta)
-class BaseModuleSettingsDef:
-    """Definition of settings for OpenPype module or AddOn."""
-    _id = None
-
-    @property
-    def id(self):
-        """ID created on initialization.
-
-        ID should be per created object. Helps to store objects.
-        """
-        if self._id is None:
-            self._id = uuid4()
-        return self._id
-
-    @abstractmethod
-    def get_settings_schemas(self, schema_type):
-        """Setting schemas for passed schema type.
-
-        These are main schemas by dynamic schema keys. If they're using
-        sub schemas or templates they should be loaded with
-        `get_dynamic_schemas`.
-
-        Returns:
-            dict: Schema by `dynamic_schema` keys.
-        """
-        pass
-
-    @abstractmethod
-    def get_dynamic_schemas(self, schema_type):
-        """Settings schemas and templates that can be used anywhere.
-
-        It is recommended to add a prefix specific for the addon/module to
-        keys (e.g. "my_addon/real_schema_name").
-
-        Returns:
-            dict: Schemas and templates by their keys.
-        """
-        pass
-
-    @abstractmethod
-    def get_defaults(self, top_key):
-        """Default values for passed top key.
-
-        Top keys are (currently) "system_settings" or "project_settings".
-
-        Should return exactly what was passed with `save_defaults`.
-
-        Returns:
-            dict: Default values by path to first key in OpenPype defaults.
-        """
-        pass
-
-    @abstractmethod
-    def save_defaults(self, top_key, data):
-        """Save default values for passed top key.
-
-        Top keys are (currently) "system_settings" or "project_settings".
-
-        Passed data are by path to first key defined in main schemas.
-        """
-        pass
-
-
-class ModuleSettingsDef(BaseModuleSettingsDef):
-    """Settings definition with separated system and project settings parts.
-
-    Reduces conditions that must be checked and adds predefined methods for
-    each case.
-    """
-    def get_defaults(self, top_key):
-        """Split method into 2 methods by top key."""
-        if top_key == SYSTEM_SETTINGS_KEY:
-            return self.get_default_system_settings() or {}
-        elif top_key == PROJECT_SETTINGS_KEY:
-            return self.get_default_project_settings() or {}
-        return {}
-
-    def save_defaults(self, top_key, data):
-        """Split method into 2 methods by top key."""
-        if top_key == SYSTEM_SETTINGS_KEY:
-            self.save_system_defaults(data)
-        elif top_key == PROJECT_SETTINGS_KEY:
-            self.save_project_defaults(data)
-
-    def get_settings_schemas(self, schema_type):
-        """Split method into 2 methods by schema type."""
-        if schema_type == SCHEMA_KEY_SYSTEM_SETTINGS:
-            return self.get_system_settings_schemas() or {}
-        elif schema_type == SCHEMA_KEY_PROJECT_SETTINGS:
-            return self.get_project_settings_schemas() or {}
-        return {}
-
-    def get_dynamic_schemas(self, schema_type):
-        """Split method into 2 methods by schema type."""
-        if schema_type == SCHEMA_KEY_SYSTEM_SETTINGS:
-            return self.get_system_dynamic_schemas() or {}
-        elif schema_type == SCHEMA_KEY_PROJECT_SETTINGS:
-            return self.get_project_dynamic_schemas() or {}
-        return {}
-
-    @abstractmethod
-    def get_system_settings_schemas(self):
-        """Schemas and templates usable in system settings schemas.
-
-        Returns:
-            dict: Schemas and templates by their names. Names must be unique
-                across whole OpenPype.
-        """
-        pass
-
-    @abstractmethod
-    def get_project_settings_schemas(self):
-        """Schemas and templates usable in project settings schemas.
-
-        Returns:
-            dict: Schemas and templates by their names. Names must be unique
-                across whole OpenPype.
-        """
-        pass
-
-    @abstractmethod
-    def get_system_dynamic_schemas(self):
-        """System schemas by dynamic schema name.
-
-        If a dynamic schema name is not available, the schema will not be
-        used.
-
-        Returns:
-            dict: Schemas or list of schemas by dynamic schema name.
-        """
-        pass
-
-    @abstractmethod
-    def get_project_dynamic_schemas(self):
-        """Project schemas by dynamic schema name.
-
-        If a dynamic schema name is not available, the schema will not be
-        used.
-
-        Returns:
-            dict: Schemas or list of schemas by dynamic schema name.
-        """
-        pass
-
-    @abstractmethod
-    def get_default_system_settings(self):
-        """Default system settings values.
-
-        Returns:
-            dict: Default values by path to first key.
-        """
-        pass
-
-    @abstractmethod
-    def get_default_project_settings(self):
-        """Default project settings values.
-
-        Returns:
-            dict: Default values by path to first key.
-        """
-        pass
-
-    @abstractmethod
-    def save_system_defaults(self, data):
-        """Save default system settings values.
-
-        Passed data are by path to first key defined in main schemas.
-        """
-        pass
-
-    @abstractmethod
-    def save_project_defaults(self, data):
-        """Save default project settings values.
-
-        Passed data are by path to first key defined in main schemas.
-        """
-        pass
-
-
-class JsonFilesSettingsDef(ModuleSettingsDef):
-    """Preimplemented settings definition using json files and file structure.
-
-    Expected file structure:
-    ┕ root
-    │
-    │ # Default values
-    ┝ defaults
-    │ ┝ system_settings.json
-    │ ┕ project_settings.json
-    │
-    │ # Schemas for `dynamic_template` type
-    ┝ dynamic_schemas
-    │ ┝ system_dynamic_schemas.json
-    │ ┕ project_dynamic_schemas.json
-    │
-    │ # Schemas that can be used anywhere (enhancement for `dynamic_schemas`)
-    ┕ schemas
-      ┝ system_schemas
-      │ ┝ # Any schema or template files
-      │ ┕ ...
-      ┕ project_schemas
-        ┝ # Any schema or template files
-        ┕ ...
-
-    Schemas can be loaded with a prefix to avoid duplicated schema/template
-    names across all OpenPype addons/modules. The prefix can be defined with
-    the class attribute `schema_prefix`.
-
-    The only thing which must be implemented is `get_settings_root_path`,
-    which should return the directory path to `root` (in the structure graph
-    above).
-    """
-    # Possible way how to define `schemas` prefix
-    schema_prefix = ""
-
-    @abstractmethod
-    def get_settings_root_path(self):
-        """Directory path where settings and their schemas are located."""
-        pass
-
-    def __init__(self):
-        settings_root_dir = self.get_settings_root_path()
-        defaults_dir = os.path.join(
-            settings_root_dir, "defaults"
-        )
-        dynamic_schemas_dir = os.path.join(
-            settings_root_dir, "dynamic_schemas"
-        )
-        schemas_dir = os.path.join(
-            settings_root_dir, "schemas"
-        )
-
-        self.system_defaults_filepath = os.path.join(
-            defaults_dir, "system_settings.json"
-        )
-        self.project_defaults_filepath = os.path.join(
-            defaults_dir, "project_settings.json"
-        )
-
-        self.system_dynamic_schemas_filepath = os.path.join(
-            dynamic_schemas_dir, "system_dynamic_schemas.json"
-        )
-        self.project_dynamic_schemas_filepath = os.path.join(
-            dynamic_schemas_dir, "project_dynamic_schemas.json"
-        )
-
-        self.system_schemas_dir = os.path.join(
-            schemas_dir, "system_schemas"
-        )
-        self.project_schemas_dir = os.path.join(
-            schemas_dir, "project_schemas"
-        )
-
-    def _load_json_file_data(self, path):
-        if os.path.exists(path):
-            return load_json_file(path)
-        return {}
-
-    def get_default_system_settings(self):
-        """Default system settings values.
-
-        Returns:
-            dict: Default values by path to first key.
-        """
-        return self._load_json_file_data(self.system_defaults_filepath)
-
-    def get_default_project_settings(self):
-        """Default project settings values.
-
-        Returns:
-            dict: Default values by path to first key.
-        """
-        return self._load_json_file_data(self.project_defaults_filepath)
-
-    def _save_data_to_filepath(self, path, data):
-        dirpath = os.path.dirname(path)
-        if not os.path.exists(dirpath):
-            os.makedirs(dirpath)
-
-        with open(path, "w") as file_stream:
-            json.dump(data, file_stream, indent=4)
-
-    def save_system_defaults(self, data):
-        """Save default system settings values.
-
-        Passed data are by path to first key defined in main schemas.
-        """
-        self._save_data_to_filepath(self.system_defaults_filepath, data)
-
-    def save_project_defaults(self, data):
-        """Save default project settings values.
-
-        Passed data are by path to first key defined in main schemas.
-        """
-        self._save_data_to_filepath(self.project_defaults_filepath, data)
-
-    def get_system_dynamic_schemas(self):
-        """System schemas by dynamic schema name.
-
-        If a dynamic schema name is not available, the schema will not be
-        used.
-
-        Returns:
-            dict: Schemas or list of schemas by dynamic schema name.
- """ - return self._load_json_file_data(self.project_dynamic_schemas_filepath) - - def _load_files_from_path(self, path): - output = {} - if not path or not os.path.exists(path): - return output - - if os.path.isfile(path): - filename = os.path.basename(path) - basename, ext = os.path.splitext(filename) - if ext == ".json": - if self.schema_prefix: - key = "{}/{}".format(self.schema_prefix, basename) - else: - key = basename - output[key] = self._load_json_file_data(path) - return output - - path = os.path.normpath(path) - for root, _, files in os.walk(path, topdown=False): - for filename in files: - basename, ext = os.path.splitext(filename) - if ext != ".json": - continue - - json_path = os.path.join(root, filename) - store_key = os.path.join( - root.replace(path, ""), basename - ).replace("\\", "/") - if self.schema_prefix: - store_key = "{}/{}".format(self.schema_prefix, store_key) - output[store_key] = self._load_json_file_data(json_path) - - return output - - def get_system_settings_schemas(self): - """Schemas and templates usable in system settings schemas. - - Returns: - dict: Schemas and templates by it's names. Names must be unique - across whole OpenPype. - """ - return self._load_files_from_path(self.system_schemas_dir) - - def get_project_settings_schemas(self): - """Schemas and templates usable in project settings schemas. - - Returns: - dict: Schemas and templates by it's names. Names must be unique - across whole OpenPype. - """ - return self._load_files_from_path(self.project_schemas_dir) diff --git a/openpype/modules/click_wrap.py b/openpype/modules/click_wrap.py deleted file mode 100644 index ed67035ec8..0000000000 --- a/openpype/modules/click_wrap.py +++ /dev/null @@ -1,365 +0,0 @@ -"""Simplified wrapper for 'click' python module. - -Module 'click' is used as main cli handler in AYON/OpenPype. Addons can -register their own subcommands with options. This wrapper allows to define -commands and options as with 'click', but without any dependency. - -Why not to use 'click' directly? Version of 'click' used in AYON/OpenPype -is not compatible with 'click' version used in some DCCs (e.g. Houdini 20+). -And updating 'click' would break other DCCs. - -How to use it? If you already have cli commands defined in addon, just replace -'click' with 'click_wrap' and it should work and modify your addon's cli -method to convert 'click_wrap' object to 'click' object. 
- -Before -```python -import click -from openpype.modules import OpenPypeModule - - -class ExampleAddon(OpenPypeModule): - name = "example" - - def cli(self, click_group): - click_group.add_command(cli_main) - - -@click.group(ExampleAddon.name, help="Example addon") -def cli_main(): - pass - - -@cli_main.command(help="Example command") -@click.option("--arg1", help="Example argument 1", default="default1") -@click.option("--arg2", help="Example argument 2", is_flag=True) -def mycommand(arg1, arg2): - print(arg1, arg2) -``` - -Now -``` -from openpype import click_wrap -from openpype.modules import OpenPypeModule - - -class ExampleAddon(OpenPypeModule): - name = "example" - - def cli(self, click_group): - click_group.add_command(cli_main.to_click_obj()) - - -@click_wrap.group(ExampleAddon.name, help="Example addon") -def cli_main(): - pass - - -@cli_main.command(help="Example command") -@click_wrap.option("--arg1", help="Example argument 1", default="default1") -@click_wrap.option("--arg2", help="Example argument 2", is_flag=True) -def mycommand(arg1, arg2): - print(arg1, arg2) -``` - - -Added small enhancements: -- most of the methods can be used as chained calls -- functions/methods 'command' and 'group' can be used in a way that - first argument is callback function and the rest are arguments - for click - -Example: - ```python - from openpype import click_wrap - from openpype.modules import OpenPypeModule - - - class ExampleAddon(OpenPypeModule): - name = "example" - - def cli(self, click_group): - # Define main command (name 'example') - main = click_wrap.group( - self._cli_main, name=self.name, help="Example addon" - ) - # Add subcommand (name 'mycommand') - ( - main.command( - self._cli_command, name="mycommand", help="Example command" - ) - .option( - "--arg1", help="Example argument 1", default="default1" - ) - .option( - "--arg2", help="Example argument 2", is_flag=True, - ) - ) - # Convert main command to click object and add it to parent group - click_group.add_command(main.to_click_obj()) - - def _cli_main(self): - pass - - def _cli_command(self, arg1, arg2): - print(arg1, arg2) - ``` - - ```shell - openpype_console addon example mycommand --arg1 value1 --arg2 - ``` -""" - -import collections - -FUNC_ATTR_NAME = "__ayon_cli_options__" - - -class Command(object): - def __init__(self, func, *args, **kwargs): - # Command function - self._func = func - # Command definition arguments - self._args = args - # Command definition kwargs - self._kwargs = kwargs - # Both 'options' and 'arguments' are stored to the same variable - # - keep order of options and arguments - self._options = getattr(func, FUNC_ATTR_NAME, []) - - def to_click_obj(self): - """Converts this object to click object. - - Returns: - click.Command: Click command object. - """ - return convert_to_click(self) - - # --- Methods for 'convert_to_click' function --- - def get_args(self): - """ - Returns: - tuple: Command definition arguments. - """ - return self._args - - def get_kwargs(self): - """ - Returns: - dict[str, Any]: Command definition kwargs. - """ - return self._kwargs - - def get_func(self): - """ - Returns: - Function: Function to invoke on command trigger. - """ - return self._func - - def iter_options(self): - """ - Yields: - tuple[str, tuple, dict]: Option type name with args and kwargs. 
- """ - for item in self._options: - yield item - # ----------------------------------------------- - - def add_option(self, *args, **kwargs): - return self.add_option_by_type("option", *args, **kwargs) - - def add_argument(self, *args, **kwargs): - return self.add_option_by_type("argument", *args, **kwargs) - - option = add_option - argument = add_argument - - def add_option_by_type(self, option_name, *args, **kwargs): - self._options.append((option_name, args, kwargs)) - return self - - -class Group(Command): - def __init__(self, func, *args, **kwargs): - super(Group, self).__init__(func, *args, **kwargs) - # Store sub-groupd and sub-commands to the same variable - self._commands = [] - - # --- Methods for 'convert_to_click' function --- - def iter_commands(self): - for command in self._commands: - yield command - # ----------------------------------------------- - - def add_command(self, command): - """Add prepared command object as child. - - Args: - command (Command): Prepared command object. - """ - if command not in self._commands: - self._commands.append(command) - - def add_group(self, group): - """Add prepared group object as child. - - Args: - group (Group): Prepared group object. - """ - if group not in self._commands: - self._commands.append(group) - - def command(self, *args, **kwargs): - """Add child command. - - Returns: - Union[Command, Function]: New command object, or wrapper function. - """ - return self._add_new(Command, *args, **kwargs) - - def group(self, *args, **kwargs): - """Add child group. - - Returns: - Union[Group, Function]: New group object, or wrapper function. - """ - return self._add_new(Group, *args, **kwargs) - - def _add_new(self, target_cls, *args, **kwargs): - func = None - if args and callable(args[0]): - args = list(args) - func = args.pop(0) - args = tuple(args) - - def decorator(_func): - out = target_cls(_func, *args, **kwargs) - self._commands.append(out) - return out - - if func is not None: - return decorator(func) - return decorator - - -def convert_to_click(obj_to_convert): - """Convert wrapped object to click object. - - Args: - obj_to_convert (Command): Object to convert to click object. - - Returns: - click.Command: Click command object. 
- """ - import click - - commands_queue = collections.deque() - commands_queue.append((obj_to_convert, None)) - top_obj = None - while commands_queue: - item = commands_queue.popleft() - command_obj, parent_obj = item - if not isinstance(command_obj, Command): - raise TypeError( - "Invalid type '{}' expected 'Command'".format( - type(command_obj) - ) - ) - - if isinstance(command_obj, Group): - click_obj = ( - click.group( - *command_obj.get_args(), - **command_obj.get_kwargs() - )(command_obj.get_func()) - ) - - else: - click_obj = ( - click.command( - *command_obj.get_args(), - **command_obj.get_kwargs() - )(command_obj.get_func()) - ) - - for item in command_obj.iter_options(): - option_name, args, kwargs = item - if option_name == "option": - click.option(*args, **kwargs)(click_obj) - elif option_name == "argument": - click.argument(*args, **kwargs)(click_obj) - else: - raise ValueError( - "Invalid option name '{}'".format(option_name) - ) - - if top_obj is None: - top_obj = click_obj - - if parent_obj is not None: - parent_obj.add_command(click_obj) - - if isinstance(command_obj, Group): - for command in command_obj.iter_commands(): - commands_queue.append((command, click_obj)) - - return top_obj - - -def group(*args, **kwargs): - func = None - if args and callable(args[0]): - args = list(args) - func = args.pop(0) - args = tuple(args) - - def decorator(_func): - return Group(_func, *args, **kwargs) - - if func is not None: - return decorator(func) - return decorator - - -def command(*args, **kwargs): - func = None - if args and callable(args[0]): - args = list(args) - func = args.pop(0) - args = tuple(args) - - def decorator(_func): - return Command(_func, *args, **kwargs) - - if func is not None: - return decorator(func) - return decorator - - -def argument(*args, **kwargs): - def decorator(func): - return _add_option_to_func( - func, "argument", *args, **kwargs - ) - return decorator - - -def option(*args, **kwargs): - def decorator(func): - return _add_option_to_func( - func, "option", *args, **kwargs - ) - return decorator - - -def _add_option_to_func(func, option_name, *args, **kwargs): - if isinstance(func, Command): - func.add_option_by_type(option_name, *args, **kwargs) - return func - - if not hasattr(func, FUNC_ATTR_NAME): - setattr(func, FUNC_ATTR_NAME, []) - cli_options = getattr(func, FUNC_ATTR_NAME) - cli_options.append((option_name, args, kwargs)) - return func diff --git a/openpype/modules/clockify/widgets.py b/openpype/modules/clockify/widgets.py deleted file mode 100644 index 86e67569f2..0000000000 --- a/openpype/modules/clockify/widgets.py +++ /dev/null @@ -1,207 +0,0 @@ -from qtpy import QtCore, QtGui, QtWidgets -from openpype import resources, style - - -class MessageWidget(QtWidgets.QWidget): - - SIZE_W = 300 - SIZE_H = 130 - - closed = QtCore.Signal() - - def __init__(self, messages, title): - super(MessageWidget, self).__init__() - - # Icon - icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) - self.setWindowIcon(icon) - - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) - - # Size setting - self.resize(self.SIZE_W, self.SIZE_H) - self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) - self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) - - # Style - self.setStyleSheet(style.load_stylesheet()) - - self.setLayout(self._ui_layout(messages)) - self.setWindowTitle(title) - - def _ui_layout(self, messages): - if not messages: - messages = ["*Missing messages (This is a bug)*", ] - - elif 
not isinstance(messages, (tuple, list)): - messages = [messages, ] - - main_layout = QtWidgets.QVBoxLayout(self) - - labels = [] - for message in messages: - label = QtWidgets.QLabel(message) - label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - label.setTextFormat(QtCore.Qt.RichText) - label.setWordWrap(True) - - labels.append(label) - main_layout.addWidget(label) - - btn_close = QtWidgets.QPushButton("Close") - btn_close.setToolTip('Close this window') - btn_close.clicked.connect(self.on_close_clicked) - - btn_group = QtWidgets.QHBoxLayout() - btn_group.addStretch(1) - btn_group.addWidget(btn_close) - - main_layout.addLayout(btn_group) - - self.labels = labels - self.btn_group = btn_group - self.btn_close = btn_close - self.main_layout = main_layout - - return main_layout - - def on_close_clicked(self): - self.close() - - def close(self, *args, **kwargs): - self.closed.emit() - super(MessageWidget, self).close(*args, **kwargs) - - -class ClockifySettings(QtWidgets.QWidget): - SIZE_W = 500 - SIZE_H = 130 - - loginSignal = QtCore.Signal(object, object, object) - - def __init__(self, clockify_api, optional=True): - super(ClockifySettings, self).__init__() - - self.clockify_api = clockify_api - self.optional = optional - self.validated = False - - # Icon - icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) - self.setWindowIcon(icon) - - self.setWindowTitle("Clockify settings") - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) - - # Size setting - self.resize(self.SIZE_W, self.SIZE_H) - self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) - self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) - self.setStyleSheet(style.load_stylesheet()) - - self._ui_init() - - def _ui_init(self): - label_api_key = QtWidgets.QLabel("Clockify API key:") - - input_api_key = QtWidgets.QLineEdit() - input_api_key.setFrame(True) - input_api_key.setPlaceholderText("e.g. 
XX1XxXX2x3x4xXxx") - - error_label = QtWidgets.QLabel("") - error_label.setTextFormat(QtCore.Qt.RichText) - error_label.setWordWrap(True) - error_label.hide() - - form_layout = QtWidgets.QFormLayout() - form_layout.setContentsMargins(10, 15, 10, 5) - form_layout.addRow(label_api_key, input_api_key) - form_layout.addRow(error_label) - - btn_ok = QtWidgets.QPushButton("Ok") - btn_ok.setToolTip('Sets Clockify API Key so can Start/Stop timer') - - btn_cancel = QtWidgets.QPushButton("Cancel") - cancel_tooltip = 'Application won\'t start' - if self.optional: - cancel_tooltip = 'Close this window' - btn_cancel.setToolTip(cancel_tooltip) - - btn_group = QtWidgets.QHBoxLayout() - btn_group.addStretch(1) - btn_group.addWidget(btn_ok) - btn_group.addWidget(btn_cancel) - - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.addLayout(form_layout) - main_layout.addLayout(btn_group) - - btn_ok.clicked.connect(self.click_ok) - btn_cancel.clicked.connect(self._close_widget) - - self.label_api_key = label_api_key - self.input_api_key = input_api_key - self.error_label = error_label - - self.btn_ok = btn_ok - self.btn_cancel = btn_cancel - - def setError(self, msg): - self.error_label.setText(msg) - self.error_label.show() - - def invalid_input(self, entity): - entity.setStyleSheet("border: 1px solid red;") - - def click_ok(self): - api_key = self.input_api_key.text().strip() - if self.optional is True and api_key == '': - self.clockify_api.save_api_key(None) - self.clockify_api.set_api(api_key) - self.validated = False - self._close_widget() - return - - validation = self.clockify_api.validate_api_key(api_key) - - if validation: - self.clockify_api.save_api_key(api_key) - self.clockify_api.set_api(api_key) - self.validated = True - self._close_widget() - else: - self.invalid_input(self.input_api_key) - self.validated = False - self.setError( - "Entered invalid API key" - ) - - def showEvent(self, event): - super(ClockifySettings, self).showEvent(event) - - # Make btns same width - max_width = max( - self.btn_ok.sizeHint().width(), - self.btn_cancel.sizeHint().width() - ) - self.btn_ok.setMinimumWidth(max_width) - self.btn_cancel.setMinimumWidth(max_width) - - def closeEvent(self, event): - if self.optional is True: - event.ignore() - self._close_widget() - else: - self.validated = False - - def _close_widget(self): - if self.optional is True: - self.hide() - else: - self.close() diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py deleted file mode 100644 index cd4cde2519..0000000000 --- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect default Deadline server.""" -import pyblish.api - -from openpype import AYON_SERVER_ENABLED - - -class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): - """Collect default Deadline Webservice URL. - - DL webservice addresses must be configured first in System Settings for - project settings enum to work. - - Default webservice could be overriden by - `project_settings/deadline/deadline_servers`. Currently only single url - is expected. - - This url could be overriden by some hosts directly on instances with - `CollectDeadlineServerFromInstance`. - """ - - # Run before collect_deadline_server_instance. 
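The webservice resolution in this collector boils down to a small fallback rule. A condensed sketch of it (the function name is hypothetical; `deadline_urls` mirrors the module's name-to-URL mapping):

```python
def resolve_deadline_url(deadline_urls, server_name=None):
    # Prefer the named server, fall back to the mandatory "default" entry.
    url = None
    if server_name:
        url = deadline_urls.get(server_name)
    url = url or deadline_urls["default"]
    # Normalize: strip whitespace and any trailing slash.
    return url.strip().rstrip("/")


urls = {"default": "http://localhost:8082/", "farm2": "http://10.0.0.5:8082"}
assert resolve_deadline_url(urls, "farm2") == "http://10.0.0.5:8082"
assert resolve_deadline_url(urls, "missing") == "http://localhost:8082"
```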
- order = pyblish.api.CollectorOrder + 0.0025 - label = "Default Deadline Webservice" - - pass_mongo_url = False - - def process(self, context): - try: - deadline_module = context.data.get("openPypeModules")["deadline"] - except AttributeError: - self.log.error("Cannot get OpenPype Deadline module.") - raise AssertionError("OpenPype Deadline module not found.") - - deadline_settings = context.data["project_settings"]["deadline"] - deadline_server_name = None - if AYON_SERVER_ENABLED: - deadline_server_name = deadline_settings["deadline_server"] - else: - deadline_servers = deadline_settings["deadline_servers"] - if deadline_servers: - deadline_server_name = deadline_servers[0] - - context.data["deadlinePassMongoUrl"] = self.pass_mongo_url - - deadline_webservice = None - if deadline_server_name: - deadline_webservice = deadline_module.deadline_urls.get( - deadline_server_name) - - default_deadline_webservice = deadline_module.deadline_urls["default"] - deadline_webservice = ( - deadline_webservice - or default_deadline_webservice - ) - - context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/") # noqa diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py deleted file mode 100644 index 0bee42c4cb..0000000000 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ /dev/null @@ -1,173 +0,0 @@ -import os -import json -from datetime import datetime - -import requests - -import pyblish.api - -from openpype.pipeline import legacy_io -from openpype.tests.lib import is_in_tests -from openpype.lib import is_running_from_build - - -class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): - """Submit Houdini scene to perform a local publish in Deadline. - - Publishing in Deadline can be helpful for scenes that publish very slow. - This way it can process in the background on another machine without the - Artist having to wait for the publish to finish on their local machine. - - Submission is done through the Deadline Web Service as - supplied via the environment variable AVALON_DEADLINE. - - """ - - label = "Submit Scene to Deadline" - order = pyblish.api.IntegratorOrder - hosts = ["houdini"] - families = ["*"] - targets = ["deadline"] - - def process(self, context): - # Not all hosts can import this module. - import hou - - # Ensure no errors so far - assert all( - result["success"] for result in context.data["results"] - ), "Errors found, aborting integration.." - - # Deadline connection - AVALON_DEADLINE = legacy_io.Session.get( - "AVALON_DEADLINE", "http://localhost:8082" - ) - assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" - - # Note that `publish` data member might change in the future. - # See: https://github.com/pyblish/pyblish-base/issues/307 - actives = [i for i in context if i.data["publish"]] - instance_names = sorted(instance.name for instance in actives) - - if not instance_names: - self.log.warning( - "No active instances found. " "Skipping submission.." 
- ) - return - - scene = context.data["currentFile"] - scenename = os.path.basename(scene) - - # Get project code - project = context.data["projectEntity"] - code = project["data"].get("code", project["name"]) - - job_name = "{scene} [PUBLISH]".format(scene=scenename) - batch_name = "{code} - {scene}".format(code=code, scene=scenename) - if is_in_tests(): - batch_name += datetime.now().strftime("%d%m%Y%H%M%S") - deadline_user = "roy" # todo: get deadline user dynamically - - # Get only major.minor version of Houdini, ignore patch version - version = hou.applicationVersionString() - version = ".".join(version.split(".")[:2]) - - # Generate the payload for Deadline submission - payload = { - "JobInfo": { - "Plugin": "Houdini", - "Pool": "houdini", # todo: remove hardcoded pool - "BatchName": batch_name, - "Comment": context.data.get("comment", ""), - "Priority": 50, - "Frames": "1-1", # Always trigger a single frame - "IsFrameDependent": False, - "Name": job_name, - "UserName": deadline_user, - # "Comment": instance.context.data.get("comment", ""), - # "InitialStatus": state - }, - "PluginInfo": { - "Build": None, # Don't force build - "IgnoreInputs": True, - # Inputs - "SceneFile": scene, - "OutputDriver": "/out/REMOTE_PUBLISH", - # Mandatory for Deadline - "Version": version, - }, - # Mandatory for Deadline, may be empty - "AuxFiles": [], - } - - # Process submission per individual instance if the submission - # is set to publish each instance as a separate job. Else submit - # a single job to process all instances. - per_instance = context.data.get("separateJobPerInstance", False) - if per_instance: - # Submit a job per instance - job_name = payload["JobInfo"]["Name"] - for instance in instance_names: - # Clarify job name per submission (include instance name) - payload["JobInfo"]["Name"] = job_name + " - %s" % instance - self.submit_job( - context, - payload, - instances=[instance], - deadline=AVALON_DEADLINE - ) - else: - # Submit a single job - self.submit_job( - context, - payload, - instances=instance_names, - deadline=AVALON_DEADLINE - ) - - def submit_job(self, context, payload, instances, deadline): - - # Ensure we operate on a copy, a shallow copy is fine. - payload = payload.copy() - - # Include critical environment variables with submission + api.Session - keys = [ - # Submit along the current Avalon tool setup that we launched - # this application with so the Render Slave can build its own - # similar environment using it, e.g. "houdini17.5;pluginx2.3" - "AVALON_TOOLS" - ] - - # Add OpenPype version if we are running from build. - if is_running_from_build(): - keys.append("OPENPYPE_VERSION") - - # Add mongo url if it's enabled - if context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - - environment = dict( - {key: os.environ[key] for key in keys if key in os.environ}, - **legacy_io.Session - ) - environment["PYBLISH_ACTIVE_INSTANCES"] = ",".join(instances) - - payload["JobInfo"].update( - { - "EnvironmentKeyValue%d" - % index: "{key}={value}".format( - key=key, value=environment[key] - ) - for index, key in enumerate(environment) - } - ) - - # Submit - self.log.debug("Submitting..") - self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. 
http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(deadline) - response = requests.post(url, json=payload) - if not response.ok: - raise Exception(response.text) diff --git a/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.py b/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.py deleted file mode 100644 index 32ed76b58d..0000000000 --- a/openpype/modules/deadline/repository/custom/plugins/HarmonyOpenPype/HarmonyOpenPype.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python3 -from System import * -from System.Diagnostics import * -from System.IO import * -from System.Text import * - -from Deadline.Plugins import * -from Deadline.Scripting import * - -def GetDeadlinePlugin(): - return HarmonyOpenPypePlugin() - -def CleanupDeadlinePlugin( deadlinePlugin ): - deadlinePlugin.Cleanup() - -class HarmonyOpenPypePlugin( DeadlinePlugin ): - - def __init__( self ): - super().__init__() - self.InitializeProcessCallback += self.InitializeProcess - self.RenderExecutableCallback += self.RenderExecutable - self.RenderArgumentCallback += self.RenderArgument - self.CheckExitCodeCallback += self.CheckExitCode - - def Cleanup( self ): - print("Cleanup") - for stdoutHandler in self.StdoutHandlers: - del stdoutHandler.HandleCallback - - del self.InitializeProcessCallback - del self.RenderExecutableCallback - del self.RenderArgumentCallback - - def CheckExitCode( self, exitCode ): - print("check code") - if exitCode != 0: - if exitCode == 100: - self.LogInfo( "Renderer reported an error with error code 100. This will be ignored, since the option to ignore it is specified in the Job Properties." ) - else: - self.FailRender( "Renderer returned non-zero error code %d. Check the renderer's output." % exitCode ) - - def InitializeProcess( self ): - self.PluginType = PluginType.Simple - self.StdoutHandling = True - self.PopupHandling = True - - self.AddStdoutHandlerCallback( "Rendered frame ([0-9]+)" ).HandleCallback += self.HandleStdoutProgress - - def HandleStdoutProgress( self ): - startFrame = self.GetStartFrame() - endFrame = self.GetEndFrame() - if( endFrame - startFrame + 1 != 0 ): - self.SetProgress( 100 * ( int(self.GetRegexMatch(1)) - startFrame + 1 ) / ( endFrame - startFrame + 1 ) ) - - def RenderExecutable( self ): - version = int( self.GetPluginInfoEntry( "Version" ) ) - exe = "" - exeList = self.GetConfigEntry( "Harmony_RenderExecutable_" + str(version) ) - exe = FileUtils.SearchFileList( exeList ) - if( exe == "" ): - self.FailRender( "Harmony render executable was not found in the configured separated list \"" + exeList + "\". The path to the render executable can be configured from the Plugin Configuration in the Deadline Monitor." 
) - return exe - - def RenderArgument( self ): - renderArguments = "-batch" - - if self.GetBooleanPluginInfoEntryWithDefault( "UsingResPreset", False ): - resName = self.GetPluginInfoEntryWithDefault( "ResolutionName", "HDTV_1080p24" ) - if resName == "Custom": - renderArguments += " -res " + self.GetPluginInfoEntryWithDefault( "PresetName", "HDTV_1080p24" ) - else: - renderArguments += " -res " + resName - else: - resolutionX = self.GetIntegerPluginInfoEntryWithDefault( "ResolutionX", -1 ) - resolutionY = self.GetIntegerPluginInfoEntryWithDefault( "ResolutionY", -1 ) - fov = self.GetFloatPluginInfoEntryWithDefault( "FieldOfView", -1 ) - - if resolutionX > 0 and resolutionY > 0 and fov > 0: - renderArguments += " -res " + str( resolutionX ) + " " + str( resolutionY ) + " " + str( fov ) - - camera = self.GetPluginInfoEntryWithDefault( "Camera", "" ) - - if not camera == "": - renderArguments += " -camera " + camera - - startFrame = str( self.GetStartFrame() ) - endFrame = str( self.GetEndFrame() ) - - renderArguments += " -frames " + startFrame + " " + endFrame - - if not self.GetBooleanPluginInfoEntryWithDefault( "IsDatabase", False ): - sceneFilename = self.GetPluginInfoEntryWithDefault( "SceneFile", self.GetDataFilename() ) - sceneFilename = RepositoryUtils.CheckPathMapping( sceneFilename ) - renderArguments += " \"" + sceneFilename + "\"" - else: - environment = self.GetPluginInfoEntryWithDefault( "Environment", "" ) - renderArguments += " -env " + environment - job = self.GetPluginInfoEntryWithDefault( "Job", "" ) - renderArguments += " -job " + job - scene = self.GetPluginInfoEntryWithDefault( "SceneName", "" ) - renderArguments += " -scene " + scene - version = self.GetPluginInfoEntryWithDefault( "SceneVersion", "" ) - renderArguments += " -version " + version - - #tempSceneDirectory = self.CreateTempDirectory( "thread" + str(self.GetThreadNumber()) ) - #preRenderScript = - rendernodeNum = 0 - scriptBuilder = StringBuilder() - - while True: - nodeName = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Node", "" ) - if nodeName == "": - break - nodeType = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Type", "Image" ) - if nodeType == "Image": - nodePath = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Path", "" ) - nodeLeadingZero = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "LeadingZero", "" ) - nodeFormat = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Format", "" ) - nodeStartFrame = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "StartFrame", "" ) - - if not nodePath == "": - scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"drawingName\", 1, \"" + nodePath + "\" );") - - if not nodeLeadingZero == "": - scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"leadingZeros\", 1, \"" + nodeLeadingZero + "\" );") - - if not nodeFormat == "": - scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"drawingType\", 1, \"" + nodeFormat + "\" );") - - if not nodeStartFrame == "": - scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"start\", 1, \"" + nodeStartFrame + "\" );") - - if nodeType == "Movie": - nodePath = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Path", "" ) - if not nodePath == "": - scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"moviePath\", 1, \"" + nodePath + "\" );") - - rendernodeNum += 1 - - tempDirectory = 
self.CreateTempDirectory( "thread" + str(self.GetThreadNumber()) ) - preRenderScriptName = Path.Combine( tempDirectory, "preRenderScript.txt" ) - - File.WriteAllText( preRenderScriptName, scriptBuilder.ToString() ) - - preRenderInlineScript = self.GetPluginInfoEntryWithDefault( "PreRenderInlineScript", "" ) - if preRenderInlineScript: - renderArguments += " -preRenderInlineScript \"" + preRenderInlineScript +"\"" - - renderArguments += " -preRenderScript \"" + preRenderScriptName +"\"" - - return renderArguments diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.ico b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.ico deleted file mode 100644 index f0c15accc4..0000000000 Binary files a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.ico and /dev/null differ diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.options b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.options deleted file mode 100644 index 6908fe865f..0000000000 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.options +++ /dev/null @@ -1,10 +0,0 @@ -[Arguments] -Type=string -Label=Arguments -Category=Python Options -CategoryOrder=0 -Index=1 -Description=The arguments to pass to the script. If no arguments are required, leave this blank. -Required=false -DisableIfBlank=true - diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param deleted file mode 100644 index b3ac18e20c..0000000000 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param +++ /dev/null @@ -1,27 +0,0 @@ -[About] -Type=label -Label=About -Category=About Plugin -CategoryOrder=-1 -Index=0 -Default=OpenPype Plugin for Deadline -Description=Not configurable - -[OpenPypeInstallationDirs] -Type=multilinemultifolder -Label=Directories where OpenPype versions are installed -Category=OpenPype Installation Directories -CategoryOrder=0 -Index=0 -Default=C:\Program Files (x86)\OpenPype -Description=Path or paths to directories where multiple versions of OpenPype might be installed. Enter every such path on separate lines. - -[OpenPypeExecutable] -Type=multilinemultifilename -Label=OpenPype Executable -Category=OpenPype Executables -CategoryOrder=1 -Index=0 -Default= -Description=The path to the OpenPype executable. Enter alternative paths on separate lines. - diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py deleted file mode 100644 index 004c58d346..0000000000 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/env python3 - -from System.IO import Path -from System.Text.RegularExpressions import Regex - -from Deadline.Plugins import PluginType, DeadlinePlugin -from Deadline.Scripting import ( - StringUtils, - FileUtils, - DirectoryUtils, - RepositoryUtils -) - -import re -import os -import platform - - -###################################################################### -# This is the function that Deadline calls to get an instance of the -# main DeadlinePlugin class. 
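The version probing in the plugin that follows reads `openpype/version.py` out of each install directory by exec-ing it into a dictionary. A hedged, condensed sketch of that step (helper name hypothetical; note the escaped dots, which the original regex `(\d+\.\d+.\d+)` leaves partially unescaped):

```python
import os
import re


def read_openpype_version(build_dir):
    # Condensed from get_openpype_version_from_path below: exec the
    # version module into a namespace and pull out "major.minor.patch".
    version_file = os.path.join(build_dir, "openpype", "version.py")
    if not os.path.isfile(version_file):
        return None
    namespace = {}
    with open(version_file, "r") as f:
        exec(f.read(), namespace)
    match = re.search(r"(\d+\.\d+\.\d+)", namespace["__version__"])
    return match.group(1) if match else None
```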
-###################################################################### -def GetDeadlinePlugin(): - return OpenPypeDeadlinePlugin() - - -def CleanupDeadlinePlugin(deadlinePlugin): - deadlinePlugin.Cleanup() - - -class OpenPypeDeadlinePlugin(DeadlinePlugin): - """ - Standalone plugin for publishing from OpenPype. - - Calls OpenPype executable 'openpype_console' from first correctly found - file based on plugin configuration. Uses 'publish' command and passes - path to metadata json file, which contains all needed information - for publish process. - """ - def __init__(self): - super().__init__() - self.InitializeProcessCallback += self.InitializeProcess - self.RenderExecutableCallback += self.RenderExecutable - self.RenderArgumentCallback += self.RenderArgument - - def Cleanup(self): - for stdoutHandler in self.StdoutHandlers: - del stdoutHandler.HandleCallback - - del self.InitializeProcessCallback - del self.RenderExecutableCallback - del self.RenderArgumentCallback - - def InitializeProcess(self): - self.PluginType = PluginType.Simple - self.StdoutHandling = True - - self.SingleFramesOnly = self.GetBooleanPluginInfoEntryWithDefault( - "SingleFramesOnly", False) - self.LogInfo("Single Frames Only: %s" % self.SingleFramesOnly) - - self.AddStdoutHandlerCallback( - ".*Progress: (\d+)%.*").HandleCallback += self.HandleProgress - - @staticmethod - def get_openpype_version_from_path(path, build=True): - """Get OpenPype version from provided path. - path (str): Path to scan. - build (bool, optional): Get only builds, not sources - - Returns: - str or None: version of OpenPype if found. - - """ - # fix path for application bundle on macos - if platform.system().lower() == "darwin": - path = os.path.join(path, "MacOS") - - version_file = os.path.join(path, "openpype", "version.py") - if not os.path.isfile(version_file): - return None - - # skip if the version is not build - exe = os.path.join(path, "openpype_console.exe") - if platform.system().lower() in ["linux", "darwin"]: - exe = os.path.join(path, "openpype_console") - - # if only builds are requested - if build and not os.path.isfile(exe): # noqa: E501 - print(f" ! path is not a build: {path}") - return None - - version = {} - with open(version_file, "r") as vf: - exec(vf.read(), version) - - version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) - return version_match[1] - - def RenderExecutable(self): - job = self.GetJob() - openpype_versions = [] - # if the job requires specific OpenPype version, - # lets go over all available and find compatible build. 
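The compatibility pick below sorts versions "naturally" (numeric chunks compare as integers) and keeps only builds whose major.minor match the request. A self-contained sketch of that logic (function names hypothetical):

```python
import re


def _natural_key(version_str):
    # Numeric chunks compare as integers: "3.15.10" sorts above "3.15.9".
    return [
        int(part) if part.isdigit() else part.lower()
        for part in re.split(r"(\d+)", version_str)
    ]


def pick_compatible(requested, found):
    # Keep only builds whose major.minor match, then take the highest.
    # 'found' is a list of (version, directory) tuples, as scanned below.
    req_parts = requested.split(".")[:2]
    compatible = [
        item for item in found
        if item[0].split(".")[:2] == req_parts
    ]
    if not compatible:
        return None
    compatible.sort(key=lambda item: _natural_key(item[0]))
    return compatible[-1]


print(pick_compatible(
    "3.15.2", [("3.15.1", "a"), ("3.15.10", "b"), ("3.16.0", "c")]
))
# -> ('3.15.10', 'b')
```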
- requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") - if requested_version: - self.LogInfo(( - "Scanning for compatible requested " - f"version {requested_version}")) - dir_list = self.GetConfigEntry("OpenPypeInstallationDirs") - - # clean '\ ' for MacOS pasting - if platform.system().lower() == "darwin": - dir_list = dir_list.replace("\\ ", " ") - - for dir_list in dir_list.split(","): - install_dir = DirectoryUtils.SearchDirectoryList(dir_list) - if install_dir: - sub_dirs = [ - f.path for f in os.scandir(install_dir) - if f.is_dir() - ] - for subdir in sub_dirs: - version = self.get_openpype_version_from_path(subdir) - if not version: - continue - openpype_versions.append((version, subdir)) - - exe_list = self.GetConfigEntry("OpenPypeExecutable") - # clean '\ ' for MacOS pasting - if platform.system().lower() == "darwin": - exe_list = exe_list.replace("\\ ", " ") - exe = FileUtils.SearchFileList(exe_list) - if openpype_versions: - # if looking for requested compatible version, - # add the implicitly specified to the list too. - version = self.get_openpype_version_from_path( - os.path.dirname(exe)) - if version: - openpype_versions.append((version, os.path.dirname(exe))) - - if requested_version: - # sort detected versions - if openpype_versions: - openpype_versions.sort( - key=lambda ver: [ - int(t) if t.isdigit() else t.lower() - for t in re.split(r"(\d+)", ver[0]) - ]) - requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501 - compatible_versions = [] - for version in openpype_versions: - v = version[0].split(".")[:3] - if v[0] == requested_major and v[1] == requested_minor: - compatible_versions.append(version) - if not compatible_versions: - self.FailRender(("Cannot find compatible version available " - "for version {} requested by the job. " - "Please add it through plugin configuration " - "in Deadline or install it to configured " - "directory.").format(requested_version)) - # sort compatible versions nad pick the last one - compatible_versions.sort( - key=lambda ver: [ - int(t) if t.isdigit() else t.lower() - for t in re.split(r"(\d+)", ver[0]) - ]) - # create list of executables for different platform and let - # Deadline decide. - exe_list = [ - os.path.join( - compatible_versions[-1][1], "openpype_console.exe"), - os.path.join( - compatible_versions[-1][1], "openpype_console"), - os.path.join( - compatible_versions[-1][1], "MacOS", "openpype_console") - ] - exe = FileUtils.SearchFileList(";".join(exe_list)) - - if exe == "": - self.FailRender( - "OpenPype executable was not found " + - "in the semicolon separated list " + - "\"" + ";".join(exe_list) + "\". 
" + - "The path to the render executable can be configured " + - "from the Plugin Configuration in the Deadline Monitor.") - return exe - - def RenderArgument(self): - arguments = str(self.GetPluginInfoEntryWithDefault("Arguments", "")) - arguments = RepositoryUtils.CheckPathMapping(arguments) - - arguments = re.sub(r"<(?i)STARTFRAME>", str(self.GetStartFrame()), - arguments) - arguments = re.sub(r"<(?i)ENDFRAME>", str(self.GetEndFrame()), - arguments) - arguments = re.sub(r"<(?i)QUOTE>", "\"", arguments) - - arguments = self.ReplacePaddedFrame(arguments, - "<(?i)STARTFRAME%([0-9]+)>", - self.GetStartFrame()) - arguments = self.ReplacePaddedFrame(arguments, - "<(?i)ENDFRAME%([0-9]+)>", - self.GetEndFrame()) - - count = 0 - for filename in self.GetAuxiliaryFilenames(): - localAuxFile = Path.Combine(self.GetJobsDataDirectory(), filename) - arguments = re.sub(r"<(?i)AUXFILE" + str(count) + r">", - localAuxFile.replace("\\", "/"), arguments) - count += 1 - - return arguments - - def ReplacePaddedFrame(self, arguments, pattern, frame): - frameRegex = Regex(pattern) - while True: - frameMatch = frameRegex.Match(arguments) - if frameMatch.Success: - paddingSize = int(frameMatch.Groups[1].Value) - if paddingSize > 0: - padding = StringUtils.ToZeroPaddedString(frame, - paddingSize, - False) - else: - padding = str(frame) - arguments = arguments.replace(frameMatch.Groups[0].Value, - padding) - else: - break - - return arguments - - def HandleProgress(self): - progress = float(self.GetRegexMatch(1)) - self.SetProgress(progress) diff --git a/openpype/modules/example_addons/example_addon/__init__.py b/openpype/modules/example_addons/example_addon/__init__.py deleted file mode 100644 index 721d924436..0000000000 --- a/openpype/modules/example_addons/example_addon/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Addon class definition and Settings definition must be imported here. - -If addon class or settings definition won't be here their definition won't -be found by OpenPype discovery. -""" - -from .addon import ( - AddonSettingsDef, - ExampleAddon -) - -__all__ = ( - "AddonSettingsDef", - "ExampleAddon" -) diff --git a/openpype/modules/example_addons/example_addon/addon.py b/openpype/modules/example_addons/example_addon/addon.py deleted file mode 100644 index e9de0c1bf5..0000000000 --- a/openpype/modules/example_addons/example_addon/addon.py +++ /dev/null @@ -1,146 +0,0 @@ -"""Addon definition is located here. - -Import of python packages that may not be available should not be imported -in global space here until are required or used. -- Qt related imports -- imports of Python 3 packages - - we still support Python 2 hosts where addon definition should available -""" - -import os - -from openpype.modules import ( - click_wrap, - JsonFilesSettingsDef, - OpenPypeAddOn, - ModulesManager, - IPluginPaths, - ITrayAction -) - - -# Settings definition of this addon using `JsonFilesSettingsDef` -# - JsonFilesSettingsDef is prepared settings definition using json files -# to define settings and store default values -class AddonSettingsDef(JsonFilesSettingsDef): - # This will add prefixes to every schema and template from `schemas` - # subfolder. - # - it is not required to fill the prefix but it is highly - # recommended as schemas and templates may have name clashes across - # multiple addons - # - it is also recommended that prefix has addon name in it - schema_prefix = "example_addon" - - def get_settings_root_path(self): - """Implemented abstract class of JsonFilesSettingsDef. 
-
-        Return the directory path where the json files defining addon
-        settings are located.
-        """
-        return os.path.join(
-            os.path.dirname(os.path.abspath(__file__)),
-            "settings"
-        )
-
-
-class ExampleAddon(OpenPypeAddOn, IPluginPaths, ITrayAction):
-    """This Addon has defined its settings and interface.
-
-    This example has system settings with an 'enabled' option and uses
-    a few other interfaces:
-    - `IPluginPaths` to define custom plugin paths
-    - `ITrayAction` to be shown in the tray tool
-    """
-    label = "Example Addon"
-    name = "example_addon"
-
-    def initialize(self, settings):
-        """Initialization of addon."""
-        module_settings = settings[self.name]
-        # Enabled by settings
-        self.enabled = module_settings.get("enabled", False)
-
-        # Prepare variables that can be used or set afterwards
-        self._connected_modules = None
-        # UI which must not be created at this time
-        self._dialog = None
-
-    def tray_init(self):
-        """Implementation of abstract method for `ITrayAction`.
-
-        We're definitely in the tray tool, so we can pre-create the dialog.
-        """
-
-        self._create_dialog()
-
-    def _create_dialog(self):
-        # Don't recreate the dialog if it already exists
-        if self._dialog is not None:
-            return
-
-        from .widgets import MyExampleDialog
-
-        self._dialog = MyExampleDialog()
-
-    def show_dialog(self):
-        """Show dialog with connected modules.
-
-        This can be called from anywhere but may crash in headless mode.
-        There is no way to prevent an addon from doing invalid operations
-        if it does not handle them.
-        """
-        # Make sure dialog is created
-        self._create_dialog()
-        # Show dialog
-        self._dialog.open()
-
-    def get_connected_modules(self):
-        """Custom implementation of addon."""
-        names = set()
-        if self._connected_modules is not None:
-            for module in self._connected_modules:
-                names.add(module.name)
-        return names
-
-    def on_action_trigger(self):
-        """Implementation of abstract method for `ITrayAction`."""
-        self.show_dialog()
-
-    def get_plugin_paths(self):
-        """Implementation of abstract method for `IPluginPaths`."""
-        current_dir = os.path.dirname(os.path.abspath(__file__))
-
-        return {
-            "publish": [os.path.join(current_dir, "plugins", "publish")]
-        }
-
-    def cli(self, click_group):
-        click_group.add_command(cli_main.to_click_obj())
-
-
-@click_wrap.group(
-    ExampleAddon.name,
-    help="Example addon dynamic cli commands.")
-def cli_main():
-    pass
-
-
-@cli_main.command()
-def nothing():
-    """Does nothing but print a message."""
-    print("You've triggered \"nothing\" command.")
-
-
-@cli_main.command()
-def show_dialog():
-    """Show ExampleAddon dialog.
-
-    We don't have direct access to the addon through the CLI, so we have
-    to create it again.
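The tray-related methods above follow a lazy-creation pattern: the dialog is built once, only when Qt is known to be available, and reused afterwards. A stripped-down, hypothetical sketch of the same pattern without any OpenPype or Qt imports:

```python
class LazyDialogAction(object):
    """Hypothetical sketch of the lazy-dialog pattern used above."""

    def __init__(self, dialog_factory):
        # 'dialog_factory' stands in for e.g. 'MyExampleDialog'.
        self._dialog_factory = dialog_factory
        self._dialog = None

    def tray_init(self):
        # In the tray a Qt application exists, so pre-creating is safe.
        self._create_dialog()

    def _create_dialog(self):
        # Create once, reuse afterwards.
        if self._dialog is None:
            self._dialog = self._dialog_factory()

    def on_action_trigger(self):
        # Guard again: the trigger may arrive before tray_init elsewhere.
        self._create_dialog()
        self._dialog.open()
```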
- """ - from openpype.tools.utils.lib import qt_app_context - - manager = ModulesManager() - example_addon = manager.modules_by_name[ExampleAddon.name] - with qt_app_context(): - example_addon.show_dialog() diff --git a/openpype/modules/example_addons/example_addon/plugins/publish/example_plugin.py b/openpype/modules/example_addons/example_addon/plugins/publish/example_plugin.py deleted file mode 100644 index 695120e93b..0000000000 --- a/openpype/modules/example_addons/example_addon/plugins/publish/example_plugin.py +++ /dev/null @@ -1,9 +0,0 @@ -import pyblish.api - - -class CollectExampleAddon(pyblish.api.ContextPlugin): - order = pyblish.api.CollectorOrder + 0.4 - label = "Collect Example Addon" - - def process(self, context): - self.log.info("I'm in example addon's plugin!") diff --git a/openpype/modules/example_addons/example_addon/settings/defaults/project_settings.json b/openpype/modules/example_addons/example_addon/settings/defaults/project_settings.json deleted file mode 100644 index 0a01fa8977..0000000000 --- a/openpype/modules/example_addons/example_addon/settings/defaults/project_settings.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "project_settings/example_addon": { - "number": 0, - "color_1": [ - 0.0, - 0.0, - 0.0 - ], - "color_2": [ - 0.0, - 0.0, - 0.0 - ] - } -} \ No newline at end of file diff --git a/openpype/modules/example_addons/example_addon/settings/defaults/system_settings.json b/openpype/modules/example_addons/example_addon/settings/defaults/system_settings.json deleted file mode 100644 index 1e77356373..0000000000 --- a/openpype/modules/example_addons/example_addon/settings/defaults/system_settings.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "modules/example_addon": { - "enabled": true - } -} \ No newline at end of file diff --git a/openpype/modules/example_addons/example_addon/settings/dynamic_schemas/project_dynamic_schemas.json b/openpype/modules/example_addons/example_addon/settings/dynamic_schemas/project_dynamic_schemas.json deleted file mode 100644 index 1f3da7b37f..0000000000 --- a/openpype/modules/example_addons/example_addon/settings/dynamic_schemas/project_dynamic_schemas.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "project_settings/global": { - "type": "schema", - "name": "example_addon/main" - } -} diff --git a/openpype/modules/example_addons/example_addon/settings/dynamic_schemas/system_dynamic_schemas.json b/openpype/modules/example_addons/example_addon/settings/dynamic_schemas/system_dynamic_schemas.json deleted file mode 100644 index 6faa48ba74..0000000000 --- a/openpype/modules/example_addons/example_addon/settings/dynamic_schemas/system_dynamic_schemas.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "system_settings/modules": { - "type": "schema", - "name": "example_addon/main" - } -} diff --git a/openpype/modules/example_addons/example_addon/settings/schemas/project_schemas/main.json b/openpype/modules/example_addons/example_addon/settings/schemas/project_schemas/main.json deleted file mode 100644 index ba692d860e..0000000000 --- a/openpype/modules/example_addons/example_addon/settings/schemas/project_schemas/main.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "type": "dict", - "key": "example_addon", - "label": "Example addon", - "collapsible": true, - "children": [ - { - "type": "number", - "key": "number", - "label": "This is your lucky number:", - "minimum": 7, - "maximum": 7, - "decimals": 0 - }, - { - "type": "template", - "name": "example_addon/the_template", - "template_data": [ - { - "name": "color_1", - "label": "Color 1" - }, - { - "name": "color_2", - 
"label": "Color 2" - } - ] - } - ] -} diff --git a/openpype/modules/example_addons/example_addon/settings/schemas/project_schemas/the_template.json b/openpype/modules/example_addons/example_addon/settings/schemas/project_schemas/the_template.json deleted file mode 100644 index af8fd9dae4..0000000000 --- a/openpype/modules/example_addons/example_addon/settings/schemas/project_schemas/the_template.json +++ /dev/null @@ -1,30 +0,0 @@ -[ - { - "type": "list-strict", - "key": "{name}", - "label": "{label}", - "object_types": [ - { - "label": "Red", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Green", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Blue", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - } - ] - } -] diff --git a/openpype/modules/example_addons/example_addon/settings/schemas/system_schemas/main.json b/openpype/modules/example_addons/example_addon/settings/schemas/system_schemas/main.json deleted file mode 100644 index 0fb0a7c1be..0000000000 --- a/openpype/modules/example_addons/example_addon/settings/schemas/system_schemas/main.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "type": "dict", - "key": "example_addon", - "label": "Example addon", - "collapsible": true, - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - } - ] -} diff --git a/openpype/modules/example_addons/example_addon/widgets.py b/openpype/modules/example_addons/example_addon/widgets.py deleted file mode 100644 index cd0da3ae43..0000000000 --- a/openpype/modules/example_addons/example_addon/widgets.py +++ /dev/null @@ -1,31 +0,0 @@ -from qtpy import QtWidgets - -from openpype.style import load_stylesheet - - -class MyExampleDialog(QtWidgets.QDialog): - def __init__(self, parent=None): - super(MyExampleDialog, self).__init__(parent) - - self.setWindowTitle("Connected modules") - - msg = "This is example dialog of example addon." - label_widget = QtWidgets.QLabel(msg, self) - - ok_btn = QtWidgets.QPushButton("OK", self) - btns_layout = QtWidgets.QHBoxLayout() - btns_layout.addStretch(1) - btns_layout.addWidget(ok_btn) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(label_widget) - layout.addLayout(btns_layout) - - ok_btn.clicked.connect(self._on_ok_clicked) - - self._label_widget = label_widget - - self.setStyleSheet(load_stylesheet()) - - def _on_ok_clicked(self): - self.done(1) diff --git a/openpype/modules/example_addons/tiny_addon.py b/openpype/modules/example_addons/tiny_addon.py deleted file mode 100644 index 62962954f5..0000000000 --- a/openpype/modules/example_addons/tiny_addon.py +++ /dev/null @@ -1,9 +0,0 @@ -from openpype.modules import OpenPypeAddOn - - -class TinyAddon(OpenPypeAddOn): - """This is tiniest possible addon. - - This addon won't do much but will exist in OpenPype modules environment. 
- """ - name = "tiniest_addon_ever" diff --git a/openpype/modules/ftrack/__init__.py b/openpype/modules/ftrack/__init__.py deleted file mode 100644 index e520f08337..0000000000 --- a/openpype/modules/ftrack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .ftrack_module import ( - FtrackModule, - FTRACK_MODULE_DIR, - - resolve_ftrack_url, -) - -__all__ = ( - "FtrackModule", - "FTRACK_MODULE_DIR", - - "resolve_ftrack_url", -) diff --git a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py deleted file mode 100644 index 333228c699..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py +++ /dev/null @@ -1,130 +0,0 @@ -import json - -from openpype_modules.ftrack.lib import ServerAction - - -def clone_review_session(session, entity): - # Create a client review with timestamp. - name = entity["name"] - review_session = session.create( - "ReviewSession", - { - "name": f"Clone of {name}", - "project": entity["project"] - } - ) - - # Add all invitees. - for invitee in entity["review_session_invitees"]: - # Make sure email is not None but string - email = invitee["email"] or "" - session.create( - "ReviewSessionInvitee", - { - "name": invitee["name"], - "email": email, - "review_session": review_session - } - ) - - # Add all objects to new review session. - for obj in entity["review_session_objects"]: - session.create( - "ReviewSessionObject", - { - "name": obj["name"], - "version": obj["version"], - "review_session": review_session, - "asset_version": obj["asset_version"] - } - ) - - session.commit() - - -class CloneReviewSession(ServerAction): - '''Generate Client Review action - `label` a descriptive string identifying your action. - `varaint` To group actions together, give them the same - label and specify a unique variant per action. - `identifier` a unique identifier for your action. - `description` a verbose descriptive text for you action - ''' - label = "Clone Review Session" - variant = None - identifier = "clone-review-session" - description = None - settings_key = "clone_review_session" - - def discover(self, session, entities, event): - '''Return true if we can handle the selected entities. - *session* is a `ftrack_api.Session` instance - *entities* is a list of tuples each containing the entity type and the - entity id. - If the entity is a hierarchical you will always get the entity - type TypedContext, once retrieved through a get operation you - will have the "real" entity type ie. example Shot, Sequence - or Asset Build. - *event* the unmodified original event - ''' - is_valid = ( - len(entities) == 1 - and entities[0].entity_type == "ReviewSession" - ) - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def launch(self, session, entities, event): - '''Callback method for the custom action. - return either a bool ( True if successful or False if the action - failed ) or a dictionary with they keys `message` and `success`, the - message should be a string and will be displayed as feedback to the - user, success should be a bool, True if successful or False if the - action failed. - *session* is a `ftrack_api.Session` instance - *entities* is a list of tuples each containing the entity type and the - entity id. - If the entity is a hierarchical you will always get the entity - type TypedContext, once retrieved through a get operation you - will have the "real" entity type ie. 
example Shot, Sequence
-        or Asset Build.
-        *event* the unmodified original event
-        '''
-        userId = event['source']['user']['id']
-        user = session.query('User where id is ' + userId).one()
-        job = session.create(
-            'Job',
-            {
-                'user': user,
-                'status': 'running',
-                'data': json.dumps({
-                    'description': 'Cloning Review Session.'
-                })
-            }
-        )
-        session.commit()
-
-        try:
-            clone_review_session(session, entities[0])
-
-            job['status'] = 'done'
-            session.commit()
-        except Exception:
-            session.rollback()
-            job["status"] = "failed"
-            session.commit()
-            self.log.error(
-                "Cloning review session failed", exc_info=True
-            )
-
-        return {
-            'success': True,
-            'message': 'Action completed successfully'
-        }
-
-
-def register(session):
-    '''Register action. Called when used as an event plugin.'''
-
-    CloneReviewSession(session).register()
diff --git a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py
deleted file mode 100644
index 42a279e333..0000000000
--- a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py
+++ /dev/null
@@ -1,311 +0,0 @@
-import threading
-import datetime
-import copy
-import collections
-
-import ftrack_api
-
-from openpype.lib import get_datetime_data
-from openpype.settings.lib import (
-    get_project_settings,
-    get_default_project_settings
-)
-from openpype_modules.ftrack.lib import ServerAction
-
-
-class CreateDailyReviewSessionServerAction(ServerAction):
-    """Create daily review session object per project.
-
-    The action creates review sessions based on settings, which define
-    whether the action is enabled and what the template for the review
-    session name is. If a review session with the resulting name already
-    exists, the process is skipped. If today's review session does not
-    exist but yesterday's exists and is empty, yesterday's session is
-    renamed; otherwise a new review session is created.
-
-    It also contains cycle creation of dailies, triggered each morning.
-    This option must be enabled in project settings. Cycle creation is
-    also checked on registration of the action.
-    """
-
-    identifier = "create.daily.review.session"
-    #: Action label.
-    label = "OpenPype Admin"
-    variant = "- Create Daily Review Session (Server)"
-    #: Action description.
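The daily-session naming in this action comes from filling the `{yy}{mm}{dd}` default template with date data from `get_datetime_data()` plus the project name. A hedged sketch that mimics only the few keys used here:

```python
import datetime


def fill_review_session_name(template, project_name, date=None):
    # Mimics the template fill below with a minimal subset of keys.
    date = date or datetime.datetime.now()
    data = {
        "yy": date.strftime("%y"),
        "mm": date.strftime("%m"),
        "dd": date.strftime("%d"),
        "project_name": project_name,
    }
    try:
        return template.format(**data)
    except KeyError:
        return None


print(fill_review_session_name(
    "{project_name} {yy}{mm}{dd}", "demo", datetime.datetime(2024, 1, 31)
))
# -> "demo 240131"
```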
- description = "Manually create daily review session" - role_list = {"Pypeclub", "Administrator", "Project Manager"} - - settings_key = "create_daily_review_session" - default_template = "{yy}{mm}{dd}" - - def __init__(self, *args, **kwargs): - super(CreateDailyReviewSessionServerAction, self).__init__( - *args, **kwargs - ) - - self._cycle_timer = None - self._last_cyle_time = None - self._day_delta = datetime.timedelta(days=1) - - def discover(self, session, entities, event): - """Show action only on AssetVersions.""" - - valid_selection = False - for ent in event["data"]["selection"]: - # Ignore entities that are not tasks or projects - if ent["entityType"].lower() in ( - "show", "task", "reviewsession", "assetversion" - ): - valid_selection = True - break - - if not valid_selection: - return False - return self.valid_roles(session, entities, event) - - def launch(self, session, entities, event): - project_entity = self.get_project_from_entity(entities[0], session) - project_name = project_entity["full_name"] - project_settings = self.get_project_settings_from_event( - event, project_name - ) - action_settings = self._extract_action_settings(project_settings) - project_name_by_id = { - project_entity["id"]: project_name - } - settings_by_project_id = { - project_entity["id"]: action_settings - } - self._process_review_session( - session, settings_by_project_id, project_name_by_id - ) - return True - - def _calculate_next_cycle_delta(self): - studio_default_settings = get_default_project_settings() - action_settings = ( - studio_default_settings - ["ftrack"] - [self.settings_frack_subkey] - [self.settings_key] - ) - cycle_hour_start = action_settings.get("cycle_hour_start") - if not cycle_hour_start: - h = m = s = 0 - else: - h, m, s = cycle_hour_start - - # Create threading timer which will trigger creation of report - # at the 00:00:01 of next day - # - callback will trigger another timer which will have 1 day offset - now = datetime.datetime.now() - # Create object of today morning - expected_next_trigger = datetime.datetime( - now.year, now.month, now.day, h, m, s - ) - if expected_next_trigger > now: - seconds = (expected_next_trigger - now).total_seconds() - else: - expected_next_trigger += self._day_delta - seconds = (expected_next_trigger - now).total_seconds() - return seconds, expected_next_trigger - - def register(self, *args, **kwargs): - """Override register to be able trigger """ - # Register server action as would be normally - super(CreateDailyReviewSessionServerAction, self).register( - *args, **kwargs - ) - - seconds_delta, cycle_time = self._calculate_next_cycle_delta() - - # Store cycle time which will be used to create next timer - self._last_cyle_time = cycle_time - # Create timer thread - self._cycle_timer = threading.Timer( - seconds_delta, self._timer_callback - ) - self._cycle_timer.start() - - self._check_review_session() - - def _timer_callback(self): - if ( - self._cycle_timer is not None - and self._last_cyle_time is not None - ): - seconds_delta, cycle_time = self._calculate_next_cycle_delta() - self._last_cyle_time = cycle_time - - self._cycle_timer = threading.Timer( - seconds_delta, self._timer_callback - ) - self._cycle_timer.start() - self._check_review_session() - - def _check_review_session(self): - session = ftrack_api.Session( - server_url=self.session.server_url, - api_key=self.session.api_key, - api_user=self.session.api_user, - auto_connect_event_hub=False - ) - project_entities = session.query( - "select id, full_name from Project" - ).all() - 
project_names_by_id = { - project_entity["id"]: project_entity["full_name"] - for project_entity in project_entities - } - - action_settings_by_project_id = self._get_action_settings( - project_names_by_id - ) - enabled_action_settings_by_project_id = {} - for item in action_settings_by_project_id.items(): - project_id, action_settings = item - if action_settings.get("cycle_enabled"): - enabled_action_settings_by_project_id[project_id] = ( - action_settings - ) - - if not enabled_action_settings_by_project_id: - self.log.info(( - "There are no projects that have enabled" - " cycle review sesison creation" - )) - - else: - self._process_review_session( - session, - enabled_action_settings_by_project_id, - project_names_by_id - ) - - session.close() - - def _process_review_session( - self, session, settings_by_project_id, project_names_by_id - ): - review_sessions = session.query(( - "select id, name, project_id" - " from ReviewSession where project_id in ({})" - ).format(self.join_query_keys(settings_by_project_id))).all() - - review_sessions_by_project_id = collections.defaultdict(list) - for review_session in review_sessions: - project_id = review_session["project_id"] - review_sessions_by_project_id[project_id].append(review_session) - - # Prepare fill data for today's review sesison and yesterdays - now = datetime.datetime.now() - today_obj = datetime.datetime( - now.year, now.month, now.day, 0, 0, 0 - ) - yesterday_obj = today_obj - self._day_delta - - today_fill_data = get_datetime_data(today_obj) - yesterday_fill_data = get_datetime_data(yesterday_obj) - - # Loop through projects and try to create daily reviews - for project_id, action_settings in settings_by_project_id.items(): - review_session_template = ( - action_settings["review_session_template"] - ).strip() or self.default_template - - today_project_fill_data = copy.deepcopy(today_fill_data) - yesterday_project_fill_data = copy.deepcopy(yesterday_fill_data) - project_name = project_names_by_id[project_id] - today_project_fill_data["project_name"] = project_name - yesterday_project_fill_data["project_name"] = project_name - - today_session_name = self._fill_review_template( - review_session_template, today_project_fill_data - ) - yesterday_session_name = self._fill_review_template( - review_session_template, yesterday_project_fill_data - ) - # Skip if today's session name could not be filled - if not today_session_name: - continue - - # Find matching review session - project_review_sessions = review_sessions_by_project_id[project_id] - todays_session = None - yesterdays_session = None - for review_session in project_review_sessions: - session_name = review_session["name"] - if session_name == today_session_name: - todays_session = review_session - break - elif session_name == yesterday_session_name: - yesterdays_session = review_session - - # Skip if today's session already exist - if todays_session is not None: - self.log.debug(( - "Todays ReviewSession \"{}\"" - " in project \"{}\" already exists" - ).format(today_session_name, project_name)) - continue - - # Check if there is yesterday's session and is empty - # - in that case just rename it - if ( - yesterdays_session is not None - and len(yesterdays_session["review_session_objects"]) == 0 - ): - self.log.debug(( - "Renaming yesterdays empty review session \"{}\" to \"{}\"" - " in project \"{}\"" - ).format( - yesterday_session_name, today_session_name, project_name - )) - yesterdays_session["name"] = today_session_name - session.commit() - continue - - # Create new review 
session with new name
-            self.log.debug((
-                "Creating new review session \"{}\" in project \"{}\""
-            ).format(today_session_name, project_name))
-            session.create("ReviewSession", {
-                "project_id": project_id,
-                "name": today_session_name
-            })
-            session.commit()
-
-    def _get_action_settings(self, project_names_by_id):
-        settings_by_project_id = {}
-        for project_id, project_name in project_names_by_id.items():
-            project_settings = get_project_settings(project_name)
-            action_settings = self._extract_action_settings(project_settings)
-            settings_by_project_id[project_id] = action_settings
-        return settings_by_project_id
-
-    def _extract_action_settings(self, project_settings):
-        return (
-            project_settings
-            .get("ftrack", {})
-            .get(self.settings_frack_subkey, {})
-            .get(self.settings_key)
-        ) or {}
-
-    def _fill_review_template(self, template, data):
-        output = None
-        try:
-            output = template.format(**data)
-        except Exception:
-            self.log.warning(
-                (
-                    "Failed to fill review session template {} with data {}"
-                ).format(template, data),
-                exc_info=True
-            )
-        return output
-
-
-def register(session):
-    '''Register plugin. Called when used as a plugin.'''
-    CreateDailyReviewSessionServerAction(session).register()
diff --git a/openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py b/openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py
deleted file mode 100644
index f9aac2c80a..0000000000
--- a/openpype/modules/ftrack/event_handlers_server/action_multiple_notes.py
+++ /dev/null
@@ -1,167 +0,0 @@
-from openpype_modules.ftrack.lib import ServerAction
-
-
-class MultipleNotesServer(ServerAction):
-    """Action adds the same note to multiple AssetVersions.
-
-    The note is added to the selected AssetVersions and is created as the
-    user who triggered the action. A note category can optionally be set.
- """ - - identifier = "multiple.notes.server" - label = "Multiple Notes (Server)" - description = "Add same note to multiple Asset Versions" - - _none_category = "__NONE__" - - def discover(self, session, entities, event): - """Show action only on AssetVersions.""" - if not entities: - return False - - for entity in entities: - if entity.entity_type.lower() != "assetversion": - return False - return True - - def interface(self, session, entities, event): - event_source = event["source"] - user_info = event_source.get("user") or {} - user_id = user_info.get("id") - if not user_id: - return None - - values = event["data"].get("values") - if values: - return None - - note_label = { - "type": "label", - "value": "# Enter note: #" - } - - note_value = { - "name": "note", - "type": "textarea" - } - - category_label = { - "type": "label", - "value": "## Category: ##" - } - - category_data = [] - category_data.append({ - "label": "- None -", - "value": self._none_category - }) - all_categories = session.query( - "select id, name from NoteCategory" - ).all() - for cat in all_categories: - category_data.append({ - "label": cat["name"], - "value": cat["id"] - }) - category_value = { - "type": "enumerator", - "name": "category", - "data": category_data, - "value": self._none_category - } - - splitter = { - "type": "label", - "value": "---" - } - - return [ - note_label, - note_value, - splitter, - category_label, - category_value - ] - - def launch(self, session, entities, event): - if "values" not in event["data"]: - return None - - values = event["data"]["values"] - if len(values) <= 0 or "note" not in values: - return False - - # Get Note text - note_value = values["note"] - if note_value.lower().strip() == "": - return { - "success": True, - "message": "Note was not entered. Skipping" - } - - # Get User - event_source = event["source"] - user_info = event_source.get("user") or {} - user_id = user_info.get("id") - user = None - if user_id: - user = session.query( - 'User where id is "{}"'.format(user_id) - ).first() - - if not user: - return { - "success": False, - "message": "Couldn't get user information." - } - - # Logging message preparation - # - username - username = user.get("username") or "N/A" - - # - AssetVersion ids - asset_version_ids_str = ",".join([entity["id"] for entity in entities]) - - # Base note data - note_data = { - "content": note_value, - "author": user - } - - # Get category - category_id = values["category"] - if category_id == self._none_category: - category_id = None - - category_name = None - if category_id is not None: - category = session.query( - "select id, name from NoteCategory where id is \"{}\"".format( - category_id - ) - ).first() - if category: - note_data["category"] = category - category_name = category["name"] - - category_msg = "" - if category_name: - category_msg = " with category: \"{}\"".format(category_name) - - self.log.warning(( - "Creating note{} as User \"{}\" on " - "AssetVersions: {} with value \"{}\"" - ).format(category_msg, username, asset_version_ids_str, note_value)) - - # Create notes for entities - for entity in entities: - new_note = session.create("Note", note_data) - entity["notes"].append(new_note) - session.commit() - return True - - -def register(session): - '''Register plugin. 
Called when used as an plugin.''' - - MultipleNotesServer(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py deleted file mode 100644 index 02231cbe3c..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ /dev/null @@ -1,415 +0,0 @@ -import json -import copy - -from openpype.client import get_project, create_project -from openpype.settings import ProjectSettings, SaveWarningExc - -from openpype_modules.ftrack.lib import ( - ServerAction, - get_openpype_attr, - CUST_ATTR_AUTO_SYNC -) - - -class PrepareProjectServer(ServerAction): - """Prepare project attributes in Anatomy.""" - - identifier = "prepare.project.server" - label = "OpenPype Admin" - variant = "- Prepare Project (Server)" - description = "Set basic attributes on the project" - - settings_key = "prepare_project" - - role_list = ["Pypeclub", "Administrator", "Project Manager"] - - settings_key = "prepare_project" - - item_splitter = {"type": "label", "value": "---"} - _keys_order = ( - "fps", - "frameStart", - "frameEnd", - "handleStart", - "handleEnd", - "clipIn", - "clipOut", - "resolutionHeight", - "resolutionWidth", - "pixelAspect", - "applications", - "tools_env", - "library_project", - ) - - def discover(self, session, entities, event): - """Show only on project.""" - if ( - len(entities) != 1 - or entities[0].entity_type.lower() != "project" - ): - return False - - return self.valid_roles(session, entities, event) - - def interface(self, session, entities, event): - if event['data'].get('values', {}): - return - - # Inform user that this may take a while - self.show_message(event, "Preparing data... Please wait", True) - self.log.debug("Preparing data which will be shown") - - self.log.debug("Loading custom attributes") - - project_entity = entities[0] - project_name = project_entity["full_name"] - - project_settings = ProjectSettings(project_name) - - project_anatom_settings = project_settings["project_anatomy"] - root_items = self.prepare_root_items(project_anatom_settings) - - ca_items, multiselect_enumerators = ( - self.prepare_custom_attribute_items(project_anatom_settings) - ) - - self.log.debug("Heavy items are ready. Preparing last items group.") - - title = "Prepare Project" - items = [] - - # Add root items - items.extend(root_items) - - items.append(self.item_splitter) - items.append({ - "type": "label", - "value": "
Set basic Attributes:
" - }) - - items.extend(ca_items) - - # This item will be last before enumerators - # Set value of auto synchronization - auto_sync_value = project_entity["custom_attributes"].get( - CUST_ATTR_AUTO_SYNC, False - ) - auto_sync_item = { - "name": CUST_ATTR_AUTO_SYNC, - "type": "boolean", - "value": auto_sync_value, - "label": "AutoSync to Avalon" - } - # Add autosync attribute - items.append(auto_sync_item) - - # Add enumerator items at the end - for item in multiselect_enumerators: - items.append(item) - - return { - "items": items, - "title": title - } - - def prepare_root_items(self, project_anatom_settings): - self.log.debug("Root items preparation begins.") - - root_items = [] - root_items.append({ - "type": "label", - "value": "
Check your Project root settings
" - }) - root_items.append({ - "type": "label", - "value": ( - "
NOTE: Roots are crucial for path filling" - " (and creating folder structure).
" - ) - }) - root_items.append({ - "type": "label", - "value": ( - "
WARNING: Do not change roots on running project," - " that will cause workflow issues.
" - ) - }) - - empty_text = "Enter root path here..." - - roots_entity = project_anatom_settings["roots"] - for root_name, root_entity in roots_entity.items(): - root_items.append(self.item_splitter) - root_items.append({ - "type": "label", - "value": "Root: \"{}\"".format(root_name) - }) - for platform_name, value_entity in root_entity.items(): - root_items.append({ - "label": platform_name, - "name": "__root__{}__{}".format(root_name, platform_name), - "type": "text", - "value": value_entity.value, - "empty_text": empty_text - }) - - root_items.append({ - "type": "hidden", - "name": "__rootnames__", - "value": json.dumps(list(roots_entity.keys())) - }) - - self.log.debug("Root items preparation ended.") - return root_items - - def _attributes_to_set(self, project_anatom_settings): - attributes_to_set = {} - - attribute_values_by_key = {} - for key, entity in project_anatom_settings["attributes"].items(): - attribute_values_by_key[key] = entity.value - - cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True) - - for attr in hier_cust_attrs: - key = attr["key"] - if key.startswith("avalon_"): - continue - attributes_to_set[key] = { - "label": attr["label"], - "object": attr, - "default": attribute_values_by_key.get(key) - } - - for attr in cust_attrs: - if attr["entity_type"].lower() != "show": - continue - key = attr["key"] - if key.startswith("avalon_"): - continue - attributes_to_set[key] = { - "label": attr["label"], - "object": attr, - "default": attribute_values_by_key.get(key) - } - - # Sort by label - attributes_to_set = dict(sorted( - attributes_to_set.items(), - key=lambda x: x[1]["label"] - )) - return attributes_to_set - - def prepare_custom_attribute_items(self, project_anatom_settings): - items = [] - multiselect_enumerators = [] - attributes_to_set = self._attributes_to_set(project_anatom_settings) - - self.log.debug("Preparing interface for keys: \"{}\"".format( - str([key for key in attributes_to_set]) - )) - - attribute_keys = set(attributes_to_set.keys()) - keys_order = [] - for key in self._keys_order: - if key in attribute_keys: - keys_order.append(key) - - attribute_keys = attribute_keys - set(keys_order) - for key in sorted(attribute_keys): - keys_order.append(key) - - for key in keys_order: - in_data = attributes_to_set[key] - attr = in_data["object"] - - # initial item definition - item = { - "name": key, - "label": in_data["label"] - } - - # cust attr type - may have different visualization - type_name = attr["type"]["name"].lower() - easy_types = ["text", "boolean", "date", "number"] - - easy_type = False - if type_name in easy_types: - easy_type = True - - elif type_name == "enumerator": - - attr_config = json.loads(attr["config"]) - attr_config_data = json.loads(attr_config["data"]) - - if attr_config["multiSelect"] is True: - multiselect_enumerators.append(self.item_splitter) - multiselect_enumerators.append({ - "type": "label", - "value": "
{}
".format(in_data["label"]) - }) - - default = in_data["default"] - names = [] - for option in sorted( - attr_config_data, key=lambda x: x["menu"] - ): - name = option["value"] - new_name = "__{}__{}".format(key, name) - names.append(new_name) - item = { - "name": new_name, - "type": "boolean", - "label": "- {}".format(option["menu"]) - } - if default: - if isinstance(default, (list, tuple)): - if name in default: - item["value"] = True - else: - if name == default: - item["value"] = True - - multiselect_enumerators.append(item) - - multiselect_enumerators.append({ - "type": "hidden", - "name": "__hidden__{}".format(key), - "value": json.dumps(names) - }) - else: - easy_type = True - item["data"] = attr_config_data - - else: - self.log.warning(( - "Custom attribute \"{}\" has type \"{}\"." - " I don't know how to handle" - ).format(key, type_name)) - items.append({ - "type": "label", - "value": ( - "!!! Can't handle Custom attritubte type \"{}\"" - " (key: \"{}\")" - ).format(type_name, key) - }) - - if easy_type: - item["type"] = type_name - - # default value in interface - default = in_data["default"] - if default is not None: - item["value"] = default - - items.append(item) - - return items, multiselect_enumerators - - def launch(self, session, entities, event): - in_data = event["data"].get("values") - if not in_data: - return - - root_values = {} - root_key = "__root__" - for key in tuple(in_data.keys()): - if key.startswith(root_key): - _key = key[len(root_key):] - root_values[_key] = in_data.pop(key) - - root_names = in_data.pop("__rootnames__", None) - root_data = {} - for root_name in json.loads(root_names): - root_data[root_name] = {} - for key, value in tuple(root_values.items()): - prefix = "{}__".format(root_name) - if not key.startswith(prefix): - continue - - _key = key[len(prefix):] - root_data[root_name][_key] = value - - # Find hidden items for multiselect enumerators - keys_to_process = [] - for key in in_data: - if key.startswith("__hidden__"): - keys_to_process.append(key) - - self.log.debug("Preparing data for Multiselect Enumerators") - enumerators = {} - for key in keys_to_process: - new_key = key.replace("__hidden__", "") - enumerator_items = in_data.pop(key) - enumerators[new_key] = json.loads(enumerator_items) - - # find values set for multiselect enumerator - for key, enumerator_items in enumerators.items(): - in_data[key] = [] - - name = "__{}__".format(key) - - for item in enumerator_items: - value = in_data.pop(item) - if value is True: - new_key = item.replace(name, "") - in_data[key].append(new_key) - - self.log.debug("Setting Custom Attribute values") - - project_entity = entities[0] - project_name = project_entity["full_name"] - - # Try to find project document - project_doc = get_project(project_name) - - # Create project if is not available - # - creation is required to be able set project anatomy and attributes - if not project_doc: - project_code = project_entity["name"] - self.log.info("Creating project \"{} [{}]\"".format( - project_name, project_code - )) - create_project(project_name, project_code) - self.trigger_event( - "openpype.project.created", - {"project_name": project_name} - ) - - project_settings = ProjectSettings(project_name) - project_anatomy_settings = project_settings["project_anatomy"] - project_anatomy_settings["roots"] = root_data - - custom_attribute_values = {} - attributes_entity = project_anatomy_settings["attributes"] - for key, value in in_data.items(): - if key not in attributes_entity: - custom_attribute_values[key] = 
value - else: - attributes_entity[key] = value - - try: - project_settings.save() - except SaveWarningExc as exc: - self.log.info("Few warnings happened during settings save:") - for warning in exc.warnings: - self.log.info(str(warning)) - - # Change custom attributes on project - if custom_attribute_values: - for key, value in custom_attribute_values.items(): - project_entity["custom_attributes"][key] = value - self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value)) - session.commit() - - event_data = copy.deepcopy(in_data) - event_data["project_name"] = project_name - self.trigger_event("openpype.project.prepared", event_data) - - return True - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - PrepareProjectServer(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py b/openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py deleted file mode 100644 index 62772740cd..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/action_private_project_detection.py +++ /dev/null @@ -1,61 +0,0 @@ -from openpype_modules.ftrack.lib import ServerAction - - -class PrivateProjectDetectionAction(ServerAction): - """Action helps to identify if does not have access to project.""" - - identifier = "server.missing.perm.private.project" - label = "Missing permissions" - description = ( - "Main ftrack event server does not have access to this project." - ) - - def _discover(self, event): - """Show action only if there is a selection in event data.""" - entities = self._translate_event(event) - if entities: - return None - - selection = event["data"].get("selection") - if not selection: - return None - - return { - "items": [{ - "label": self.label, - "variant": self.variant, - "description": self.description, - "actionIdentifier": self.discover_identifier, - "icon": self.icon, - }] - } - - def _launch(self, event): - # Ignore if there are values in event data - # - somebody clicked on submit button - values = event["data"].get("values") - if values: - return None - - title = "# Private project (missing permissions) #" - msg = ( - "User ({}) or API Key used on Ftrack event server" - " does not have permissions to access this private project." - ).format(self.session.api_user) - return { - "type": "form", - "title": "Missing permissions", - "items": [ - {"type": "label", "value": title}, - {"type": "label", "value": msg}, - # Add hidden to be able detect if was clicked on submit - {"type": "hidden", "value": "1", "name": "hidden"} - ], - "submit_button_label": "Got it" - } - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - PrivateProjectDetectionAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py deleted file mode 100644 index a698195c59..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py +++ /dev/null @@ -1,553 +0,0 @@ -import sys -import json -import collections -import ftrack_api -from openpype_modules.ftrack.lib import ( - ServerAction, - query_custom_attributes -) - - -class PushHierValuesToNonHier(ServerAction): - """Action push hierarchical custom attribute values to non-hierarchical. - - Hierarchical value is also pushed to their task entities. 
- - Action has 3 configurable attributes: - - `role_list`: List of use roles that can discover the action. - - `interest_attributes`: Keys of custom attributes that will be looking - for to push values. Attribute key must have both custom attribute types - hierarchical and on specific object type (entity type). - - `interest_entity_types`: Entity types that will be in focus of pushing - hierarchical to object type's custom attribute. - - EXAMPLE: - * Before action - |_ Project - |_ Shot1 - - hierarchical custom attribute value: `frameStart`: 1001 - - custom attribute for `Shot`: frameStart: 1 - |_ Task1 - - hierarchical custom attribute value: `frameStart`: 10 - - custom attribute for `Task`: frameStart: 0 - - * After action - |_ Project - |_ Shot1 - - hierarchical custom attribute value: `frameStart`: 1001 - - custom attribute for `Shot`: frameStart: 1001 - |_ Task1 - - hierarchical custom attribute value: `frameStart`: 1001 - - custom attribute for `Task`: frameStart: 1001 - """ - - identifier = "admin.push_hier_values_to_non_hier" - label = "OpenPype Admin" - variant = "- Push Hierarchical values To Non-Hierarchical" - - entities_query_by_project = ( - "select id, parent_id, object_type_id from TypedContext" - " where project_id is \"{}\"" - ) - cust_attrs_query = ( - "select id, key, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where key in ({})" - ) - - # configurable - settings_key = "sync_hier_entity_attributes" - settings_enabled_key = "action_enabled" - - def discover(self, session, entities, event): - """ Validation """ - # Check if selection is valid - is_valid = False - for ent in event["data"]["selection"]: - # Ignore entities that are not tasks or projects - if ent["entityType"].lower() in ("task", "show"): - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def launch(self, session, entities, event): - self.log.debug("{}: Creating job".format(self.label)) - - user_entity = session.query( - "User where id is {}".format(event["source"]["user"]["id"]) - ).one() - job = session.create("Job", { - "user": user_entity, - "status": "running", - "data": json.dumps({ - "description": "Propagation of Frame attribute values to task." 
- }) - }) - session.commit() - - try: - result = self.propagate_values(session, event, entities) - - except Exception as exc: - msg = "Pushing Custom attribute values to task Failed" - - self.log.warning(msg, exc_info=True) - - session.rollback() - - description = "{} (Download traceback)".format(msg) - self.add_traceback_to_job( - job, session, sys.exc_info(), description - ) - - return { - "success": False, - "message": "Error: {}".format(str(exc)) - } - - job["status"] = "done" - session.commit() - - return result - - def attrs_configurations(self, session, object_ids, interest_attributes): - attrs = session.query(self.cust_attrs_query.format( - self.join_query_keys(interest_attributes), - self.join_query_keys(object_ids) - )).all() - - attrs_by_obj_id = collections.defaultdict(list) - hiearchical = [] - for attr in attrs: - if attr["is_hierarchical"]: - hiearchical.append(attr) - continue - obj_id = attr["object_type_id"] - attrs_by_obj_id[obj_id].append(attr) - return attrs_by_obj_id, hiearchical - - def query_attr_value( - self, - session, - hier_attrs, - attrs_by_obj_id, - dst_object_type_ids, - task_entity_ids, - non_task_entity_ids, - parent_id_by_entity_id - ): - all_non_task_ids_with_parents = set() - for entity_id in non_task_entity_ids: - all_non_task_ids_with_parents.add(entity_id) - _entity_id = entity_id - while True: - parent_id = parent_id_by_entity_id.get(_entity_id) - if ( - parent_id is None - or parent_id in all_non_task_ids_with_parents - ): - break - all_non_task_ids_with_parents.add(parent_id) - _entity_id = parent_id - - all_entity_ids = ( - set(all_non_task_ids_with_parents) - | set(task_entity_ids) - ) - attr_ids = {attr["id"] for attr in hier_attrs} - for obj_id in dst_object_type_ids: - attrs = attrs_by_obj_id.get(obj_id) - if attrs is not None: - for attr in attrs: - attr_ids.add(attr["id"]) - - real_values_by_entity_id = { - entity_id: {} - for entity_id in all_entity_ids - } - - attr_values = query_custom_attributes( - session, attr_ids, all_entity_ids, True - ) - for item in attr_values: - entity_id = item["entity_id"] - attr_id = item["configuration_id"] - real_values_by_entity_id[entity_id][attr_id] = item["value"] - - # Fill hierarchical values - hier_attrs_key_by_id = { - hier_attr["id"]: hier_attr - for hier_attr in hier_attrs - } - hier_values_per_entity_id = {} - for entity_id in all_non_task_ids_with_parents: - real_values = real_values_by_entity_id[entity_id] - hier_values_per_entity_id[entity_id] = {} - for attr_id, attr in hier_attrs_key_by_id.items(): - key = attr["key"] - hier_values_per_entity_id[entity_id][key] = ( - real_values.get(attr_id) - ) - - output = {} - for entity_id in non_task_entity_ids: - output[entity_id] = {} - for attr in hier_attrs_key_by_id.values(): - key = attr["key"] - value = hier_values_per_entity_id[entity_id][key] - tried_ids = set() - if value is None: - tried_ids.add(entity_id) - _entity_id = entity_id - while value is None: - parent_id = parent_id_by_entity_id.get(_entity_id) - if not parent_id: - break - value = hier_values_per_entity_id[parent_id][key] - if value is not None: - break - _entity_id = parent_id - tried_ids.add(parent_id) - - if value is None: - value = attr["default"] - - if value is not None: - for ent_id in tried_ids: - hier_values_per_entity_id[ent_id][key] = value - - output[entity_id][key] = value - - return real_values_by_entity_id, output - - def propagate_values(self, session, event, selected_entities): - ftrack_settings = self.get_ftrack_settings( - session, event, selected_entities - ) - 
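# A hypothetical sketch (illustrative values only) of the action settings the
# next statement extracts; the two keys are the ones this method reads
# further below:
#
#     action_settings = {
#         "interest_entity_types": ["Shot"],
#         "interest_attributes": ["frameStart", "frameEnd"]
#     }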
action_settings = ( - ftrack_settings[self.settings_frack_subkey][self.settings_key] - ) - - project_entity = self.get_project_from_entity(selected_entities[0]) - selected_ids = [entity["id"] for entity in selected_entities] - - self.log.debug("Querying project's entities \"{}\".".format( - project_entity["full_name"] - )) - interest_entity_types = tuple( - ent_type.lower() - for ent_type in action_settings["interest_entity_types"] - ) - all_object_types = session.query("ObjectType").all() - object_types_by_low_name = { - object_type["name"].lower(): object_type - for object_type in all_object_types - } - - task_object_type = object_types_by_low_name["task"] - dst_object_type_ids = {task_object_type["id"]} - for ent_type in interest_entity_types: - obj_type = object_types_by_low_name.get(ent_type) - if obj_type: - dst_object_type_ids.add(obj_type["id"]) - - interest_attributes = action_settings["interest_attributes"] - # Find custom attributes definitions - attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, dst_object_type_ids, interest_attributes - ) - # Filter destination object types if they have any object specific - # custom attribute - for obj_id in tuple(dst_object_type_ids): - if obj_id not in attrs_by_obj_id: - dst_object_type_ids.remove(obj_id) - - if not dst_object_type_ids: - # TODO report that there are not matching custom attributes - return { - "success": True, - "message": "Nothing has changed." - } - - ( - parent_id_by_entity_id, - filtered_entities - ) = self.all_hierarchy_entities( - session, - selected_ids, - project_entity, - dst_object_type_ids - ) - - self.log.debug("Preparing whole project hierarchy by ids.") - - entities_by_obj_id = { - obj_id: [] - for obj_id in dst_object_type_ids - } - - self.log.debug("Filtering Task entities.") - focus_entity_ids = [] - non_task_entity_ids = [] - task_entity_ids = [] - for entity in filtered_entities: - entity_id = entity["id"] - focus_entity_ids.append(entity_id) - if entity.entity_type.lower() == "task": - task_entity_ids.append(entity_id) - else: - non_task_entity_ids.append(entity_id) - - obj_id = entity["object_type_id"] - entities_by_obj_id[obj_id].append(entity_id) - - if not non_task_entity_ids: - return { - "success": True, - "message": "Nothing to do in your selection." 
- } - - self.log.debug("Getting Custom attribute values.") - ( - real_values_by_entity_id, - hier_values_by_entity_id - ) = self.query_attr_value( - session, - hier_attrs, - attrs_by_obj_id, - dst_object_type_ids, - task_entity_ids, - non_task_entity_ids, - parent_id_by_entity_id - ) - - self.log.debug("Setting parents' values to task.") - self.set_task_attr_values( - session, - hier_attrs, - task_entity_ids, - hier_values_by_entity_id, - parent_id_by_entity_id, - real_values_by_entity_id - ) - - self.log.debug("Setting values to entities themselves.") - self.push_values_to_entities( - session, - entities_by_obj_id, - attrs_by_obj_id, - hier_values_by_entity_id, - real_values_by_entity_id - ) - - return True - - def all_hierarchy_entities( - self, - session, - selected_ids, - project_entity, - destination_object_type_ids - ): - selected_ids = set(selected_ids) - - filtered_entities = [] - parent_id_by_entity_id = {} - # Query is simple if project is in selection - if project_entity["id"] in selected_ids: - entities = session.query( - self.entities_query_by_project.format(project_entity["id"]) - ).all() - - for entity in entities: - if entity["object_type_id"] in destination_object_type_ids: - filtered_entities.append(entity) - entity_id = entity["id"] - parent_id_by_entity_id[entity_id] = entity["parent_id"] - return parent_id_by_entity_id, filtered_entities - - # Query selection and get it's link to be able calculate parentings - entities_with_link = session.query(( - "select id, parent_id, link, object_type_id" - " from TypedContext where id in ({})" - ).format(self.join_query_keys(selected_ids))).all() - - # Process and store queried entities and store all lower entities to - # `bottom_ids` - # - bottom_ids should not contain 2 ids where one is parent of second - bottom_ids = set(selected_ids) - for entity in entities_with_link: - if entity["object_type_id"] in destination_object_type_ids: - filtered_entities.append(entity) - children_id = None - for idx, item in enumerate(reversed(entity["link"])): - item_id = item["id"] - if idx > 0 and item_id in bottom_ids: - bottom_ids.remove(item_id) - - if children_id is not None: - parent_id_by_entity_id[children_id] = item_id - - children_id = item_id - - # Query all children of selection per one hierarchy level and process - # their data the same way as selection but parents are already known - chunk_size = 100 - while bottom_ids: - child_entities = [] - # Query entities in chunks - entity_ids = list(bottom_ids) - for idx in range(0, len(entity_ids), chunk_size): - _entity_ids = entity_ids[idx:idx + chunk_size] - child_entities.extend(session.query(( - "select id, parent_id, object_type_id from" - " TypedContext where parent_id in ({})" - ).format(self.join_query_keys(_entity_ids))).all()) - - bottom_ids = set() - for entity in child_entities: - entity_id = entity["id"] - parent_id_by_entity_id[entity_id] = entity["parent_id"] - bottom_ids.add(entity_id) - if entity["object_type_id"] in destination_object_type_ids: - filtered_entities.append(entity) - - return parent_id_by_entity_id, filtered_entities - - def set_task_attr_values( - self, - session, - hier_attrs, - task_entity_ids, - hier_values_by_entity_id, - parent_id_by_entity_id, - real_values_by_entity_id - ): - hier_attr_id_by_key = { - attr["key"]: attr["id"] - for attr in hier_attrs - } - filtered_task_ids = set() - for task_id in task_entity_ids: - parent_id = parent_id_by_entity_id.get(task_id) - parent_values = hier_values_by_entity_id.get(parent_id) - if parent_values: - 
filtered_task_ids.add(task_id) - - if not filtered_task_ids: - return - - for task_id in filtered_task_ids: - parent_id = parent_id_by_entity_id[task_id] - parent_values = hier_values_by_entity_id[parent_id] - hier_values_by_entity_id[task_id] = {} - real_task_attr_values = real_values_by_entity_id[task_id] - for key, value in parent_values.items(): - hier_values_by_entity_id[task_id][key] = value - if value is None: - continue - - configuration_id = hier_attr_id_by_key[key] - _entity_key = collections.OrderedDict([ - ("configuration_id", configuration_id), - ("entity_id", task_id) - ]) - op = None - if configuration_id not in real_task_attr_values: - op = ftrack_api.operation.CreateEntityOperation( - "CustomAttributeValue", - _entity_key, - {"value": value} - ) - elif real_task_attr_values[configuration_id] != value: - op = ftrack_api.operation.UpdateEntityOperation( - "CustomAttributeValue", - _entity_key, - "value", - real_task_attr_values[configuration_id], - value - ) - - if op is not None: - session.recorded_operations.push(op) - if len(session.recorded_operations) > 100: - session.commit() - - session.commit() - - def push_values_to_entities( - self, - session, - entities_by_obj_id, - attrs_by_obj_id, - hier_values_by_entity_id, - real_values_by_entity_id - ): - """Push values from hierarchical custom attributes to non-hierarchical. - - Args: - session (ftrack_api.Sessison): Session which queried entities, - values and which is used for change propagation. - entities_by_obj_id (dict[str, list[str]]): TypedContext - ftrack entity ids where the attributes are propagated by their - object ids. - attrs_by_obj_id (dict[str, ftrack_api.Entity]): Objects of - 'CustomAttributeConfiguration' by their ids. - hier_values_by_entity_id (doc[str, dict[str, Any]]): Attribute - values by entity id and by their keys. - real_values_by_entity_id (doc[str, dict[str, Any]]): Real attribute - values of entities. 
- """ - - for object_id, entity_ids in entities_by_obj_id.items(): - attrs = attrs_by_obj_id.get(object_id) - if not attrs or not entity_ids: - continue - - for entity_id in entity_ids: - real_values = real_values_by_entity_id.get(entity_id) - hier_values = hier_values_by_entity_id.get(entity_id) - if hier_values is None: - continue - - for attr in attrs: - attr_id = attr["id"] - attr_key = attr["key"] - value = hier_values.get(attr_key) - if value is None: - continue - - _entity_key = collections.OrderedDict([ - ("configuration_id", attr_id), - ("entity_id", entity_id) - ]) - - op = None - if attr_id not in real_values: - op = ftrack_api.operation.CreateEntityOperation( - "CustomAttributeValue", - _entity_key, - {"value": value} - ) - elif real_values[attr_id] != value: - op = ftrack_api.operation.UpdateEntityOperation( - "CustomAttributeValue", - _entity_key, - "value", - real_values[attr_id], - value - ) - - if op is not None: - session.recorded_operations.push(op) - if len(session.recorded_operations) > 100: - session.commit() - - session.commit() - - -def register(session): - PushHierValuesToNonHier(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py deleted file mode 100644 index 442206feba..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py +++ /dev/null @@ -1,226 +0,0 @@ -import time -import sys -import json - -import ftrack_api - -from openpype_modules.ftrack.lib import ServerAction -from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory - - -class SyncToAvalonServer(ServerAction): - """ - Synchronizing data action - from Ftrack to Avalon DB - - Stores all information about entity. - - Name(string) - Most important information = identifier of entity - - Parent(ObjectId) - Avalon Project Id, if entity is not project itself - - Data(dictionary): - - VisualParent(ObjectId) - Avalon Id of parent asset - - Parents(array of string) - All parent names except project - - Tasks(dictionary of dictionaries) - Tasks on asset - - FtrackId(string) - - entityType(string) - entity's type on Ftrack - * All Custom attributes in group 'Avalon' - - custom attributes that start with 'avalon_' are skipped - - * These information are stored for entities in whole project. - - Avalon ID of asset is stored to Ftrack - - Custom attribute 'avalon_mongo_id'. - - action IS NOT creating this Custom attribute if doesn't exist - - run 'Create Custom Attributes' action - - or do it manually (Not recommended) - """ - #: Action identifier. - identifier = "sync.to.avalon.server" - #: Action label. - label = "OpenPype Admin" - variant = "- Sync To Avalon (Server)" - #: Action description. 
- description = "Send data from Ftrack to Avalon" - role_list = {"Pypeclub", "Administrator", "Project Manager"} - settings_key = "sync_to_avalon" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.entities_factory = SyncEntitiesFactory(self.log, self.session) - - def discover(self, session, entities, event): - """ Validation """ - # Check if selection is valid - is_valid = False - for ent in event["data"]["selection"]: - # Ignore entities that are not tasks or projects - if ent["entityType"].lower() in ["show", "task"]: - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def launch(self, session, in_entities, event): - self.log.debug("{}: Creating job".format(self.label)) - - user_entity = session.query( - "User where id is {}".format(event["source"]["user"]["id"]) - ).one() - job_entity = session.create("Job", { - "user": user_entity, - "status": "running", - "data": json.dumps({ - "description": "Sync to avalon is running..." - }) - }) - session.commit() - - project_entity = self.get_project_from_entity(in_entities[0]) - project_name = project_entity["full_name"] - - try: - result = self.synchronization(event, project_name) - - except Exception: - self.log.error( - "Synchronization failed due to code error", exc_info=True - ) - - description = "Sync to avalon Crashed (Download traceback)" - self.add_traceback_to_job( - job_entity, session, sys.exc_info(), description - ) - - msg = "An error has happened during synchronization" - title = "Synchronization report ({}):".format(project_name) - items = [] - items.append({ - "type": "label", - "value": "# {}".format(msg) - }) - items.append({ - "type": "label", - "value": ( - "
Download report from job for more information.
" - ) - }) - - report = {} - try: - report = self.entities_factory.report() - except Exception: - pass - - _items = report.get("items") or [] - if _items: - items.append(self.entities_factory.report_splitter) - items.extend(_items) - - self.show_interface(items, title, event, submit_btn_label="Ok") - - return {"success": True, "message": msg} - - job_entity["status"] = "done" - job_entity["data"] = json.dumps({ - "description": "Sync to avalon finished." - }) - session.commit() - - return result - - def synchronization(self, event, project_name): - time_start = time.time() - - self.show_message(event, "Synchronization - Preparing data", True) - - try: - output = self.entities_factory.launch_setup(project_name) - if output is not None: - return output - - time_1 = time.time() - - self.entities_factory.set_cutom_attributes() - time_2 = time.time() - - # This must happen before all filtering!!! - self.entities_factory.prepare_avalon_entities(project_name) - time_3 = time.time() - - self.entities_factory.filter_by_ignore_sync() - time_4 = time.time() - - self.entities_factory.duplicity_regex_check() - time_5 = time.time() - - self.entities_factory.prepare_ftrack_ent_data() - time_6 = time.time() - - self.entities_factory.synchronize() - time_7 = time.time() - - self.log.debug( - "*** Synchronization finished ***" - ) - self.log.debug( - "preparation <{}>".format(time_1 - time_start) - ) - self.log.debug( - "set_cutom_attributes <{}>".format(time_2 - time_1) - ) - self.log.debug( - "prepare_avalon_entities <{}>".format(time_3 - time_2) - ) - self.log.debug( - "filter_by_ignore_sync <{}>".format(time_4 - time_3) - ) - self.log.debug( - "duplicity_regex_check <{}>".format(time_5 - time_4) - ) - self.log.debug( - "prepare_ftrack_ent_data <{}>".format(time_6 - time_5) - ) - self.log.debug( - "synchronize <{}>".format(time_7 - time_6) - ) - self.log.debug( - "* Total time: {}".format(time_7 - time_start) - ) - - if self.entities_factory.project_created: - event = ftrack_api.event.base.Event( - topic="openpype.project.created", - data={"project_name": project_name} - ) - self.session.event_hub.publish(event) - - report = self.entities_factory.report() - if report and report.get("items"): - default_title = "Synchronization report ({}):".format( - project_name - ) - self.show_interface( - items=report["items"], - title=report.get("title", default_title), - event=event - ) - return { - "success": True, - "message": "Synchronization Finished" - } - - finally: - try: - self.entities_factory.dbcon.uninstall() - except Exception: - pass - - try: - self.entities_factory.session.close() - except Exception: - pass - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - SyncToAvalonServer(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py deleted file mode 100644 index 1d73318f6e..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py +++ /dev/null @@ -1,346 +0,0 @@ -import copy -import json -import collections - -import ftrack_api - -from openpype_modules.ftrack.lib import ( - ServerAction, - statics_icon, -) -from openpype_modules.ftrack.lib.avalon_sync import create_chunks - - -class TransferHierarchicalValues(ServerAction): - """Transfer values across hierarchical attributes. - - Aalso gives ability to convert types meanwhile. 
That is limited to - conversions between numbers and strings - - int <-> float - - in, float -> string - """ - - identifier = "transfer.hierarchical.values" - label = "OpenPype Admin" - variant = "- Transfer values between 2 custom attributes" - description = ( - "Move values from a hierarchical attribute to" - " second hierarchical attribute." - ) - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - - all_project_entities_query = ( - "select id, name, parent_id, link" - " from TypedContext where project_id is \"{}\"" - ) - cust_attr_query = ( - "select value, entity_id from CustomAttributeValue" - " where entity_id in ({}) and configuration_id is \"{}\"" - ) - settings_key = "transfer_values_of_hierarchical_attributes" - - def discover(self, session, entities, event): - """Show anywhere.""" - - return self.valid_roles(session, entities, event) - - def _selection_interface(self, session, event_values=None): - title = "Transfer hierarchical values" - - attr_confs = session.query( - ( - "select id, key from CustomAttributeConfiguration" - " where is_hierarchical is true" - ) - ).all() - attr_items = [] - for attr_conf in attr_confs: - attr_items.append({ - "value": attr_conf["id"], - "label": attr_conf["key"] - }) - - if len(attr_items) < 2: - return { - "title": title, - "items": [{ - "type": "label", - "value": ( - "Didn't find custom attributes" - " that can be transferred." - ) - }] - } - - attr_items = sorted(attr_items, key=lambda item: item["label"]) - items = [] - item_splitter = {"type": "label", "value": "---"} - items.append({ - "type": "label", - "value": ( - "
Please select source and destination" - " Custom attribute
" - ) - }) - items.append({ - "type": "label", - "value": ( - "WARNING: This will take affect for all projects!" - ) - }) - if event_values: - items.append({ - "type": "label", - "value": ( - "Note: Please select 2 different custom attributes." - ) - }) - - items.append(item_splitter) - - src_item = { - "type": "enumerator", - "label": "Source", - "name": "src_attr_id", - "data": copy.deepcopy(attr_items) - } - dst_item = { - "type": "enumerator", - "label": "Destination", - "name": "dst_attr_id", - "data": copy.deepcopy(attr_items) - } - delete_item = { - "type": "boolean", - "name": "delete_dst_attr_first", - "label": "Delete first", - "value": False - } - if event_values: - src_item["value"] = event_values["src_attr_id"] - dst_item["value"] = event_values["dst_attr_id"] - delete_item["value"] = event_values["delete_dst_attr_first"] - - items.append(src_item) - items.append(dst_item) - items.append(item_splitter) - items.append({ - "type": "label", - "value": ( - "WARNING: All values from destination" - " Custom Attribute will be removed if this is enabled." - ) - }) - items.append(delete_item) - - return { - "title": title, - "items": items - } - - def interface(self, session, entities, event): - if event["data"].get("values", {}): - return None - - return self._selection_interface(session) - - def launch(self, session, entities, event): - values = event["data"].get("values", {}) - if not values: - return None - src_attr_id = values["src_attr_id"] - dst_attr_id = values["dst_attr_id"] - delete_dst_values = values["delete_dst_attr_first"] - - if not src_attr_id or not dst_attr_id: - self.log.info("Attributes were not filled. Nothing to do.") - return { - "success": True, - "message": "Nothing to do" - } - - if src_attr_id == dst_attr_id: - self.log.info(( - "Same attributes were selected {}, {}." - " Showing interface again." 
- ).format(src_attr_id, dst_attr_id)) - return self._selection_interface(session, values) - - # Query custom attrbutes - src_conf = session.query(( - "select id from CustomAttributeConfiguration where id is {}" - ).format(src_attr_id)).one() - dst_conf = session.query(( - "select id from CustomAttributeConfiguration where id is {}" - ).format(dst_attr_id)).one() - src_type_name = src_conf["type"]["name"] - dst_type_name = dst_conf["type"]["name"] - # Limit conversion to - # - same type -> same type (there is no need to do conversion) - # - number -> number (int to float and back) - # - number -> str (any number can be converted to str) - src_type = None - dst_type = None - if src_type_name == "number" or src_type_name != dst_type_name: - src_type = self._get_attr_type(dst_conf) - dst_type = self._get_attr_type(dst_conf) - valid = False - # Can convert numbers - if src_type in (int, float) and dst_type in (int, float): - valid = True - # Can convert numbers to string - elif dst_type is str: - valid = True - - if not valid: - self.log.info(( - "Don't know how to properly convert" - " custom attribute types {} > {}" - ).format(src_type_name, dst_type_name)) - return { - "message": ( - "Don't know how to properly convert" - " custom attribute types {} > {}" - ).format(src_type_name, dst_type_name), - "success": False - } - - # Query source values - src_attr_values = session.query( - ( - "select value, entity_id" - " from CustomAttributeValue" - " where configuration_id is {}" - ).format(src_attr_id) - ).all() - - self.log.debug("Queried source values.") - failed_entity_ids = [] - if dst_type is not None: - self.log.debug("Converting source values to desctination type") - value_by_id = {} - for attr_value in src_attr_values: - entity_id = attr_value["entity_id"] - value = attr_value["value"] - if value is not None: - try: - if dst_type is not None: - value = dst_type(value) - value_by_id[entity_id] = value - except Exception: - failed_entity_ids.append(entity_id) - - if failed_entity_ids: - self.log.info( - "Couldn't convert some values to destination attribute" - ) - return { - "success": False, - "message": ( - "Couldn't convert some values to destination attribute" - ) - } - - # Delete destination custom attributes first - if delete_dst_values: - self.log.info("Deleting destination custom attribute values first") - self._delete_custom_attribute_values(session, dst_attr_id) - - self.log.info("Applying source values on destination custom attribute") - self._apply_values(session, value_by_id, dst_attr_id) - return True - - def _delete_custom_attribute_values(self, session, dst_attr_id): - dst_attr_values = session.query( - ( - "select configuration_id, entity_id" - " from CustomAttributeValue" - " where configuration_id is {}" - ).format(dst_attr_id) - ).all() - delete_operations = [] - for attr_value in dst_attr_values: - entity_id = attr_value["entity_id"] - configuration_id = attr_value["configuration_id"] - entity_key = collections.OrderedDict(( - ("configuration_id", configuration_id), - ("entity_id", entity_id) - )) - delete_operations.append( - ftrack_api.operation.DeleteEntityOperation( - "CustomAttributeValue", - entity_key - ) - ) - - if not delete_operations: - return - - for chunk in create_chunks(delete_operations, 500): - for operation in chunk: - session.recorded_operations.push(operation) - session.commit() - - def _apply_values(self, session, value_by_id, dst_attr_id): - dst_attr_values = session.query( - ( - "select configuration_id, entity_id" - " from CustomAttributeValue" - 
" where configuration_id is {}" - ).format(dst_attr_id) - ).all() - - dst_entity_ids_with_value = { - item["entity_id"] - for item in dst_attr_values - } - operations = [] - for entity_id, value in value_by_id.items(): - entity_key = collections.OrderedDict(( - ("configuration_id", dst_attr_id), - ("entity_id", entity_id) - )) - if entity_id in dst_entity_ids_with_value: - operations.append( - ftrack_api.operation.UpdateEntityOperation( - "CustomAttributeValue", - entity_key, - "value", - ftrack_api.symbol.NOT_SET, - value - ) - ) - else: - operations.append( - ftrack_api.operation.CreateEntityOperation( - "CustomAttributeValue", - entity_key, - {"value": value} - ) - ) - - if not operations: - return - - for chunk in create_chunks(operations, 500): - for operation in chunk: - session.recorded_operations.push(operation) - session.commit() - - def _get_attr_type(self, conf_def): - type_name = conf_def["type"]["name"] - if type_name == "text": - return str - - if type_name == "number": - config = json.loads(conf_def["config"]) - if config["isdecimal"]: - return float - return int - return None - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - TransferHierarchicalValues(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py b/openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py deleted file mode 100644 index 35b5d809fd..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py +++ /dev/null @@ -1,54 +0,0 @@ -from openpype_modules.ftrack.lib import BaseEvent -from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from openpype_modules.ftrack.event_handlers_server.event_sync_to_avalon import ( - SyncToAvalonEvent -) - - -class DelAvalonIdFromNew(BaseEvent): - ''' - This event removes AvalonId from custom attributes of new entities - Result: - - 'Copy->Pasted' entities won't have same AvalonID as source entity - - Priority of this event must be less than SyncToAvalon event - ''' - priority = SyncToAvalonEvent.priority - 1 - ignore_me = True - - def launch(self, session, event): - created = [] - entities = event['data']['entities'] - for entity in entities: - try: - entity_id = entity['entityId'] - - if entity.get('action', None) == 'add': - id_dict = entity['changes']['id'] - - if id_dict['new'] is not None and id_dict['old'] is None: - created.append(id_dict['new']) - - elif ( - entity.get('action', None) == 'update' and - CUST_ATTR_ID_KEY in entity['keys'] and - entity_id in created - ): - ftrack_entity = session.get( - self._get_entity_type(entity), - entity_id - ) - - cust_attrs = ftrack_entity["custom_attributes"] - if cust_attrs[CUST_ATTR_ID_KEY]: - cust_attrs[CUST_ATTR_ID_KEY] = "" - session.commit() - - except Exception: - session.rollback() - continue - - -def register(session): - '''Register plugin. 
Called when used as an plugin.''' - DelAvalonIdFromNew(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py b/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py deleted file mode 100644 index 2ac02f233e..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py +++ /dev/null @@ -1,213 +0,0 @@ -import collections - -from openpype.client import get_project -from openpype_modules.ftrack.lib import BaseEvent - - -class FirstVersionStatus(BaseEvent): - - # WARNING Priority MUST be higher - # than handler in `event_version_to_task_statuses.py` - priority = 200 - - keys_enum = ["task", "task_type"] - # This should be set with presets - task_status_map = [] - - # EXAMPLE of `task_status_map` - __example_status_map__ = [{ - # `key` specify where to look for name (is enumerator of `keys_enum`) - # By default is set to "task" - "key": "task", - # speicification of name - "name": "compositing", - # Status to set to the asset version - "status": "Blocking" - }] - - def register(self, *args, **kwargs): - result = super(FirstVersionStatus, self).register(*args, **kwargs) - - valid_task_status_map = [] - for item in self.task_status_map: - key = (item.get("key") or "task").lower() - name = (item.get("name") or "").lower() - status = (item.get("status") or "").lower() - if not (key and name and status): - self.log.warning(( - "Invalid item in Task -> Status mapping. {}" - ).format(str(item))) - continue - - if key not in self.keys_enum: - expected_msg = "" - last_key_idx = len(self.keys_enum) - 1 - for idx, key in enumerate(self.keys_enum): - if idx == 0: - joining_part = "`{}`" - elif idx == last_key_idx: - joining_part = "or `{}`" - else: - joining_part = ", `{}`" - expected_msg += joining_part.format(key) - - self.log.warning(( - "Invalid key `{}`. Expected: {}." - ).format(key, expected_msg)) - continue - - valid_task_status_map.append({ - "key": key, - "name": name, - "status": status - }) - - self.task_status_map = valid_task_status_map - if not self.task_status_map: - self.log.warning(( - "Event handler `{}` don't have set presets." - ).format(self.__class__.__name__)) - - return result - - def launch(self, session, event): - """Set task's status for first created Asset Version.""" - - if not self.task_status_map: - return - - filtered_entities_info = self.filter_entities_info(event) - if not filtered_entities_info: - return - - for project_id, entities_info in filtered_entities_info.items(): - self.process_by_project(session, event, project_id, entities_info) - - def process_by_project(self, session, event, project_id, entities_info): - project_name = self.get_project_name_from_event( - session, event, project_id - ) - if get_project(project_name) is None: - self.log.debug( - f"Project '{project_name}' not found in OpenPype. 
Skipping" - ) - return - - entity_ids = [] - for entity_info in entities_info: - entity_ids.append(entity_info["entityId"]) - - joined_entity_ids = ",".join( - ["\"{}\"".format(entity_id) for entity_id in entity_ids] - ) - asset_versions = session.query( - "AssetVersion where id in ({})".format(joined_entity_ids) - ).all() - - asset_version_statuses = None - - project_schema = None - for asset_version in asset_versions: - task_entity = asset_version["task"] - found_item = None - for item in self.task_status_map: - if ( - item["key"] == "task" and - task_entity["name"].lower() != item["name"] - ): - continue - - elif ( - item["key"] == "task_type" and - task_entity["type"]["name"].lower() != item["name"] - ): - continue - - found_item = item - break - - if not found_item: - continue - - if project_schema is None: - project_schema = task_entity["project"]["project_schema"] - - # Get all available statuses for Task - if asset_version_statuses is None: - statuses = project_schema.get_statuses("AssetVersion") - - # map lowered status name with it's object - asset_version_statuses = { - status["name"].lower(): status for status in statuses - } - - ent_path = "/".join( - [ent["name"] for ent in task_entity["link"]] + - [ - str(asset_version["asset"]["name"]), - str(asset_version["version"]) - ] - ) - - new_status = asset_version_statuses.get(found_item["status"]) - if not new_status: - self.log.warning(( - "AssetVersion doesn't have status `{}`." - ).format(found_item["status"])) - continue - - try: - asset_version["status"] = new_status - session.commit() - self.log.debug("[ {} ] Status updated to [ {} ]".format( - ent_path, new_status['name'] - )) - - except Exception: - session.rollback() - self.log.warning( - "[ {} ] Status couldn't be set.".format(ent_path), - exc_info=True - ) - - def filter_entities_info(self, event): - filtered_entities_info = collections.defaultdict(list) - for entity_info in event["data"].get("entities", []): - # Care only about add actions - if entity_info.get("action") != "add": - continue - - # Filter AssetVersions - if entity_info["entityType"] != "assetversion": - continue - - entity_changes = entity_info.get("changes") or {} - - # Check if version of Asset Version is `1` - version_num = entity_changes.get("version", {}).get("new") - if version_num != 1: - continue - - # Skip in Asset Version don't have task - task_id = entity_changes.get("taskid", {}).get("new") - if not task_id: - continue - - project_id = None - for parent_item in reversed(entity_info["parents"]): - if parent_item["entityType"] == "show": - project_id = parent_item["entityId"] - break - - if project_id is None: - continue - - filtered_entities_info[project_id].append(entity_info) - - return filtered_entities_info - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - FirstVersionStatus(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py b/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py deleted file mode 100644 index 8632f038b8..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py +++ /dev/null @@ -1,444 +0,0 @@ -import collections - -from openpype.client import get_project -from openpype_modules.ftrack.lib import BaseEvent - - -class NextTaskUpdate(BaseEvent): - """Change status on following Task. - - Handler cares about changes of status id on Task entities. When new status - has state "Done" it will try to find following task and change it's status. 
- It is expected following task should be marked as "Ready to work on". - - By default all tasks with same task type must have state "Done" to do any - changes. And when all tasks with same task type are "done" it will change - statuses on all tasks with next task type. - - # Enable - Handler is based on settings, handler can be turned on/off with "enabled" - key. - ``` - "enabled": True - ``` - - # Status mappings - Must have set mappings of new statuses: - ``` - "mapping": { - # From -> To - "Not Ready": "Ready", - ... - } - ``` - - If current status name is not found then status change is skipped. - - # Ignored statuses - These status names are skipping as they would be in "Done" state. Best - example is status "Omitted" which in most of cases is "Blocked" state but - it will never change. - ``` - "ignored_statuses": [ - "Omitted", - ... - ] - ``` - - # Change statuses sorted by task type and by name - Change behaviour of task type batching. Statuses are not checked and set - by batches of tasks by Task type but one by one. Tasks are sorted by - Task type and then by name if all previous tasks are "Done" the following - will change status. - ``` - "name_sorting": True - ``` - """ - settings_key = "next_task_update" - - def launch(self, session, event): - '''Propagates status from version to task when changed''' - - filtered_entities_info = self.filter_entities_info(event) - if not filtered_entities_info: - return - - for project_id, entities_info in filtered_entities_info.items(): - self.process_by_project(session, event, project_id, entities_info) - - def filter_entities_info(self, event): - # Filter if event contain relevant data - entities_info = event["data"].get("entities") - if not entities_info: - return - - filtered_entities_info = collections.defaultdict(list) - for entity_info in entities_info: - # Care only about Task `entity_type` - if entity_info.get("entity_type") != "Task": - continue - - # Care only about changes of status - changes = entity_info.get("changes") or {} - statusid_changes = changes.get("statusid") or {} - if ( - statusid_changes.get("new") is None - or statusid_changes.get("old") is None - ): - continue - - project_id = None - for parent_info in reversed(entity_info["parents"]): - if parent_info["entityType"] == "show": - project_id = parent_info["entityId"] - break - - if project_id: - filtered_entities_info[project_id].append(entity_info) - return filtered_entities_info - - def process_by_project(self, session, event, project_id, _entities_info): - project_name = self.get_project_name_from_event( - session, event, project_id - ) - if get_project(project_name) is None: - self.log.debug("Project not found in OpenPype. 
Skipping") - return - - # Load settings - project_settings = self.get_project_settings_from_event( - event, project_name - ) - - # Load status mapping from presets - event_settings = ( - project_settings["ftrack"]["events"][self.settings_key] - ) - if not event_settings["enabled"]: - self.log.debug("Project \"{}\" has disabled {}.".format( - project_name, self.__class__.__name__ - )) - return - - statuses = session.query("Status").all() - - entities_info = self.filter_by_status_state(_entities_info, statuses) - if not entities_info: - return - - parent_ids = set() - event_task_ids_by_parent_id = collections.defaultdict(list) - for entity_info in entities_info: - parent_id = entity_info["parentId"] - entity_id = entity_info["entityId"] - parent_ids.add(parent_id) - event_task_ids_by_parent_id[parent_id].append(entity_id) - - # From now it doesn't matter what was in event data - task_entities = session.query( - ( - "select id, type_id, status_id, parent_id, link from Task" - " where parent_id in ({})" - ).format(self.join_query_keys(parent_ids)) - ).all() - - tasks_by_parent_id = collections.defaultdict(list) - for task_entity in task_entities: - tasks_by_parent_id[task_entity["parent_id"]].append(task_entity) - - project_entity = session.get("Project", project_id) - self.set_next_task_statuses( - session, - tasks_by_parent_id, - event_task_ids_by_parent_id, - statuses, - project_entity, - event_settings - ) - - def filter_by_status_state(self, entities_info, statuses): - statuses_by_id = { - status["id"]: status - for status in statuses - } - - # Care only about tasks having status with state `Done` - filtered_entities_info = [] - for entity_info in entities_info: - status_id = entity_info["changes"]["statusid"]["new"] - status_entity = statuses_by_id[status_id] - if status_entity["state"]["name"].lower() == "done": - filtered_entities_info.append(entity_info) - return filtered_entities_info - - def set_next_task_statuses( - self, - session, - tasks_by_parent_id, - event_task_ids_by_parent_id, - statuses, - project_entity, - event_settings - ): - statuses_by_id = { - status["id"]: status - for status in statuses - } - - # Lower ignored statuses - ignored_statuses = set( - status_name.lower() - for status_name in event_settings["ignored_statuses"] - ) - # Lower both key and value of mapped statuses - mapping = { - status_from.lower(): status_to.lower() - for status_from, status_to in event_settings["mapping"].items() - } - # Should use name sorting or not - name_sorting = event_settings["name_sorting"] - - # Collect task type ids from changed entities - task_type_ids = set() - for task_entities in tasks_by_parent_id.values(): - for task_entity in task_entities: - task_type_ids.add(task_entity["type_id"]) - - statusese_by_obj_id = self.statuses_for_tasks( - task_type_ids, project_entity - ) - - sorted_task_type_ids = self.get_sorted_task_type_ids(session) - - for parent_id, _task_entities in tasks_by_parent_id.items(): - task_entities_by_type_id = collections.defaultdict(list) - for _task_entity in _task_entities: - type_id = _task_entity["type_id"] - task_entities_by_type_id[type_id].append(_task_entity) - - event_ids = set(event_task_ids_by_parent_id[parent_id]) - if name_sorting: - # Sort entities by name - self.sort_by_name_task_entities_by_type( - task_entities_by_type_id - ) - # Sort entities by type id - sorted_task_entities = [] - for type_id in sorted_task_type_ids: - task_entities = task_entities_by_type_id.get(type_id) - if task_entities: - 
sorted_task_entities.extend(task_entities) - - next_tasks = self.next_tasks_with_name_sorting( - sorted_task_entities, - event_ids, - statuses_by_id, - ignored_statuses - ) - - else: - next_tasks = self.next_tasks_with_type_sorting( - task_entities_by_type_id, - sorted_task_type_ids, - event_ids, - statuses_by_id, - ignored_statuses - ) - - for task_entity in next_tasks: - if task_entity["status"]["state"]["name"].lower() == "done": - continue - - task_status = statuses_by_id[task_entity["status_id"]] - old_status_name = task_status["name"].lower() - if old_status_name in ignored_statuses: - continue - - new_task_name = mapping.get(old_status_name) - if not new_task_name: - self.log.debug( - "Didn't find mapping for status \"{}\".".format( - task_status["name"] - ) - ) - continue - - ent_path = "/".join( - [ent["name"] for ent in task_entity["link"]] - ) - type_id = task_entity["type_id"] - new_status = statusese_by_obj_id[type_id].get(new_task_name) - if new_status is None: - self.log.warning(( - "\"{}\" does not have available status name \"{}\"" - ).format(ent_path, new_task_name)) - continue - - try: - task_entity["status_id"] = new_status["id"] - session.commit() - self.log.info( - "\"{}\" updated status to \"{}\"".format( - ent_path, new_status["name"] - ) - ) - except Exception: - session.rollback() - self.log.warning( - "\"{}\" status couldn't be set to \"{}\"".format( - ent_path, new_status["name"] - ), - exc_info=True - ) - - def next_tasks_with_name_sorting( - self, - sorted_task_entities, - event_ids, - statuses_by_id, - ignored_statuses, - ): - # Pre sort task entities by name - use_next_task = False - next_tasks = [] - for task_entity in sorted_task_entities: - if task_entity["id"] in event_ids: - event_ids.remove(task_entity["id"]) - use_next_task = True - continue - - if not use_next_task: - continue - - task_status = statuses_by_id[task_entity["status_id"]] - low_status_name = task_status["name"].lower() - if low_status_name in ignored_statuses: - continue - - next_tasks.append(task_entity) - use_next_task = False - if not event_ids: - break - - return next_tasks - - def check_statuses_done( - self, task_entities, ignored_statuses, statuses_by_id - ): - all_are_done = True - for task_entity in task_entities: - task_status = statuses_by_id[task_entity["status_id"]] - low_status_name = task_status["name"].lower() - if low_status_name in ignored_statuses: - continue - - low_state_name = task_status["state"]["name"].lower() - if low_state_name != "done": - all_are_done = False - break - return all_are_done - - def next_tasks_with_type_sorting( - self, - task_entities_by_type_id, - sorted_task_type_ids, - event_ids, - statuses_by_id, - ignored_statuses - ): - # `use_next_task` is used only if `name_sorting` is enabled! 
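
The type-sorting branch defined here returns the next batch of tasks whose type follows the changed one, but only once every task of the changed type is done. A simplified sketch of that rule on plain dicts, with hypothetical type ids, task names, and states standing in for ftrack entities:

```python
# Standalone sketch of the type-batching rule, not the handler's actual code.
def next_type_batch(tasks_by_type, sorted_type_ids, changed_type_id):
    """Return tasks of the next not-yet-done type once the changed type is done."""
    def all_done(tasks):
        return all(task["state"] == "done" for task in tasks)

    if not all_done(tasks_by_type[changed_type_id]):
        return []

    start = sorted_type_ids.index(changed_type_id) + 1
    for type_id in sorted_type_ids[start:]:
        batch = tasks_by_type.get(type_id) or []
        if not all_done(batch):
            return batch  # first batch that still has work gets the change
    return []


tasks_by_type = {
    "modeling": [{"name": "model", "state": "done"}],
    "rigging": [{"name": "rig", "state": "not started"}],
}
print(next_type_batch(tasks_by_type, ["modeling", "rigging"], "modeling"))
```
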
- next_tasks = [] - use_next_tasks = False - for type_id in sorted_task_type_ids: - if type_id not in task_entities_by_type_id: - continue - - task_entities = task_entities_by_type_id[type_id] - - # Check if any task was in event - event_id_in_tasks = False - for task_entity in task_entities: - task_id = task_entity["id"] - if task_id in event_ids: - event_ids.remove(task_id) - event_id_in_tasks = True - - if use_next_tasks: - # Check if next tasks are not done already - all_in_type_done = self.check_statuses_done( - task_entities, ignored_statuses, statuses_by_id - ) - if all_in_type_done: - continue - - next_tasks.extend(task_entities) - use_next_tasks = False - if not event_ids: - break - - if not event_id_in_tasks: - continue - - all_in_type_done = self.check_statuses_done( - task_entities, ignored_statuses, statuses_by_id - ) - use_next_tasks = all_in_type_done - if all_in_type_done: - continue - - if not event_ids: - break - - use_next_tasks = False - - return next_tasks - - def statuses_for_tasks(self, task_type_ids, project_entity): - project_schema = project_entity["project_schema"] - output = {} - for task_type_id in task_type_ids: - statuses = project_schema.get_statuses("Task", task_type_id) - output[task_type_id] = { - status["name"].lower(): status - for status in statuses - } - - return output - - def get_sorted_task_type_ids(self, session): - types_by_order = collections.defaultdict(list) - for _type in session.query("Type").all(): - sort_oder = _type.get("sort") - if sort_oder is not None: - types_by_order[sort_oder].append(_type["id"]) - - types = [] - for sort_oder in sorted(types_by_order.keys()): - types.extend(types_by_order[sort_oder]) - return types - - @staticmethod - def sort_by_name_task_entities_by_type(task_entities_by_type_id): - _task_entities_by_type_id = {} - for type_id, task_entities in task_entities_by_type_id.items(): - # Store tasks by name - task_entities_by_name = {} - for task_entity in task_entities: - task_name = task_entity["name"] - task_entities_by_name[task_name] = task_entity - - # Store task entities by sorted names - sorted_task_entities = [] - for task_name in sorted(task_entities_by_name.keys()): - task_entity = task_entities_by_name[task_name] - sorted_task_entities.append(task_entity) - # Store result to temp dictionary - _task_entities_by_type_id[type_id] = sorted_task_entities - - # Override values in source object - for type_id, value in _task_entities_by_type_id.items(): - task_entities_by_type_id[type_id] = value - - -def register(session): - NextTaskUpdate(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py deleted file mode 100644 index 65c3c1a69a..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py +++ /dev/null @@ -1,504 +0,0 @@ -import collections -import copy -from typing import Any - -import ftrack_api - -from openpype.client import get_project -from openpype_modules.ftrack.lib import ( - BaseEvent, - query_custom_attributes, -) - - -class PushHierValuesToNonHierEvent(BaseEvent): - """Push value changes between hierarchical and non-hierarchical attributes. - - Changes of non-hierarchical attributes are pushed to hierarchical and back. - The attributes must have same definition of custom attribute. - - Handler does not handle changes of hierarchical parents. 
So if entity does - not have explicitly set value of hierarchical attribute and any parent - would change it the change would not be propagated. - - The handler also push the value to task entity on task creation - and movement. To push values between hierarchical & non-hierarchical - add 'Task' to entity types in settings. - - Todos: - Task attribute values push on create/move should be possible to - enabled by settings. - """ - - # Ignore event handler by default - cust_attrs_query = ( - "select id, key, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where key in ({})" - ) - - _cached_task_object_id = None - _cached_interest_object_ids = None - _cached_user_id = None - _cached_changes = [] - _max_delta = 30 - - settings_key = "sync_hier_entity_attributes" - - def filter_entities_info( - self, event: ftrack_api.event.base.Event - ) -> dict[str, list[dict[str, Any]]]: - """Basic entities filter info we care about. - - This filtering is first of many filters. This does not query anything - from ftrack nor use settings. - - Args: - event (ftrack_api.event.base.Event): Ftrack event with update - information. - - Returns: - dict[str, list[dict[str, Any]]]: Filtered entity changes by - project id. - """ - - # Filter if event contain relevant data - entities_info = event["data"].get("entities") - if not entities_info: - return - - entities_info_by_project_id = collections.defaultdict(list) - for entity_info in entities_info: - # Ignore removed entities - if entity_info.get("action") == "remove": - continue - - # Care only about information with changes of entities - changes = entity_info.get("changes") - if not changes: - continue - - # Get project id from entity info - project_id = None - for parent_item in reversed(entity_info["parents"]): - if parent_item["entityType"] == "show": - project_id = parent_item["entityId"] - break - - if project_id is None: - continue - - entities_info_by_project_id[project_id].append(entity_info) - - return entities_info_by_project_id - - def _get_attrs_configurations(self, session, interest_attributes): - """Get custom attribute configurations by name. - - Args: - session (ftrack_api.Session): Ftrack sesson. - interest_attributes (list[str]): Names of custom attributes - that should be synchronized. - - Returns: - tuple[dict[str, list], list]: Attributes by object id and - hierarchical attributes. - """ - - attrs = session.query(self.cust_attrs_query.format( - self.join_query_keys(interest_attributes) - )).all() - - attrs_by_obj_id = collections.defaultdict(list) - hier_attrs = [] - for attr in attrs: - if attr["is_hierarchical"]: - hier_attrs.append(attr) - continue - obj_id = attr["object_type_id"] - attrs_by_obj_id[obj_id].append(attr) - return attrs_by_obj_id, hier_attrs - - def _get_handler_project_settings( - self, - session: ftrack_api.Session, - event: ftrack_api.event.base.Event, - project_id: str - ) -> tuple[set[str], set[str]]: - """Get handler settings based on the project. - - Args: - session (ftrack_api.Session): Ftrack session. - event (ftrack_api.event.base.Event): Ftrack event which triggered - the changes. - project_id (str): Project id where the current changes are handled. - - Returns: - tuple[set[str], set[str]]: Attribute names we care about and - entity types we care about. - """ - - project_name: str = self.get_project_name_from_event( - session, event, project_id - ) - if get_project(project_name) is None: - self.log.debug("Project not found in OpenPype. 
Skipping") - return set(), set() - - # Load settings - project_settings: dict[str, Any] = ( - self.get_project_settings_from_event(event, project_name) - ) - # Load status mapping from presets - event_settings: dict[str, Any] = ( - project_settings - ["ftrack"] - ["events"] - [self.settings_key] - ) - # Skip if event is not enabled - if not event_settings["enabled"]: - self.log.debug("Project \"{}\" has disabled {}".format( - project_name, self.__class__.__name__ - )) - return set(), set() - - interest_attributes: list[str] = event_settings["interest_attributes"] - if not interest_attributes: - self.log.info(( - "Project \"{}\" does not have filled 'interest_attributes'," - " skipping." - )) - - interest_entity_types: list[str] = ( - event_settings["interest_entity_types"]) - if not interest_entity_types: - self.log.info(( - "Project \"{}\" does not have filled 'interest_entity_types'," - " skipping." - )) - - # Unify possible issues from settings ('Asset Build' -> 'assetbuild') - interest_entity_types: set[str] = { - entity_type.replace(" ", "").lower() - for entity_type in interest_entity_types - } - return set(interest_attributes), interest_entity_types - - def _entities_filter_by_settings( - self, - entities_info: list[dict[str, Any]], - interest_attributes: set[str], - interest_entity_types: set[str] - ): - new_entities_info = [] - for entity_info in entities_info: - entity_type_low = entity_info["entity_type"].lower() - - changes = entity_info["changes"] - # SPECIAL CASE: Capture changes of task created/moved under - # interested entity type - if ( - entity_type_low == "task" - and "parent_id" in changes - ): - # Direct parent is always second item in 'parents' and 'Task' - # must have at least one parent - parent_info = entity_info["parents"][1] - parent_entity_type = ( - parent_info["entity_type"] - .replace(" ", "") - .lower() - ) - if parent_entity_type in interest_entity_types: - new_entities_info.append(entity_info) - continue - - # Skip if entity type is not enabled for attr value sync - if entity_type_low not in interest_entity_types: - continue - - valid_attr_change = entity_info.get("action") == "add" - for attr_key in interest_attributes: - if valid_attr_change: - break - - if attr_key not in changes: - continue - - if changes[attr_key]["new"] is not None: - valid_attr_change = True - - if not valid_attr_change: - continue - - new_entities_info.append(entity_info) - - return new_entities_info - - def propagate_attribute_changes( - self, - session, - interest_attributes, - entities_info, - attrs_by_obj_id, - hier_attrs, - real_values_by_entity_id, - hier_values_by_entity_id, - ): - hier_attr_ids_by_key = { - attr["key"]: attr["id"] - for attr in hier_attrs - } - filtered_interest_attributes = { - attr_name - for attr_name in interest_attributes - if attr_name in hier_attr_ids_by_key - } - attrs_keys_by_obj_id = {} - for obj_id, attrs in attrs_by_obj_id.items(): - attrs_keys_by_obj_id[obj_id] = { - attr["key"]: attr["id"] - for attr in attrs - } - - op_changes = [] - for entity_info in entities_info: - entity_id = entity_info["entityId"] - obj_id = entity_info["objectTypeId"] - # Skip attributes sync if does not have object specific custom - # attribute - if obj_id not in attrs_keys_by_obj_id: - continue - attr_keys = attrs_keys_by_obj_id[obj_id] - real_values = real_values_by_entity_id[entity_id] - hier_values = hier_values_by_entity_id[entity_id] - - changes = copy.deepcopy(entity_info["changes"]) - obj_id_attr_keys = { - attr_key - for attr_key in 
filtered_interest_attributes - if attr_key in attr_keys - } - if not obj_id_attr_keys: - continue - - value_by_key = {} - is_new_entity = entity_info.get("action") == "add" - for attr_key in obj_id_attr_keys: - if ( - attr_key in changes - and changes[attr_key]["new"] is not None - ): - value_by_key[attr_key] = changes[attr_key]["new"] - - if not is_new_entity: - continue - - hier_attr_id = hier_attr_ids_by_key[attr_key] - attr_id = attr_keys[attr_key] - if hier_attr_id in real_values or attr_id in real_values: - continue - - value_by_key[attr_key] = hier_values[hier_attr_id] - - for key, new_value in value_by_key.items(): - if new_value is None: - continue - - hier_id = hier_attr_ids_by_key[key] - std_id = attr_keys[key] - real_hier_value = real_values.get(hier_id) - real_std_value = real_values.get(std_id) - hier_value = hier_values[hier_id] - # Get right type of value for conversion - # - values in event are strings - type_value = real_hier_value - if type_value is None: - type_value = real_std_value - if type_value is None: - type_value = hier_value - # Skip if current values are not set - if type_value is None: - continue - - try: - new_value = type(type_value)(new_value) - except Exception: - self.log.warning(( - "Couldn't convert from {} to {}." - " Skipping update values." - ).format(type(new_value), type(type_value))) - continue - - real_std_value_is_same = new_value == real_std_value - real_hier_value_is_same = new_value == real_hier_value - # New value does not match anything in current entity values - if ( - not is_new_entity - and not real_std_value_is_same - and not real_hier_value_is_same - ): - continue - - if not real_std_value_is_same: - op_changes.append(( - std_id, - entity_id, - new_value, - real_values.get(std_id), - std_id in real_values - )) - - if not real_hier_value_is_same: - op_changes.append(( - hier_id, - entity_id, - new_value, - real_values.get(hier_id), - hier_id in real_values - )) - - for change in op_changes: - ( - attr_id, - entity_id, - new_value, - old_value, - do_update - ) = change - - entity_key = collections.OrderedDict([ - ("configuration_id", attr_id), - ("entity_id", entity_id) - ]) - if do_update: - op = ftrack_api.operation.UpdateEntityOperation( - "CustomAttributeValue", - entity_key, - "value", - old_value, - new_value - ) - - else: - op = ftrack_api.operation.CreateEntityOperation( - "CustomAttributeValue", - entity_key, - {"value": new_value} - ) - - session.recorded_operations.push(op) - if len(session.recorded_operations) > 100: - session.commit() - session.commit() - - def process_by_project( - self, - session: ftrack_api.Session, - event: ftrack_api.event.base.Event, - project_id: str, - entities_info: list[dict[str, Any]] - ): - """Process changes in single project. - - Args: - session (ftrack_api.Session): Ftrack session. - event (ftrack_api.event.base.Event): Event which has all changes - information. - project_id (str): Project id related to changes. - entities_info (list[dict[str, Any]]): Changes of entities. 
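
The operation loop above pushes an `UpdateEntityOperation` when a value row already exists and a `CreateEntityOperation` when it does not, committing every 100 recorded operations to keep payloads small. A minimal generic sketch of that commit-batching pattern, where `pending` and `flush` are hypothetical stand-ins for the ftrack session internals:

```python
# Generic sketch of the "commit every 100 recorded operations" pattern.
pending = []


def flush():
    # In the handler this is session.commit(); here the batch is just cleared.
    print("committing {} operations".format(len(pending)))
    pending.clear()


for change in range(250):  # hypothetical stream of attribute changes
    pending.append(change)
    if len(pending) >= 100:
        flush()
flush()  # commit whatever remains
```
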
- """ - - ( - interest_attributes, - interest_entity_types - ) = self._get_handler_project_settings(session, event, project_id) - if not interest_attributes or not interest_entity_types: - return - - entities_info: list[dict[str, Any]] = ( - self._entities_filter_by_settings( - entities_info, - interest_attributes, - interest_entity_types - ) - ) - if not entities_info: - return - - attrs_by_obj_id, hier_attrs = self._get_attrs_configurations( - session, interest_attributes - ) - # Skip if attributes are not available - # - there is nothing to sync - if not attrs_by_obj_id or not hier_attrs: - return - - entity_ids_by_parent_id = collections.defaultdict(set) - all_entity_ids = set() - for entity_info in entities_info: - entity_id = None - for item in entity_info["parents"]: - item_id = item["entityId"] - all_entity_ids.add(item_id) - if entity_id is not None: - entity_ids_by_parent_id[item_id].add(entity_id) - entity_id = item_id - - attr_ids = {attr["id"] for attr in hier_attrs} - for attrs in attrs_by_obj_id.values(): - attr_ids |= {attr["id"] for attr in attrs} - - # Query real custom attribute values - # - we have to know what are the real values, if are set and to what - # value - value_items = query_custom_attributes( - session, attr_ids, all_entity_ids, True - ) - real_values_by_entity_id = collections.defaultdict(dict) - for item in value_items: - entity_id = item["entity_id"] - attr_id = item["configuration_id"] - real_values_by_entity_id[entity_id][attr_id] = item["value"] - - hier_values_by_entity_id = {} - default_values = { - attr["id"]: attr["default"] - for attr in hier_attrs - } - hier_queue = collections.deque() - hier_queue.append((default_values, [project_id])) - while hier_queue: - parent_values, entity_ids = hier_queue.popleft() - for entity_id in entity_ids: - entity_values = copy.deepcopy(parent_values) - real_values = real_values_by_entity_id[entity_id] - for attr_id, value in real_values.items(): - entity_values[attr_id] = value - hier_values_by_entity_id[entity_id] = entity_values - hier_queue.append( - (entity_values, entity_ids_by_parent_id[entity_id]) - ) - - self.propagate_attribute_changes( - session, - interest_attributes, - entities_info, - attrs_by_obj_id, - hier_attrs, - real_values_by_entity_id, - hier_values_by_entity_id, - ) - - def launch(self, session, event): - filtered_entities_info = self.filter_entities_info(event) - if not filtered_entities_info: - return - - for project_id, entities_info in filtered_entities_info.items(): - self.process_by_project(session, event, project_id, entities_info) - - -def register(session): - PushHierValuesToNonHierEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py b/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py deleted file mode 100644 index 358a8d2310..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py +++ /dev/null @@ -1,40 +0,0 @@ -import ftrack_api -from openpype_modules.ftrack.lib import BaseEvent - - -class RadioButtons(BaseEvent): - - ignore_me = True - - def launch(self, session, event): - '''Provides a radio button behaviour to any boolean attribute in - radio_button group.''' - - # start of event procedure ---------------------------------- - for entity in event['data'].get('entities', []): - - if entity['entityType'] == 'assetversion': - - query = 'CustomAttributeGroup where name is "radio_button"' - group = session.query(query).one() - radio_buttons = [] - for g in 
group['custom_attribute_configurations']:
-                    radio_buttons.append(g['key'])
-
-                for key in entity['keys']:
-                    if (key in radio_buttons and entity['changes'] is not None):
-                        if entity['changes'][key]['new'] == '1':
-                            version = session.get('AssetVersion',
-                                                  entity['entityId'])
-                            asset = session.get('Asset', entity['parentId'])
-                            for v in asset['versions']:
-                                if version is not v:
-                                    v['custom_attributes'][key] = 0
-
-                            session.commit()
-
-
-def register(session):
-    '''Register plugin. Called when used as a plugin.'''
-
-    RadioButtons(session).register()
diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py
deleted file mode 100644
index ae70c6756f..0000000000
--- a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py
+++ /dev/null
@@ -1,147 +0,0 @@
-from pymongo import UpdateOne
-from bson.objectid import ObjectId
-
-from openpype.pipeline import AvalonMongoDB
-
-from openpype_modules.ftrack.lib import (
-    CUST_ATTR_ID_KEY,
-    query_custom_attributes,
-
-    BaseEvent
-)
-
-
-class SyncLinksToAvalon(BaseEvent):
-    """Synchronize input links to avalon documents."""
-    # Run after the sync to avalon event handler
-    priority = 110
-
-    def __init__(self, session):
-        self.dbcon = AvalonMongoDB()
-
-        super(SyncLinksToAvalon, self).__init__(session)
-
-    def launch(self, session, event):
-        entities_info = event["data"]["entities"]
-        dependency_changes = []
-        removed_entities = set()
-        for entity_info in entities_info:
-            action = entity_info.get("action")
-            entityType = entity_info.get("entityType")
-            if action not in ("remove", "add"):
-                continue
-
-            if entityType == "task":
-                removed_entities.add(entity_info["entityId"])
-            elif entityType == "dependency":
-                dependency_changes.append(entity_info)
-
-        # Care only about dependency changes
-        if not dependency_changes:
-            return
-
-        project_id = None
-        for entity_info in dependency_changes:
-            for parent_info in entity_info["parents"]:
-                if parent_info["entityType"] == "show":
-                    project_id = parent_info["entityId"]
-            if project_id is not None:
-                break
-
-        changed_to_ids = set()
-        for entity_info in dependency_changes:
-            to_id_change = entity_info["changes"]["to_id"]
-            if to_id_change["new"] is not None:
-                changed_to_ids.add(to_id_change["new"])
-
-            if to_id_change["old"] is not None:
-                changed_to_ids.add(to_id_change["old"])
-
-        self._update_in_links(session, changed_to_ids, project_id)
-
-    def _update_in_links(self, session, ftrack_ids, project_id):
-        if not ftrack_ids or project_id is None:
-            return
-
-        attr_def = session.query((
-            "select id from CustomAttributeConfiguration where key is \"{}\""
-        ).format(CUST_ATTR_ID_KEY)).first()
-        if attr_def is None:
-            return
-
-        project_entity = session.query((
-            "select full_name from Project where id is \"{}\""
-        ).format(project_id)).first()
-        if not project_entity:
-            return
-
-        project_name = project_entity["full_name"]
-        mongo_id_by_ftrack_id = self._get_mongo_ids_by_ftrack_ids(
-            session, attr_def["id"], ftrack_ids
-        )
-
-        filtered_ftrack_ids = tuple(mongo_id_by_ftrack_id.keys())
-        context_links = session.query((
-            "select from_id, to_id from TypedContextLink where to_id in ({})"
-        ).format(self.join_query_keys(filtered_ftrack_ids))).all()
-
-        mapping_by_to_id = {
-            ftrack_id: set()
-            for ftrack_id in filtered_ftrack_ids
-        }
-        all_from_ids = set()
-        for context_link in context_links:
-            to_id = context_link["to_id"]
-            from_id = context_link["from_id"]
-
if from_id == to_id: - continue - all_from_ids.add(from_id) - mapping_by_to_id[to_id].add(from_id) - - mongo_id_by_ftrack_id.update(self._get_mongo_ids_by_ftrack_ids( - session, attr_def["id"], all_from_ids - )) - self.log.info(mongo_id_by_ftrack_id) - bulk_writes = [] - for to_id, from_ids in mapping_by_to_id.items(): - dst_mongo_id = mongo_id_by_ftrack_id[to_id] - links = [] - for ftrack_id in from_ids: - link_mongo_id = mongo_id_by_ftrack_id.get(ftrack_id) - if link_mongo_id is None: - continue - - links.append({ - "id": ObjectId(link_mongo_id), - "linkedBy": "ftrack", - "type": "breakdown" - }) - - bulk_writes.append(UpdateOne( - {"_id": ObjectId(dst_mongo_id)}, - {"$set": {"data.inputLinks": links}} - )) - - if bulk_writes: - self.dbcon.database[project_name].bulk_write(bulk_writes) - - def _get_mongo_ids_by_ftrack_ids(self, session, attr_id, ftrack_ids): - output = query_custom_attributes( - session, [attr_id], ftrack_ids, True - ) - mongo_id_by_ftrack_id = {} - for item in output: - mongo_id = item["value"] - if not mongo_id: - continue - - ftrack_id = item["entity_id"] - - mongo_id_by_ftrack_id[ftrack_id] = mongo_id - return mongo_id_by_ftrack_id - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - SyncLinksToAvalon(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py deleted file mode 100644 index d4dc53b655..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ /dev/null @@ -1,2731 +0,0 @@ -import collections -import copy -import json -import time -import datetime -import atexit -import traceback - -from bson.objectid import ObjectId -from pymongo import UpdateOne - -import arrow -import ftrack_api - -from openpype.client import ( - get_project, - get_assets, - get_archived_assets, - get_asset_ids_with_subsets -) -from openpype.client.operations import CURRENT_ASSET_DOC_SCHEMA -from openpype.pipeline import AvalonMongoDB, schema - -from openpype_modules.ftrack.lib import ( - get_openpype_attr, - query_custom_attributes, - CUST_ATTR_ID_KEY, - CUST_ATTR_AUTO_SYNC, - FPS_KEYS, - - avalon_sync, - - BaseEvent -) -from openpype_modules.ftrack.lib.avalon_sync import ( - convert_to_fps, - InvalidFpsValue -) - - -class SyncToAvalonEvent(BaseEvent): - interest_entTypes = ["show", "task"] - ignore_ent_types = ["Milestone"] - ignore_keys = ["statusid", "thumbid"] - - cust_attr_query_keys = [ - "id", - "key", - "entity_type", - "object_type_id", - "is_hierarchical", - "config", - "default" - ] - project_query = ( - "select full_name, name, custom_attributes" - ", project_schema._task_type_schema.types.name" - " from Project where id is \"{}\"" - ) - - entities_query_by_id = ( - "select id, name, parent_id, link, custom_attributes, description" - " from TypedContext where project_id is \"{}\" and id in ({})" - ) - - # useful for getting all tasks for asset - task_entities_query_by_parent_id = ( - "select id, name, parent_id, type_id from Task" - " where project_id is \"{}\" and parent_id in ({})" - ) - task_types_query = ( - "select id, name from Type" - ) - entities_name_query_by_name = ( - "select id, name from TypedContext" - " where project_id is \"{}\" and name in ({})" - ) - created_entities = [] - report_splitter = {"type": "label", "value": "---"} - - def __init__(self, session): - '''Expects a ftrack_api.Session instance''' - # Debug settings - # - time expiration in seconds - 
self.debug_print_time_expiration = 5 * 60 - # - store current time - self.debug_print_time = datetime.datetime.now() - # - store synchronize entity types to be able to use - # only entityTypes in interest instead of filtering by ignored - self.debug_sync_types = collections.defaultdict(list) - - self.dbcon = AvalonMongoDB() - # Set processing session to not use global - self.set_process_session(session) - super().__init__(session) - - def debug_logs(self): - """This is debug method for printing small debugs messages. """ - now_datetime = datetime.datetime.now() - delta = now_datetime - self.debug_print_time - if delta.total_seconds() < self.debug_print_time_expiration: - return - - self.debug_print_time = now_datetime - known_types_items = [] - for entityType, entity_type in self.debug_sync_types.items(): - ent_types_msg = ", ".join(entity_type) - known_types_items.append( - "<{}> ({})".format(entityType, ent_types_msg) - ) - - known_entityTypes = ", ".join(known_types_items) - self.log.debug( - "DEBUG MESSAGE: Known types {}".format(known_entityTypes) - ) - - @property - def cur_project(self): - if self._cur_project is None: - found_id = None - for ent_info in self._cur_event["data"]["entities"]: - if found_id is not None: - break - parents = ent_info.get("parents") or [] - for parent in parents: - if parent.get("entityType") == "show": - found_id = parent.get("entityId") - break - if found_id: - self._cur_project = self.process_session.query( - self.project_query.format(found_id) - ).one() - return self._cur_project - - @property - def avalon_cust_attrs(self): - if self._avalon_cust_attrs is None: - self._avalon_cust_attrs = get_openpype_attr( - self.process_session, query_keys=self.cust_attr_query_keys - ) - return self._avalon_cust_attrs - - @property - def cust_attr_types_by_id(self): - if self._cust_attr_types_by_id is None: - cust_attr_types = self.process_session.query( - "select id, name from CustomAttributeType" - ).all() - self._cust_attr_types_by_id = { - cust_attr_type["id"]: cust_attr_type - for cust_attr_type in cust_attr_types - } - return self._cust_attr_types_by_id - - @property - def avalon_entities(self): - if self._avalon_ents is None: - project_name = self.cur_project["full_name"] - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = project_name - avalon_project = get_project(project_name) - avalon_entities = list(get_assets(project_name)) - self._avalon_ents = (avalon_project, avalon_entities) - return self._avalon_ents - - @property - def avalon_ents_by_name(self): - if self._avalon_ents_by_name is None: - self._avalon_ents_by_name = {} - proj, ents = self.avalon_entities - for ent in ents: - self._avalon_ents_by_name[ent["name"]] = ent - return self._avalon_ents_by_name - - @property - def avalon_ents_by_id(self): - if self._avalon_ents_by_id is None: - self._avalon_ents_by_id = {} - proj, ents = self.avalon_entities - if proj: - self._avalon_ents_by_id[proj["_id"]] = proj - for ent in ents: - self._avalon_ents_by_id[ent["_id"]] = ent - return self._avalon_ents_by_id - - @property - def avalon_ents_by_parent_id(self): - if self._avalon_ents_by_parent_id is None: - self._avalon_ents_by_parent_id = collections.defaultdict(list) - proj, ents = self.avalon_entities - for ent in ents: - vis_par = ent["data"]["visualParent"] - if vis_par is None: - vis_par = proj["_id"] - self._avalon_ents_by_parent_id[vis_par].append(ent) - return self._avalon_ents_by_parent_id - - @property - def avalon_ents_by_ftrack_id(self): - if self._avalon_ents_by_ftrack_id is None: - 
self._avalon_ents_by_ftrack_id = {} - proj, ents = self.avalon_entities - if proj: - ftrack_id = proj["data"].get("ftrackId") - if ftrack_id is None: - self.handle_missing_ftrack_id(proj) - ftrack_id = proj["data"]["ftrackId"] - self._avalon_ents_by_ftrack_id[ftrack_id] = proj - - self._avalon_ents_by_ftrack_id[ftrack_id] = proj - for ent in ents: - ftrack_id = ent["data"].get("ftrackId") - if ftrack_id is None: - continue - self._avalon_ents_by_ftrack_id[ftrack_id] = ent - return self._avalon_ents_by_ftrack_id - - def handle_missing_ftrack_id(self, doc): - # TODO handling of missing ftrack id is primarily issue of editorial - # publishing it would be better to find out what causes that - # ftrack id is removed during the publishing - ftrack_id = doc["data"].get("ftrackId") - if ftrack_id is not None: - return - - if doc["type"] == "project": - ftrack_id = self.cur_project["id"] - - self.dbcon.update_one( - {"type": "project"}, - {"$set": { - "data.ftrackId": ftrack_id, - "data.entityType": self.cur_project.entity_type - }} - ) - - doc["data"]["ftrackId"] = ftrack_id - doc["data"]["entityType"] = self.cur_project.entity_type - self.log.info("Updated ftrack id of project \"{}\"".format( - self.cur_project["full_name"] - )) - return - - if doc["type"] != "asset": - return - - doc_parents = doc.get("data", {}).get("parents") - if doc_parents is None: - return - - entities = self.process_session.query(( - "select id, link from TypedContext" - " where project_id is \"{}\" and name is \"{}\"" - ).format(self.cur_project["id"], doc["name"])).all() - self.log.info("Entities: {}".format(str(entities))) - matching_entity = None - for entity in entities: - parents = [] - for item in entity["link"]: - if item["id"] == entity["id"]: - break - low_type = item["type"].lower() - if low_type == "typedcontext": - parents.append(item["name"]) - if doc_parents == parents: - matching_entity = entity - break - - if matching_entity is None: - return - - ftrack_id = matching_entity["id"] - self.dbcon.update_one( - {"_id": doc["_id"]}, - {"$set": { - "data.ftrackId": ftrack_id, - "data.entityType": matching_entity.entity_type - }} - ) - doc["data"]["ftrackId"] = ftrack_id - doc["data"]["entityType"] = matching_entity.entity_type - - entity_path_items = [] - for item in entity["link"]: - entity_path_items.append(item["name"]) - self.log.info("Updated ftrack id of entity \"{}\"".format( - "/".join(entity_path_items) - )) - self._avalon_ents_by_ftrack_id[ftrack_id] = doc - - @property - def avalon_asset_ids_with_subsets(self): - if self._avalon_asset_ids_with_subsets is None: - project_name = self.cur_project["full_name"] - self._avalon_asset_ids_with_subsets = get_asset_ids_with_subsets( - project_name - ) - - return self._avalon_asset_ids_with_subsets - - @property - def avalon_archived_by_id(self): - if self._avalon_archived_by_id is None: - self._avalon_archived_by_id = {} - project_name = self.cur_project["full_name"] - for asset in get_archived_assets(project_name): - self._avalon_archived_by_id[asset["_id"]] = asset - return self._avalon_archived_by_id - - @property - def avalon_archived_by_name(self): - if self._avalon_archived_by_name is None: - self._avalon_archived_by_name = {} - for asset in self.avalon_archived_by_id.values(): - self._avalon_archived_by_name[asset["name"]] = asset - return self._avalon_archived_by_name - - @property - def changeability_by_mongo_id(self): - """Return info about changeability of entity and it's parents.""" - if self._changeability_by_mongo_id is None: - 
self._changeability_by_mongo_id = collections.defaultdict( - lambda: True - ) - avalon_project, avalon_entities = self.avalon_entities - self._changeability_by_mongo_id[avalon_project["_id"]] = False - self._bubble_changeability( - list(self.avalon_asset_ids_with_subsets) - ) - - return self._changeability_by_mongo_id - - def remove_cached_by_key(self, key, values): - if self._avalon_ents is None: - return - - if not isinstance(values, (list, tuple)): - values = [values] - - def get_found_data(entity): - if not entity: - return None - return { - "ftrack_id": entity["data"]["ftrackId"], - "parent_id": entity["data"]["visualParent"], - "_id": entity["_id"], - "name": entity["name"], - "entity": entity - } - - if key == "id": - key = "_id" - elif key == "ftrack_id": - key = "data.ftrackId" - - found_data = {} - project, entities = self._avalon_ents - key_items = key.split(".") - for value in values: - ent = None - if key == "_id": - if self._avalon_ents_by_id is not None: - ent = self._avalon_ents_by_id.get(value) - - elif key == "name": - if self._avalon_ents_by_name is not None: - ent = self._avalon_ents_by_name.get(value) - - elif key == "data.ftrackId": - if self._avalon_ents_by_ftrack_id is not None: - ent = self._avalon_ents_by_ftrack_id.get(value) - - if ent is None: - for _ent in entities: - _temp = _ent - for item in key_items: - _temp = _temp[item] - - if _temp == value: - ent = _ent - break - - found_data[value] = get_found_data(ent) - - for value in values: - data = found_data[value] - if not data: - # TODO logging - self.log.warning( - "Didn't find entity by key/value \"{}\" / \"{}\"".format( - key, value - ) - ) - continue - - ftrack_id = data["ftrack_id"] - parent_id = data["parent_id"] - mongo_id = data["_id"] - name = data["name"] - entity = data["entity"] - - project, ents = self._avalon_ents - ents.remove(entity) - self._avalon_ents = project, ents - - if self._avalon_ents_by_ftrack_id is not None: - self._avalon_ents_by_ftrack_id.pop(ftrack_id, None) - - if self._avalon_ents_by_parent_id is not None: - self._avalon_ents_by_parent_id[parent_id].remove(entity) - - if self._avalon_ents_by_id is not None: - self._avalon_ents_by_id.pop(mongo_id, None) - - if self._avalon_ents_by_name is not None: - self._avalon_ents_by_name.pop(name, None) - - if self._avalon_archived_by_id is not None: - self._avalon_archived_by_id[mongo_id] = entity - - def _bubble_changeability(self, unchangeable_ids): - unchangeable_queue = collections.deque() - for entity_id in unchangeable_ids: - unchangeable_queue.append((entity_id, False)) - - processed_parents_ids = [] - while unchangeable_queue: - entity_id, child_is_archived = unchangeable_queue.popleft() - # skip if already processed - if entity_id in processed_parents_ids: - continue - - entity = self.avalon_ents_by_id.get(entity_id) - # if entity is not archived but unchageable child was then skip - # - archived entities should not affect not archived? 
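
`_bubble_changeability` marks every ancestor of an asset with published subsets as unchangeable by walking up the visual-parent chain. A self-contained sketch of the same bubbling idea, using a hypothetical `parents` map instead of avalon documents:

```python
import collections

# `parents` maps entity id -> parent id (None at the project root);
# ids here are hypothetical.
parents = {"shot010": "seq01", "seq01": "shots", "shots": None}


def bubble_unchangeable(ids_with_subsets):
    changeable = collections.defaultdict(lambda: True)
    queue = collections.deque(ids_with_subsets)
    while queue:
        entity_id = queue.popleft()
        if not changeable[entity_id]:
            continue  # this branch was already processed
        changeable[entity_id] = False
        parent_id = parents.get(entity_id)
        if parent_id is not None:
            queue.append(parent_id)
    return dict(changeable)


print(bubble_unchangeable(["shot010"]))
# {'shot010': False, 'seq01': False, 'shots': False}
```
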
- if entity and child_is_archived: - continue - - # set changeability of current entity to False - self._changeability_by_mongo_id[entity_id] = False - processed_parents_ids.append(entity_id) - # if not entity then is probably archived - if not entity: - entity = self.avalon_archived_by_id.get(entity_id) - child_is_archived = True - - if not entity: - # if entity is not found then it is subset without parent - if entity_id in unchangeable_ids: - self.log.warning(( - "Parent <{}> with subsets does not exist" - ).format(str(entity_id))) - else: - self.log.warning(( - "In avalon are entities without valid parents that" - " lead to Project (should not cause errors)" - " - MongoId <{}>" - ).format(str(entity_id))) - continue - - # skip if parent is project - parent_id = entity["data"]["visualParent"] - if parent_id is None: - continue - unchangeable_queue.append((parent_id, child_is_archived)) - - def reset_variables(self): - """Reset variables so each event callback has clear env.""" - self._cur_project = None - - self._avalon_cust_attrs = None - self._cust_attr_types_by_id = None - - self._avalon_ents = None - self._avalon_ents_by_id = None - self._avalon_ents_by_parent_id = None - self._avalon_ents_by_ftrack_id = None - self._avalon_ents_by_name = None - self._avalon_asset_ids_with_subsets = None - self._changeability_by_mongo_id = None - self._avalon_archived_by_id = None - self._avalon_archived_by_name = None - - self._ent_types_by_name = None - - self.ftrack_ents_by_id = {} - self.obj_id_ent_type_map = {} - self.ftrack_recreated_mapping = {} - - self.ftrack_added = {} - self.ftrack_moved = {} - self.ftrack_renamed = {} - self.ftrack_updated = {} - self.ftrack_removed = {} - - # set of ftrack ids with modified tasks - # handled separately by full wipeout and replace from FTrack - self.modified_tasks_ftrackids = set() - - self.moved_in_avalon = [] - self.renamed_in_avalon = [] - self.hier_cust_attrs_changes = collections.defaultdict(list) - - self.duplicated = [] - self.regex_failed = [] - - self.regex_schemas = {} - self.updates = collections.defaultdict(dict) - - self.report_items = { - "info": collections.defaultdict(list), - "warning": collections.defaultdict(list), - "error": collections.defaultdict(list) - } - - def set_process_session(self, session): - try: - self.process_session.close() - except Exception: - pass - self.process_session = ftrack_api.Session( - server_url=session.server_url, - api_key=session.api_key, - api_user=session.api_user, - auto_connect_event_hub=True - ) - atexit.register(lambda: self.process_session.close()) - - def filter_updated(self, updates): - filtered_updates = {} - for ftrack_id, ent_info in updates.items(): - changed_keys = [k for k in (ent_info.get("keys") or [])] - changes = { - k: v for k, v in (ent_info.get("changes") or {}).items() - } - - entity_type = ent_info["entity_type"] - if entity_type == "Task": - if "name" in changed_keys: - ent_info["keys"] = ["name"] - ent_info["changes"] = {"name": changes.pop("name")} - filtered_updates[ftrack_id] = ent_info - continue - - for _key in self.ignore_keys: - if _key in changed_keys: - changed_keys.remove(_key) - changes.pop(_key, None) - - if not changed_keys: - continue - - # Remove custom attributes starting with `avalon_` from changes - # - these custom attributes are not synchronized - avalon_keys = [] - for key in changes: - if key.startswith("avalon_"): - avalon_keys.append(key) - - for _key in avalon_keys: - changed_keys.remove(_key) - changes.pop(_key, None) - - if not changed_keys: - continue 
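
`filter_updated` above keeps an update only if something remains after dropping the ignored keys and the `avalon_`-prefixed custom attributes. A condensed sketch of that filtering on a plain changes dict:

```python
# Condensed sketch of the change filtering; the input dict is hypothetical.
IGNORE_KEYS = {"statusid", "thumbid"}


def filter_changes(changes):
    return {
        key: value
        for key, value in changes.items()
        if key not in IGNORE_KEYS and not key.startswith("avalon_")
    }


print(filter_changes({"statusid": 1, "avalon_mongo_id": 2, "name": 3}))
# {'name': 3}
```
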
- - ent_info["keys"] = changed_keys - ent_info["changes"] = changes - filtered_updates[ftrack_id] = ent_info - - return filtered_updates - - def get_ent_path(self, ftrack_id): - """ - Looks for entity in FTrack with 'ftrack_id'. If found returns - concatenated paths from its 'link' elemenent's names. Describes - location of entity in tree. - Args: - ftrack_id (string): entityId of FTrack entity - - Returns: - (string) - example : "/test_project/assets/my_asset" - """ - entity = self.ftrack_ents_by_id.get(ftrack_id) - if not entity: - entity = self.process_session.query( - self.entities_query_by_id.format( - self.cur_project["id"], ftrack_id - ) - ).first() - if entity: - self.ftrack_ents_by_id[ftrack_id] = entity - else: - return "unknown hierarchy" - return "/".join([ent["name"] for ent in entity["link"]]) - - def launch(self, session, event): - """ - Main entry port for synchronization. - Goes through event (can contain multiple changes) and decides if - the event is interesting for us (interest_entTypes). - It separates changes into add|remove|update. - All task changes are handled together by refresh from Ftrack. - Args: - session (object): session to Ftrack - event (dictionary): event content - - Returns: - (boolean or None) - """ - # Try to commit and if any error happen then recreate session - try: - self.process_session.commit() - except Exception: - self.set_process_session(session) - # Reset object values for each launch - self.reset_variables() - self._cur_event = event - - entities_by_action = { - "remove": {}, - "update": {}, - "move": {}, - "add": {} - } - - entities_info = event["data"]["entities"] - found_actions = set() - for ent_info in entities_info: - entityType = ent_info["entityType"] - if entityType not in self.interest_entTypes: - continue - - entity_type = ent_info.get("entity_type") - if not entity_type or entity_type in self.ignore_ent_types: - continue - - if entity_type not in self.debug_sync_types[entityType]: - self.debug_sync_types[entityType].append(entity_type) - - action = ent_info["action"] - ftrack_id = ent_info["entityId"] - if isinstance(ftrack_id, list): - self.log.warning(( - "BUG REPORT: Entity info has `entityId` as `list` \"{}\"" - ).format(ent_info)) - if len(ftrack_id) == 0: - continue - ftrack_id = ftrack_id[0] - - # Skip deleted projects - if action == "remove" and entityType == "show": - return True - - # task modified, collect parent id of task, handle separately - if entity_type.lower() == "task": - changes = ent_info.get("changes") or {} - if action == "move": - parent_changes = changes["parent_id"] - self.modified_tasks_ftrackids.add(parent_changes["new"]) - self.modified_tasks_ftrackids.add(parent_changes["old"]) - - elif "typeid" in changes or "name" in changes: - self.modified_tasks_ftrackids.add(ent_info["parentId"]) - continue - - if action == "move": - ent_keys = ent_info["keys"] - # Separate update info from move action - if len(ent_keys) > 1: - _ent_info = ent_info.copy() - for ent_key in ent_keys: - if ent_key == "parent_id": - _ent_info["changes"].pop(ent_key, None) - _ent_info["keys"].remove(ent_key) - else: - ent_info["changes"].pop(ent_key, None) - ent_info["keys"].remove(ent_key) - entities_by_action["update"][ftrack_id] = _ent_info - # regular change process handles all other than Tasks - found_actions.add(action) - entities_by_action[action][ftrack_id] = ent_info - - found_actions = list(found_actions) - if not found_actions and not self.modified_tasks_ftrackids: - return True - - # Check if auto sync was turned on/off - 
updated = entities_by_action["update"] - for ftrack_id, ent_info in updated.items(): - # filter project - if ent_info["entityType"] != "show": - continue - - changes = ent_info["changes"] - if CUST_ATTR_AUTO_SYNC not in changes: - continue - - auto_sync = changes[CUST_ATTR_AUTO_SYNC]["new"] - turned_on = auto_sync == "1" - ft_project = self.cur_project - username = self._get_username(session, event) - message = ( - "Auto sync was turned {} for project \"{}\" by \"{}\"." - ).format( - "on" if turned_on else "off", - ft_project["full_name"], - username - ) - if turned_on: - message += " Triggering syncToAvalon action." - self.log.debug(message) - - if turned_on: - # Trigger sync to avalon action if auto sync was turned on - selection = [{ - "entityId": ft_project["id"], - "entityType": "show" - }] - self.trigger_action( - action_name="sync.to.avalon.server", - event=event, - selection=selection - ) - # Exit for both cases - return True - - # Filter updated data by changed keys - updated = self.filter_updated(updated) - - # skip most of events where nothing has changed for avalon - if ( - len(found_actions) == 1 - and found_actions[0] == "update" - and not updated - and not self.modified_tasks_ftrackids - ): - return True - - ft_project = self.cur_project - # Check if auto-sync custom attribute exists - if CUST_ATTR_AUTO_SYNC not in ft_project["custom_attributes"]: - # TODO should we sent message to someone? - self.log.error(( - "Custom attribute \"{}\" is not created or user \"{}\" used" - " for Event server don't have permissions to access it!" - ).format(CUST_ATTR_AUTO_SYNC, self.session.api_user)) - return True - - # Skip if auto-sync is not set - auto_sync = ft_project["custom_attributes"][CUST_ATTR_AUTO_SYNC] - if auto_sync is not True: - return True - - debug_msg = "Updated: {}".format(len(updated)) - debug_action_map = { - "add": "Created", - "remove": "Removed", - "move": "Moved" - } - for action, infos in entities_by_action.items(): - if action == "update": - continue - _action = debug_action_map[action] - debug_msg += "| {}: {}".format(_action, len(infos)) - - self.log.debug("Project changes <{}>: {}".format( - ft_project["full_name"], debug_msg - )) - # Get ftrack entities - find all ftrack ids first - ftrack_ids = set(updated.keys()) - - for action, _ftrack_ids in entities_by_action.items(): - # skip updated (already prepared) and removed (not exist in ftrack) - if action not in ("remove", "update"): - ftrack_ids |= set(_ftrack_ids) - - # collect entity records data which might not be in event - if ftrack_ids: - joined_ids = ", ".join(["\"{}\"".format(id) for id in ftrack_ids]) - ftrack_entities = self.process_session.query( - self.entities_query_by_id.format(ft_project["id"], joined_ids) - ).all() - for entity in ftrack_entities: - self.ftrack_ents_by_id[entity["id"]] = entity - - # Filter updates where name is changing - for ftrack_id, ent_info in updated.items(): - ent_keys = ent_info["keys"] - # Separate update info from rename - if "name" not in ent_keys: - continue - - _ent_info = copy.deepcopy(ent_info) - for ent_key in ent_keys: - if ent_key == "name": - ent_info["changes"].pop(ent_key, None) - ent_info["keys"].remove(ent_key) - else: - _ent_info["changes"].pop(ent_key, None) - _ent_info["keys"].remove(ent_key) - - self.ftrack_renamed[ftrack_id] = _ent_info - - self.ftrack_removed = entities_by_action["remove"] - self.ftrack_moved = entities_by_action["move"] - self.ftrack_added = entities_by_action["add"] - self.ftrack_updated = updated - - self.debug_logs() - - 
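
The synchronization body that follows times each phase with successive `time.time()` calls. The same measurement pattern, sketched generically with dummy callables standing in for `process_removed()`, `process_renamed()`, and the other phases:

```python
import time


def run_phases(phases):
    """Run (name, callable) pairs and measure each phase plus the total."""
    durations = {}
    total_start = time.time()
    for name, func in phases:
        start = time.time()
        func()
        durations[name] = time.time() - start
    durations["total"] = time.time() - total_start
    return durations


print(run_phases([("removed", lambda: None), ("renamed", lambda: None)]))
```
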
self.log.debug("Synchronization begins") - try: - time_1 = time.time() - # 1.) Process removed - may affect all other actions - self.process_removed() - time_2 = time.time() - # 2.) Process renamed - may affect added - self.process_renamed() - time_3 = time.time() - # 3.) Process added - moved entity may be moved to new entity - self.process_added() - time_4 = time.time() - # 4.) Process moved - self.process_moved() - time_5 = time.time() - # 5.) Process updated - self.process_updated() - time_6 = time.time() - # 6.) Process changes in hierarchy or hier custom attributes - self.process_hier_cleanup() - time_7 = time.time() - self.process_task_updates() - if self.updates: - self.update_entities() - time_8 = time.time() - - time_removed = time_2 - time_1 - time_renamed = time_3 - time_2 - time_added = time_4 - time_3 - time_moved = time_5 - time_4 - time_updated = time_6 - time_5 - time_cleanup = time_7 - time_6 - time_task_updates = time_8 - time_7 - time_total = time_8 - time_1 - self.log.debug(( - "Process time: {:.2f} <{:.2f}, {:.2f}, {:.2f}, " - "{:.2f}, {:.2f}, {:.2f}, {:.2f}>" - ).format( - time_total, time_removed, time_renamed, time_added, - time_moved, time_updated, time_cleanup, time_task_updates - )) - - except Exception: - msg = "An error has happened during synchronization" - self.report_items["error"][msg].append(( - str(traceback.format_exc()).replace("\n", "
") - ).replace(" ", " ")) - - self.report() - return True - - def _get_username(self, session, event): - username = "Unknown" - event_source = event.get("source") - if not event_source: - return username - user_info = event_source.get("user") - if not user_info: - return username - user_id = user_info.get("id") - if not user_id: - return username - - user_entity = session.query( - "User where id is {}".format(user_id) - ).first() - if user_entity: - username = user_entity["username"] or username - return username - - - def process_removed(self): - """ - Handles removed entities (not removed tasks - handle separately). - """ - if not self.ftrack_removed: - return - ent_infos = self.ftrack_removed - self.log.debug( - "Processing removed entities: {}".format(str(ent_infos)) - ) - removable_ids = [] - recreate_ents = [] - removed_names = [] - for ftrack_id, removed in ent_infos.items(): - entity_type = removed["entity_type"] - if entity_type.lower() == "task": - continue - - removed_name = removed["changes"]["name"]["old"] - - avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not avalon_ent: - continue - mongo_id = avalon_ent["_id"] - if self.changeability_by_mongo_id[mongo_id]: - removable_ids.append(mongo_id) - removed_names.append(removed_name) - else: - recreate_ents.append(avalon_ent) - - if removable_ids: - # TODO logging - self.log.debug("Assets marked as archived <{}>".format( - ", ".join(removed_names) - )) - self.dbcon.update_many( - {"_id": {"$in": removable_ids}, "type": "asset"}, - {"$set": {"type": "archived_asset"}} - ) - self.remove_cached_by_key("id", removable_ids) - - if recreate_ents: - # sort removed entities by parents len - # - length of parents determine hierarchy level - recreate_ents = sorted( - recreate_ents, - key=(lambda item: len( - (item.get("data", {}).get("parents") or []) - )) - ) - # TODO logging - # TODO report - recreate_msg = ( - "Deleted entity was recreated||Entity was recreated because" - " it or its children contain published data" - ) - proj, ents = self.avalon_entities - for avalon_entity in recreate_ents: - old_ftrack_id = avalon_entity["data"]["ftrackId"] - vis_par = avalon_entity["data"]["visualParent"] - if vis_par is None: - vis_par = proj["_id"] - parent_ent = self.avalon_ents_by_id[vis_par] - - parent_ftrack_id = parent_ent["data"].get("ftrackId") - if parent_ftrack_id is None: - self.handle_missing_ftrack_id(parent_ent) - parent_ftrack_id = parent_ent["data"].get("ftrackId") - if parent_ftrack_id is None: - continue - - parent_ftrack_ent = self.ftrack_ents_by_id.get( - parent_ftrack_id - ) - if not parent_ftrack_ent: - if parent_ent["type"].lower() == "project": - parent_ftrack_ent = self.cur_project - else: - parent_ftrack_ent = self.process_session.query( - self.entities_query_by_id.format( - self.cur_project["id"], parent_ftrack_id - ) - ).one() - entity_type = avalon_entity["data"]["entityType"] - new_entity = self.process_session.create(entity_type, { - "name": avalon_entity["name"], - "parent": parent_ftrack_ent - }) - try: - self.process_session.commit() - except Exception: - # TODO logging - # TODO report - self.process_session.rollback() - ent_path_items = [self.cur_project["full_name"]] - ent_path_items.extend([ - par for par in avalon_entity["data"]["parents"] - ]) - ent_path_items.append(avalon_entity["name"]) - ent_path = "/".join(ent_path_items) - - error_msg = "Couldn't recreate entity in Ftrack" - report_msg = ( - "{}||Trying to recreate because it or its children" - " contain published data" - ).format(error_msg) 
- self.report_items["warning"][report_msg].append(ent_path) - self.log.warning( - "{}. Process session commit failed! <{}>".format( - error_msg, ent_path - ), - exc_info=True - ) - continue - - new_entity_id = new_entity["id"] - avalon_entity["data"]["ftrackId"] = new_entity_id - - for key, val in avalon_entity["data"].items(): - if not val: - continue - if key not in new_entity["custom_attributes"]: - continue - - new_entity["custom_attributes"][key] = val - - new_entity["custom_attributes"][CUST_ATTR_ID_KEY] = ( - str(avalon_entity["_id"]) - ) - ent_path = self.get_ent_path(new_entity_id) - - try: - self.process_session.commit() - except Exception: - # TODO logging - # TODO report - self.process_session.rollback() - error_msg = ( - "Couldn't update custom attributes after recreation" - " of entity in Ftrack" - ) - report_msg = ( - "{}||Entity was recreated because it or its children" - " contain published data" - ).format(error_msg) - self.report_items["warning"][report_msg].append(ent_path) - self.log.warning( - "{}. Process session commit failed! <{}>".format( - error_msg, ent_path - ), - exc_info=True - ) - continue - - self.report_items["info"][recreate_msg].append(ent_path) - - self.ftrack_recreated_mapping[old_ftrack_id] = new_entity_id - self.process_session.commit() - - found_idx = None - proj_doc, asset_docs = self._avalon_ents - for idx, asset_doc in enumerate(asset_docs): - if asset_doc["_id"] == avalon_entity["_id"]: - found_idx = idx - break - - if found_idx is None: - continue - - # Prepare updates dict for mongo update - if "data" not in self.updates[avalon_entity["_id"]]: - self.updates[avalon_entity["_id"]]["data"] = {} - - self.updates[avalon_entity["_id"]]["data"]["ftrackId"] = ( - new_entity_id - ) - # Update cached entities - asset_docs[found_idx] = avalon_entity - self._avalon_ents = proj_doc, asset_docs - - if self._avalon_ents_by_id is not None: - mongo_id = avalon_entity["_id"] - self._avalon_ents_by_id[mongo_id] = avalon_entity - - if self._avalon_ents_by_parent_id is not None: - vis_par = avalon_entity["data"]["visualParent"] - children = self._avalon_ents_by_parent_id[vis_par] - found_idx = None - for idx, _entity in enumerate(children): - if _entity["_id"] == avalon_entity["_id"]: - found_idx = idx - break - children[found_idx] = avalon_entity - self._avalon_ents_by_parent_id[vis_par] = children - - if self._avalon_ents_by_ftrack_id is not None: - self._avalon_ents_by_ftrack_id.pop(old_ftrack_id) - self._avalon_ents_by_ftrack_id[new_entity_id] = ( - avalon_entity - ) - - if self._avalon_ents_by_name is not None: - name = avalon_entity["name"] - self._avalon_ents_by_name[name] = avalon_entity - - # Check if entities with same name can be synchronized - if not removed_names: - return - - self.check_names_synchronizable(removed_names) - - def check_names_synchronizable(self, names): - """Check if entities with specific names are importable. - - This check should happen after removing entity or renaming entity. - When entity was removed or renamed then it's name is possible to sync. 
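
`check_names_synchronizable` below only considers a name importable again when exactly one entity with that name remains in the project. A standalone sketch of that uniqueness check on hypothetical entity dicts:

```python
import collections


def syncable_names(entities):
    """Return names that map to exactly one remaining entity."""
    by_name = collections.defaultdict(list)
    for entity in entities:
        by_name[entity["name"]].append(entity)
    return [name for name, ents in by_name.items() if len(ents) == 1]


entities = [{"name": "shot010"}, {"name": "shot020"}, {"name": "shot020"}]
print(syncable_names(entities))  # ['shot010']
```
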
- """ - joined_passed_names = ", ".join( - ["\"{}\"".format(name) for name in names] - ) - same_name_entities = self.process_session.query( - self.entities_name_query_by_name.format( - self.cur_project["id"], joined_passed_names - ) - ).all() - if not same_name_entities: - return - - entities_by_name = collections.defaultdict(list) - for entity in same_name_entities: - entities_by_name[entity["name"]].append(entity) - - synchronizable_ents = [] - self.log.debug(( - "Deleting of entities should allow to synchronize another entities" - " with same name." - )) - for name, ents in entities_by_name.items(): - if len(ents) != 1: - self.log.debug(( - "Name \"{}\" still have more than one entity <{}>" - ).format( - name, "| ".join( - [self.get_ent_path(ent["id"]) for ent in ents] - ) - )) - continue - - entity = ents[0] - ent_path = self.get_ent_path(entity["id"]) - # TODO logging - self.log.debug( - "Checking if can synchronize entity <{}>".format(ent_path) - ) - # skip if already synchronized - ftrack_id = entity["id"] - if ftrack_id in self.avalon_ents_by_ftrack_id: - # TODO logging - self.log.debug( - "- Entity is already synchronized (skipping) <{}>".format( - ent_path - ) - ) - continue - - parent_id = entity["parent_id"] - if parent_id not in self.avalon_ents_by_ftrack_id: - # TODO logging - self.log.debug(( - "- Entity's parent entity doesn't seems to" - " be synchronized (skipping) <{}>" - ).format(ent_path)) - continue - - synchronizable_ents.append(entity) - - if not synchronizable_ents: - return - - synchronizable_ents = sorted( - synchronizable_ents, - key=(lambda entity: len(entity["link"])) - ) - - children_queue = collections.deque() - for entity in synchronizable_ents: - parent_avalon_ent = self.avalon_ents_by_ftrack_id[ - entity["parent_id"] - ] - self.create_entity_in_avalon(entity, parent_avalon_ent) - - for child in entity["children"]: - if child.entity_type.lower() != "task": - children_queue.append(child) - - while children_queue: - entity = children_queue.popleft() - ftrack_id = entity["id"] - name = entity["name"] - ent_by_ftrack_id = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if ent_by_ftrack_id: - raise Exception(( - "This is bug, parent was just synchronized to avalon" - " but entity is already in database {}" - ).format(dict(entity))) - - # Entity has duplicated name with another entity - # - may be renamed: in that case renaming method will handle that - duplicate_ent = self.avalon_ents_by_name.get(name) - if duplicate_ent: - continue - - passed_regex = avalon_sync.check_regex( - name, "asset", schema_patterns=self.regex_schemas - ) - if not passed_regex: - continue - - parent_id = entity["parent_id"] - parent_avalon_ent = self.avalon_ents_by_ftrack_id[parent_id] - - self.create_entity_in_avalon(entity, parent_avalon_ent) - - for child in entity["children"]: - if child.entity_type.lower() == "task": - continue - children_queue.append(child) - - def create_entity_in_avalon(self, ftrack_ent, parent_avalon): - proj, ents = self.avalon_entities - - # Parents, Hierarchy - ent_path_items = [ent["name"] for ent in ftrack_ent["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - - # TODO logging - self.log.debug( - "Trying to synchronize entity <{}>".format( - "/".join(ent_path_items) - ) - ) - - # Add entity to modified so tasks are added at the end - self.modified_tasks_ftrackids.add(ftrack_ent["id"]) - - # Visual Parent - vis_par = None - if parent_avalon["type"].lower() != "project": - vis_par = parent_avalon["_id"] - - mongo_id = ObjectId() - name = 
ftrack_ent["name"] - final_entity = { - "_id": mongo_id, - "name": name, - "type": "asset", - "schema": CURRENT_ASSET_DOC_SCHEMA, - "parent": proj["_id"], - "data": { - "ftrackId": ftrack_ent["id"], - "entityType": ftrack_ent.entity_type, - "parents": parents, - "tasks": {}, - "visualParent": vis_par, - "description": ftrack_ent["description"] - } - } - invalid_fps_items = [] - cust_attrs = self.get_cust_attr_values(ftrack_ent) - for key, val in cust_attrs.items(): - if key.startswith("avalon_"): - continue - - if key in FPS_KEYS: - try: - val = convert_to_fps(val) - except InvalidFpsValue: - invalid_fps_items.append((ftrack_ent["id"], val)) - continue - - final_entity["data"][key] = val - - if invalid_fps_items: - fps_msg = ( - "These entities have invalid fps value in custom attributes" - ) - items = [] - for entity_id, value in invalid_fps_items: - ent_path = self.get_ent_path(entity_id) - items.append("{} - \"{}\"".format(ent_path, value)) - self.report_items["error"][fps_msg] = items - - _mongo_id_str = cust_attrs.get(CUST_ATTR_ID_KEY) - if _mongo_id_str: - try: - _mongo_id = ObjectId(_mongo_id_str) - if _mongo_id not in self.avalon_ents_by_id: - mongo_id = _mongo_id - final_entity["_id"] = mongo_id - - except Exception: - pass - - ent_path_items = [self.cur_project["full_name"]] - ent_path_items.extend([par for par in parents]) - ent_path_items.append(name) - ent_path = "/".join(ent_path_items) - - try: - schema.validate(final_entity) - except Exception: - # TODO logging - # TODO report - error_msg = ( - "Schema validation failed for new entity (This is a bug)" - ) - error_traceback = ( - str(traceback.format_exc()).replace("\n", "
") - ).replace(" ", " ") - - item_msg = ent_path + "
" + error_traceback - self.report_items["error"][error_msg].append(item_msg) - self.log.error( - "{}: \"{}\"".format(error_msg, str(final_entity)), - exc_info=True - ) - return None - - replaced = False - archived = self.avalon_archived_by_name.get(name) - if archived: - archived_id = archived["_id"] - if ( - archived["data"]["parents"] == parents or - self.changeability_by_mongo_id[archived_id] - ): - # TODO logging - self.log.debug( - "Entity was unarchived instead of creation <{}>".format( - ent_path - ) - ) - mongo_id = archived_id - final_entity["_id"] = mongo_id - self.dbcon.replace_one({"_id": mongo_id}, final_entity) - replaced = True - - if not replaced: - self.dbcon.insert_one(final_entity) - # TODO logging - self.log.debug("Entity was synchronized <{}>".format(ent_path)) - - mongo_id_str = str(mongo_id) - if mongo_id_str != ftrack_ent["custom_attributes"][CUST_ATTR_ID_KEY]: - ftrack_ent["custom_attributes"][CUST_ATTR_ID_KEY] = mongo_id_str - try: - self.process_session.commit() - except Exception: - self.process_session.rollback() - # TODO logging - # TODO report - error_msg = ( - "Failed to store MongoID to entity's custom attribute" - ) - report_msg = ( - "{}||SyncToAvalon action may solve this issue" - ).format(error_msg) - - self.report_items["warning"][report_msg].append(ent_path) - self.log.error( - "{}: \"{}\"".format(error_msg, ent_path), - exc_info=True - ) - - # modify cached data - # Skip if self._avalon_ents is not set(maybe never happen) - if self._avalon_ents is None: - return final_entity - - if self._avalon_ents is not None: - proj, ents = self._avalon_ents - ents.append(final_entity) - self._avalon_ents = (proj, ents) - - if self._avalon_ents_by_id is not None: - self._avalon_ents_by_id[mongo_id] = final_entity - - if self._avalon_ents_by_parent_id is not None: - self._avalon_ents_by_parent_id[vis_par].append(final_entity) - - if self._avalon_ents_by_ftrack_id is not None: - self._avalon_ents_by_ftrack_id[ftrack_ent["id"]] = final_entity - - if self._avalon_ents_by_name is not None: - self._avalon_ents_by_name[ftrack_ent["name"]] = final_entity - - return final_entity - - def get_cust_attr_values(self, entity): - output = {} - custom_attrs, hier_attrs = self.avalon_cust_attrs - - # Notmal custom attributes - for attr in custom_attrs: - key = attr["key"] - if key in entity["custom_attributes"]: - output[key] = entity["custom_attributes"][key] - - hier_values = avalon_sync.get_hierarchical_attributes_values( - self.process_session, - entity, - hier_attrs, - self.cust_attr_types_by_id.values() - ) - for key, val in hier_values.items(): - output[key] = val - - # Make sure mongo id is not set - output.pop(CUST_ATTR_ID_KEY, None) - - return output - - def process_renamed(self): - ent_infos = self.ftrack_renamed - if not ent_infos: - return - - self.log.debug( - "Processing renamed entities: {}".format(str(ent_infos)) - ) - - changeable_queue = collections.deque() - for ftrack_id, ent_info in ent_infos.items(): - entity_type = ent_info["entity_type"] - if entity_type == "Task": - continue - - new_name = ent_info["changes"]["name"]["new"] - old_name = ent_info["changes"]["name"]["old"] - - ent_path = self.get_ent_path(ftrack_id) - avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not avalon_ent: - # TODO logging - self.log.debug(( - "Entity is not is avalon. Moving to \"add\" process. 
<{}>" - ).format(ent_path)) - self.ftrack_added[ftrack_id] = ent_info - continue - - if new_name == avalon_ent["name"]: - # TODO logging - self.log.debug(( - "Avalon entity already has the same name <{}>" - ).format(ent_path)) - continue - - mongo_id = avalon_ent["_id"] - if self.changeability_by_mongo_id[mongo_id]: - changeable_queue.append((ftrack_id, avalon_ent, new_name)) - else: - ftrack_ent = self.ftrack_ents_by_id[ftrack_id] - ftrack_ent["name"] = avalon_ent["name"] - try: - self.process_session.commit() - # TODO logging - # TODO report - error_msg = "Entity renamed back" - report_msg = ( - "{}||It is not possible to change" - " the name of an entity or it's parents, " - " if it already contained published data." - ).format(error_msg) - self.report_items["info"][report_msg].append(ent_path) - self.log.warning("{} <{}>".format(error_msg, ent_path)) - - except Exception: - self.process_session.rollback() - # TODO report - # TODO logging - error_msg = ( - "Couldn't rename the entity back to its original name" - ) - report_msg = ( - "{}||Renamed because it is not possible to" - " change the name of an entity or it's parents, " - " if it already contained published data." - ).format(error_msg) - error_traceback = ( - str(traceback.format_exc()).replace("\n", "
") - ).replace(" ", " ") - - item_msg = ent_path + "
" + error_traceback - self.report_items["warning"][report_msg].append(item_msg) - self.log.warning( - "{}: \"{}\"".format(error_msg, ent_path), - exc_info=True - ) - - old_names = [] - # Process renaming in Avalon DB - while changeable_queue: - ftrack_id, avalon_ent, new_name = changeable_queue.popleft() - mongo_id = avalon_ent["_id"] - old_name = avalon_ent["name"] - - _entity_type = "asset" - if entity_type == "Project": - _entity_type = "project" - - passed_regex = avalon_sync.check_regex( - new_name, _entity_type, schema_patterns=self.regex_schemas - ) - if not passed_regex: - self.regex_failed.append(ftrack_id) - continue - - # if avalon does not have same name then can be changed - same_name_avalon_ent = self.avalon_ents_by_name.get(new_name) - if not same_name_avalon_ent: - old_val = self._avalon_ents_by_name.pop(old_name) - old_val["name"] = new_name - self._avalon_ents_by_name[new_name] = old_val - self.updates[mongo_id] = {"name": new_name} - self.renamed_in_avalon.append(mongo_id) - - old_names.append(old_name) - if new_name in old_names: - old_names.remove(new_name) - - # TODO logging - ent_path = self.get_ent_path(ftrack_id) - self.log.debug( - "Name of entity will be changed to \"{}\" <{}>".format( - new_name, ent_path - ) - ) - continue - - # Check if same name is in changable_queue - # - it's name may be changed in next iteration - same_name_ftrack_id = same_name_avalon_ent["data"]["ftrackId"] - same_is_unprocessed = False - for item in changeable_queue: - if same_name_ftrack_id == item[0]: - same_is_unprocessed = True - break - - if same_is_unprocessed: - changeable_queue.append((ftrack_id, avalon_ent, new_name)) - continue - - self.duplicated.append(ftrack_id) - - if old_names: - self.check_names_synchronizable(old_names) - - # not_found are not processed since all not found are - # not found because they are not synchronizable - - def process_added(self): - ent_infos = self.ftrack_added - if not ent_infos: - return - - self.log.debug( - "Processing added entities: {}".format(str(ent_infos)) - ) - - cust_attrs, hier_attrs = self.avalon_cust_attrs - entity_type_conf_ids = {} - # Skip if already exit in avalon db or tasks entities - # - happen when was created by any sync event/action - pop_out_ents = [] - for ftrack_id, ent_info in ent_infos.items(): - if self.avalon_ents_by_ftrack_id.get(ftrack_id): - pop_out_ents.append(ftrack_id) - self.log.warning( - "Added entity is already synchronized <{}>".format( - self.get_ent_path(ftrack_id) - ) - ) - continue - - entity_type = ent_info["entity_type"] - if entity_type == "Task": - continue - - name = ( - ent_info - .get("changes", {}) - .get("name", {}) - .get("new") - ) - avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {} - avalon_ent_by_name_ftrack_id = ( - avalon_ent_by_name - .get("data", {}) - .get("ftrackId") - ) - if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None: - ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) - if not ftrack_ent: - ftrack_ent = self.process_session.query( - self.entities_query_by_id.format( - self.cur_project["id"], ftrack_id - ) - ).one() - self.ftrack_ents_by_id[ftrack_id] = ftrack_ent - - ent_path_items = [ent["name"] for ent in ftrack_ent["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - - avalon_ent_parents = ( - avalon_ent_by_name.get("data", {}).get("parents") - ) - if parents == avalon_ent_parents: - self.dbcon.update_one({ - "_id": avalon_ent_by_name["_id"] - }, { - "$set": { - "data.ftrackId": ftrack_id, - "data.entityType": entity_type - } - }) - - 
avalon_ent_by_name["data"]["ftrackId"] = ftrack_id - avalon_ent_by_name["data"]["entityType"] = entity_type - - self._avalon_ents_by_ftrack_id[ftrack_id] = ( - avalon_ent_by_name - ) - if self._avalon_ents_by_parent_id: - found = None - for _parent_id_, _entities_ in ( - self._avalon_ents_by_parent_id.items() - ): - for _idx_, entity in enumerate(_entities_): - if entity["_id"] == avalon_ent_by_name["_id"]: - found = (_parent_id_, _idx_) - break - - if found: - break - - if found: - _parent_id_, _idx_ = found - self._avalon_ents_by_parent_id[_parent_id_][ - _idx_] = avalon_ent_by_name - - if self._avalon_ents_by_id: - self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = ( - avalon_ent_by_name - ) - - if self._avalon_ents_by_name: - self._avalon_ents_by_name[name] = avalon_ent_by_name - - if self._avalon_ents: - found = None - project, entities = self._avalon_ents - for _idx_, _ent_ in enumerate(entities): - if _ent_["_id"] != avalon_ent_by_name["_id"]: - continue - found = _idx_ - break - - if found is not None: - entities[found] = avalon_ent_by_name - self._avalon_ents = project, entities - - pop_out_ents.append(ftrack_id) - continue - - mongo_id_configuration_id = self._mongo_id_configuration( - ent_info, - cust_attrs, - hier_attrs, - entity_type_conf_ids - ) - if not mongo_id_configuration_id: - self.log.warning(( - "BUG REPORT: Missing MongoID configuration for `{} < {} >`" - ).format(entity_type, ent_info["entityType"])) - continue - - _entity_key = collections.OrderedDict() - _entity_key["configuration_id"] = mongo_id_configuration_id - _entity_key["entity_id"] = ftrack_id - - self.process_session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - _entity_key, - "value", - ftrack_api.symbol.NOT_SET, - "" - ) - ) - - try: - # Commit changes of mongo_id to empty string - self.process_session.commit() - self.log.debug("Committing unsetting") - except Exception: - self.process_session.rollback() - # TODO logging - msg = ( - "Could not set value of Custom attribute, where mongo id" - " is stored, to empty string. Ftrack ids: \"{}\"" - ).format(", ".join(ent_infos.keys())) - self.log.warning(msg, exc_info=True) - - for ftrack_id in pop_out_ents: - ent_infos.pop(ftrack_id) - - # sort by parents length (same as by hierarchy level) - _ent_infos = sorted( - ent_infos.values(), - key=(lambda ent_info: len(ent_info.get("parents", []))) - ) - to_sync_by_id = collections.OrderedDict() - for ent_info in _ent_infos: - ft_id = ent_info["entityId"] - to_sync_by_id[ft_id] = self.ftrack_ents_by_id[ft_id] - - # cache regex success (for tasks) - for ftrack_id, entity in to_sync_by_id.items(): - if entity.entity_type.lower() == "project": - raise Exception(( - "Project can't be created with event handler!" 
- "This is a bug" - )) - parent_id = entity["parent_id"] - parent_avalon = self.avalon_ents_by_ftrack_id.get(parent_id) - if not parent_avalon: - # TODO logging - self.log.debug(( - "Skipping synchronization of entity" - " because parent was not found in Avalon DB <{}>" - ).format(self.get_ent_path(ftrack_id))) - continue - - is_synchonizable = True - name = entity["name"] - passed_regex = avalon_sync.check_regex( - name, "asset", schema_patterns=self.regex_schemas - ) - if not passed_regex: - self.regex_failed.append(ftrack_id) - is_synchonizable = False - - if name in self.avalon_ents_by_name: - self.duplicated.append(ftrack_id) - is_synchonizable = False - - if not is_synchonizable: - continue - - self.create_entity_in_avalon(entity, parent_avalon) - - def process_moved(self): - """ - Handles moved entities to different place in hierarchy. - (Not tasks - handled separately.) - """ - if not self.ftrack_moved: - return - - self.log.debug( - "Processing moved entities: {}".format(str(self.ftrack_moved)) - ) - - ftrack_moved = {k: v for k, v in sorted( - self.ftrack_moved.items(), - key=(lambda line: len( - (line[1].get("data", {}).get("parents") or []) - )) - )} - - for ftrack_id, ent_info in ftrack_moved.items(): - avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not avalon_ent: - continue - - new_parent_id = ent_info["changes"]["parent_id"]["new"] - old_parent_id = ent_info["changes"]["parent_id"]["old"] - - mongo_id = avalon_ent["_id"] - if self.changeability_by_mongo_id[mongo_id]: - par_av_ent = self.avalon_ents_by_ftrack_id.get(new_parent_id) - if not par_av_ent: - # TODO logging - # TODO report - ent_path_items = [self.cur_project["full_name"]] - ent_path_items.extend(avalon_ent["data"]["parents"]) - ent_path_items.append(avalon_ent["name"]) - ent_path = "/".join(ent_path_items) - - error_msg = ( - "New parent of entity is not synchronized to avalon" - ) - report_msg = ( - "{}||Parent in Avalon can't be changed. That" - " may cause issues. Please fix parent or move entity" - " under valid entity." - ).format(error_msg) - - self.report_items["warning"][report_msg].append(ent_path) - self.log.warning("{} <{}>".format(error_msg, ent_path)) - continue - - # THIS MUST HAPPEN AFTER CREATING NEW ENTITIES !!!! 
- # - because may be moved to new created entity - if "data" not in self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - - vis_par_id = None - ent_path_items = [self.cur_project["full_name"]] - if par_av_ent["type"].lower() != "project": - vis_par_id = par_av_ent["_id"] - ent_path_items.extend(par_av_ent["data"]["parents"]) - ent_path_items.append(par_av_ent["name"]) - - self.updates[mongo_id]["data"]["visualParent"] = vis_par_id - self.moved_in_avalon.append(mongo_id) - - ent_path_items.append(avalon_ent["name"]) - ent_path = "/".join(ent_path_items) - self.log.debug(( - "Parent of entity ({}) was changed in avalon <{}>" - ).format(str(mongo_id), ent_path) - ) - - else: - avalon_ent = self.avalon_ents_by_id[mongo_id] - avalon_parent_id = avalon_ent["data"]["visualParent"] - if avalon_parent_id is None: - avalon_parent_id = avalon_ent["parent"] - - avalon_parent = self.avalon_ents_by_id[avalon_parent_id] - parent_id = avalon_parent["data"]["ftrackId"] - - # For cases when parent was deleted at the same time - if parent_id in self.ftrack_recreated_mapping: - parent_id = ( - self.ftrack_recreated_mapping[parent_id] - ) - - ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) - if not ftrack_ent: - ftrack_ent = self.process_session.query( - self.entities_query_by_id.format( - self.cur_project["id"], ftrack_id - ) - ).one() - self.ftrack_ents_by_id[ftrack_id] = ftrack_ent - - if parent_id == ftrack_ent["parent_id"]: - continue - - ftrack_ent["parent_id"] = parent_id - try: - self.process_session.commit() - # TODO logging - # TODO report - msg = "Entity was moved back" - report_msg = ( - "{}||Entity can't be moved when" - " it or its children contain published data" - ).format(msg) - ent_path = self.get_ent_path(ftrack_id) - self.report_items["info"][report_msg].append(ent_path) - self.log.warning("{} <{}>".format(msg, ent_path)) - - except Exception: - self.process_session.rollback() - # TODO logging - # TODO report - error_msg = ( - "Couldn't moved the entity back to its original parent" - ) - report_msg = ( - "{}||Moved back because it is not possible to" - " move with an entity or it's parents, " - " if it already contained published data." - ).format(error_msg) - error_traceback = ( - str(traceback.format_exc()).replace("\n", "
") - ).replace(" ", " ") - - item_msg = ent_path + "
" + error_traceback - self.report_items["warning"][report_msg].append(item_msg) - self.log.warning( - "{}: \"{}\"".format(error_msg, ent_path), - exc_info=True - ) - - def process_updated(self): - """ - Only custom attributes changes should get here - """ - if not self.ftrack_updated: - return - - self.log.debug( - "Processing updated entities: {}".format(str(self.ftrack_updated)) - ) - - ent_infos = self.ftrack_updated - ftrack_mongo_mapping = {} - not_found_ids = [] - for ftrack_id, ent_info in ent_infos.items(): - avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not avalon_ent: - not_found_ids.append(ftrack_id) - continue - - ftrack_mongo_mapping[ftrack_id] = avalon_ent["_id"] - - for ftrack_id in not_found_ids: - ent_infos.pop(ftrack_id) - - if not ent_infos: - return - - cust_attrs, hier_attrs = self.avalon_cust_attrs - hier_attrs_by_key = { - attr["key"]: attr - for attr in hier_attrs - } - cust_attrs_by_obj_id = collections.defaultdict(dict) - for cust_attr in cust_attrs: - key = cust_attr["key"] - if key.startswith("avalon_"): - continue - - ca_ent_type = cust_attr["entity_type"] - - if ca_ent_type == "show": - cust_attrs_by_obj_id[ca_ent_type][key] = cust_attr - - elif ca_ent_type == "task": - obj_id = cust_attr["object_type_id"] - cust_attrs_by_obj_id[obj_id][key] = cust_attr - - for ftrack_id, ent_info in ent_infos.items(): - mongo_id = ftrack_mongo_mapping[ftrack_id] - entType = ent_info["entityType"] - ent_path = self.get_ent_path(ftrack_id) - if entType == "show": - ent_cust_attrs = cust_attrs_by_obj_id.get("show") - else: - obj_type_id = ent_info["objectTypeId"] - ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id) - - # Ftrack's entity_type does not have defined custom attributes - if ent_cust_attrs is None: - ent_cust_attrs = {} - - ent_changes = ent_info["changes"] - if "description" in ent_changes: - if "data" not in self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - self.updates[mongo_id]["data"]["description"] = ( - ent_changes["description"]["new"] or "" - ) - - for key, values in ent_changes.items(): - if key in hier_attrs_by_key: - self.hier_cust_attrs_changes[key].append(ftrack_id) - continue - - if key not in ent_cust_attrs: - continue - - value = values["new"] - new_value = self.convert_value_by_cust_attr_conf( - value, ent_cust_attrs[key] - ) - - if entType == "show" and key == "applications": - # Store apps to project't config - proj_apps, warnings = ( - avalon_sync.get_project_apps(new_value) - ) - if "config" not in self.updates[mongo_id]: - self.updates[mongo_id]["config"] = {} - self.updates[mongo_id]["config"]["apps"] = proj_apps - - for msg, items in warnings.items(): - if not msg or not items: - continue - self.report_items["warning"][msg] = items - continue - - if "data" not in self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - self.updates[mongo_id]["data"][key] = new_value - self.log.debug( - "Setting data value of \"{}\" to \"{}\" <{}>".format( - key, new_value, ent_path - ) - ) - - def convert_value_by_cust_attr_conf(self, value, cust_attr_conf): - type_id = cust_attr_conf["type_id"] - cust_attr_type_name = self.cust_attr_types_by_id[type_id]["name"] - ignored = ( - "expression", "notificationtype", "dynamic enumerator" - ) - if cust_attr_type_name in ignored: - return None - - if cust_attr_type_name == "text": - return value - - if cust_attr_type_name == "boolean": - if value == "1": - return True - if value == "0": - return False - return bool(value) - - if cust_attr_type_name == "date": - return 
arrow.get(value) - - cust_attr_config = json.loads(cust_attr_conf["config"]) - - if cust_attr_type_name == "number": - if cust_attr_config["isdecimal"]: - return float(value) - return int(value) - - if cust_attr_type_name == "enumerator": - if not cust_attr_config["multiSelect"]: - return value - return value.split(", ") - return value - - def process_hier_cleanup(self): - if ( - not self.moved_in_avalon and - not self.renamed_in_avalon and - not self.hier_cust_attrs_changes - ): - return - - parent_changes = [] - hier_cust_attrs_ids = [] - hier_cust_attrs_keys = [] - all_keys = False - for mongo_id in self.moved_in_avalon: - parent_changes.append(mongo_id) - hier_cust_attrs_ids.append(mongo_id) - all_keys = True - - for mongo_id in self.renamed_in_avalon: - if mongo_id not in parent_changes: - parent_changes.append(mongo_id) - - for key, ftrack_ids in self.hier_cust_attrs_changes.items(): - if key.startswith("avalon_"): - continue - for ftrack_id in ftrack_ids: - avalon_ent = self.avalon_ents_by_ftrack_id[ftrack_id] - mongo_id = avalon_ent["_id"] - if mongo_id in hier_cust_attrs_ids: - continue - hier_cust_attrs_ids.append(mongo_id) - if not all_keys and key not in hier_cust_attrs_keys: - hier_cust_attrs_keys.append(key) - - # Parents preparation *** - mongo_to_ftrack_parents = {} - missing_ftrack_ents = {} - for mongo_id in parent_changes: - avalon_ent = self.avalon_ents_by_id[mongo_id] - ftrack_id = avalon_ent["data"]["ftrackId"] - if ftrack_id not in self.ftrack_ents_by_id: - missing_ftrack_ents[ftrack_id] = mongo_id - continue - ftrack_ent = self.ftrack_ents_by_id[ftrack_id] - mongo_to_ftrack_parents[mongo_id] = len(ftrack_ent["link"]) - - if missing_ftrack_ents: - joine_ids = ", ".join( - ["\"{}\"".format(id) for id in missing_ftrack_ents.keys()] - ) - entities = self.process_session.query( - self.entities_query_by_id.format( - self.cur_project["id"], joine_ids - ) - ).all() - for entity in entities: - ftrack_id = entity["id"] - self.ftrack_ents_by_id[ftrack_id] = entity - mongo_id = missing_ftrack_ents[ftrack_id] - mongo_to_ftrack_parents[mongo_id] = len(entity["link"]) - - stored_parents_by_mongo = {} - # sort by hierarchy level - mongo_to_ftrack_parents = [k for k, v in sorted( - mongo_to_ftrack_parents.items(), - key=(lambda item: item[1]) - )] - self.log.debug( - "Updating parents and hieararchy because of name/parenting changes" - ) - for mongo_id in mongo_to_ftrack_parents: - avalon_ent = self.avalon_ents_by_id[mongo_id] - vis_par = avalon_ent["data"]["visualParent"] - if vis_par in stored_parents_by_mongo: - parents = [par for par in stored_parents_by_mongo[vis_par]] - if vis_par is not None: - parent_ent = self.avalon_ents_by_id[vis_par] - parents.append(parent_ent["name"]) - stored_parents_by_mongo[mongo_id] = parents - continue - - ftrack_id = avalon_ent["data"]["ftrackId"] - ftrack_ent = self.ftrack_ents_by_id[ftrack_id] - ent_path_items = [ent["name"] for ent in ftrack_ent["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - stored_parents_by_mongo[mongo_id] = parents - - for mongo_id, parents in stored_parents_by_mongo.items(): - avalon_ent = self.avalon_ents_by_id[mongo_id] - cur_par = avalon_ent["data"]["parents"] - if cur_par == parents: - continue - - if "data" not in self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - self.updates[mongo_id]["data"]["parents"] = parents - - # Skip custom attributes if didn't change - if not hier_cust_attrs_ids: - # TODO logging - self.log.debug( - "Hierarchical attributes were not changed. 
Skipping" - ) - self.update_entities() - return - - _, hier_attrs = self.avalon_cust_attrs - - # Hierarchical custom attributes preparation *** - hier_attr_key_by_id = { - attr["id"]: attr["key"] - for attr in hier_attrs - } - hier_attr_id_by_key = { - key: attr_id - for attr_id, key in hier_attr_key_by_id.items() - } - - if all_keys: - hier_cust_attrs_keys = [ - key - for key in hier_attr_id_by_key.keys() - if not key.startswith("avalon_") - ] - - mongo_ftrack_mapping = {} - cust_attrs_ftrack_ids = [] - # ftrack_parenting = collections.defaultdict(list) - entities_dict = collections.defaultdict(dict) - - children_queue = collections.deque() - parent_queue = collections.deque() - - for mongo_id in hier_cust_attrs_ids: - avalon_ent = self.avalon_ents_by_id[mongo_id] - parent_queue.append(avalon_ent) - ftrack_id = avalon_ent["data"]["ftrackId"] - if ftrack_id not in entities_dict: - entities_dict[ftrack_id] = { - "children": [], - "parent_id": None, - "hier_attrs": {} - } - - mongo_ftrack_mapping[mongo_id] = ftrack_id - cust_attrs_ftrack_ids.append(ftrack_id) - children_ents = self.avalon_ents_by_parent_id.get(mongo_id) or [] - for children_ent in children_ents: - _ftrack_id = children_ent["data"]["ftrackId"] - if _ftrack_id in entities_dict: - continue - - entities_dict[_ftrack_id] = { - "children": [], - "parent_id": None, - "hier_attrs": {} - } - # if _ftrack_id not in ftrack_parenting[ftrack_id]: - # ftrack_parenting[ftrack_id].append(_ftrack_id) - entities_dict[_ftrack_id]["parent_id"] = ftrack_id - if _ftrack_id not in entities_dict[ftrack_id]["children"]: - entities_dict[ftrack_id]["children"].append(_ftrack_id) - children_queue.append(children_ent) - - while children_queue: - avalon_ent = children_queue.popleft() - mongo_id = avalon_ent["_id"] - ftrack_id = avalon_ent["data"]["ftrackId"] - if ftrack_id in cust_attrs_ftrack_ids: - continue - - mongo_ftrack_mapping[mongo_id] = ftrack_id - cust_attrs_ftrack_ids.append(ftrack_id) - - children_ents = self.avalon_ents_by_parent_id.get(mongo_id) or [] - for children_ent in children_ents: - _ftrack_id = children_ent["data"]["ftrackId"] - if _ftrack_id in entities_dict: - continue - - entities_dict[_ftrack_id] = { - "children": [], - "parent_id": None, - "hier_attrs": {} - } - entities_dict[_ftrack_id]["parent_id"] = ftrack_id - if _ftrack_id not in entities_dict[ftrack_id]["children"]: - entities_dict[ftrack_id]["children"].append(_ftrack_id) - children_queue.append(children_ent) - - while parent_queue: - avalon_ent = parent_queue.popleft() - if avalon_ent["type"].lower() == "project": - continue - - ftrack_id = avalon_ent["data"]["ftrackId"] - - vis_par = avalon_ent["data"]["visualParent"] - if vis_par is None: - vis_par = avalon_ent["parent"] - - parent_ent = self.avalon_ents_by_id[vis_par] - parent_ftrack_id = parent_ent["data"].get("ftrackId") - if parent_ftrack_id is None: - self.handle_missing_ftrack_id(parent_ent) - parent_ftrack_id = parent_ent["data"].get("ftrackId") - if parent_ftrack_id is None: - continue - - if parent_ftrack_id not in entities_dict: - entities_dict[parent_ftrack_id] = { - "children": [], - "parent_id": None, - "hier_attrs": {} - } - - if ftrack_id not in entities_dict[parent_ftrack_id]["children"]: - entities_dict[parent_ftrack_id]["children"].append(ftrack_id) - - entities_dict[ftrack_id]["parent_id"] = parent_ftrack_id - - if parent_ftrack_id in cust_attrs_ftrack_ids: - continue - mongo_ftrack_mapping[vis_par] = parent_ftrack_id - cust_attrs_ftrack_ids.append(parent_ftrack_id) - # if ftrack_id not in 
ftrack_parenting[parent_ftrack_id]: - # ftrack_parenting[parent_ftrack_id].append(ftrack_id) - - parent_queue.append(parent_ent) - - # Prepare values to query - configuration_ids = set() - for key in hier_cust_attrs_keys: - configuration_ids.add(hier_attr_id_by_key[key]) - - values = query_custom_attributes( - self.process_session, - configuration_ids, - cust_attrs_ftrack_ids, - True - ) - - ftrack_project_id = self.cur_project["id"] - - attr_types_by_id = self.cust_attr_types_by_id - convert_types_by_id = {} - for attr in hier_attrs: - key = attr["key"] - if key not in hier_cust_attrs_keys: - continue - - type_id = attr["type_id"] - attr_id = attr["id"] - cust_attr_type_name = attr_types_by_id[type_id]["name"] - convert_type = avalon_sync.get_python_type_for_custom_attribute( - attr, cust_attr_type_name - ) - - convert_types_by_id[attr_id] = convert_type - default_value = attr["default"] - if key in FPS_KEYS: - try: - default_value = convert_to_fps(default_value) - except InvalidFpsValue: - pass - - entities_dict[ftrack_project_id]["hier_attrs"][key] = ( - attr["default"] - ) - - # PREPARE DATA BEFORE THIS - invalid_fps_items = [] - avalon_hier = [] - for item in values: - value = item["value"] - if value is None: - continue - entity_id = item["entity_id"] - configuration_id = item["configuration_id"] - - convert_type = convert_types_by_id[configuration_id] - key = hier_attr_key_by_id[configuration_id] - - if convert_type: - value = convert_type(value) - - if key in FPS_KEYS: - try: - value = convert_to_fps(value) - except InvalidFpsValue: - invalid_fps_items.append((entity_id, value)) - continue - entities_dict[entity_id]["hier_attrs"][key] = value - - if invalid_fps_items: - fps_msg = ( - "These entities have invalid fps value in custom attributes" - ) - items = [] - for entity_id, value in invalid_fps_items: - ent_path = self.get_ent_path(entity_id) - items.append("{} - \"{}\"".format(ent_path, value)) - self.report_items["error"][fps_msg] = items - - # Get dictionary with not None hierarchical values to pull to children - project_values = {} - for key, value in ( - entities_dict[ftrack_project_id]["hier_attrs"].items() - ): - if value is not None: - project_values[key] = value - - for key in avalon_hier: - value = entities_dict[ftrack_project_id]["avalon_attrs"][key] - if value is not None: - project_values[key] = value - - hier_down_queue = collections.deque() - hier_down_queue.append( - (project_values, ftrack_project_id) - ) - - while hier_down_queue: - hier_values, parent_id = hier_down_queue.popleft() - for child_id in entities_dict[parent_id]["children"]: - _hier_values = hier_values.copy() - for name in hier_cust_attrs_keys: - value = entities_dict[child_id]["hier_attrs"].get(name) - if value is not None: - _hier_values[name] = value - - entities_dict[child_id]["hier_attrs"].update(_hier_values) - hier_down_queue.append((_hier_values, child_id)) - - ftrack_mongo_mapping = {} - for mongo_id, ftrack_id in mongo_ftrack_mapping.items(): - ftrack_mongo_mapping[ftrack_id] = mongo_id - - for ftrack_id, data in entities_dict.items(): - mongo_id = ftrack_mongo_mapping[ftrack_id] - avalon_ent = self.avalon_ents_by_id[mongo_id] - ent_path = self.get_ent_path(ftrack_id) - # TODO logging - self.log.debug( - "Updating hierarchical attributes <{}>".format(ent_path) - ) - for key, value in data["hier_attrs"].items(): - if ( - key in avalon_ent["data"] and - avalon_ent["data"][key] == value - ): - continue - - self.log.debug("- {}: {}".format(key, value)) - if "data" not in 
self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - - self.updates[mongo_id]["data"][key] = value - - self.update_entities() - - def process_task_updates(self): - """ - Pull task information for selected ftrack ids to replace stored - existing in Avalon. - Solves problem of changing type (even Status in the future) of - task without storing ftrack id for task in the DB. (Which doesn't - bring much advantage currently and it could be troublesome for - all hosts or plugins (for example Nuke) to collect and store. - Returns: - None - """ - self.log.debug( - "Processing task changes for parents: {}".format( - self.modified_tasks_ftrackids - ) - ) - if not self.modified_tasks_ftrackids: - return - - joined_ids = ", ".join([ - "\"{}\"".format(ftrack_id) - for ftrack_id in self.modified_tasks_ftrackids - ]) - task_entities = self.process_session.query( - self.task_entities_query_by_parent_id.format( - self.cur_project["id"], joined_ids - ) - ).all() - - ftrack_mongo_mapping_found = {} - not_found_ids = [] - # Make sure all parents have updated tasks, as they may not have any - tasks_per_ftrack_id = { - ftrack_id: {} - for ftrack_id in self.modified_tasks_ftrackids - } - - # Query all task types at once - task_types = self.process_session.query(self.task_types_query).all() - task_types_by_id = { - task_type["id"]: task_type - for task_type in task_types - } - - # prepare all tasks per parentId, eg. Avalon asset record - for task_entity in task_entities: - task_type = task_types_by_id[task_entity["type_id"]] - ftrack_id = task_entity["parent_id"] - if ftrack_id not in tasks_per_ftrack_id: - tasks_per_ftrack_id[ftrack_id] = {} - - passed_regex = avalon_sync.check_regex( - task_entity["name"], "task", - schema_patterns=self.regex_schemas - ) - if not passed_regex: - self.regex_failed.append(task_entity["id"]) - continue - - tasks_per_ftrack_id[ftrack_id][task_entity["name"]] = { - "type": task_type["name"] - } - - # find avalon entity by parentId - # should be there as create was run first - for ftrack_id in tasks_per_ftrack_id.keys(): - avalon_entity = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not avalon_entity: - not_found_ids.append(ftrack_id) - continue - ftrack_mongo_mapping_found[ftrack_id] = avalon_entity["_id"] - - self._update_avalon_tasks( - ftrack_mongo_mapping_found, - tasks_per_ftrack_id - ) - - def update_entities(self): - """ - Update Avalon entities by mongo bulk changes. - Expects self.updates which are transferred to $set part of update - command. - Resets self.updates afterwards. 
- """ - mongo_changes_bulk = [] - for mongo_id, changes in self.updates.items(): - avalon_ent = self.avalon_ents_by_id[mongo_id] - is_project = avalon_ent["type"] == "project" - change_data = avalon_sync.from_dict_to_set(changes, is_project) - mongo_changes_bulk.append( - UpdateOne({"_id": mongo_id}, change_data) - ) - - if not mongo_changes_bulk: - return - - self.dbcon.bulk_write(mongo_changes_bulk) - self.updates = collections.defaultdict(dict) - - @property - def duplicated_report(self): - if not self.duplicated: - return [] - - ft_project = self.cur_project - duplicated_names = [] - for ftrack_id in self.duplicated: - ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) - if not ftrack_ent: - ftrack_ent = self.process_session.query( - self.entities_query_by_id.format( - ft_project["id"], ftrack_id - ) - ).one() - self.ftrack_ents_by_id[ftrack_id] = ftrack_ent - name = ftrack_ent["name"] - if name not in duplicated_names: - duplicated_names.append(name) - - joined_names = ", ".join( - ["\"{}\"".format(name) for name in duplicated_names] - ) - ft_ents = self.process_session.query( - self.entities_name_query_by_name.format( - ft_project["id"], joined_names - ) - ).all() - - ft_ents_by_name = collections.defaultdict(list) - for ft_ent in ft_ents: - name = ft_ent["name"] - ft_ents_by_name[name].append(ft_ent) - - if not ft_ents_by_name: - return [] - - subtitle = "Duplicated entity names:" - items = [] - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<p><i>NOTE: It is not allowed to use the same name" - " for multiple entities in the same project</i></p>
" - ) - }) - - for name, ents in ft_ents_by_name.items(): - items.append({ - "type": "label", - "value": "## {}".format(name) - }) - paths = [] - for ent in ents: - ftrack_id = ent["id"] - ent_path = "/".join([_ent["name"] for _ent in ent["link"]]) - avalon_ent = self.avalon_ents_by_id.get(ftrack_id) - - if avalon_ent: - additional = " (synchronized)" - if avalon_ent["name"] != name: - additional = " (synchronized as {})".format( - avalon_ent["name"] - ) - ent_path += additional - paths.append(ent_path) - - items.append({ - "type": "label", - "value": '
<p>{}</p>'.format("<br>
".join(paths)) - }) - - return items - - @property - def regex_report(self): - if not self.regex_failed: - return [] - - subtitle = "Entity names contain prohibited symbols:" - items = [] - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<p><i>NOTE: You can use Letters( a-Z )," - " Numbers( 0-9 ) and Underscore( _ )</i></p>
" - ) - }) - - ft_project = self.cur_project - for ftrack_id in self.regex_failed: - ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) - if not ftrack_ent: - ftrack_ent = self.process_session.query( - self.entities_query_by_id.format( - ft_project["id"], ftrack_id - ) - ).one() - self.ftrack_ents_by_id[ftrack_id] = ftrack_ent - - name = ftrack_ent["name"] - ent_path_items = [_ent["name"] for _ent in ftrack_ent["link"][:-1]] - ent_path_items.append("{}".format(name)) - ent_path = "/".join(ent_path_items) - items.append({ - "type": "label", - "value": "
<p>{} - {}</p>
".format(name, ent_path) - }) - - return items - - def report(self): - msg_len = len(self.duplicated) + len(self.regex_failed) - for msgs in self.report_items.values(): - msg_len += len(msgs) - - if msg_len == 0: - return - - items = [] - project_name = self.cur_project["full_name"] - title = "Synchronization report ({}):".format(project_name) - - keys = ["error", "warning", "info"] - for key in keys: - subitems = [] - if key == "warning": - subitems.extend(self.duplicated_report) - subitems.extend(self.regex_report) - - for _msg, _items in self.report_items[key].items(): - if not _items: - continue - - msg_items = _msg.split("||") - msg = msg_items[0] - subitems.append({ - "type": "label", - "value": "# {}".format(msg) - }) - - if len(msg_items) > 1: - for note in msg_items[1:]: - subitems.append({ - "type": "label", - "value": "
<p><i>NOTE: {}</i></p>
".format(note) - }) - - if isinstance(_items, str): - _items = [_items] - subitems.append({ - "type": "label", - "value": '
<p>{}</p>'.format("<br>
".join(_items)) - }) - - if items and subitems: - items.append(self.report_splitter) - - items.extend(subitems) - - self.show_interface( - items=items, - title=title, - event=self._cur_event - ) - return True - - def _update_avalon_tasks( - self, ftrack_mongo_mapping_found, tasks_per_ftrack_id - ): - """ - Prepare new "tasks" content for existing records in Avalon. - Args: - ftrack_mongo_mapping_found (dictionary): ftrack parentId to - Avalon _id mapping - tasks_per_ftrack_id (dictionary): task dictionaries per ftrack - parentId - - Returns: - None - """ - mongo_changes_bulk = [] - for ftrack_id, mongo_id in ftrack_mongo_mapping_found.items(): - filter = {"_id": mongo_id} - change_data = {"$set": {}} - change_data["$set"]["data.tasks"] = tasks_per_ftrack_id[ftrack_id] - mongo_changes_bulk.append(UpdateOne(filter, change_data)) - - if mongo_changes_bulk: - self.dbcon.bulk_write(mongo_changes_bulk) - - def _mongo_id_configuration( - self, - ent_info, - cust_attrs, - hier_attrs, - temp_dict - ): - # Use hierarchical mongo id attribute if possible. - if "_hierarchical" not in temp_dict: - hier_mongo_id_configuration_id = None - for attr in hier_attrs: - if attr["key"] == CUST_ATTR_ID_KEY: - hier_mongo_id_configuration_id = attr["id"] - break - temp_dict["_hierarchical"] = hier_mongo_id_configuration_id - - hier_mongo_id_configuration_id = temp_dict.get("_hierarchical") - if hier_mongo_id_configuration_id is not None: - return hier_mongo_id_configuration_id - - # Legacy part for cases that MongoID attribute is per entity type. - entity_type = ent_info["entity_type"] - mongo_id_configuration_id = temp_dict.get(entity_type) - if mongo_id_configuration_id is not None: - return mongo_id_configuration_id - - for attr in cust_attrs: - key = attr["key"] - if key != CUST_ATTR_ID_KEY: - continue - - if attr["entity_type"] != ent_info["entityType"]: - continue - - if ( - ent_info["entityType"] == "task" and - attr["object_type_id"] != ent_info["objectTypeId"] - ): - continue - - mongo_id_configuration_id = attr["id"] - break - - temp_dict[entity_type] = mongo_id_configuration_id - - return mongo_id_configuration_id - - -def register(session): - '''Register plugin. 
Called when used as a plugin.''' - SyncToAvalonEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py b/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py deleted file mode 100644 index d2b395a1a3..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py +++ /dev/null @@ -1,429 +0,0 @@ -import collections - -from openpype.client import get_project -from openpype_modules.ftrack.lib import BaseEvent - - -class TaskStatusToParent(BaseEvent): - settings_key = "status_task_to_parent" - - def launch(self, session, event): - """Propagates status from task to parent when changed.""" - - filtered_entities_info = self.filter_entities_info(event) - if not filtered_entities_info: - return - - for project_id, entities_info in filtered_entities_info.items(): - self.process_by_project(session, event, project_id, entities_info) - - def filter_entities_info(self, event): - # Filter if event contains relevant data - entities_info = event["data"].get("entities") - if not entities_info: - return - - filtered_entity_info = collections.defaultdict(list) - status_ids = set() - for entity_info in entities_info: - # Care only about tasks - if entity_info.get("entityType") != "task": - continue - - # Care only about changes of status - changes = entity_info.get("changes") - if not changes: - continue - statusid_changes = changes.get("statusid") - if not statusid_changes: - continue - - new_status_id = entity_info["changes"]["statusid"]["new"] - if ( - statusid_changes.get("old") is None - or new_status_id is None - ): - continue - - project_id = None - for parent_item in reversed(entity_info["parents"]): - if parent_item["entityType"] == "show": - project_id = parent_item["entityId"] - break - - if project_id: - filtered_entity_info[project_id].append(entity_info) - status_ids.add(new_status_id) - - return filtered_entity_info - - def process_by_project(self, session, event, project_id, entities_info): - # Get project name - project_name = self.get_project_name_from_event( - session, event, project_id - ) - if get_project(project_name) is None: - self.log.debug("Project not found in OpenPype. 
Skipping") - return - - # Load settings - project_settings = self.get_project_settings_from_event( - event, project_name - ) - - # Prepare loaded settings and check if can be processed - result = self.prepare_settings(project_settings, project_name) - if not result: - return - - # Unpack the result - parent_object_types, all_match, single_match = result - - # Prepare valid object type ids for object types from settings - object_types = session.query("select id, name from ObjectType").all() - object_type_id_by_low_name = { - object_type["name"].lower(): object_type["id"] - for object_type in object_types - } - - valid_object_type_ids = set() - for object_type_name in parent_object_types: - if object_type_name in object_type_id_by_low_name: - valid_object_type_ids.add( - object_type_id_by_low_name[object_type_name] - ) - else: - self.log.warning( - "Unknown object type \"{}\" set on project \"{}\".".format( - object_type_name, project_name - ) - ) - - if not valid_object_type_ids: - return - - # Prepare parent ids - parent_ids = set() - for entity_info in entities_info: - parent_id = entity_info["parentId"] - if parent_id: - parent_ids.add(parent_id) - - # Query parent ids by object type ids and parent ids - parent_entities = session.query( - ( - "select id, status_id, object_type_id, link from TypedContext" - " where id in ({}) and object_type_id in ({})" - ).format( - self.join_query_keys(parent_ids), - self.join_query_keys(valid_object_type_ids) - ) - ).all() - # Skip if none of parents match the filtering - if not parent_entities: - return - - obj_ids = set() - for entity in parent_entities: - obj_ids.add(entity["object_type_id"]) - - types_mapping = { - _type.lower(): _type - for _type in session.types - } - # Map object type id by lowered and modified object type name - object_type_name_by_id = {} - for object_type in object_types: - mapping_name = object_type["name"].lower().replace(" ", "") - obj_id = object_type["id"] - object_type_name_by_id[obj_id] = types_mapping[mapping_name] - - project_entity = session.get("Project", project_id) - project_schema = project_entity["project_schema"] - available_statuses_by_obj_id = {} - for obj_id in obj_ids: - obj_name = object_type_name_by_id[obj_id] - statuses = project_schema.get_statuses(obj_name) - statuses_by_low_name = { - status["name"].lower(): status - for status in statuses - } - valid = False - for name in all_match.keys(): - if name in statuses_by_low_name: - valid = True - break - - if not valid: - for item in single_match: - if item["new_status"] in statuses_by_low_name: - valid = True - break - if valid: - available_statuses_by_obj_id[obj_id] = statuses_by_low_name - - valid_parent_ids = set() - status_ids = set() - valid_parent_entities = [] - for entity in parent_entities: - if entity["object_type_id"] not in available_statuses_by_obj_id: - continue - - valid_parent_entities.append(entity) - valid_parent_ids.add(entity["id"]) - status_ids.add(entity["status_id"]) - - if not valid_parent_ids: - return - - task_entities = session.query( - ( - "select id, parent_id, status_id from TypedContext" - " where parent_id in ({}) and object_type_id is \"{}\"" - ).format( - self.join_query_keys(valid_parent_ids), - object_type_id_by_low_name["task"] - ) - ).all() - - # This should not happen but it is safer - if not task_entities: - return - - task_entities_by_parent_id = collections.defaultdict(list) - for task_entity in task_entities: - status_ids.add(task_entity["status_id"]) - parent_id = task_entity["parent_id"] - 
task_entities_by_parent_id[parent_id].append(task_entity) - - status_entities = session.query(( - "select id, name from Status where id in ({})" - ).format(self.join_query_keys(status_ids))).all() - - statuses_by_id = { - entity["id"]: entity - for entity in status_entities - } - - # New status determination logic - new_statuses_by_parent_id = self.new_status_by_all_task_statuses( - task_entities_by_parent_id, statuses_by_id, all_match - ) - - task_entities_by_id = { - task_entity["id"]: task_entity - for task_entity in task_entities - } - # Check if there are remaining any parents that does not have - # determined new status yet - remainder_tasks_by_parent_id = collections.defaultdict(list) - for entity_info in entities_info: - entity_id = entity_info["entityId"] - if entity_id not in task_entities_by_id: - continue - parent_id = entity_info["parentId"] - if ( - # Skip if already has determined new status - parent_id in new_statuses_by_parent_id - # Skip if parent is not in parent mapping - # - if was not found or parent type is not interesting - or parent_id not in task_entities_by_parent_id - ): - continue - - remainder_tasks_by_parent_id[parent_id].append( - task_entities_by_id[entity_id] - ) - - # Try to find new status for remained parents - new_statuses_by_parent_id.update( - self.new_status_by_remainders( - remainder_tasks_by_parent_id, - statuses_by_id, - single_match - ) - ) - - # If there are not new statuses then just skip - if not new_statuses_by_parent_id: - return - - parent_entities_by_id = { - parent_entity["id"]: parent_entity - for parent_entity in valid_parent_entities - } - for parent_id, new_status_name in new_statuses_by_parent_id.items(): - if not new_status_name: - continue - - parent_entity = parent_entities_by_id[parent_id] - ent_path = "/".join( - [ent["name"] for ent in parent_entity["link"]] - ) - - obj_id = parent_entity["object_type_id"] - statuses_by_low_name = available_statuses_by_obj_id.get(obj_id) - if not statuses_by_low_name: - continue - - new_status = statuses_by_low_name.get(new_status_name) - if not new_status: - self.log.warning(( - "\"{}\" Couldn't change status to \"{}\"." - " Status is not available for entity type \"{}\"." - ).format( - ent_path, new_status_name, parent_entity.entity_type - )) - continue - - current_status = parent_entity["status"] - # Do nothing if status is already set - if new_status["id"] == current_status["id"]: - self.log.debug( - "\"{}\" Status \"{}\" already set.".format( - ent_path, current_status["name"] - ) - ) - continue - - try: - parent_entity["status_id"] = new_status["id"] - session.commit() - self.log.info( - "\"{}\" changed status to \"{}\"".format( - ent_path, new_status["name"] - ) - ) - except Exception: - session.rollback() - self.log.warning( - "\"{}\" status couldn't be set to \"{}\"".format( - ent_path, new_status["name"] - ), - exc_info=True - ) - - def prepare_settings(self, project_settings, project_name): - event_settings = ( - project_settings["ftrack"]["events"][self.settings_key] - ) - - if not event_settings["enabled"]: - self.log.debug("Project \"{}\" has disabled {}.".format( - project_name, self.__class__.__name__ - )) - return - - _parent_object_types = event_settings["parent_object_types"] - if not _parent_object_types: - self.log.debug(( - "Project \"{}\" does not have set" - " parent object types filtering." 
- ).format(project_name)) - return - - _all_match = ( - event_settings["parent_status_match_all_task_statuses"] - ) - _single_match = ( - event_settings["parent_status_by_task_status"] - ) - - if not _all_match and not _single_match: - self.log.debug(( - "Project \"{}\" does not have set" - " parent status mappings." - ).format(project_name)) - return - - parent_object_types = [ - item.lower() - for item in _parent_object_types - ] - all_match = {} - for new_status_name, task_statuses in _all_match.items(): - all_match[new_status_name.lower()] = [ - status_name.lower() - for status_name in task_statuses - ] - - single_match = [] - for item in _single_match: - single_match.append({ - "new_status": item["new_status"].lower(), - "task_statuses": [ - status_name.lower() - for status_name in item["task_statuses"] - ] - }) - return parent_object_types, all_match, single_match - - def new_status_by_all_task_statuses( - self, tasks_by_parent_id, statuses_by_id, all_match - ): - """All statuses of parent entity must match specific status names. - - Only if all task statuses match the condition parent's status name is - determined. - """ - output = {} - for parent_id, task_entities in tasks_by_parent_id.items(): - task_statuses_lowered = set() - for task_entity in task_entities: - task_status = statuses_by_id[task_entity["status_id"]] - low_status_name = task_status["name"].lower() - task_statuses_lowered.add(low_status_name) - - new_status = None - for _new_status, task_statuses in all_match.items(): - valid_item = True - for status_name_low in task_statuses_lowered: - if status_name_low not in task_statuses: - valid_item = False - break - - if valid_item: - new_status = _new_status - break - - if new_status is not None: - output[parent_id] = new_status - - return output - - def new_status_by_remainders( - self, remainder_tasks_by_parent_id, statuses_by_id, single_match - ): - """By new task status can be determined new status of parent.""" - output = {} - if not remainder_tasks_by_parent_id: - return output - - for parent_id, task_entities in remainder_tasks_by_parent_id.items(): - if not task_entities: - continue - - # For cases there are multiple tasks in changes - # - task status which match any new status item by order in the - # list `single_match` is preferred - best_order = len(single_match) - best_order_status = None - for task_entity in task_entities: - task_status = statuses_by_id[task_entity["status_id"]] - low_status_name = task_status["name"].lower() - for order, item in enumerate(single_match): - if order >= best_order: - break - - if low_status_name in item["task_statuses"]: - best_order = order - best_order_status = item["new_status"] - break - - if best_order_status: - output[parent_id] = best_order_status - return output - - -def register(session): - TaskStatusToParent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py b/openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py deleted file mode 100644 index 91ee2410d7..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_task_to_version_status.py +++ /dev/null @@ -1,383 +0,0 @@ -import collections - -from openpype.client import get_project -from openpype_modules.ftrack.lib import BaseEvent - - -class TaskToVersionStatus(BaseEvent): - """Changes status of task's latest AssetVersions on its status change.""" - - settings_key = "status_task_to_version" - - # Attribute for caching session user id - _cached_user_id = None - - def 
is_event_invalid(self, session, event): - """Skip task status changes for session user changes. - - It is expected that there may be another event handler that set - version status to task in that case skip all events caused by same - user as session has to avoid infinite loop of status changes. - """ - # Cache user id of currently running session - if self._cached_user_id is None: - session_user_entity = session.query( - "User where username is \"{}\"".format(session.api_user) - ).first() - if not session_user_entity: - self.log.warning( - "Couldn't query Ftrack user with username \"{}\"".format( - session.api_user - ) - ) - return False - self._cached_user_id = session_user_entity["id"] - - # Skip processing if current session user was the user who created - # the event - user_info = event["source"].get("user") or {} - user_id = user_info.get("id") - - # Mark as invalid if user is unknown - if user_id is None: - return True - return user_id == self._cached_user_id - - def filter_event_entities(self, event): - """Filter if event contain relevant data. - - Event cares only about changes of `statusid` on `entity_type` "Task". - """ - - entities_info = event["data"].get("entities") - if not entities_info: - return - - filtered_entity_info = collections.defaultdict(list) - for entity_info in entities_info: - # Care only about tasks - if entity_info.get("entity_type") != "Task": - continue - - # Care only about changes of status - changes = entity_info.get("changes") or {} - statusid_changes = changes.get("statusid") or {} - if ( - statusid_changes.get("new") is None - or statusid_changes.get("old") is None - ): - continue - - # Get project id from entity info - project_id = None - for parent_item in reversed(entity_info["parents"]): - if parent_item["entityType"] == "show": - project_id = parent_item["entityId"] - break - - if project_id: - filtered_entity_info[project_id].append(entity_info) - - return filtered_entity_info - - def _get_ent_path(self, entity): - return "/".join( - [ent["name"] for ent in entity["link"]] - ) - - def launch(self, session, event): - '''Propagates status from version to task when changed''' - if self.is_event_invalid(session, event): - return - - filtered_entity_infos = self.filter_event_entities(event) - if not filtered_entity_infos: - return - - for project_id, entities_info in filtered_entity_infos.items(): - self.process_by_project(session, event, project_id, entities_info) - - def process_by_project(self, session, event, project_id, entities_info): - if not entities_info: - return - - project_name = self.get_project_name_from_event( - session, event, project_id - ) - if get_project(project_name) is None: - self.log.debug("Project not found in OpenPype. Skipping") - return - - # Load settings - project_settings = self.get_project_settings_from_event( - event, project_name - ) - - event_settings = ( - project_settings["ftrack"]["events"][self.settings_key] - ) - _status_mapping = event_settings["mapping"] - if not event_settings["enabled"]: - self.log.debug("Project \"{}\" has disabled {}.".format( - project_name, self.__class__.__name__ - )) - return - - if not _status_mapping: - self.log.debug(( - "Project \"{}\" does not have set status mapping for {}." 
- ).format(project_name, self.__class__.__name__)) - return - - status_mapping = { - key.lower(): value - for key, value in _status_mapping.items() - } - - asset_types_filter = event_settings["asset_types_filter"] - - task_ids = [ - entity_info["entityId"] - for entity_info in entities_info - ] - - last_asset_versions_by_task_id = ( - self.find_last_asset_versions_for_task_ids( - session, task_ids, asset_types_filter - ) - ) - - # Query Task entities for last asset versions - joined_filtered_ids = self.join_query_keys( - last_asset_versions_by_task_id.keys() - ) - if not joined_filtered_ids: - return - - task_entities = session.query( - "select status_id, link from Task where id in ({})".format( - joined_filtered_ids - ) - ).all() - if not task_entities: - return - - status_ids = set() - for task_entity in task_entities: - status_ids.add(task_entity["status_id"]) - - task_status_entities = session.query( - "select id, name from Status where id in ({})".format( - self.join_query_keys(status_ids) - ) - ).all() - task_status_name_by_id = { - status_entity["id"]: status_entity["name"] - for status_entity in task_status_entities - } - - # Final process of changing statuses - project_entity = session.get("Project", project_id) - av_statuses_by_low_name, av_statuses_by_id = ( - self.get_asset_version_statuses(project_entity) - ) - - asset_ids = set() - for asset_versions in last_asset_versions_by_task_id.values(): - for asset_version in asset_versions: - asset_ids.add(asset_version["asset_id"]) - - asset_entities = session.query( - "select name from Asset where id in ({})".format( - self.join_query_keys(asset_ids) - ) - ).all() - asset_names_by_id = { - asset_entity["id"]: asset_entity["name"] - for asset_entity in asset_entities - } - for task_entity in task_entities: - task_id = task_entity["id"] - status_id = task_entity["status_id"] - task_path = self._get_ent_path(task_entity) - - task_status_name = task_status_name_by_id[status_id] - task_status_name_low = task_status_name.lower() - - new_asset_version_status = None - mapped_status_names = status_mapping.get(task_status_name_low) - if mapped_status_names: - for status_name in mapped_status_names: - _status = av_statuses_by_low_name.get(status_name.lower()) - if _status: - new_asset_version_status = _status - break - - if not new_asset_version_status: - new_asset_version_status = av_statuses_by_low_name.get( - task_status_name_low - ) - # Skip if tasks status is not available to AssetVersion - if not new_asset_version_status: - self.log.debug(( - "AssetVersion does not have matching status to \"{}\"" - ).format(task_status_name)) - continue - - last_asset_versions = last_asset_versions_by_task_id[task_id] - for asset_version in last_asset_versions: - version = asset_version["version"] - self.log.debug(( - "Trying to change status of last AssetVersion {}" - " for task \"{}\"" - ).format(version, task_path)) - - asset_id = asset_version["asset_id"] - asset_type_name = asset_names_by_id[asset_id] - av_ent_path = task_path + " Asset {} AssetVersion {}".format( - asset_type_name, - version - ) - - # Skip if current AssetVersion's status is same - status_id = asset_version["status_id"] - current_status_name = av_statuses_by_id[status_id]["name"] - if current_status_name.lower() == task_status_name_low: - self.log.debug(( - "AssetVersion already has set status \"{}\". 
\"{}\"" - ).format(current_status_name, av_ent_path)) - continue - - new_status_id = new_asset_version_status["id"] - new_status_name = new_asset_version_status["name"] - # Skip if status is already same - if asset_version["status_id"] == new_status_id: - continue - - # Change the status - try: - asset_version["status_id"] = new_status_id - session.commit() - self.log.info("[ {} ] Status updated to [ {} ]".format( - av_ent_path, new_status_name - )) - except Exception: - session.rollback() - self.log.warning( - "[ {} ]Status couldn't be set to \"{}\"".format( - av_ent_path, new_status_name - ), - exc_info=True - ) - - def get_asset_version_statuses(self, project_entity): - """Status entities for AssetVersion from project's schema. - - Load statuses from project's schema and store them by id and name. - - Args: - project_entity (ftrack_api.Entity): Entity of ftrack's project. - - Returns: - tuple: 2 items are returned first are statuses by name - second are statuses by id. - """ - project_schema = project_entity["project_schema"] - # Get all available statuses for Task - statuses = project_schema.get_statuses("AssetVersion") - # map lowered status name with it's object - av_statuses_by_low_name = {} - av_statuses_by_id = {} - for status in statuses: - av_statuses_by_low_name[status["name"].lower()] = status - av_statuses_by_id[status["id"]] = status - - return av_statuses_by_low_name, av_statuses_by_id - - def find_last_asset_versions_for_task_ids( - self, session, task_ids, asset_types_filter - ): - """Find latest AssetVersion entities for task. - - Find first latest AssetVersion for task and all AssetVersions with - same version for the task. - - Args: - asset_versions (list): AssetVersion entities sorted by "version". - task_ids (list): Task ids. - asset_types_filter (list): Asset types short names that will be - used to filter AssetVersions. Filtering is skipped if entered - value is empty list. - """ - - # Allow event only on specific asset type names - asset_query_part = "" - if asset_types_filter: - # Query all AssetTypes - asset_types = session.query( - "select id, short from AssetType" - ).all() - # Store AssetTypes by id - asset_type_short_by_id = { - asset_type["id"]: asset_type["short"] - for asset_type in asset_types - } - - # Lower asset types from settings - # WARNING: not sure if is good idea to lower names as Ftrack may - # contain asset type with name "Scene" and "scene"! 
- asset_types_filter_low = set( - asset_types_name.lower() - for asset_types_name in asset_types_filter - ) - asset_type_ids = [] - for type_id, short in asset_type_short_by_id.items(): - # TODO log if asset type name is not found - if short.lower() in asset_types_filter_low: - asset_type_ids.append(type_id) - - # TODO log that none of asset type names were found in ftrack - if asset_type_ids: - asset_query_part = " and asset.type_id in ({})".format( - self.join_query_keys(asset_type_ids) - ) - - # Query tasks' AssetVersions - asset_versions = session.query(( - "select status_id, version, task_id, asset_id" - " from AssetVersion where task_id in ({}){}" - " order by version descending" - ).format(self.join_query_keys(task_ids), asset_query_part)).all() - - last_asset_versions_by_task_id = collections.defaultdict(list) - last_version_by_task_id = {} - not_finished_task_ids = set(task_ids) - for asset_version in asset_versions: - task_id = asset_version["task_id"] - # Check if task id is still in `not_finished_task_ids` - if task_id not in not_finished_task_ids: - continue - - version = asset_version["version"] - - # Find last version in `last_version_by_task_id` - last_version = last_version_by_task_id.get(task_id) - if last_version is None: - # If task id does not have version set yet then it's first - # AssetVersion for this task - last_version_by_task_id[task_id] = version - - elif last_version > version: - # Skip processing if version is lower than last version - # and pop task id from `not_finished_task_ids` - not_finished_task_ids.remove(task_id) - continue - - # Add AssetVersion entity to output dictionary - last_asset_versions_by_task_id[task_id].append(asset_version) - - return last_asset_versions_by_task_id - - -def register(session): - TaskToVersionStatus(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py b/openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py deleted file mode 100644 index 318e69f414..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_thumbnail_updates.py +++ /dev/null @@ -1,161 +0,0 @@ -import collections - -from openpype.client import get_project -from openpype_modules.ftrack.lib import BaseEvent - - -class ThumbnailEvents(BaseEvent): - settings_key = "thumbnail_updates" - - def launch(self, session, event): - """Updates thumbnails of entities from new AssetVersion.""" - filtered_entities = self.filter_entities(event) - if not filtered_entities: - return - - for project_id, entities_info in filtered_entities.items(): - self.process_project_entities( - session, event, project_id, entities_info - ) - - def process_project_entities( - self, session, event, project_id, entities_info - ): - project_name = self.get_project_name_from_event( - session, event, project_id - ) - if get_project(project_name) is None: - self.log.debug("Project not found in OpenPype. Skipping") - return - - # Load settings - project_settings = self.get_project_settings_from_event( - event, project_name - ) - - event_settings = ( - project_settings - ["ftrack"] - ["events"] - [self.settings_key] - ) - if not event_settings["enabled"]: - self.log.debug("Project \"{}\" does not have activated {}.".format( - project_name, self.__class__.__name__ - )) - return - - self.log.debug("Processing {} on project \"{}\".".format( - self.__class__.__name__, project_name - )) - - parent_levels = event_settings["levels"] - if parent_levels < 1: - self.log.debug( - "Project \"{}\" has parent levels set to {}. 
Skipping".format( - project_name, parent_levels - ) - ) - return - - asset_version_ids = set() - for entity in entities_info: - asset_version_ids.add(entity["entityId"]) - - # Do not use attribute `asset_version_entities` will be filtered - # to when `asset_versions_by_id` is filled - asset_version_entities = session.query(( - "select task_id, thumbnail_id from AssetVersion where id in ({})" - ).format(self.join_query_keys(asset_version_ids))).all() - - asset_versions_by_id = {} - for asset_version_entity in asset_version_entities: - if not asset_version_entity["thumbnail_id"]: - continue - entity_id = asset_version_entity["id"] - asset_versions_by_id[entity_id] = asset_version_entity - - if not asset_versions_by_id: - self.log.debug("None of asset versions has set thumbnail id.") - return - - entity_ids_by_asset_version_id = collections.defaultdict(list) - hierarchy_ids = set() - for entity_info in entities_info: - entity_id = entity_info["entityId"] - if entity_id not in asset_versions_by_id: - continue - - parent_ids = [] - counter = None - for parent_info in entity_info["parents"]: - if counter is not None: - if counter >= parent_levels: - break - parent_ids.append(parent_info["entityId"]) - counter += 1 - - elif parent_info["entityType"] == "asset": - counter = 0 - - for parent_id in parent_ids: - hierarchy_ids.add(parent_id) - entity_ids_by_asset_version_id[entity_id].append(parent_id) - - for asset_version_entity in asset_versions_by_id.values(): - task_id = asset_version_entity["task_id"] - if task_id: - hierarchy_ids.add(task_id) - asset_version_id = asset_version_entity["id"] - entity_ids_by_asset_version_id[asset_version_id].append( - task_id - ) - - entities = session.query(( - "select thumbnail_id, link from TypedContext where id in ({})" - ).format(self.join_query_keys(hierarchy_ids))).all() - entities_by_id = { - entity["id"]: entity - for entity in entities - } - - for version_id, version_entity in asset_versions_by_id.items(): - for entity_id in entity_ids_by_asset_version_id[version_id]: - entity = entities_by_id.get(entity_id) - if not entity: - continue - - entity["thumbnail_id"] = version_entity["thumbnail_id"] - self.log.info("Updating thumbnail for entity [ {} ]".format( - self.get_entity_path(entity) - )) - - try: - session.commit() - except Exception: - session.rollback() - - def filter_entities(self, event): - filtered_entities_info = {} - for entity_info in event["data"].get("entities", []): - action = entity_info.get("action") - if not action: - continue - - if ( - action == "remove" - or entity_info["entityType"].lower() != "assetversion" - or "thumbid" not in (entity_info.get("keys") or []) - ): - continue - - # Get project id from entity info - project_id = entity_info["parents"][-1]["entityId"] - if project_id not in filtered_entities_info: - filtered_entities_info[project_id] = [] - filtered_entities_info[project_id].append(entity_info) - return filtered_entities_info - - -def register(session): - ThumbnailEvents(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py deleted file mode 100644 index 9539a34f5e..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ /dev/null @@ -1,242 +0,0 @@ -import re -import subprocess - -from openpype.client import get_asset_by_id, get_asset_by_name -from openpype.settings import get_project_settings -from openpype.pipeline import Anatomy -from 
openpype_modules.ftrack.lib import BaseEvent -from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY - - -class UserAssigmentEvent(BaseEvent): - """ - This script will intercept user assignment / de-assignment event and - run shell script, providing as much context as possible. - - It expects configuration file ``presets/ftrack/user_assigment_event.json``. - In it, you define paths to scripts to be run for user assignment event and - for user-deassigment:: - { - "add": [ - "/path/to/script1", - "/path/to/script2" - ], - "remove": [ - "/path/to/script3", - "/path/to/script4" - ] - } - - Those scripts are executed in shell. Three arguments will be passed to - to them: - 1) user name of user (de)assigned - 2) path to workfiles of task user was (de)assigned to - 3) path to publish files of task user was (de)assigned to - """ - - def error(self, *err): - for e in err: - self.log.error(e) - - def _run_script(self, script, args): - """ - Run shell script with arguments as subprocess - - :param script: script path - :type script: str - :param args: list of arguments passed to script - :type args: list - :returns: return code - :rtype: int - """ - p = subprocess.call([script, args], shell=True) - return p - - def _get_task_and_user(self, session, action, changes): - """ - Get Task and User entities from Ftrack session - - :param session: ftrack session - :type session: ftrack_api.session - :param action: event action - :type action: str - :param changes: what was changed by event - :type changes: dict - :returns: User and Task entities - :rtype: tuple - """ - if not changes: - return None, None - - if action == 'add': - task_id = changes.get('context_id', {}).get('new') - user_id = changes.get('resource_id', {}).get('new') - - elif action == 'remove': - task_id = changes.get('context_id', {}).get('old') - user_id = changes.get('resource_id', {}).get('old') - - if not task_id: - return None, None - - if not user_id: - return None, None - - task = session.query('Task where id is "{}"'.format(task_id)).first() - user = session.query('User where id is "{}"'.format(user_id)).first() - - return task, user - - def _get_asset(self, task): - """ - Get asset from task entity - - :param task: Task entity - :type task: dict - :returns: Asset entity - :rtype: dict - """ - parent = task['parent'] - project_name = task["project"]["full_name"] - avalon_entity = None - parent_id = parent['custom_attributes'].get(CUST_ATTR_ID_KEY) - if parent_id: - avalon_entity = get_asset_by_id(project_name, parent_id) - - if not avalon_entity: - avalon_entity = get_asset_by_name(project_name, parent["name"]) - - if not avalon_entity: - msg = 'Entity "{}" not found in avalon database'.format( - parent['name'] - ) - self.error(msg) - return { - 'success': False, - 'message': msg - } - return avalon_entity - - def _get_hierarchy(self, asset): - """ - Get hierarchy from Asset entity - - :param asset: Asset entity - :type asset: dict - :returns: hierarchy string - :rtype: str - """ - return asset['data']['hierarchy'] - - def _get_template_data(self, task): - """ - Get data to fill template from task - - .. 
seealso:: :mod:`openpype.pipeline.Anatomy` - - :param task: Task entity - :type task: dict - :returns: data for anatomy template - :rtype: dict - """ - project_name = task['project']['full_name'] - project_code = task['project']['name'] - - # fill in template data - asset = self._get_asset(task) - t_data = { - 'project': { - 'name': project_name, - 'code': project_code - }, - 'asset': asset['name'], - 'task': task['name'], - 'hierarchy': self._get_hierarchy(asset) - } - - return t_data - - def launch(self, session, event): - if not event.get("data"): - return - - entities_info = event["data"].get("entities") - if not entities_info: - return - - # load shell scripts presets - tmp_by_project_name = {} - for entity_info in entities_info: - if entity_info.get('entity_type') != 'Appointment': - continue - - task_entity, user_entity = self._get_task_and_user( - session, - entity_info.get('action'), - entity_info.get('changes') - ) - - if not task_entity or not user_entity: - self.log.error("Task or User was not found.") - continue - - # format directories to pass to shell script - project_name = task_entity["project"]["full_name"] - project_data = tmp_by_project_name.get(project_name) or {} - if "scripts_by_action" not in project_data: - project_settings = get_project_settings(project_name) - _settings = ( - project_settings["ftrack"]["events"]["user_assignment"] - ) - project_data["scripts_by_action"] = _settings.get("scripts") - tmp_by_project_name[project_name] = project_data - - scripts_by_action = project_data["scripts_by_action"] - if not scripts_by_action: - continue - - if "anatomy" not in project_data: - project_data["anatomy"] = Anatomy(project_name) - tmp_by_project_name[project_name] = project_data - - anatomy = project_data["anatomy"] - data = self._get_template_data(task_entity) - anatomy_filled = anatomy.format(data) - # formatting work dir is easiest part as we can use whole path - work_dir = anatomy_filled["work"]["folder"] - # we also need publish but not whole - anatomy_filled.strict = False - publish = anatomy_filled["publish"]["folder"] - - # now find path to {asset} - m = re.search( - "(^.+?{})".format(data["asset"]), - publish - ) - - if not m: - msg = 'Cannot get part of publish path {}'.format(publish) - self.log.error(msg) - return { - 'success': False, - 'message': msg - } - publish_dir = m.group(1) - - username = user_entity["username"] - event_entity_action = entity_info["action"] - for script in scripts_by_action.get(event_entity_action): - self.log.info(( - "[{}] : running script for user {}" - ).format(event_entity_action, username)) - self._run_script(script, [username, work_dir, publish_dir]) - - return True - - -def register(session): - """ - Register plugin. Called when used as an plugin. 
- """ - - UserAssigmentEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py b/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py deleted file mode 100644 index fbe44bcba7..0000000000 --- a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py +++ /dev/null @@ -1,244 +0,0 @@ -from openpype.client import get_project -from openpype_modules.ftrack.lib import BaseEvent - - -class VersionToTaskStatus(BaseEvent): - """Propagates status from version to task when changed.""" - def launch(self, session, event): - # Filter event entities - # - output is dictionary where key is project id and event info in - # value - filtered_entities_info = self.filter_entity_info(event) - if not filtered_entities_info: - return - - for project_id, entities_info in filtered_entities_info.items(): - self.process_by_project(session, event, project_id, entities_info) - - def filter_entity_info(self, event): - filtered_entity_info = {} - for entity_info in event["data"].get("entities", []): - # Filter AssetVersions - if entity_info["entityType"] != "assetversion": - continue - - # Skip if statusid not in keys (in changes) - keys = entity_info.get("keys") - if not keys or "statusid" not in keys: - continue - - # Get new version task name - version_status_id = ( - entity_info - .get("changes", {}) - .get("statusid", {}) - .get("new", {}) - ) - - # Just check that `new` is set to any value - if not version_status_id: - continue - - # Get project id from entity info - project_id = entity_info["parents"][-1]["entityId"] - if project_id not in filtered_entity_info: - filtered_entity_info[project_id] = [] - filtered_entity_info[project_id].append(entity_info) - return filtered_entity_info - - def process_by_project(self, session, event, project_id, entities_info): - # Check for project data if event is enabled for event handler - project_name = self.get_project_name_from_event( - session, event, project_id - ) - if get_project(project_name) is None: - self.log.debug("Project not found in OpenPype. 
Skipping") - return - - # Load settings - project_settings = self.get_project_settings_from_event( - event, project_name - ) - - # Load status mapping from presets - event_settings = ( - project_settings["ftrack"]["events"]["status_version_to_task"] - ) - # Skip if event is not enabled or status mapping is not set - if not event_settings["enabled"]: - self.log.debug("Project \"{}\" has disabled {}".format( - project_name, self.__class__.__name__ - )) - return - - _status_mapping = event_settings["mapping"] or {} - status_mapping = { - key.lower(): value - for key, value in _status_mapping.items() - } - - asset_types_to_skip = [ - short_name.lower() - for short_name in event_settings["asset_types_to_skip"] - ] - - # Collect entity ids - asset_version_ids = set() - for entity_info in entities_info: - asset_version_ids.add(entity_info["entityId"]) - - # Query tasks for AssetVersions - _asset_version_entities = session.query( - "AssetVersion where task_id != none and id in ({})".format( - self.join_query_keys(asset_version_ids) - ) - ).all() - if not _asset_version_entities: - return - - # Filter asset versions by asset type and store their task_ids - task_ids = set() - asset_version_entities = [] - for asset_version in _asset_version_entities: - if asset_types_to_skip: - short_name = asset_version["asset"]["type"]["short"].lower() - if short_name in asset_types_to_skip: - continue - asset_version_entities.append(asset_version) - task_ids.add(asset_version["task_id"]) - - # Skip if `task_ids` are empty - if not task_ids: - return - - task_entities = session.query( - "select link from Task where id in ({})".format( - self.join_query_keys(task_ids) - ) - ).all() - task_entities_by_id = { - task_entiy["id"]: task_entiy - for task_entiy in task_entities - } - - # Prepare asset version by their id - asset_versions_by_id = { - asset_version["id"]: asset_version - for asset_version in asset_version_entities - } - - # Query status entities - status_ids = set() - for entity_info in entities_info: - # Skip statuses of asset versions without task - if entity_info["entityId"] not in asset_versions_by_id: - continue - status_ids.add(entity_info["changes"]["statusid"]["new"]) - - version_status_entities = session.query( - "select id, name from Status where id in ({})".format( - self.join_query_keys(status_ids) - ) - ).all() - - # Qeury statuses - statusese_by_obj_id = self.statuses_for_tasks( - session, task_entities, project_id - ) - # Prepare status names by their ids - status_name_by_id = { - status_entity["id"]: status_entity["name"] - for status_entity in version_status_entities - } - for entity_info in entities_info: - entity_id = entity_info["entityId"] - status_id = entity_info["changes"]["statusid"]["new"] - status_name = status_name_by_id.get(status_id) - if not status_name: - continue - status_name_low = status_name.lower() - - # Lower version status name and check if has mapping - new_status_names = [] - mapped = status_mapping.get(status_name_low) - if mapped: - new_status_names.extend(list(mapped)) - - new_status_names.append(status_name_low) - - self.log.debug( - "Processing AssetVersion status change: [ {} ]".format( - status_name - ) - ) - - asset_version = asset_versions_by_id[entity_id] - task_entity = task_entities_by_id[asset_version["task_id"]] - type_id = task_entity["type_id"] - - # Lower all names from presets - new_status_names = [name.lower() for name in new_status_names] - task_statuses_by_low_name = statusese_by_obj_id[type_id] - - new_status = None - for status_name in 
new_status_names: - if status_name not in task_statuses_by_low_name: - self.log.debug(( - "Task does not have status name \"{}\" available." - ).format(status_name)) - continue - - # store object of found status - new_status = task_statuses_by_low_name[status_name] - self.log.debug("Status to set: [ {} ]".format( - new_status["name"] - )) - break - - # Skip if status names were not found for paticulat entity - if not new_status: - self.log.warning( - "Any of statuses from presets can be set: {}".format( - str(new_status_names) - ) - ) - continue - # Get full path to task for logging - ent_path = "/".join([ent["name"] for ent in task_entity["link"]]) - - # Setting task status - try: - task_entity["status"] = new_status - session.commit() - self.log.debug("[ {} ] Status updated to [ {} ]".format( - ent_path, new_status["name"] - )) - except Exception: - session.rollback() - self.log.warning( - "[ {} ]Status couldn't be set".format(ent_path), - exc_info=True - ) - - def statuses_for_tasks(self, session, task_entities, project_id): - task_type_ids = set() - for task_entity in task_entities: - task_type_ids.add(task_entity["type_id"]) - - project_entity = session.get("Project", project_id) - project_schema = project_entity["project_schema"] - output = {} - for task_type_id in task_type_ids: - statuses = project_schema.get_statuses("Task", task_type_id) - output[task_type_id] = { - status["name"].lower(): status - for status in statuses - } - - return output - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - VersionToTaskStatus(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/ftrack/event_handlers_user/action_applications.py deleted file mode 100644 index 30399b463d..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_applications.py +++ /dev/null @@ -1,254 +0,0 @@ -import os - -from openpype.client import get_project -from openpype_modules.ftrack.lib import BaseAction -from openpype.lib.applications import ( - ApplicationManager, - ApplicationLaunchFailed, - ApplictionExecutableNotFound, - CUSTOM_LAUNCH_APP_GROUPS -) - - -class AppplicationsAction(BaseAction): - """Applications Action class.""" - - type = "Application" - label = "Application action" - - identifier = "openpype_app" - _launch_identifier_with_id = None - - icon_url = os.environ.get("OPENPYPE_STATICS_SERVER") - - def __init__(self, *args, **kwargs): - super(AppplicationsAction, self).__init__(*args, **kwargs) - - self.application_manager = ApplicationManager() - - @property - def discover_identifier(self): - if self._discover_identifier is None: - self._discover_identifier = "{}.{}".format( - self.identifier, self.process_identifier() - ) - return self._discover_identifier - - @property - def launch_identifier(self): - if self._launch_identifier is None: - self._launch_identifier = "{}.*".format(self.identifier) - return self._launch_identifier - - @property - def launch_identifier_with_id(self): - if self._launch_identifier_with_id is None: - self._launch_identifier_with_id = "{}.{}".format( - self.identifier, self.process_identifier() - ) - return self._launch_identifier_with_id - - def construct_requirements_validations(self): - # Override validation as this action does not need them - return - - def register(self): - """Registers the action, subscribing the discover and launch topics.""" - - discovery_subscription = ( - "topic=ftrack.action.discover and source.user.username={0}" - 
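# [editor's sketch] Identifiers produced by this wiring, assuming
# process_identifier() yields a per-process value such as "1234"
# (hypothetical): discovered items carry an actionIdentifier of the form
# "openpype_app.1234.<app_name>", while the launch subscription below
# listens to the wildcard "openpype_app.*"; _launch() then checks the
# prefix so only the process that advertised the action launches the app.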
).format(self.session.api_user) - - self.session.event_hub.subscribe( - discovery_subscription, - self._discover, - priority=self.priority - ) - - launch_subscription = ( - "topic=ftrack.action.launch" - " and data.actionIdentifier={0}" - " and source.user.username={1}" - ).format( - self.launch_identifier, - self.session.api_user - ) - self.session.event_hub.subscribe( - launch_subscription, - self._launch - ) - - def _discover(self, event): - entities = self._translate_event(event) - items = self.discover(self.session, entities, event) - if items: - return {"items": items} - - def discover(self, session, entities, event): - """Return true if we can handle the selected entities. - - Args: - session (ftrack_api.Session): Helps to query necessary data. - entities (list): Object of selected entities. - event (ftrack_api.Event): Ftrack event causing discover callback. - """ - - if ( - len(entities) != 1 - or entities[0].entity_type.lower() != "task" - ): - return False - - entity = entities[0] - if entity["parent"].entity_type.lower() == "project": - return False - - avalon_project_apps = event["data"].get("avalon_project_apps", None) - avalon_project_doc = event["data"].get("avalon_project_doc", None) - if avalon_project_apps is None: - if avalon_project_doc is None: - ft_project = self.get_project_from_entity(entity) - project_name = ft_project["full_name"] - avalon_project_doc = get_project(project_name) or False - event["data"]["avalon_project_doc"] = avalon_project_doc - - if not avalon_project_doc: - return False - - project_apps_config = avalon_project_doc["config"].get("apps", []) - avalon_project_apps = [ - app["name"] for app in project_apps_config - ] or False - event["data"]["avalon_project_apps"] = avalon_project_apps - - if not avalon_project_apps: - return False - - settings = self.get_project_settings_from_event( - event, avalon_project_doc["name"]) - - only_available = settings["applications"]["only_available"] - - items = [] - for app_name in avalon_project_apps: - app = self.application_manager.applications.get(app_name) - if not app or not app.enabled: - continue - - if app.group.name in CUSTOM_LAUNCH_APP_GROUPS: - continue - - # Skip applications without valid executables - if only_available and not app.find_executable(): - continue - - app_icon = app.icon - if app_icon and self.icon_url: - try: - app_icon = app_icon.format(self.icon_url) - except Exception: - self.log.warning(( - "Couldn't fill icon path. Icon template: \"{}\"" - " --- Icon url: \"{}\"" - ).format(app_icon, self.icon_url)) - app_icon = None - - items.append({ - "label": app.group.label, - "variant": app.label, - "description": None, - "actionIdentifier": "{}.{}".format( - self.launch_identifier_with_id, app_name - ), - "icon": app_icon - }) - - return items - - def _launch(self, event): - event_identifier = event["data"]["actionIdentifier"] - # Check if identifier is same - # - show message that acion may not be triggered on this machine - if event_identifier.startswith(self.launch_identifier_with_id): - return BaseAction._launch(self, event) - - return { - "success": False, - "message": ( - "There are running more OpenPype processes" - " where Application can be launched." - ) - } - - def launch(self, session, entities, event): - """Callback method for the custom action. 
- - return either a bool (True if successful or False if the action failed) - or a dictionary with they keys `message` and `success`, the message - should be a string and will be displayed as feedback to the user, - success should be a bool, True if successful or False if the action - failed. - - *session* is a `ftrack_api.Session` instance - - *entities* is a list of tuples each containing the entity type and - the entity id. If the entity is a hierarchical you will always get - the entity type TypedContext, once retrieved through a get operation - you will have the "real" entity type ie. example Shot, Sequence - or Asset Build. - - *event* the unmodified original event - """ - identifier = event["data"]["actionIdentifier"] - id_identifier_len = len(self.launch_identifier_with_id) + 1 - app_name = identifier[id_identifier_len:] - - entity = entities[0] - - task_name = entity["name"] - asset_name = entity["parent"]["name"] - project_name = entity["project"]["full_name"] - self.log.info(( - "Ftrack launch app: \"{}\" on Project/Asset/Task: {}/{}/{}" - ).format(app_name, project_name, asset_name, task_name)) - try: - self.application_manager.launch( - app_name, - project_name=project_name, - asset_name=asset_name, - task_name=task_name - ) - - except ApplictionExecutableNotFound as exc: - self.log.warning(exc.exc_msg) - return { - "success": False, - "message": exc.msg - } - - except ApplicationLaunchFailed as exc: - self.log.error(str(exc)) - return { - "success": False, - "message": str(exc) - } - - except Exception: - msg = "Unexpected failure of application launch {}".format( - self.label - ) - self.log.error(msg, exc_info=True) - return { - "success": False, - "message": msg - } - - return { - "success": True, - "message": "Launching {0}".format(self.label) - } - - -def register(session): - """Register action. Called when used as an event plugin.""" - AppplicationsAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py b/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py deleted file mode 100644 index 06d572601d..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -Taken from https://github.com/tokejepsen/ftrack-hooks/tree/master/batch_tasks -""" - -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class BatchTasksAction(BaseAction): - '''Batch Tasks action - `label` a descriptive string identifying your action. - `varaint` To group actions together, give them the same - label and specify a unique variant per action. - `identifier` a unique identifier for your action. - `description` a verbose descriptive text for you action - ''' - label = "Batch Task Create" - variant = None - identifier = "batch-tasks" - description = None - icon = statics_icon("ftrack", "action_icons", "BatchTasks.svg") - - def discover(self, session, entities, event): - '''Return true if we can handle the selected entities. - *session* is a `ftrack_api.Session` instance - *entities* is a list of tuples each containing the entity type and the - entity id. - If the entity is a hierarchical you will always get the entity - type TypedContext, once retrieved through a get operation you - will have the "real" entity type ie. example Shot, Sequence - or Asset Build. 
- *event* the unmodified original event - ''' - - not_allowed = ["assetversion", "project", "ReviewSession"] - if entities[0].entity_type.lower() in not_allowed: - return False - - return True - - - def get_task_form_items(self, session, number_of_tasks): - items = [] - - task_type_options = [ - {'label': task_type["name"], 'value': task_type["id"]} - for task_type in session.query("Type") - ] - - for index in range(0, number_of_tasks): - items.extend( - [ - { - 'value': '##Template for Task{0}##'.format( - index - ), - 'type': 'label' - }, - { - 'label': 'Type', - 'type': 'enumerator', - 'name': 'task_{0}_typeid'.format(index), - 'data': task_type_options - }, - { - 'label': 'Name', - 'type': 'text', - 'name': 'task_{0}_name'.format(index) - } - ] - ) - - return items - - def ensure_task(self, session, name, task_type, parent): - - # Query for existing task. - query = ( - 'Task where type.id is "{0}" and name is "{1}" ' - 'and parent.id is "{2}"' - ) - task = session.query( - query.format( - task_type["id"], - name, - parent["id"] - ) - ).first() - - # Create task. - if not task: - session.create( - "Task", - { - "name": name, - "type": task_type, - "parent": parent - } - ) - - def launch(self, session, entities, event): - '''Callback method for the custom action. - return either a bool ( True if successful or False if the action - failed ) or a dictionary with they keys `message` and `success`, the - message should be a string and will be displayed as feedback to the - user, success should be a bool, True if successful or False if the - action failed. - *session* is a `ftrack_api.Session` instance - *entities* is a list of tuples each containing the entity type and the - entity id. - If the entity is a hierarchical you will always get the entity - type TypedContext, once retrieved through a get operation you - will have the "real" entity type ie. example Shot, Sequence - or Asset Build. - *event* the unmodified original event - ''' - if 'values' in event['data']: - values = event['data']['values'] - if 'number_of_tasks' in values: - return { - 'success': True, - 'message': '', - 'items': self.get_task_form_items( - session, int(values['number_of_tasks']) - ) - } - else: - # Create tasks on each entity - for entity in entities: - for count in range(0, int(len(values.keys()) / 2)): - task_type = session.query( - 'Type where id is "{0}"'.format( - values["task_{0}_typeid".format(count)] - ) - ).one() - - # Get name, or assume task type in lower case as name. - name = values["task_{0}_name".format(count)] - if not name: - name = task_type["name"].lower() - - self.ensure_task(session, name, task_type, entity) - - session.commit() - - return { - 'success': True, - 'message': 'Action completed successfully' - } - - return { - 'success': True, - 'message': "", - 'items': [ - { - 'label': 'Number of tasks', - 'type': 'number', - 'name': 'number_of_tasks', - 'value': 2 - } - ] - } - - -def register(session): - '''Register action. 
Called when used as an event plugin.''' - - BatchTasksAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py b/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py deleted file mode 100644 index f06162bfda..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py +++ /dev/null @@ -1,108 +0,0 @@ -import collections -import ftrack_api -from openpype_modules.ftrack.lib import ( - BaseAction, - statics_icon, - get_openpype_attr -) - - -class CleanHierarchicalAttrsAction(BaseAction): - identifier = "clean.hierarchical.attr" - label = "OpenPype Admin" - variant = "- Clean hierarchical custom attributes" - description = "Unset empty hierarchical attribute values." - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - - all_project_entities_query = ( - "select id, name, parent_id, link" - " from TypedContext where project_id is \"{}\"" - ) - cust_attr_query = ( - "select value, entity_id from CustomAttributeValue" - " where entity_id in ({}) and configuration_id is \"{}\"" - ) - settings_key = "clean_hierarchical_attr" - - def discover(self, session, entities, event): - """Show only on project entity.""" - if ( - len(entities) != 1 - or entities[0].entity_type.lower() != "project" - ): - return False - - return self.valid_roles(session, entities, event) - - def launch(self, session, entities, event): - project = entities[0] - - user_message = "This may take some time" - self.show_message(event, user_message, result=True) - self.log.debug("Preparing entities for cleanup.") - - all_entities = session.query( - self.all_project_entities_query.format(project["id"]) - ).all() - - all_entities_ids = [ - "\"{}\"".format(entity["id"]) - for entity in all_entities - if entity.entity_type.lower() != "task" - ] - self.log.debug( - "Collected {} entities to process.".format(len(all_entities_ids)) - ) - entity_ids_joined = ", ".join(all_entities_ids) - - attrs, hier_attrs = get_openpype_attr(session) - - for attr in hier_attrs: - configuration_key = attr["key"] - self.log.debug( - "Looking for cleanup of custom attribute \"{}\"".format( - configuration_key - ) - ) - configuration_id = attr["id"] - values = session.query( - self.cust_attr_query.format( - entity_ids_joined, configuration_id - ) - ).all() - - data = {} - for item in values: - value = item["value"] - if value is None: - data[item["entity_id"]] = value - - if not data: - self.log.debug( - "Nothing to clean for \"{}\".".format(configuration_key) - ) - continue - - self.log.debug("Cleaning up {} values for \"{}\".".format( - len(data), configuration_key - )) - for entity_id, value in data.items(): - entity_key = collections.OrderedDict(( - ("configuration_id", configuration_id), - ("entity_id", entity_id) - )) - session.recorded_operations.push( - ftrack_api.operation.DeleteEntityOperation( - "CustomAttributeValue", - entity_key - ) - ) - session.commit() - - return True - - -def register(session): - '''Register plugin. 
Called when used as an plugin.''' - - CleanHierarchicalAttrsAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py b/openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py deleted file mode 100644 index 5ad5f10e8e..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_client_review_sort.py +++ /dev/null @@ -1,92 +0,0 @@ -from openpype_modules.ftrack.lib import BaseAction, statics_icon -try: - from functools import cmp_to_key -except Exception: - cmp_to_key = None - - -def existence_comaprison(item_a, item_b): - if not item_a and not item_b: - return 0 - if not item_a: - return 1 - if not item_b: - return -1 - return None - - -def task_name_sorter(item_a, item_b): - asset_version_a = item_a["asset_version"] - asset_version_b = item_b["asset_version"] - asset_version_comp = existence_comaprison(asset_version_a, asset_version_b) - if asset_version_comp is not None: - return asset_version_comp - - task_a = asset_version_a["task"] - task_b = asset_version_b["task"] - task_comp = existence_comaprison(task_a, task_b) - if task_comp is not None: - return task_comp - - if task_a["name"] > task_b["name"]: - return 1 - if task_a["name"] < task_b["name"]: - return -1 - return 0 - - -if cmp_to_key: - task_name_sorter = cmp_to_key(task_name_sorter) -task_name_kwarg_key = "key" if cmp_to_key else "cmp" -task_name_sort_kwargs = {task_name_kwarg_key: task_name_sorter} - - -class ClientReviewSort(BaseAction): - '''Custom action.''' - - #: Action identifier. - identifier = 'client.review.sort' - - #: Action label. - label = 'Sort Review' - - icon = statics_icon("ftrack", "action_icons", "SortReview.svg") - - def discover(self, session, entities, event): - ''' Validation ''' - - if (len(entities) == 0 or entities[0].entity_type != 'ReviewSession'): - return False - - return True - - def launch(self, session, entities, event): - entity = entities[0] - - # Get all objects from Review Session and all 'sort order' possibilities - obj_list = [] - sort_order_list = [] - for obj in entity['review_session_objects']: - obj_list.append(obj) - sort_order_list.append(obj['sort_order']) - - # Sort criteria - obj_list = sorted(obj_list, key=lambda k: k['version']) - obj_list.sort(**task_name_sort_kwargs) - obj_list = sorted(obj_list, key=lambda k: k['name']) - # Set 'sort order' to sorted list, so they are sorted in Ftrack also - for i in range(len(obj_list)): - obj_list[i]['sort_order'] = sort_order_list[i] - - session.commit() - - return { - 'success': True, - 'message': 'Client Review sorted!' - } - - -def register(session): - '''Register action. 
Called when used as an event plugin.''' - - ClientReviewSort(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_component_open.py b/openpype/modules/ftrack/event_handlers_user/action_component_open.py deleted file mode 100644 index 0efade9d8f..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_component_open.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import sys -import subprocess -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class ComponentOpen(BaseAction): - '''Custom action.''' - - # Action identifier - identifier = 'component.open' - # Action label - label = 'Open File' - # Action icon - icon = statics_icon("ftrack", "action_icons", "ComponentOpen.svg") - - def discover(self, session, entities, event): - ''' Validation ''' - if len(entities) != 1 or entities[0].entity_type != 'FileComponent': - return False - - return True - - def launch(self, session, entities, event): - - entity = entities[0] - - # Return error if component is on ftrack server - location_name = entity['component_locations'][0]['location']['name'] - if location_name == 'ftrack.server': - return { - 'success': False, - 'message': "This component is stored on ftrack server!" - } - - # Get component filepath - # TODO with locations it will be different??? - fpath = entity['component_locations'][0]['resource_identifier'] - fpath = os.path.normpath(os.path.dirname(fpath)) - - if os.path.isdir(fpath): - if 'win' in sys.platform: # windows - subprocess.Popen('explorer "%s"' % fpath) - elif sys.platform == 'darwin': # macOS - subprocess.Popen(['open', fpath]) - else: # linux - try: - subprocess.Popen(['xdg-open', fpath]) - except OSError: - raise OSError('unsupported xdg-open call??') - else: - return { - 'success': False, - 'message': "Didn't find file: " + fpath - } - - return { - 'success': True, - 'message': 'Component folder Opened' - } - - -def register(session): - '''Register action. Called when used as an event plugin.''' - - ComponentOpen(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py deleted file mode 100644 index 471a8c4182..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ /dev/null @@ -1,811 +0,0 @@ -import collections -import json -import arrow -import ftrack_api -from openpype_modules.ftrack.lib import ( - BaseAction, - statics_icon, - - CUST_ATTR_ID_KEY, - CUST_ATTR_GROUP, - CUST_ATTR_TOOLS, - CUST_ATTR_APPLICATIONS, - CUST_ATTR_INTENT, - FPS_KEYS, - - default_custom_attributes_definition, - app_definitions_from_app_manager, - tool_definitions_from_app_manager -) - -from openpype.settings import get_system_settings -from openpype.lib import ApplicationManager - -""" -This action creates/updates custom attributes. -## First part take care about special attributes - - `avalon_mongo_id` for storing Avalon MongoID - - `applications` based on applications usages - - `tools` based on tools usages - -## Second part is based on json file in ftrack module. -File location: `~/OpenPype/pype/modules/ftrack/ftrack_custom_attributes.json` - -Data in json file is nested dictionary. Keys in first dictionary level -represents Ftrack entity type (task, show, assetversion, user, list, asset) -and dictionary value define attribute. - -There is special key for hierchical attributes `is_hierarchical`. 
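[editor's note] As a hedged orientation: every definition described below is
eventually turned into a single `session.create` call on ftrack's
"CustomAttributeConfiguration" entity (see `process_attribute` further down).
A minimal, illustrative sketch of the equivalent standalone call, with
placeholder values that are not taken from the json file:

```
import ftrack_api

# Credentials are read from FTRACK_SERVER / FTRACK_API_USER / FTRACK_API_KEY
session = ftrack_api.Session()

# Entities referenced by a definition are queried up-front, as the action does
attr_type = session.query(
    'CustomAttributeType where name is "number"'
).one()
group = session.query(
    'CustomAttributeGroup where name is "pype"'
).first()

session.create("CustomAttributeConfiguration", {
    "key": "fps",                     # definition key
    "label": "FPS",                   # required label
    "type": attr_type,                # one of the types listed below
    "config": '{"isdecimal": true}',  # json-encoded, per-type config
    "group": group,                   # CUST_ATTR_GROUP ("pype")
    "is_hierarchical": True,          # from the "is_hierarchical" section
    "entity_type": "show",            # hierarchical attrs default to "show"
})
session.commit()
```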
- -Entity types `task` requires to define task object type (Folder, Shot, -Sequence, Task, Library, Milestone, Episode, Asset Build, etc.) at second -dictionary level, task's attributes are nested more. - -*** Not Changeable ********************************************************* - -group (string) - - name of group - - based on attribute `openpype_modules.ftrack.lib.CUST_ATTR_GROUP` - - "pype" by default - -*** Required *************************************************************** - -label (string) - - label that will show in ftrack - -key (string) - - must contain only chars [a-z0-9_] - -type (string) - - type of custom attribute - - possibilities: - text, boolean, date, enumerator, dynamic enumerator, number - -*** Required with conditions *********************************************** - -config (dictionary) - - for each attribute type different requirements and possibilities: - - enumerator: - multiSelect = True/False(default: False) - data = {key_1:value_1,key_2:value_2,..,key_n:value_n} - - 'data' is Required value with enumerator - - 'key' must contain only chars [a-z0-9_] - - - number: - isdecimal = True/False(default: False) - - - text: - markdown = True/False(default: False) - -*** Presetable keys ********************************************************** - -write_security_roles/read_security_roles (array of strings) - - default: ["ALL"] - - strings should be role names (e.g.: ["API", "Administrator"]) - - if set to ["ALL"] - all roles will be available - - if first is 'except' - roles will be set to all except roles in array - - Warning: Be careful with except - roles can be different by company - - example: - write_security_roles = ["except", "User"] - read_security_roles = ["ALL"] # (User is can only read) - -default - - default: None - - sets default value for custom attribute: - - text -> string - - number -> integer - - enumerator -> array with string of key/s - - boolean -> bool true/false - - date -> string in format: 'YYYY.MM.DD' or 'YYYY.MM.DD HH:mm:ss' - - example: "2018.12.24" / "2018.1.1 6:0:0" - - dynamic enumerator -> DON'T HAVE DEFAULT VALUE!!! - -Example: -``` -"show": { - "avalon_auto_sync": { - "label": "Avalon auto-sync", - "type": "boolean", - "write_security_roles": ["API", "Administrator"], - "read_security_roles": ["API", "Administrator"] - } -}, -"is_hierarchical": { - "fps": { - "label": "FPS", - "type": "number", - "config": {"isdecimal": true} - } -}, -"task": { - "library": { - "my_attr_name": { - "label": "My Attr", - "type": "number" - } - } -} -``` -""" - - -class CustAttrException(Exception): - pass - - -class CustomAttributes(BaseAction): - '''Edit meta data action.''' - - #: Action identifier. - identifier = 'create.update.attributes' - #: Action label. - label = "OpenPype Admin" - variant = '- Create/Update Custom Attributes' - #: Action description. 
- description = 'Creates required custom attributes in ftrack' - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - settings_key = "create_update_attributes" - - required_keys = ("key", "label", "type") - - presetable_keys = ( - "default", - "write_security_roles", - "read_security_roles" - ) - hierarchical_key = "is_hierarchical" - - type_posibilities = ( - "text", "boolean", "date", "enumerator", - "dynamic enumerator", "number" - ) - - def discover(self, session, entities, event): - ''' - Validation - - action is only for Administrators - ''' - return self.valid_roles(session, entities, event) - - def launch(self, session, entities, event): - # JOB SETTINGS - userId = event['source']['user']['id'] - user = session.query('User where id is ' + userId).one() - - job = session.create('Job', { - 'user': user, - 'status': 'running', - 'data': json.dumps({ - 'description': 'Custom Attribute creation.' - }) - }) - session.commit() - - self.app_manager = ApplicationManager() - - try: - self.prepare_global_data(session) - self.avalon_mongo_id_attributes(session, event) - self.applications_attribute(event) - self.tools_attribute(event) - self.intent_attribute(event) - self.custom_attributes_from_file(event) - - job['status'] = 'done' - session.commit() - - except Exception: - session.rollback() - job["status"] = "failed" - session.commit() - self.log.error( - "Creating custom attributes failed ({})", exc_info=True - ) - - return True - - def prepare_global_data(self, session): - self.types_per_name = { - attr_type["name"].lower(): attr_type - for attr_type in session.query("CustomAttributeType").all() - } - - self.security_roles = { - role["name"].lower(): role - for role in session.query("SecurityRole").all() - } - - object_types = session.query("ObjectType").all() - self.object_types_per_id = { - object_type["id"]: object_type for object_type in object_types - } - self.object_types_per_name = { - object_type["name"].lower(): object_type - for object_type in object_types - } - - self.groups = {} - - self.ftrack_settings = get_system_settings()["modules"]["ftrack"] - self.attrs_settings = self.prepare_attribute_settings() - - def prepare_attribute_settings(self): - output = {} - attr_settings = self.ftrack_settings["custom_attributes"] - for entity_type, attr_data in attr_settings.items(): - # Lower entity type - entity_type = entity_type.lower() - # Just store if entity type is not "task" - if entity_type != "task": - output[entity_type] = attr_data - continue - - # Prepare empty dictionary for entity type if not set yet - if entity_type not in output: - output[entity_type] = {} - - # Store presets per lowered object type - for obj_type, _preset in attr_data.items(): - output[entity_type][obj_type.lower()] = _preset - - return output - - def avalon_mongo_id_attributes(self, session, event): - self.create_hierarchical_mongo_attr(session, event) - - hierarchical_attr, object_type_attrs = ( - self.mongo_id_custom_attributes(session) - ) - if object_type_attrs: - self.convert_mongo_id_to_hierarchical( - hierarchical_attr, object_type_attrs, session, event - ) - - def mongo_id_custom_attributes(self, session): - cust_attrs_query = ( - "select id, entity_type, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where key = \"{}\"" - ).format(CUST_ATTR_ID_KEY) - - mongo_id_avalon_attr = session.query(cust_attrs_query).all() - heirarchical_attr = None - object_type_attrs = [] - for cust_attr in mongo_id_avalon_attr: - if cust_attr["is_hierarchical"]: - 
heirarchical_attr = cust_attr - - else: - object_type_attrs.append(cust_attr) - - return heirarchical_attr, object_type_attrs - - def create_hierarchical_mongo_attr(self, session, event): - # Set security roles for attribute - data = { - "key": CUST_ATTR_ID_KEY, - "label": "Avalon/Mongo ID", - "type": "text", - "default": "", - "group": CUST_ATTR_GROUP, - "is_hierarchical": True, - "config": {"markdown": False} - } - - self.process_attr_data(data, event) - - def convert_mongo_id_to_hierarchical( - self, hierarchical_attr, object_type_attrs, session, event - ): - user_msg = "Converting old custom attributes. This may take some time." - self.show_message(event, user_msg, True) - self.log.info(user_msg) - - object_types_per_id = { - object_type["id"]: object_type - for object_type in session.query("ObjectType").all() - } - - cust_attr_query = ( - "select value, entity_id from CustomAttributeValue" - " where configuration_id is {}" - ) - for attr_def in object_type_attrs: - attr_ent_type = attr_def["entity_type"] - if attr_ent_type == "show": - entity_type_label = "Project" - elif attr_ent_type == "task": - entity_type_label = ( - object_types_per_id[attr_def["object_type_id"]]["name"] - ) - else: - self.log.warning( - "Unsupported entity type: \"{}\". Skipping.".format( - attr_ent_type - ) - ) - continue - - self.log.debug(( - "Converting Avalon MongoID attr for Entity type \"{}\"." - ).format(entity_type_label)) - values = session.query( - cust_attr_query.format(attr_def["id"]) - ).all() - for value in values: - table_values = collections.OrderedDict([ - ("configuration_id", hierarchical_attr["id"]), - ("entity_id", value["entity_id"]) - ]) - - session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - table_values, - "value", - ftrack_api.symbol.NOT_SET, - value["value"] - ) - ) - - try: - session.commit() - - except Exception: - session.rollback() - self.log.warning( - ( - "Couldn't transfer Avalon Mongo ID" - " attribute for entity type \"{}\"." - ).format(entity_type_label), - exc_info=True - ) - - try: - session.delete(attr_def) - session.commit() - - except Exception: - session.rollback() - self.log.warning( - ( - "Couldn't delete Avalon Mongo ID" - " attribute for entity type \"{}\"." - ).format(entity_type_label), - exc_info=True - ) - - def applications_attribute(self, event): - apps_data = app_definitions_from_app_manager(self.app_manager) - - applications_custom_attr_data = { - "label": "Applications", - "key": CUST_ATTR_APPLICATIONS, - "type": "enumerator", - "entity_type": "show", - "group": CUST_ATTR_GROUP, - "config": { - "multiselect": True, - "data": apps_data - } - } - self.process_attr_data(applications_custom_attr_data, event) - - def tools_attribute(self, event): - tools_data = tool_definitions_from_app_manager(self.app_manager) - - tools_custom_attr_data = { - "label": "Tools", - "key": CUST_ATTR_TOOLS, - "type": "enumerator", - "is_hierarchical": True, - "group": CUST_ATTR_GROUP, - "config": { - "multiselect": True, - "data": tools_data - } - } - self.process_attr_data(tools_custom_attr_data, event) - - def intent_attribute(self, event): - intent_key_values = self.ftrack_settings["intent"]["items"] - - intent_values = [] - for key, label in intent_key_values.items(): - if not key or not label: - self.log.info(( - "Skipping intent row: {{\"{}\": \"{}\"}}" - " because of empty key or label." 
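# [editor's sketch] Shape of the transformation around this loop, with
# illustrative settings values: an intent "items" mapping such as
#     {"wip": "Work in Progress", "final": "Final"}
# becomes enumerator data
#     [{"wip": "Work in Progress"}, {"final": "Final"}]
# i.e. one single-pair dict per option; rows with an empty key or label
# are skipped with the log message above.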
- ).format(key, label)) - continue - - intent_values.append({key: label}) - - if not intent_values: - return - - intent_custom_attr_data = { - "label": "Intent", - "key": CUST_ATTR_INTENT, - "type": "enumerator", - "entity_type": "assetversion", - "group": CUST_ATTR_GROUP, - "config": { - "multiselect": False, - "data": intent_values - } - } - self.process_attr_data(intent_custom_attr_data, event) - - def custom_attributes_from_file(self, event): - # Load json with custom attributes configurations - cust_attr_def = default_custom_attributes_definition() - attrs_data = [] - - # Prepare data of hierarchical attributes - hierarchical_attrs = cust_attr_def.pop(self.hierarchical_key, {}) - for key, cust_attr_data in hierarchical_attrs.items(): - cust_attr_data["key"] = key - cust_attr_data["is_hierarchical"] = True - attrs_data.append(cust_attr_data) - - # Prepare data of entity specific attributes - for entity_type, cust_attr_datas in cust_attr_def.items(): - if entity_type.lower() != "task": - for key, cust_attr_data in cust_attr_datas.items(): - cust_attr_data["key"] = key - cust_attr_data["entity_type"] = entity_type - attrs_data.append(cust_attr_data) - continue - - # Task should have nested level for object type - for object_type, _cust_attr_datas in cust_attr_datas.items(): - for key, cust_attr_data in _cust_attr_datas.items(): - cust_attr_data["key"] = key - cust_attr_data["entity_type"] = entity_type - cust_attr_data["object_type"] = object_type - attrs_data.append(cust_attr_data) - - # Process prepared data - for cust_attr_data in attrs_data: - # Add group - cust_attr_data["group"] = CUST_ATTR_GROUP - self.process_attr_data(cust_attr_data, event) - - def presets_for_attr_data(self, attr_data): - output = {} - - attr_key = attr_data["key"] - if attr_data.get("is_hierarchical"): - entity_key = self.hierarchical_key - else: - entity_key = attr_data["entity_type"] - - entity_settings = self.attrs_settings.get(entity_key) or {} - if entity_key.lower() == "task": - object_type = attr_data["object_type"] - entity_settings = entity_settings.get(object_type.lower()) or {} - - key_settings = entity_settings.get(attr_key) or {} - for key, value in key_settings.items(): - if key in self.presetable_keys and value: - output[key] = value - return output - - def process_attr_data(self, cust_attr_data, event): - attr_settings = self.presets_for_attr_data(cust_attr_data) - cust_attr_data.update(attr_settings) - - try: - data = {} - # Get key, label, type - data.update(self.get_required(cust_attr_data)) - # Get hierarchical/ entity_type/ object_id - data.update(self.get_entity_type(cust_attr_data)) - # Get group, default, security roles - data.update(self.get_optional(cust_attr_data)) - # Process data - self.process_attribute(data) - - except CustAttrException as cae: - cust_attr_name = cust_attr_data.get("label", cust_attr_data["key"]) - - if cust_attr_name: - msg = 'Custom attribute error "{}" - {}'.format( - cust_attr_name, str(cae) - ) - else: - msg = 'Custom attribute error - {}'.format(str(cae)) - self.log.warning(msg, exc_info=True) - self.show_message(event, msg) - - def process_attribute(self, data): - existing_attrs = self.session.query(( - "select is_hierarchical, key, type, entity_type, object_type_id" - " from CustomAttributeConfiguration" - )).all() - matching = [] - is_hierarchical = data.get("is_hierarchical", False) - for attr in existing_attrs: - if ( - is_hierarchical != attr["is_hierarchical"] - or attr["key"] != data["key"] - ): - continue - - if attr["type"]["name"] != 
data["type"]["name"]: - if data["key"] in FPS_KEYS and attr["type"]["name"] == "text": - self.log.info("Kept 'fps' as text custom attribute.") - return - continue - - if is_hierarchical: - matching.append(attr) - - elif "object_type_id" in data: - if ( - attr["entity_type"] == data["entity_type"] and - attr["object_type_id"] == data["object_type_id"] - ): - matching.append(attr) - else: - if attr["entity_type"] == data["entity_type"]: - matching.append(attr) - - if len(matching) == 0: - self.session.create("CustomAttributeConfiguration", data) - self.session.commit() - self.log.debug( - "Custom attribute \"{}\" created".format(data["label"]) - ) - - elif len(matching) == 1: - attr_update = matching[0] - for key in data: - if key not in ( - "is_hierarchical", "entity_type", "object_type_id" - ): - attr_update[key] = data[key] - - self.session.commit() - self.log.debug( - "Custom attribute \"{}\" updated".format(data["label"]) - ) - - else: - raise CustAttrException(( - "Custom attribute is duplicated. Key: \"{}\" Type: \"{}\"" - ).format(data["key"], data["type"]["name"])) - - def get_required(self, attr): - output = {} - for key in self.required_keys: - if key not in attr: - raise CustAttrException( - "BUG: Key \"{}\" is required".format(key) - ) - - if attr['type'].lower() not in self.type_posibilities: - raise CustAttrException( - 'Type {} is not valid'.format(attr['type']) - ) - - output['key'] = attr['key'] - output['label'] = attr['label'] - - type_name = attr['type'].lower() - output['type'] = self.types_per_name[type_name] - - config = None - if type_name == 'number': - config = self.get_number_config(attr) - elif type_name == 'text': - config = self.get_text_config(attr) - elif type_name == 'enumerator': - config = self.get_enumerator_config(attr) - - if config is not None: - output['config'] = config - - return output - - def get_number_config(self, attr): - if 'config' in attr and 'isdecimal' in attr['config']: - isdecimal = attr['config']['isdecimal'] - else: - isdecimal = False - - config = json.dumps({'isdecimal': isdecimal}) - - return config - - def get_text_config(self, attr): - if 'config' in attr and 'markdown' in attr['config']: - markdown = attr['config']['markdown'] - else: - markdown = False - config = json.dumps({'markdown': markdown}) - - return config - - def get_enumerator_config(self, attr): - if 'config' not in attr: - raise CustAttrException('Missing config with data') - if 'data' not in attr['config']: - raise CustAttrException('Missing data in config') - - data = [] - for item in attr['config']['data']: - item_data = {} - for key in item: - # TODO key check by regex - item_data['menu'] = item[key] - item_data['value'] = key - data.append(item_data) - - multiSelect = False - for k in attr['config']: - if k.lower() == 'multiselect': - if isinstance(attr['config'][k], bool): - multiSelect = attr['config'][k] - else: - raise CustAttrException('Multiselect must be boolean') - break - - config = json.dumps({ - 'multiSelect': multiSelect, - 'data': json.dumps(data) - }) - - return config - - def get_group(self, attr): - if isinstance(attr, dict): - group_name = attr['group'].lower() - else: - group_name = attr - if group_name in self.groups: - return self.groups[group_name] - - query = 'CustomAttributeGroup where name is "{}"'.format(group_name) - groups = self.session.query(query).all() - - if len(groups) == 1: - group = groups[0] - self.groups[group_name] = group - - return group - - elif len(groups) < 1: - group = self.session.create('CustomAttributeGroup', { - 
'name': group_name, - }) - self.session.commit() - - return group - - else: - raise CustAttrException( - 'Found more than one group "{}"'.format(group_name) - ) - - def get_security_roles(self, security_roles): - security_roles_lowered = tuple(name.lower() for name in security_roles) - if ( - len(security_roles_lowered) == 0 - or "all" in security_roles_lowered - ): - return list(self.security_roles.values()) - - output = [] - if security_roles_lowered[0] == "except": - excepts = security_roles_lowered[1:] - for role_name, role in self.security_roles.items(): - if role_name not in excepts: - output.append(role) - - else: - for role_name in security_roles_lowered: - if role_name in self.security_roles: - output.append(self.security_roles[role_name]) - else: - raise CustAttrException(( - "Security role \"{}\" was not found in Ftrack." - ).format(role_name)) - return output - - def get_default(self, attr): - type = attr['type'] - default = attr['default'] - if default is None: - return default - err_msg = 'Default value is not' - if type == 'number': - if isinstance(default, (str)) and default.isnumeric(): - default = float(default) - - if not isinstance(default, (float, int)): - raise CustAttrException('{} integer'.format(err_msg)) - elif type == 'text': - if not isinstance(default, str): - raise CustAttrException('{} string'.format(err_msg)) - elif type == 'boolean': - if not isinstance(default, bool): - raise CustAttrException('{} boolean'.format(err_msg)) - elif type == 'enumerator': - if not isinstance(default, list): - raise CustAttrException( - '{} array with strings'.format(err_msg) - ) - # TODO check if multiSelect is available - # and if default is one of data menu - if not isinstance(default[0], str): - raise CustAttrException('{} array of strings'.format(err_msg)) - elif type == 'date': - date_items = default.split(' ') - try: - if len(date_items) == 1: - default = arrow.get(default, 'YY.M.D') - elif len(date_items) == 2: - default = arrow.get(default, 'YY.M.D H:m:s') - else: - raise Exception - except Exception: - raise CustAttrException('Date is not in proper format') - elif type == 'dynamic enumerator': - raise CustAttrException('Dynamic enumerator can\'t have default') - - return default - - def get_optional(self, attr): - output = {} - if "group" in attr: - output["group"] = self.get_group(attr) - if "default" in attr: - output["default"] = self.get_default(attr) - - roles_read = [] - roles_write = [] - if "read_security_roles" in attr: - roles_read = attr["read_security_roles"] - if "write_security_roles" in attr: - roles_write = attr["write_security_roles"] - - output["read_security_roles"] = self.get_security_roles(roles_read) - output["write_security_roles"] = self.get_security_roles(roles_write) - return output - - def get_entity_type(self, attr): - if attr.get("is_hierarchical", False): - return { - "is_hierarchical": True, - "entity_type": attr.get("entity_type") or "show" - } - - if 'entity_type' not in attr: - raise CustAttrException('Missing entity_type') - - if attr['entity_type'].lower() != 'task': - return {'entity_type': attr['entity_type']} - - if 'object_type' not in attr: - raise CustAttrException('Missing object_type') - - object_type_name = attr['object_type'] - object_type_name_low = object_type_name.lower() - object_type = self.object_types_per_name.get(object_type_name_low) - if not object_type: - raise CustAttrException(( - 'Object type with name "{}" doesn\'t exist' - ).format(object_type_name)) - - return { - 'entity_type': attr['entity_type'], -
'object_type_id': object_type["id"] - } - - -def register(session): - '''Register plugin. Called when used as a plugin.''' - - CustomAttributes(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py deleted file mode 100644 index cbeff5343f..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ /dev/null @@ -1,283 +0,0 @@ -import os -import collections -import copy -from openpype.pipeline import Anatomy -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class CreateFolders(BaseAction): - identifier = "create.folders" - label = "Create Folders" - icon = statics_icon("ftrack", "action_icons", "CreateFolders.svg") - - def discover(self, session, entities, event): - for entity_item in event["data"]["selection"]: - if entity_item.get("entityType").lower() in ("task", "show"): - return True - return False - - def interface(self, session, entities, event): - if event["data"].get("values", {}): - return - - with_interface = False - for entity in entities: - if entity.entity_type.lower() != "task": - with_interface = True - break - - if "values" not in event["data"]: - event["data"]["values"] = {} - - event["data"]["values"]["with_interface"] = with_interface - if not with_interface: - return - - title = "Create folders" - - entity_name = entity["name"] - msg = ( - "<br/><br/>Do you want to create folders also" - " for all children of your selection?<br/><br/>" - ) - if entity.entity_type.lower() == "project": - entity_name = entity["full_name"] - msg = msg.replace(" also", "") - msg += "<br/><br/>(Project root won't be created if not checked)<br/><br/>" - items = [ - { - "type": "label", - "value": msg.format(entity_name) - }, - { - "type": "label", - "value": "With all children entities" - }, - { - "name": "children_included", - "type": "boolean", - "value": False - }, - { - "type": "hidden", - "name": "with_interface", - "value": with_interface - } - ] - - return { - "items": items, - "title": title - } - - def launch(self, session, entities, event): - '''Callback method for custom action.''' - - if "values" not in event["data"]: - return - - with_interface = event["data"]["values"]["with_interface"] - with_children = True - if with_interface: - with_children = event["data"]["values"]["children_included"] - - filtered_entities = [] - for entity in entities: - low_context_type = entity["context_type"].lower() - if low_context_type in ("task", "show"): - if not with_children and low_context_type == "show": - continue - filtered_entities.append(entity) - - if not filtered_entities: - return { - "success": True, - "message": 'Nothing was created' - } - - project_entity = self.get_project_from_entity(filtered_entities[0]) - - project_name = project_entity["full_name"] - project_code = project_entity["name"] - - task_entities = [] - other_entities = [] - self.get_all_entities( - session, entities, task_entities, other_entities - ) - hierarchy = self.get_entities_hierarchy( - session, task_entities, other_entities - ) - task_types = session.query("select id, name from Type").all() - task_type_names_by_id = { - task_type["id"]: task_type["name"] - for task_type in task_types - } - - anatomy = Anatomy(project_name) - - work_keys = ["work", "folder"] - work_template = anatomy.templates - for key in work_keys: - work_template = work_template[key] - - publish_keys = ["publish", "folder"] - publish_template = anatomy.templates - for key in publish_keys: - publish_template = publish_template[key] - - project_data = { - "project": { - "name": project_name, - "code": project_code - } - } - - collected_paths = [] - for item in hierarchy: - parent_entity, task_entities = item - - parent_data = copy.deepcopy(project_data) - - parents = parent_entity["link"][1:-1] - hierarchy_names = [p["name"] for p in parents] - hierarchy = "/".join(hierarchy_names) - - if hierarchy_names: - parent_name = hierarchy_names[-1] - else: - parent_name = project_name - - parent_data.update({ - "asset": parent_entity["name"], - "hierarchy": hierarchy, - "parent": parent_name - }) - - if not task_entities: - # create path for entity - collected_paths.append(self.compute_template( - anatomy, parent_data, work_keys - )) - collected_paths.append(self.compute_template( - anatomy, parent_data, publish_keys - )) - continue - - for task_entity in task_entities: - task_type_id = task_entity["type_id"] - task_type_name = task_type_names_by_id[task_type_id] - task_data = copy.deepcopy(parent_data) - task_data["task"] = { - "name": task_entity["name"], - "type": task_type_name - } - - # Template work - collected_paths.append(self.compute_template( - anatomy, task_data, work_keys - )) - - # Template publish - collected_paths.append(self.compute_template( - anatomy, task_data, publish_keys - )) - - if len(collected_paths) == 0: - return { - "success": True, - "message": "No project folders to create." - } - - self.log.info("Creating folders:") - - for path in set(collected_paths): - self.log.info(path) - if not os.path.exists(path): - os.makedirs(path) - - return { - "success": True, - "message": "Successfully created project folders."
- } - - def get_all_entities( - self, session, entities, task_entities, other_entities - ): - if not entities: - return - - no_task_entities = [] - for entity in entities: - if entity.entity_type.lower() == "task": - task_entities.append(entity) - else: - no_task_entities.append(entity) - - if not no_task_entities: - return task_entities - - other_entities.extend(no_task_entities) - - no_task_entity_ids = [entity["id"] for entity in no_task_entities] - next_entities = session.query(( - "select id, parent_id" - " from TypedContext where parent_id in ({})" - ).format(self.join_query_keys(no_task_entity_ids))).all() - - self.get_all_entities( - session, next_entities, task_entities, other_entities - ) - - def get_entities_hierarchy(self, session, task_entities, other_entities): - task_entity_ids = [entity["id"] for entity in task_entities] - full_task_entities = session.query(( - "select id, name, type_id, parent_id" - " from TypedContext where id in ({})" - ).format(self.join_query_keys(task_entity_ids))) - task_entities_by_parent_id = collections.defaultdict(list) - for entity in full_task_entities: - parent_id = entity["parent_id"] - task_entities_by_parent_id[parent_id].append(entity) - - output = [] - if not task_entities_by_parent_id: - return output - - other_ids = set() - for entity in other_entities: - other_ids.add(entity["id"]) - other_ids |= set(task_entities_by_parent_id.keys()) - - parent_entities = session.query(( - "select id, name from TypedContext where id in ({})" - ).format(self.join_query_keys(other_ids))).all() - - for parent_entity in parent_entities: - parent_id = parent_entity["id"] - output.append(( - parent_entity, - task_entities_by_parent_id[parent_id] - )) - - return output - - def compute_template(self, anatomy, data, anatomy_keys): - filled_template = anatomy.format_all(data) - for key in anatomy_keys: - filled_template = filled_template[key] - - if filled_template.solved: - return os.path.normpath(filled_template) - - self.log.warning( - "Template \"{}\" was not fully filled \"{}\"".format( - filled_template.template, filled_template - ) - ) - return os.path.normpath(filled_template.split("{")[0]) - - -def register(session): - """Register plugin. Called when used as a plugin.""" - CreateFolders(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py deleted file mode 100644 index 7c896570b1..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ /dev/null @@ -1,199 +0,0 @@ -import re - -from openpype.pipeline.project_folders import ( - get_project_basic_paths, - create_project_folders, -) -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class CreateProjectFolders(BaseAction): - """Action creates folder structure and may create hierarchy in Ftrack. - - Creation of folder structure and hierarchy in Ftrack is based on presets. - These presets are located in: - `~/pype-config/presets/tools/project_folder_structure.json` - - Example of content: - ```json - { - "__project_root__": { - "prod" : {}, - "resources" : { - "footage": { - "plates": {}, - "offline": {} - }, - "audio": {}, - "art_dept": {} - }, - "editorial" : {}, - "assets[ftrack.Library]": { - "characters[ftrack]": {}, - "locations[ftrack]": {} - }, - "shots[ftrack.Sequence]": { - "scripts": {}, - "editorial[ftrack.Folder]": {} - } - } - } - ``` - Key "__project_root__" indicates root folder (or entity). 
Each key in the - dictionary represents a folder name. Value may contain another dictionary - with subfolders. - - Identifier `[ftrack]` in the name says that this should also be created in - the Ftrack hierarchy. It is possible to specify the entity type of an item with ".". - If the key is `assets[ftrack.Library]`, an entity - with name "assets" and entity type "Library" will be created in Ftrack. It is expected that the Library entity - type exists in Ftrack. - """ - - identifier = "create.project.structure" - label = "Create Project Structure" - description = "Creates folder structure" - role_list = ["Pypeclub", "Administrator", "Project Manager"] - icon = statics_icon("ftrack", "action_icons", "CreateProjectFolders.svg") - - pattern_array = re.compile(r"\[.*\]") - pattern_ftrack = re.compile(r".*\[[.]*ftrack[.]*") - pattern_ent_ftrack = re.compile(r"ftrack\.[^.,\],\s,]*") - project_root_key = "__project_root__" - - def discover(self, session, entities, event): - if len(entities) != 1: - return False - - if entities[0].entity_type.lower() != "project": - return False - - return True - - def launch(self, session, entities, event): - # Get project entity - project_entity = self.get_project_from_entity(entities[0]) - project_name = project_entity["full_name"] - try: - # Get paths based on presets - basic_paths = get_project_basic_paths(project_name) - if not basic_paths: - return { - "success": False, - "message": "Project structure is not set." - } - - # Invoking OpenPype API to create the project folders - create_project_folders(project_name, basic_paths) - self.create_ftrack_entities(basic_paths, project_entity) - - self.trigger_event( - "openpype.project.structure.created", - {"project_name": project_name} - ) - - except Exception as exc: - self.log.warning("Creating the structure crashed.", exc_info=True) - session.rollback() - return { - "success": False, - "message": str(exc) - } - - return True - - def get_ftrack_paths(self, paths_items): - all_ftrack_paths = [] - for path_items in paths_items: - ftrack_path_items = [] - is_ftrack = False - for item in reversed(path_items): - if item == self.project_root_key: - continue - if is_ftrack: - ftrack_path_items.append(item) - elif re.match(self.pattern_ftrack, item): - ftrack_path_items.append(item) - is_ftrack = True - ftrack_path_items = list(reversed(ftrack_path_items)) - if ftrack_path_items: - all_ftrack_paths.append(ftrack_path_items) - return all_ftrack_paths - - def compute_ftrack_items(self, in_list, keys): - if len(keys) == 0: - return in_list - key = keys[0] - exist = None - for index, subdict in enumerate(in_list): - if key in subdict: - exist = index - break - if exist is not None: - in_list[exist][key] = self.compute_ftrack_items( - in_list[exist][key], keys[1:] - ) - else: - in_list.append({key: self.compute_ftrack_items([], keys[1:])}) - return in_list - - def translate_ftrack_items(self, paths_items): - main = [] - for path_items in paths_items: - main = self.compute_ftrack_items(main, path_items) - return main - - def create_ftrack_entities(self, basic_paths, project_ent): - only_ftrack_items = self.get_ftrack_paths(basic_paths) - ftrack_paths = self.translate_ftrack_items(only_ftrack_items) - - for separation in ftrack_paths: - parent = project_ent - self.trigger_creation(separation, parent) - - def trigger_creation(self, separation, parent): - for item, subvalues in separation.items(): - matches = re.findall(self.pattern_array, item) - ent_type = "Folder" - if len(matches) == 0: - name = item - else: - match = matches[0] - name = item.replace(match, 
"") - ent_type_match = re.findall(self.pattern_ent_ftrack, match) - if len(ent_type_match) > 0: - ent_type_split = ent_type_match[0].split(".") - if len(ent_type_split) == 2: - ent_type = ent_type_split[1] - new_parent = self.create_ftrack_entity(name, ent_type, parent) - if subvalues: - for subvalue in subvalues: - self.trigger_creation(subvalue, new_parent) - - def create_ftrack_entity(self, name, ent_type, parent): - for children in parent["children"]: - if children["name"] == name: - return children - data = { - "name": name, - "parent_id": parent["id"] - } - if parent.entity_type.lower() == "project": - data["project_id"] = parent["id"] - else: - data["project_id"] = parent["project"]["id"] - - existing_entity = self.session.query(( - "TypedContext where name is \"{}\" and " - "parent_id is \"{}\" and project_id is \"{}\"" - ).format(name, data["parent_id"], data["project_id"])).first() - if existing_entity: - return existing_entity - - new_ent = self.session.create(ent_type, data) - self.session.commit() - return new_ent - - -def register(session): - CreateProjectFolders(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py deleted file mode 100644 index e1df8e1537..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py +++ /dev/null @@ -1,704 +0,0 @@ -import collections -import uuid -from datetime import datetime - -from bson.objectid import ObjectId - -from openpype.client import get_assets, get_subsets -from openpype.pipeline import AvalonMongoDB -from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype_modules.ftrack.lib.avalon_sync import create_chunks - - -class DeleteAssetSubset(BaseAction): - '''Edit meta data action.''' - - # Action identifier. - identifier = "delete.asset.subset" - # Action label. - label = "Delete Asset/Subsets" - # Action description. 
- description = "Removes from Avalon with all children and asset from Ftrack" - icon = statics_icon("ftrack", "action_icons", "DeleteAsset.svg") - - settings_key = "delete_asset_subset" - # Db connection - dbcon = None - - splitter = {"type": "label", "value": "---"} - action_data_by_id = {} - asset_prefix = "asset:" - subset_prefix = "subset:" - - def __init__(self, *args, **kwargs): - self.dbcon = AvalonMongoDB() - - super(DeleteAssetSubset, self).__init__(*args, **kwargs) - - def discover(self, session, entities, event): - """ Validation """ - task_ids = [] - for ent_info in event["data"]["selection"]: - if ent_info.get("entityType") == "task": - task_ids.append(ent_info["entityId"]) - - is_valid = False - for entity in entities: - if ( - entity["id"] in task_ids - and entity.entity_type.lower() != "task" - ): - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def _launch(self, event): - try: - entities = self._translate_event(event) - if "values" not in event["data"]: - self.dbcon.install() - return self._interface(self.session, entities, event) - - confirmation = self.confirm_delete(entities, event) - if confirmation: - return confirmation - - self.dbcon.install() - response = self.launch( - self.session, entities, event - ) - finally: - self.dbcon.uninstall() - - return self._handle_result(response) - - def interface(self, session, entities, event): - self.show_message(event, "Preparing data...", True) - items = [] - title = "Choose items to delete" - - # Filter selection and get ftrack ids - selection = event["data"].get("selection") or [] - ftrack_ids = [] - project_in_selection = False - for entity in selection: - entity_type = (entity.get("entityType") or "").lower() - if entity_type != "task": - if entity_type == "show": - project_in_selection = True - continue - - ftrack_id = entity.get("entityId") - if ftrack_id: - ftrack_ids.append(ftrack_id) - - if project_in_selection: - msg = "It is not possible to use this action on project entity." - self.show_message(event, msg, True) - - # Filter event even more (skip task entities) - # - task entities are not relevant for avalon - entity_mapping = {} - for entity in entities: - ftrack_id = entity["id"] - if ftrack_id not in ftrack_ids: - continue - - if entity.entity_type.lower() == "task": - ftrack_ids.remove(ftrack_id) - - entity_mapping[ftrack_id] = entity - - if not ftrack_ids: - # It is bug if this happens! 
- return { - "success": False, - "message": "Invalid selection for this action (Bug)" - } - - project = self.get_project_from_entity(entities[0], session) - project_name = project["full_name"] - self.dbcon.Session["AVALON_PROJECT"] = project_name - - asset_docs = list(get_assets( - project_name, - fields=["_id", "name", "data.ftrackId", "data.parents"] - )) - selected_av_entities = [] - found_ftrack_ids = set() - asset_docs_by_name = collections.defaultdict(list) - for asset_doc in asset_docs: - ftrack_id = asset_doc["data"].get("ftrackId") - if ftrack_id: - found_ftrack_ids.add(ftrack_id) - if ftrack_id in entity_mapping: - selected_av_entities.append(asset_doc) - - asset_name = asset_doc["name"] - asset_docs_by_name[asset_name].append(asset_doc) - - found_without_ftrack_id = {} - for ftrack_id, entity in entity_mapping.items(): - if ftrack_id in found_ftrack_ids: - continue - - av_ents_by_name = asset_docs_by_name[entity["name"]] - if not av_ents_by_name: - continue - - ent_path_items = [ent["name"] for ent in entity["link"]] - end_index = len(ent_path_items) - 1 - parents = ent_path_items[1:end_index:] - # TODO we should say to user that - # few of them are missing in avalon - for av_ent in av_ents_by_name: - if av_ent["data"]["parents"] != parents: - continue - - # TODO we should say to user that found entity - # with same name does not match same ftrack id? - if "ftrackId" not in av_ent["data"]: - selected_av_entities.append(av_ent) - found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id - break - - if not selected_av_entities: - return { - "success": True, - "message": ( - "Didn't find entities in avalon." - " You can use Ftrack's Delete button for the selection." - ) - } - - # Remove cached action older than 2 minutes - old_action_ids = [] - for action_id, data in self.action_data_by_id.items(): - created_at = data.get("created_at") - if not created_at: - old_action_ids.append(action_id) - continue - cur_time = datetime.now() - existing_in_sec = (created_at - cur_time).total_seconds() - if existing_in_sec > 60 * 2: - old_action_ids.append(action_id) - - for action_id in old_action_ids: - self.action_data_by_id.pop(action_id, None) - - # Store data for action id - action_id = str(uuid.uuid1()) - self.action_data_by_id[action_id] = { - "attempt": 1, - "created_at": datetime.now(), - "project_name": project_name, - "subset_ids_by_name": {}, - "subset_ids_by_parent": {}, - "without_ftrack_id": found_without_ftrack_id - } - - id_item = { - "type": "hidden", - "name": "action_id", - "value": action_id - } - - items.append(id_item) - asset_ids = [ent["_id"] for ent in selected_av_entities] - subsets_for_selection = get_subsets(project_name, asset_ids=asset_ids) - - asset_ending = "" - if len(selected_av_entities) > 1: - asset_ending = "s" - - asset_title = { - "type": "label", - "value": "# Delete asset{}:".format(asset_ending) - } - asset_note = { - "type": "label", - "value": ( - "

NOTE: Action will delete checked entities" - " in Ftrack and Avalon with all children entities and" - " published content.

" - ) - } - - items.append(asset_title) - items.append(asset_note) - - asset_items = collections.defaultdict(list) - for asset in selected_av_entities: - ent_path_items = [project_name] - ent_path_items.extend(asset.get("data", {}).get("parents") or []) - ent_path_to_parent = "/".join(ent_path_items) + "/" - asset_items[ent_path_to_parent].append(asset) - - for asset_parent_path, assets in sorted(asset_items.items()): - items.append({ - "type": "label", - "value": "## - {}".format(asset_parent_path) - }) - for asset in assets: - items.append({ - "label": asset["name"], - "name": "{}{}".format( - self.asset_prefix, str(asset["_id"]) - ), - "type": 'boolean', - "value": False - }) - - subset_ids_by_name = collections.defaultdict(list) - subset_ids_by_parent = collections.defaultdict(list) - for subset in subsets_for_selection: - subset_id = subset["_id"] - name = subset["name"] - parent_id = subset["parent"] - subset_ids_by_name[name].append(subset_id) - subset_ids_by_parent[parent_id].append(subset_id) - - if not subset_ids_by_name: - return { - "items": items, - "title": title - } - - subset_ending = "" - if len(subset_ids_by_name.keys()) > 1: - subset_ending = "s" - - subset_title = { - "type": "label", - "value": "# Subset{} to delete:".format(subset_ending) - } - subset_note = { - "type": "label", - "value": ( - "

WARNING: Subset{} will be removed" - " for all selected entities.

" - ).format(subset_ending) - } - - items.append(self.splitter) - items.append(subset_title) - items.append(subset_note) - - for name in subset_ids_by_name: - items.append({ - "label": "{}".format(name), - "name": "{}{}".format(self.subset_prefix, name), - "type": "boolean", - "value": False - }) - - self.action_data_by_id[action_id]["subset_ids_by_parent"] = ( - subset_ids_by_parent - ) - self.action_data_by_id[action_id]["subset_ids_by_name"] = ( - subset_ids_by_name - ) - - return { - "items": items, - "title": title - } - - def confirm_delete(self, entities, event): - values = event["data"]["values"] - action_id = values.get("action_id") - spec_data = self.action_data_by_id.get(action_id) - if not spec_data: - # it is a bug if this happens! - return { - "success": False, - "message": "Something bad has happened. Please try again." - } - - # Process Delete confirmation - delete_key = values.get("delete_key") - if delete_key: - delete_key = delete_key.lower().strip() - # Go to launch part if user entered `delete` - if delete_key == "delete": - return - # Skip whole process if user didn't enter any text - elif delete_key == "": - self.action_data_by_id.pop(action_id, None) - return { - "success": True, - "message": "Deleting cancelled (delete entry was empty)" - } - # Get data to show again - to_delete = spec_data["to_delete"] - - else: - to_delete = collections.defaultdict(list) - for key, value in values.items(): - if not value: - continue - if key.startswith(self.asset_prefix): - _key = key.replace(self.asset_prefix, "") - to_delete["assets"].append(_key) - - elif key.startswith(self.subset_prefix): - _key = key.replace(self.subset_prefix, "") - to_delete["subsets"].append(_key) - - self.action_data_by_id[action_id]["to_delete"] = to_delete - - asset_to_delete = len(to_delete.get("assets") or []) > 0 - subset_to_delete = len(to_delete.get("subsets") or []) > 0 - - if not asset_to_delete and not subset_to_delete: - self.action_data_by_id.pop(action_id, None) - return { - "success": True, - "message": "Nothing was selected to delete" - } - - attempt = spec_data["attempt"] - if attempt > 3: - self.action_data_by_id.pop(action_id, None) - return { - "success": False, - "message": "You didn't enter \"DELETE\" properly 3 times!" - } - - self.action_data_by_id[action_id]["attempt"] += 1 - - title = "Confirmation of deleting" - - if asset_to_delete: - asset_len = len(to_delete["assets"]) - asset_ending = "" - if asset_len > 1: - asset_ending = "s" - title += " {} Asset{}".format(asset_len, asset_ending) - if subset_to_delete: - title += " and" - - if subset_to_delete: - sub_len = len(to_delete["subsets"]) - type_ending = "" - sub_ending = "" - if sub_len == 1: - subset_ids_by_name = spec_data["subset_ids_by_name"] - if len(subset_ids_by_name[to_delete["subsets"][0]]) > 1: - sub_ending = "s" - - elif sub_len > 1: - type_ending = "s" - sub_ending = "s" - - title += " {} type{} of subset{}".format( - sub_len, type_ending, sub_ending - ) - - items = [] - - id_item = {"type": "hidden", "name": "action_id", "value": action_id} - delete_label = { - 'type': 'label', - 'value': '# Please enter "DELETE" to confirm #' - } - delete_item = { - "name": "delete_key", - "type": "text", - "value": "", - "empty_text": "Type Delete here..." 
- } - - items.append(id_item) - items.append(delete_label) - items.append(delete_item) - - return { - "items": items, - "title": title - } - - def launch(self, session, entities, event): - self.show_message(event, "Processing...", True) - values = event["data"]["values"] - action_id = values.get("action_id") - spec_data = self.action_data_by_id.get(action_id) - if not spec_data: - # it is a bug if this happens! - return { - "success": False, - "message": "Something bad has happened. Please try again." - } - - report_messages = collections.defaultdict(list) - - project_name = spec_data["project_name"] - to_delete = spec_data["to_delete"] - self.dbcon.Session["AVALON_PROJECT"] = project_name - - assets_to_delete = to_delete.get("assets") or [] - subsets_to_delete = to_delete.get("subsets") or [] - - # Convert asset ids to ObjectId obj - assets_to_delete = [ - ObjectId(asset_id) - for asset_id in assets_to_delete - if asset_id - ] - - subset_ids_by_parent = spec_data["subset_ids_by_parent"] - subset_ids_by_name = spec_data["subset_ids_by_name"] - - subset_ids_to_archive = [] - asset_ids_to_archive = [] - ftrack_ids_to_delete = [] - if len(assets_to_delete) > 0: - map_av_ftrack_id = spec_data["without_ftrack_id"] - # Prepare data when deleting whole avalon asset - avalon_assets = get_assets( - project_name, - fields=["_id", "data.visualParent", "data.ftrackId"] - ) - avalon_assets_by_parent = collections.defaultdict(list) - for asset in avalon_assets: - asset_id = asset["_id"] - parent_id = asset["data"]["visualParent"] - avalon_assets_by_parent[parent_id].append(asset) - if asset_id in assets_to_delete: - ftrack_id = map_av_ftrack_id.get(str(asset_id)) - if not ftrack_id: - ftrack_id = asset["data"].get("ftrackId") - - if ftrack_id: - ftrack_ids_to_delete.append(ftrack_id) - - children_queue = collections.deque() - for mongo_id in assets_to_delete: - children_queue.append(mongo_id) - - while children_queue: - mongo_id = children_queue.popleft() - if mongo_id in asset_ids_to_archive: - continue - - asset_ids_to_archive.append(mongo_id) - for subset_id in subset_ids_by_parent.get(mongo_id, []): - if subset_id not in subset_ids_to_archive: - subset_ids_to_archive.append(subset_id) - - children = avalon_assets_by_parent.get(mongo_id) - if not children: - continue - - for child in children: - child_id = child["_id"] - if child_id not in asset_ids_to_archive: - children_queue.append(child_id) - - # Prepare names of assets in ftrack and ids of subsets in mongo - asset_names_to_delete = [] - if len(subsets_to_delete) > 0: - for name in subsets_to_delete: - asset_names_to_delete.append(name) - for subset_id in subset_ids_by_name[name]: - if subset_id in subset_ids_to_archive: - continue - subset_ids_to_archive.append(subset_id) - - # Get ftrack ids of entities where only the asset will be deleted - not_deleted_entities_id = [] - ftrack_id_name_map = {} - if asset_names_to_delete: - for entity in entities: - ftrack_id = entity["id"] - ftrack_id_name_map[ftrack_id] = entity["name"] - if ftrack_id not in ftrack_ids_to_delete: - not_deleted_entities_id.append(ftrack_id) - - mongo_proc_txt = "MongoProcessing: " - ftrack_proc_txt = "Ftrack processing: " - if asset_ids_to_archive: - self.log.debug("{}Archiving of assets <{}>".format( - mongo_proc_txt, - ", ".join([str(id) for id in asset_ids_to_archive]) - )) - self.dbcon.update_many( - { - "_id": {"$in": asset_ids_to_archive}, - "type": "asset" - }, - {"$set": {"type": "archived_asset"}} - ) - - if subset_ids_to_archive: - self.log.debug("{}Archiving of subsets <{}>".format( - mongo_proc_txt, - ", ".join([str(id) for id in subset_ids_to_archive]) - )) - self.dbcon.update_many( - { - "_id": {"$in": subset_ids_to_archive}, - "type": "subset" - }, - {"$set": {"type": "archived_subset"}} - ) - - if ftrack_ids_to_delete: - self.log.debug("{}Deleting Ftrack Entities <{}>".format( - ftrack_proc_txt, ", ".join(ftrack_ids_to_delete) - )) - - entities_by_link_len = self._prepare_entities_before_delete( - ftrack_ids_to_delete, session - ) - for link_len in sorted(entities_by_link_len.keys(), reverse=True): - for entity in entities_by_link_len[link_len]: - session.delete(entity) - - try: - session.commit() - except Exception: - ent_path = "/".join( - [ent["name"] for ent in entity["link"]] - ) - msg = "Failed to delete entity" - report_messages[msg].append(ent_path) - session.rollback() - self.log.warning( - "{} <{}>".format(msg, ent_path), - exc_info=True - ) - - if not_deleted_entities_id and asset_names_to_delete: - joined_not_deleted = ",".join([ - "\"{}\"".format(ftrack_id) - for ftrack_id in not_deleted_entities_id - ]) - joined_asset_names = ",".join([ - "\"{}\"".format(name) - for name in asset_names_to_delete - ]) - # Find assets of selected entities with names of checked subsets - assets = session.query(( - "select id from Asset where" - " context_id in ({}) and name in ({})" - ).format(joined_not_deleted, joined_asset_names)).all() - - self.log.debug("{}Deleting Ftrack Assets <{}>".format( - ftrack_proc_txt, - ", ".join([asset["id"] for asset in assets]) - )) - for asset in assets: - session.delete(asset) - try: - session.commit() - except Exception: - session.rollback() - msg = "Failed to delete asset" - report_messages[msg].append(asset["id"]) - self.log.warning( - "Asset: {} <{}>".format(asset["name"], asset["id"]), - exc_info=True - ) - - return self.report_handle(report_messages, project_name, event) - - def _prepare_entities_before_delete(self, ftrack_ids_to_delete, session): - """Filter children entities to avoid CircularDependencyError.""" - joined_ids_to_delete = ", ".join( - ["\"{}\"".format(id) for id in ftrack_ids_to_delete] - ) - to_delete_entities = session.query( - "select id, link from TypedContext where id in ({})".format( - joined_ids_to_delete - ) - ).all() - # Find all children entities and add them to list - # - Delete tasks first then their parents and continue - parent_ids_to_delete = [ - entity["id"] - for entity in to_delete_entities - ] - while parent_ids_to_delete: - joined_parent_ids_to_delete = ",".join([ - "\"{}\"".format(ftrack_id) - for ftrack_id in parent_ids_to_delete - ]) - _to_delete = session.query(( - "select id, link from TypedContext where parent_id in ({})" - ).format(joined_parent_ids_to_delete)).all() - parent_ids_to_delete = [] - for entity in _to_delete: - parent_ids_to_delete.append(entity["id"]) - to_delete_entities.append(entity) - - # Unset 'task_id' from AssetVersion entities - # - when task is deleted the asset version is not marked for deletion - task_ids = set( - entity["id"] - for entity in to_delete_entities - if entity.entity_type.lower() == "task" - ) - for chunk in create_chunks(task_ids): - asset_versions = session.query(( - "select id, task_id from AssetVersion where task_id in ({})" - ).format(self.join_query_keys(chunk))).all() - for asset_version in asset_versions: - asset_version["task_id"] = None - session.commit() - - entities_by_link_len = collections.defaultdict(list) - for entity in to_delete_entities: - entities_by_link_len[len(entity["link"])].append(entity) - - return 
entities_by_link_len - - def report_handle(self, report_messages, project_name, event): - if not report_messages: - return { - "success": True, - "message": "Deletion was successful!" - } - - title = "Delete report ({}):".format(project_name) - items = [] - items.append({ - "type": "label", - "value": "# Deleting was not completely successful" - }) - items.append({ - "type": "label", - "value": "<br/><br/>Check logs for more information<br/><br/>" - }) - for msg, _items in report_messages.items(): - if not _items or not msg: - continue - - items.append({ - "type": "label", - "value": "# {}".format(msg) - }) - - if isinstance(_items, str): - _items = [_items] - items.append({ - "type": "label", - "value": '<br/><br/>{}<br/><br/>'.format("<br/>".join(_items)) - }) - items.append(self.splitter) - - self.show_interface(items, title, event) - - return { - "success": False, - "message": "Deleting finished. Read report messages." - } - - -def register(session): - '''Register plugin. Called when used as a plugin.''' - - DeleteAssetSubset(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py deleted file mode 100644 index ec14c6918b..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ /dev/null @@ -1,583 +0,0 @@ -import os -import collections -import uuid - -import clique -from pymongo import UpdateOne - -from openpype.client import ( - get_assets, - get_subsets, - get_versions, - get_representations -) -from openpype.lib import ( - StringTemplate, - TemplateUnsolved, - format_file_size, -) -from openpype.pipeline import AvalonMongoDB, Anatomy -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class DeleteOldVersions(BaseAction): - - identifier = "delete.old.versions" - label = "OpenPype Admin" - variant = "- Delete old versions" - description = ( - "Delete files from older publishes so project can be" - " archived with only latest versions." - ) - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - - settings_key = "delete_old_versions" - - dbcon = AvalonMongoDB() - - interface_title = "Choose your preferences" - splitter_item = {"type": "label", "value": "---"} - sequence_splitter = "__sequence_splitter__" - - def discover(self, session, entities, event): - """ Validation. """ - is_valid = False - for entity in entities: - if entity.entity_type.lower() == "assetversion": - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def interface(self, session, entities, event): - # TODO Add roots existence validation - items = [] - values = event["data"].get("values") - if values: - versions_count = int(values["last_versions_count"]) - if versions_count >= 1: - return - items.append({ - "type": "label", - "value": ( - "# You have to keep at least 1 version!" - ) - }) - - items.append({ - "type": "label", - "value": ( - "WARNING: This will remove published files of older" - " versions from disk so we don't recommend using" - " this action on a \"live\" project." - ) - }) - - items.append(self.splitter_item) - - # How many versions to keep - items.append({ - "type": "label", - "value": "## Choose how many versions you want to keep:" - }) - items.append({ - "type": "label", - "value": ( - "NOTE: We recommend keeping 2 versions." - ) - }) - items.append({ - "type": "number", - "name": "last_versions_count", - "label": "Versions", - "value": 2 - }) - - items.append(self.splitter_item) - - items.append({ - "type": "label", - "value": ( - "## Remove publish folder even if there" - " are other than published files:" - ) - }) - items.append({ - "type": "label", - "value": ( - "WARNING: This may remove more than you want." - ) - }) - items.append({ - "type": "boolean", - "name": "force_delete_publish_folder", - "label": "Are you sure?", - "value": False - }) - - items.append(self.splitter_item) - - items.append({ - "type": "label", - "value": ( - "This will NOT delete any files and only return the " - "total size of the files." 
- ) - }) - items.append({ - "type": "boolean", - "name": "only_calculate", - "label": "Only calculate size of files.", - "value": False - }) - - return { - "items": items, - "title": self.interface_title - } - - def launch(self, session, entities, event): - values = event["data"].get("values") - if not values: - return - - versions_count = int(values["last_versions_count"]) - force_to_remove = values["force_delete_publish_folder"] - only_calculate = values["only_calculate"] - - _val1 = "OFF" - if force_to_remove: - _val1 = "ON" - - _val3 = "s" - if versions_count == 1: - _val3 = "" - - self.log.debug(( - "Process started. Force to delete publish folder is set to [{0}]" - " and will keep {1} latest version{2}." - ).format(_val1, versions_count, _val3)) - - self.dbcon.install() - - project = None - avalon_asset_names = [] - asset_versions_by_parent_id = collections.defaultdict(list) - subset_names_by_asset_name = collections.defaultdict(list) - - ftrack_assets_by_name = {} - for entity in entities: - ftrack_asset = entity["asset"] - - parent_ent = ftrack_asset["parent"] - parent_ftrack_id = parent_ent["id"] - parent_name = parent_ent["name"] - - if parent_name not in avalon_asset_names: - avalon_asset_names.append(parent_name) - - # Group asset versions by parent entity - asset_versions_by_parent_id[parent_ftrack_id].append(entity) - - # Get project - if project is None: - project = parent_ent["project"] - - # Collect subset names per asset - subset_name = ftrack_asset["name"] - subset_names_by_asset_name[parent_name].append(subset_name) - - if subset_name not in ftrack_assets_by_name: - ftrack_assets_by_name[subset_name] = ftrack_asset - - # Set Mongo collection - project_name = project["full_name"] - anatomy = Anatomy(project_name) - self.dbcon.Session["AVALON_PROJECT"] = project_name - self.log.debug("Project is set to {}".format(project_name)) - - # Get Assets from avalon database - assets = list( - get_assets(project_name, asset_names=avalon_asset_names) - ) - asset_id_to_name_map = { - asset["_id"]: asset["name"] for asset in assets - } - asset_ids = list(asset_id_to_name_map.keys()) - - self.log.debug("Collected assets ({})".format(len(asset_ids))) - - # Get Subsets - subsets = list( - get_subsets(project_name, asset_ids=asset_ids) - ) - subsets_by_id = {} - subset_ids = [] - for subset in subsets: - asset_id = subset["parent"] - asset_name = asset_id_to_name_map[asset_id] - available_subsets = subset_names_by_asset_name[asset_name] - - if subset["name"] not in available_subsets: - continue - - subset_ids.append(subset["_id"]) - subsets_by_id[subset["_id"]] = subset - - self.log.debug("Collected subsets ({})".format(len(subset_ids))) - - # Get Versions - versions = list( - get_versions(project_name, subset_ids=subset_ids) - ) - - versions_by_parent = collections.defaultdict(list) - for ent in versions: - versions_by_parent[ent["parent"]].append(ent) - - def sort_func(ent): - return int(ent["name"]) - - all_last_versions = [] - for parent_id, _versions in versions_by_parent.items(): - for idx, version in enumerate( - sorted(_versions, key=sort_func, reverse=True) - ): - if idx >= versions_count: - break - all_last_versions.append(version) - - self.log.debug("Collected versions ({})".format(len(versions))) - - # Filter latest versions - for version in all_last_versions: - versions.remove(version) - - # Update versions_by_parent without filtered versions - versions_by_parent = collections.defaultdict(list) - for ent in versions: - versions_by_parent[ent["parent"]].append(ent) - - # Filter already deleted versions - versions_to_pop = [] - for version in versions: - version_tags = version["data"].get("tags") - if version_tags and "deleted" in version_tags: - versions_to_pop.append(version) - - for version in versions_to_pop: - subset = subsets_by_id[version["parent"]] - asset_id = subset["parent"] - asset_name = asset_id_to_name_map[asset_id] - msg = "Asset: \"{}\" | Subset: \"{}\" | Version: \"{}\"".format( - asset_name, subset["name"], version["name"] - ) - self.log.warning(( - "Skipping version. Already tagged as `deleted`. < {} >" - ).format(msg)) - versions.remove(version) - - version_ids = [ent["_id"] for ent in versions] - - self.log.debug( - "Filtered versions to delete ({})".format(len(version_ids)) - ) - - if not version_ids: - msg = "Skipping processing. Nothing to delete." - self.log.debug(msg) - return { - "success": True, - "message": msg - } - - repres = list( - get_representations(project_name, version_ids=version_ids) - ) - - self.log.debug( - "Collected representations to remove ({})".format(len(repres)) - ) - - dir_paths = {} - file_paths_by_dir = collections.defaultdict(list) - for repre in repres: - file_path, seq_path = self.path_from_representation(repre, anatomy) - if file_path is None: - self.log.warning(( - "Could not format path for representation \"{}\"" - ).format(str(repre))) - continue - - dir_path = os.path.dirname(file_path) - dir_id = None - for _dir_id, _dir_path in dir_paths.items(): - if _dir_path == dir_path: - dir_id = _dir_id - break - - if dir_id is None: - dir_id = uuid.uuid4() - dir_paths[dir_id] = dir_path - - file_paths_by_dir[dir_id].append([file_path, seq_path]) - - dir_ids_to_pop = [] - for dir_id, dir_path in dir_paths.items(): - if os.path.exists(dir_path): - continue - - dir_ids_to_pop.append(dir_id) - - # Pop dirs from both dictionaries - for dir_id in dir_ids_to_pop: - dir_paths.pop(dir_id) - paths = file_paths_by_dir.pop(dir_id) - # TODO report of missing directories? - paths_msg = ", ".join([ - "'{}'".format(path[0].replace("\\", "/")) for path in paths - ]) - self.log.warning(( - "Folder does not exist. Deleting its files skipped: {}" - ).format(paths_msg)) - - # Size of files. 
- size = 0 - - if only_calculate: - if force_to_remove: - size = self.delete_whole_dir_paths( - dir_paths.values(), delete=False - ) - else: - size = self.delete_only_repre_files( - dir_paths, file_paths_by_dir, delete=False - ) - - msg = "Total size of files: {}".format(format_file_size(size)) - - self.log.warning(msg) - - return {"success": True, "message": msg} - - if force_to_remove: - size = self.delete_whole_dir_paths(dir_paths.values()) - else: - size = self.delete_only_repre_files(dir_paths, file_paths_by_dir) - - mongo_changes_bulk = [] - for version in versions: - orig_version_tags = version["data"].get("tags") or [] - version_tags = [tag for tag in orig_version_tags] - if "deleted" not in version_tags: - version_tags.append("deleted") - - if version_tags == orig_version_tags: - continue - - update_query = {"_id": version["_id"]} - update_data = {"$set": {"data.tags": version_tags}} - mongo_changes_bulk.append(UpdateOne(update_query, update_data)) - - if mongo_changes_bulk: - self.dbcon.bulk_write(mongo_changes_bulk) - - self.dbcon.uninstall() - - # Set attribute `is_published` to `False` on ftrack AssetVersions - for subset_id, _versions in versions_by_parent.items(): - subset_name = None - for subset in subsets: - if subset["_id"] == subset_id: - subset_name = subset["name"] - break - - if subset_name is None: - self.log.warning( - "Subset with ID `{}` was not found.".format(str(subset_id)) - ) - continue - - ftrack_asset = ftrack_assets_by_name.get(subset_name) - if not ftrack_asset: - self.log.warning(( - "Could not find Ftrack asset with name `{}`" - ).format(subset_name)) - continue - - version_numbers = [int(ver["name"]) for ver in _versions] - for version in ftrack_asset["versions"]: - if int(version["version"]) in version_numbers: - version["is_published"] = False - - try: - session.commit() - - except Exception: - msg = ( - "Could not set `is_published` attribute to `False`" - " for selected AssetVersions." 
- ) - self.log.warning(msg, exc_info=True) - - return { - "success": False, - "message": msg - } - - msg = "Total size of files deleted: {}".format(format_file_size(size)) - - self.log.warning(msg) - - return {"success": True, "message": msg} - - def delete_whole_dir_paths(self, dir_paths, delete=True): - size = 0 - - for dir_path in dir_paths: - # Delete all files and folders in dir path - for root, dirs, files in os.walk(dir_path, topdown=False): - for name in files: - file_path = os.path.join(root, name) - size += os.path.getsize(file_path) - if delete: - os.remove(file_path) - self.log.debug("Removed file: {}".format(file_path)) - - for name in dirs: - if delete: - os.rmdir(os.path.join(root, name)) - - if not delete: - continue - - # Delete the folder and its parent folders too if they are empty - while True: - if not os.path.exists(dir_path): - dir_path = os.path.dirname(dir_path) - continue - - if len(os.listdir(dir_path)) != 0: - break - - os.rmdir(os.path.join(dir_path)) - - return size - - def delete_only_repre_files(self, dir_paths, file_paths, delete=True): - size = 0 - - for dir_id, dir_path in dir_paths.items(): - dir_files = os.listdir(dir_path) - collections, remainders = clique.assemble(dir_files) - for file_path, seq_path in file_paths[dir_id]: - file_path_base = os.path.split(file_path)[1] - # Just remove file if `frame` key was not in context or - # filled path is in remainders (single file sequence) - if not seq_path or file_path_base in remainders: - if not os.path.exists(file_path): - self.log.warning( - "File was not found: {}".format(file_path) - ) - continue - - size += os.path.getsize(file_path) - - if delete: - os.remove(file_path) - self.log.debug("Removed file: {}".format(file_path)) - - if file_path_base in remainders: - remainders.remove(file_path_base) - continue - - seq_path_base = os.path.split(seq_path)[1] - head, tail = seq_path_base.split(self.sequence_splitter) - - final_col = None - for collection in collections: - if head != collection.head or tail != collection.tail: - continue - final_col = collection - break - - if final_col is not None: - # Fill full path to head - final_col.head = os.path.join(dir_path, final_col.head) - for _file_path in final_col: - if os.path.exists(_file_path): - - size += os.path.getsize(_file_path) - - if delete: - os.remove(_file_path) - self.log.debug( - "Removed file: {}".format(_file_path) - ) - - _seq_path = final_col.format("{head}{padding}{tail}") - self.log.debug("Removed files: {}".format(_seq_path)) - collections.remove(final_col) - - elif os.path.exists(file_path): - size += os.path.getsize(file_path) - - if delete: - os.remove(file_path) - self.log.debug("Removed file: {}".format(file_path)) - else: - self.log.warning( - "File was not found: {}".format(file_path) - ) - - # Delete as many parent folders as possible - if not delete: - return size - - for dir_path in dir_paths.values(): - while True: - if not os.path.exists(dir_path): - dir_path = os.path.dirname(dir_path) - continue - - if len(os.listdir(dir_path)) != 0: - break - - self.log.debug("Removed folder: {}".format(dir_path)) - os.rmdir(dir_path) - - return size - - def path_from_representation(self, representation, anatomy): - try: - template = representation["data"]["template"] - - except KeyError: - return (None, None) - - sequence_path = None - try: - context = representation["context"] - context["root"] = anatomy.roots - path = StringTemplate.format_strict_template(template, context) - if "frame" in context: - context["frame"] = 
self.sequence_splitter - sequence_path = os.path.normpath( - StringTemplate.format_strict_template( - template, context - ) - ) - - except (KeyError, TemplateUnsolved): - # Template references unavailable data - return (None, None) - - return (os.path.normpath(path), sequence_path) - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - DeleteOldVersions(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py deleted file mode 100644 index c198389b98..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ /dev/null @@ -1,645 +0,0 @@ -import os -import copy -import json -import collections - -from openpype.client import ( - get_project, - get_assets, - get_subsets, - get_versions, - get_representations -) -from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from openpype_modules.ftrack.lib.custom_attributes import ( - query_custom_attributes -) -from openpype.lib.dateutils import get_datetime_data -from openpype.pipeline import Anatomy -from openpype.pipeline.load import get_representation_path_with_anatomy -from openpype.pipeline.delivery import ( - get_format_dict, - check_destination_path, - deliver_single_file, - deliver_sequence, -) - - -class Delivery(BaseAction): - identifier = "delivery.action" - label = "Delivery" - description = "Deliver data to client" - role_list = ["Pypeclub", "Administrator", "Project manager"] - icon = statics_icon("ftrack", "action_icons", "Delivery.svg") - settings_key = "delivery_action" - - def discover(self, session, entities, event): - is_valid = False - for entity in entities: - if entity.entity_type.lower() in ("assetversion", "reviewsession"): - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def interface(self, session, entities, event): - if event["data"].get("values", {}): - return - - title = "Delivery data to Client" - - items = [] - item_splitter = {"type": "label", "value": "---"} - - project_entity = self.get_project_from_entity(entities[0]) - project_name = project_entity["full_name"] - project_doc = get_project(project_name, fields=["name"]) - if not project_doc: - return { - "success": False, - "message": ( - "Didn't find project \"{}\" in avalon." - ).format(project_name) - } - - repre_names = self._get_repre_names(project_name, session, entities) - - items.append({ - "type": "hidden", - "name": "__project_name__", - "value": project_name - }) - - # Prepare anatomy data - anatomy = Anatomy(project_name) - new_anatomies = [] - first = None - for key, template in (anatomy.templates.get("delivery") or {}).items(): - # Use only keys with `{root}` or `{root[*]}` in value - if isinstance(template, str) and "{root" in template: - new_anatomies.append({ - "label": key, - "value": key - }) - if first is None: - first = key - - skipped = False - # Add message if there are any common components - if not repre_names or not new_anatomies: - skipped = True - items.append({ - "type": "label", - "value": "
Something went wrong:
" - }) - - items.append({ - "type": "hidden", - "name": "__skipped__", - "value": skipped - }) - - if not repre_names: - if len(entities) == 1: - items.append({ - "type": "label", - "value": ( - "- Selected entity doesn't have components to deliver." - ) - }) - else: - items.append({ - "type": "label", - "value": ( - "- Selected entities don't have common components." - ) - }) - - # Add message if delivery anatomies are not set - if not new_anatomies: - items.append({ - "type": "label", - "value": ( - "- `\"delivery\"` anatomy key is not set in config." - ) - }) - - # Skip if there are any data shortcomings - if skipped: - return { - "items": items, - "title": title - } - - items.append({ - "value": "
Choose Components to deliver
", - "type": "label" - }) - - for repre_name in repre_names: - items.append({ - "type": "boolean", - "value": False, - "label": repre_name, - "name": repre_name - }) - - items.append(item_splitter) - - items.append({ - "value": "
Location for delivery
", - "type": "label" - }) - - items.append({ - "type": "label", - "value": ( - "NOTE: It is possible to replace `root` key in anatomy." - ) - }) - - items.append({ - "type": "text", - "name": "__location_path__", - "empty_text": "Type location path here...(Optional)" - }) - - items.append(item_splitter) - - items.append({ - "value": "
Anatomy of delivery files
", - "type": "label" - }) - - items.append({ - "type": "label", - "value": ( - "
NOTE: These can be set in Anatomy.yaml" - " within `delivery` key.
" - ) - }) - - items.append({ - "type": "enumerator", - "name": "__new_anatomies__", - "data": new_anatomies, - "value": first - }) - - return { - "items": items, - "title": title - } - - def _get_repre_names(self, project_name, session, entities): - version_ids = self._get_interest_version_ids( - project_name, session, entities - ) - if not version_ids: - return [] - repre_docs = get_representations( - project_name, - version_ids=version_ids, - fields=["name"] - ) - repre_names = {repre_doc["name"] for repre_doc in repre_docs} - return list(sorted(repre_names)) - - def _get_interest_version_ids(self, project_name, session, entities): - # Extract AssetVersion entities - asset_versions = self._extract_asset_versions(session, entities) - # Prepare Asset ids - asset_ids = { - asset_version["asset_id"] - for asset_version in asset_versions - } - # Query Asset entities - assets = session.query(( - "select id, name, context_id from Asset where id in ({})" - ).format(self.join_query_keys(asset_ids))).all() - assets_by_id = { - asset["id"]: asset - for asset in assets - } - parent_ids = set() - subset_names = set() - version_nums = set() - for asset_version in asset_versions: - asset_id = asset_version["asset_id"] - asset = assets_by_id[asset_id] - - parent_ids.add(asset["context_id"]) - subset_names.add(asset["name"]) - version_nums.add(asset_version["version"]) - - asset_docs_by_ftrack_id = self._get_asset_docs( - project_name, session, parent_ids - ) - subset_docs = self._get_subset_docs( - project_name, - asset_docs_by_ftrack_id, - subset_names, - asset_versions, - assets_by_id - ) - version_docs = self._get_version_docs( - project_name, - asset_docs_by_ftrack_id, - subset_docs, - version_nums, - asset_versions, - assets_by_id - ) - - return [version_doc["_id"] for version_doc in version_docs] - - def _extract_asset_versions(self, session, entities): - asset_version_ids = set() - review_session_ids = set() - for entity in entities: - entity_type_low = entity.entity_type.lower() - if entity_type_low == "assetversion": - asset_version_ids.add(entity["id"]) - elif entity_type_low == "reviewsession": - review_session_ids.add(entity["id"]) - - for version_id in self._get_asset_version_ids_from_review_sessions( - session, review_session_ids - ): - asset_version_ids.add(version_id) - - asset_versions = session.query(( - "select id, version, asset_id from AssetVersion where id in ({})" - ).format(self.join_query_keys(asset_version_ids))).all() - - return asset_versions - - def _get_asset_version_ids_from_review_sessions( - self, session, review_session_ids - ): - if not review_session_ids: - return set() - review_session_objects = session.query(( - "select version_id from ReviewSessionObject" - " where review_session_id in ({})" - ).format(self.join_query_keys(review_session_ids))).all() - - return { - review_session_object["version_id"] - for review_session_object in review_session_objects - } - - def _get_version_docs( - self, - project_name, - asset_docs_by_ftrack_id, - subset_docs, - version_nums, - asset_versions, - assets_by_id - ): - subset_docs_by_id = { - subset_doc["_id"]: subset_doc - for subset_doc in subset_docs - } - version_docs = list(get_versions( - project_name, - subset_ids=subset_docs_by_id.keys(), - versions=version_nums - )) - version_docs_by_parent_id = collections.defaultdict(dict) - for version_doc in version_docs: - subset_doc = subset_docs_by_id[version_doc["parent"]] - - asset_id = subset_doc["parent"] - subset_name = subset_doc["name"] - version = version_doc["name"] - if 
version_docs_by_parent_id[asset_id].get(subset_name) is None: - version_docs_by_parent_id[asset_id][subset_name] = {} - - version_docs_by_parent_id[asset_id][subset_name][version] = ( - version_doc - ) - - filtered_versions = [] - for asset_version in asset_versions: - asset_id = asset_version["asset_id"] - asset = assets_by_id[asset_id] - parent_id = asset["context_id"] - asset_doc = asset_docs_by_ftrack_id.get(parent_id) - if not asset_doc: - continue - - subsets_by_name = version_docs_by_parent_id.get(asset_doc["_id"]) - if not subsets_by_name: - continue - - subset_name = asset["name"] - version_docs_by_version = subsets_by_name.get(subset_name) - if not version_docs_by_version: - continue - - version = asset_version["version"] - version_doc = version_docs_by_version.get(version) - if version_doc: - filtered_versions.append(version_doc) - return filtered_versions - - def _get_subset_docs( - self, - project_name, - asset_docs_by_ftrack_id, - subset_names, - asset_versions, - assets_by_id - ): - asset_doc_ids = [ - asset_doc["_id"] - for asset_doc in asset_docs_by_ftrack_id.values() - ] - subset_docs = list(get_subsets( - project_name, - asset_ids=asset_doc_ids, - subset_names=subset_names - )) - subset_docs_by_parent_id = collections.defaultdict(dict) - for subset_doc in subset_docs: - asset_id = subset_doc["parent"] - subset_name = subset_doc["name"] - subset_docs_by_parent_id[asset_id][subset_name] = subset_doc - - filtered_subsets = [] - for asset_version in asset_versions: - asset_id = asset_version["asset_id"] - asset = assets_by_id[asset_id] - - parent_id = asset["context_id"] - asset_doc = asset_docs_by_ftrack_id.get(parent_id) - if not asset_doc: - continue - - subsets_by_name = subset_docs_by_parent_id.get(asset_doc["_id"]) - if not subsets_by_name: - continue - - subset_name = asset["name"] - subset_doc = subsets_by_name.get(subset_name) - if subset_doc: - filtered_subsets.append(subset_doc) - return filtered_subsets - - def _get_asset_docs(self, project_name, session, parent_ids): - asset_docs = list(get_assets( - project_name, fields=["_id", "name", "data.ftrackId"] - )) - - asset_docs_by_id = {} - asset_docs_by_name = {} - asset_docs_by_ftrack_id = {} - for asset_doc in asset_docs: - asset_id = str(asset_doc["_id"]) - asset_name = asset_doc["name"] - ftrack_id = asset_doc["data"].get("ftrackId") - - asset_docs_by_id[asset_id] = asset_doc - asset_docs_by_name[asset_name] = asset_doc - if ftrack_id: - asset_docs_by_ftrack_id[ftrack_id] = asset_doc - - attr_def = session.query(( - "select id from CustomAttributeConfiguration where key is \"{}\"" - ).format(CUST_ATTR_ID_KEY)).first() - if attr_def is None: - return asset_docs_by_ftrack_id - - avalon_mongo_id_values = query_custom_attributes( - session, [attr_def["id"]], parent_ids, True - ) - missing_ids = set(parent_ids) - for item in avalon_mongo_id_values: - if not item["value"]: - continue - asset_id = item["value"] - entity_id = item["entity_id"] - asset_doc = asset_docs_by_id.get(asset_id) - if asset_doc: - asset_docs_by_ftrack_id[entity_id] = asset_doc - missing_ids.remove(entity_id) - - entity_ids_by_name = {} - if missing_ids: - not_found_entities = session.query(( - "select id, name from TypedContext where id in ({})" - ).format(self.join_query_keys(missing_ids))).all() - entity_ids_by_name = { - entity["name"]: entity["id"] - for entity in not_found_entities - } - - for asset_name, entity_id in entity_ids_by_name.items(): - asset_doc = asset_docs_by_name.get(asset_name) - if asset_doc: - 
asset_docs_by_ftrack_id[entity_id] = asset_doc - - return asset_docs_by_ftrack_id - - def launch(self, session, entities, event): - if "values" not in event["data"]: - return { - "success": True, - "message": "Nothing to do" - } - - values = event["data"]["values"] - skipped = values.pop("__skipped__") - if skipped: - return { - "success": False, - "message": "Action skipped" - } - - user_id = event["source"]["user"]["id"] - user_entity = session.query( - "User where id is {}".format(user_id) - ).one() - - job = session.create("Job", { - "user": user_entity, - "status": "running", - "data": json.dumps({ - "description": "Delivery processing." - }) - }) - session.commit() - - try: - report = self.real_launch(session, entities, event) - - except Exception as exc: - report = { - "success": False, - "title": "Delivery failed", - "items": [{ - "type": "label", - "value": ( - "Error during delivery action process:
{}" - "

Check logs for more information." - ).format(str(exc)) - }] - } - self.log.warning( - "Failed during processing delivery action.", - exc_info=True - ) - - finally: - if report["success"]: - job["status"] = "done" - else: - job["status"] = "failed" - session.commit() - - if not report["success"]: - self.show_interface( - items=report["items"], - title=report["title"], - event=event - ) - return { - "success": False, - "message": "Errors during delivery process. See report." - } - - return report - - def real_launch(self, session, entities, event): - self.log.info("Delivery action just started.") - report_items = collections.defaultdict(list) - - values = event["data"]["values"] - - location_path = values.pop("__location_path__") - anatomy_name = values.pop("__new_anatomies__") - project_name = values.pop("__project_name__") - - repre_names = [] - for key, value in values.items(): - if value is True: - repre_names.append(key) - - if not repre_names: - return { - "success": True, - "message": "No selected components to deliver." - } - - location_path = location_path.strip() - if location_path: - location_path = os.path.normpath(location_path) - if not os.path.exists(location_path): - os.makedirs(location_path) - - self.log.debug("Collecting representations to process.") - version_ids = self._get_interest_version_ids( - project_name, session, entities - ) - repres_to_deliver = list(get_representations( - project_name, - representation_names=repre_names, - version_ids=version_ids - )) - anatomy = Anatomy(project_name) - - format_dict = get_format_dict(anatomy, location_path) - - datetime_data = get_datetime_data() - for repre in repres_to_deliver: - source_path = repre.get("data", {}).get("path") - debug_msg = "Processing representation {}".format(repre["_id"]) - if source_path: - debug_msg += " with published path {}.".format(source_path) - self.log.debug(debug_msg) - - anatomy_data = copy.deepcopy(repre["context"]) - repre_report_items = check_destination_path(repre["_id"], - anatomy, - anatomy_data, - datetime_data, - anatomy_name) - - if repre_report_items: - report_items.update(repre_report_items) - continue - - # Get source repre path - frame = repre['context'].get('frame') - - if frame: - repre["context"]["frame"] = len(str(frame)) * "#" - - repre_path = get_representation_path_with_anatomy(repre, anatomy) - # TODO add backup solution where root of path from component - # is replaced with root - args = ( - repre_path, - repre, - anatomy, - anatomy_name, - anatomy_data, - format_dict, - report_items, - self.log - ) - if not frame: - deliver_single_file(*args) - else: - deliver_sequence(*args) - - return self.report(report_items) - - def report(self, report_items): - """Returns dict with final status of delivery (success, fail etc.).""" - items = [] - - for msg, _items in report_items.items(): - if not _items: - continue - - if items: - items.append({"type": "label", "value": "---"}) - - items.append({ - "type": "label", - "value": "# {}".format(msg) - }) - if not isinstance(_items, (list, tuple)): - _items = [_items] - __items = [] - for item in _items: - __items.append(str(item)) - - items.append({ - "type": "label", - "value": '
{}'.format("
".join(__items)) - }) - - if not items: - return { - "success": True, - "message": "Delivery Finished" - } - - return { - "items": items, - "title": "Delivery report", - "success": False - } - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - Delivery(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_djvview.py b/openpype/modules/ftrack/event_handlers_user/action_djvview.py deleted file mode 100644 index cc37faacf2..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_djvview.py +++ /dev/null @@ -1,238 +0,0 @@ -import os -import time -import subprocess -from operator import itemgetter -from openpype.lib import ApplicationManager -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class DJVViewAction(BaseAction): - """Launch DJVView action.""" - identifier = "djvview-launch-action" - label = "DJV View" - description = "DJV View Launcher" - icon = statics_icon("app_icons", "djvView.png") - - type = "Application" - - allowed_types = [ - "cin", "dpx", "avi", "dv", "gif", "flv", "mkv", "mov", "mpg", "mpeg", - "mp4", "m4v", "mxf", "iff", "z", "ifl", "jpeg", "jpg", "jfif", "lut", - "1dl", "exr", "pic", "png", "ppm", "pnm", "pgm", "pbm", "rla", "rpf", - "sgi", "rgba", "rgb", "bw", "tga", "tiff", "tif", "img" - ] - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.application_manager = ApplicationManager() - self._last_check = time.time() - self._check_interval = 10 - - def _get_djv_apps(self): - app_group = self.application_manager.app_groups["djvview"] - - output = [] - for app in app_group: - executable = app.find_executable() - if executable is not None: - output.append(app) - return output - - def get_djv_apps(self): - cur_time = time.time() - if (cur_time - self._last_check) > self._check_interval: - self.application_manager.refresh() - return self._get_djv_apps() - - def discover(self, session, entities, event): - """Return available actions based on *event*. """ - selection = event["data"].get("selection", []) - if len(selection) != 1: - return False - - entityType = selection[0].get("entityType", None) - if entityType not in ["assetversion", "task"]: - return False - - if self.get_djv_apps(): - return True - return False - - def interface(self, session, entities, event): - if event["data"].get("values", {}): - return - - entity = entities[0] - versions = [] - - entity_type = entity.entity_type.lower() - if entity_type == "assetversion": - if ( - entity[ - "components" - ][0]["file_type"][1:] in self.allowed_types - ): - versions.append(entity) - else: - master_entity = entity - if entity_type == "task": - master_entity = entity["parent"] - - for asset in master_entity["assets"]: - for version in asset["versions"]: - # Get only AssetVersion of selected task - if ( - entity_type == "task" and - version["task"]["id"] != entity["id"] - ): - continue - # Get only components with allowed type - filetype = version["components"][0]["file_type"] - if filetype[1:] in self.allowed_types: - versions.append(version) - - if len(versions) < 1: - return { - "success": False, - "message": "There are no Asset Versions to open." - } - - # TODO sort them (somehow?) - enum_items = [] - first_value = None - for app in self.get_djv_apps(): - if first_value is None: - first_value = app.full_name - enum_items.append({ - "value": app.full_name, - "label": app.full_label - }) - - if not enum_items: - return { - "success": False, - "message": "Couldn't find DJV executable." 
- } - - items = [ - { - "type": "enumerator", - "label": "DJV version:", - "name": "djv_app_name", - "data": enum_items, - "value": first_value - }, - { - "type": "label", - "value": "---" - } - ] - version_items = [] - base_label = "v{0} - {1} - {2}" - default_component = None - last_available = None - select_value = None - for version in versions: - for component in version["components"]: - label = base_label.format( - str(version["version"]).zfill(3), - version["asset"]["type"]["name"], - component["name"] - ) - - try: - location = component[ - "component_locations" - ][0]["location"] - file_path = location.get_filesystem_path(component) - except Exception: - file_path = component[ - "component_locations" - ][0]["resource_identifier"] - - if os.path.isdir(os.path.dirname(file_path)): - last_available = file_path - if component["name"] == default_component: - select_value = file_path - version_items.append( - {"label": label, "value": file_path} - ) - - if len(version_items) == 0: - return { - "success": False, - "message": ( - "There are no Asset Versions with accessible path." - ) - } - - item = { - "label": "Items to view", - "type": "enumerator", - "name": "path", - "data": sorted( - version_items, - key=itemgetter("label"), - reverse=True - ) - } - if select_value is not None: - item["value"] = select_value - else: - item["value"] = last_available - - items.append(item) - - return {"items": items} - - def launch(self, session, entities, event): - """Callback method for DJVView action.""" - - # Launching application - event_values = event["data"].get("values") - if not event_values: - return - - djv_app_name = event_values["djv_app_name"] - app = self.application_manager.applications.get(djv_app_name) - executable = None - if app is not None: - executable = app.find_executable() - - if not executable: - return { - "success": False, - "message": "Couldn't find DJV executable." - } - - filpath = os.path.normpath(event_values["path"]) - - cmd = [ - # DJV path - str(executable), - # PATH TO COMPONENT - filpath - ] - - try: - # Run DJV with these commands - _process = subprocess.Popen(cmd) - # Keep process in memory for some time - time.sleep(0.1) - - except FileNotFoundError: - return { - "success": False, - "message": "File \"{}\" was not found.".format( - os.path.basename(filpath) - ) - } - - return True - - -def register(session): - """Register hooks.""" - - DJVViewAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py deleted file mode 100644 index 36d29db96b..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ /dev/null @@ -1,501 +0,0 @@ -import os -import sys -import json -import collections -import tempfile -import datetime - -import ftrack_api - -from openpype.client import ( - get_project, - get_assets, -) -from openpype.settings import get_project_settings, get_system_settings -from openpype.lib import StringTemplate -from openpype.pipeline import Anatomy -from openpype.pipeline.template_data import get_template_data -from openpype.pipeline.workfile import get_workfile_template_key -from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype_modules.ftrack.lib.avalon_sync import create_chunks - -NOT_SYNCHRONIZED_TITLE = "Not synchronized" - - -class FillWorkfileAttributeAction(BaseAction): - """Action fill work filename into custom attribute on tasks. 
- - Prerequirements are that the project is synchronized so it is possible to - access project anatomy and project/asset documents. Tasks that are not - synchronized are skipped too. - """ - - identifier = "fill.workfile.attr" - label = "OpenPype Admin" - variant = "- Fill workfile attribute" - description = "Precalculate and fill workfile name into a custom attribute" - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - - settings_key = "fill_workfile_attribute" - - def discover(self, session, entities, event): - """ Validate selection. """ - is_valid = False - for ent in event["data"]["selection"]: - # Ignore entities that are not tasks or projects - if ent["entityType"].lower() in ["show", "task"]: - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def launch(self, session, entities, event): - # Separate entities and get project entity - project_entity = None - for entity in entities: - if project_entity is None: - project_entity = self.get_project_from_entity(entity) - break - - if not project_entity: - return { - "message": ( - "Couldn't find project entity." - " Could be an issue with permissions." - ), - "success": False - } - - # Get project settings and check if custom attribute where workfile - # should be set is defined. - project_name = project_entity["full_name"] - project_settings = get_project_settings(project_name) - custom_attribute_key = ( - project_settings - .get("ftrack", {}) - .get("user_handlers", {}) - .get(self.settings_key, {}) - .get("custom_attribute_key") - ) - if not custom_attribute_key: - return { - "success": False, - "message": "Custom attribute key is not set in settings" - } - - # Try to find the custom attribute - # - get Task type object id - task_obj_type = session.query( - "select id from ObjectType where name is \"Task\"" - ).one() - # - get text custom attribute type - text_type = session.query( - "select id from CustomAttributeType where name is \"text\"" - ).one() - # - find the attribute - attr_conf = session.query( - ( - "select id, key from CustomAttributeConfiguration" - " where object_type_id is \"{}\"" - " and type_id is \"{}\"" - " and key is \"{}\"" - ).format( - task_obj_type["id"], text_type["id"], custom_attribute_key - ) - ).first() - if not attr_conf: - return { - "success": False, - "message": ( - "Could not find Task (text) Custom attribute \"{}\"" - ).format(custom_attribute_key) - } - - # Store report information - report = collections.defaultdict(list) - user_entity = session.query( - "User where id is {}".format(event["source"]["user"]["id"]) - ).one() - job_entity = session.create("Job", { - "user": user_entity, - "status": "running", - "data": json.dumps({ - "description": "(0/3) Fill of workfiles started" - }) - }) - session.commit() - - try: - self.in_job_process( - session, - entities, - job_entity, - project_entity, - project_settings, - attr_conf, - report - ) - except Exception: - self.log.error( - "Fill of workfiles to custom attribute failed", exc_info=True - ) - session.rollback() - - description = "Fill of workfiles Failed (Download traceback)" - self.add_traceback_to_job( - job_entity, session, sys.exc_info(), description - ) - return { - "message": ( - "Fill of workfiles failed." - " Check job for more information" - ), - "success": False - } - - job_entity["status"] = "done" - job_entity["data"] = json.dumps({ - "description": "Fill of workfiles completed." 
- }) - session.commit() - if report: - temp_obj = tempfile.NamedTemporaryFile( - mode="w", - prefix="openpype_ftrack_", - suffix=".json", - delete=False - ) - temp_obj.close() - temp_filepath = temp_obj.name - with open(temp_filepath, "w") as temp_file: - json.dump(report, temp_file) - - component_name = "{}_{}".format( - "FillWorkfilesReport", - datetime.datetime.now().strftime("%y-%m-%d-%H%M") - ) - self.add_file_component_to_job( - job_entity, session, temp_filepath, component_name - ) - # Delete temp file - os.remove(temp_filepath) - self._show_report(event, report, project_name) - return { - "message": ( - "Fill of workfiles finished with few issues." - " Check job for more information" - ), - "success": True - } - - return { - "success": True, - "message": "Finished with filling of work filenames" - } - - def _show_report(self, event, report, project_name): - items = [] - title = "Fill workfiles report ({}):".format(project_name) - - for subtitle, lines in report.items(): - if items: - items.append({ - "type": "label", - "value": "---" - }) - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": '
{}'.format("
".join(lines)) - }) - - self.show_interface( - items=items, - title=title, - event=event - ) - - def in_job_process( - self, - session, - entities, - job_entity, - project_entity, - project_settings, - attr_conf, - report - ): - task_entities = [] - other_entities = [] - project_selected = False - for entity in entities: - ent_type_low = entity.entity_type.lower() - if ent_type_low == "project": - project_selected = True - break - - elif ent_type_low == "task": - task_entities.append(entity) - else: - other_entities.append(entity) - - project_name = project_entity["full_name"] - - # Find matching asset documents and map them by ftrack task entities - # - result stored to 'asset_docs_with_task_entities' is list with - # tuple `(asset document, [task entitis, ...])` - # Quety all asset documents - asset_docs = list(get_assets(project_name)) - job_entity["data"] = json.dumps({ - "description": "(1/3) Asset documents queried." - }) - session.commit() - - # When project is selected then we can query whole project - if project_selected: - asset_docs_with_task_entities = self._get_asset_docs_for_project( - session, project_entity, asset_docs, report - ) - - else: - asset_docs_with_task_entities = self._get_tasks_for_selection( - session, other_entities, task_entities, asset_docs, report - ) - - job_entity["data"] = json.dumps({ - "description": "(2/3) Queried related task entities." - }) - session.commit() - - # Keep placeholders in the template unfilled - host_name = "{app}" - extension = "{ext}" - project_doc = get_project(project_name) - project_settings = get_project_settings(project_name) - system_settings = get_system_settings() - anatomy = Anatomy(project_name) - templates_by_key = {} - - operations = [] - for asset_doc, task_entities in asset_docs_with_task_entities: - for task_entity in task_entities: - workfile_data = get_template_data( - project_doc, - asset_doc, - task_entity["name"], - host_name, - system_settings - ) - # Use version 1 for each workfile - workfile_data["version"] = 1 - workfile_data["ext"] = extension - - task_type = workfile_data["task"]["type"] - template_key = get_workfile_template_key( - task_type, - host_name, - project_name, - project_settings=project_settings - ) - if template_key in templates_by_key: - template = templates_by_key[template_key] - else: - template = StringTemplate( - anatomy.templates[template_key]["file"] - ) - templates_by_key[template_key] = template - - result = template.format(workfile_data) - if not result.solved: - # TODO report - pass - else: - table_values = collections.OrderedDict(( - ("configuration_id", attr_conf["id"]), - ("entity_id", task_entity["id"]) - )) - operations.append( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - table_values, - "value", - ftrack_api.symbol.NOT_SET, - str(result) - ) - ) - - if operations: - for sub_operations in create_chunks(operations, 50): - for op in sub_operations: - session.recorded_operations.push(op) - session.commit() - - job_entity["data"] = json.dumps({ - "description": "(3/3) Set custom attribute values." 
- }) - session.commit() - - def _get_entity_path(self, entity): - path_items = [] - for item in entity["link"]: - if item["type"].lower() != "project": - path_items.append(item["name"]) - return "/".join(path_items) - - def _get_asset_docs_for_project( - self, session, project_entity, asset_docs, report - ): - asset_docs_task_names = {} - - for asset_doc in asset_docs: - asset_data = asset_doc["data"] - ftrack_id = asset_data.get("ftrackId") - if not ftrack_id: - hierarchy = list(asset_data.get("parents") or []) - hierarchy.append(asset_doc["name"]) - path = "/".join(hierarchy) - report[NOT_SYNCHRONIZED_TITLE].append(path) - continue - - asset_tasks = asset_data.get("tasks") or {} - asset_docs_task_names[ftrack_id] = ( - asset_doc, list(asset_tasks.keys()) - ) - - task_entities = session.query(( - "select id, name, parent_id, link from Task where project_id is {}" - ).format(project_entity["id"])).all() - task_entities_by_parent_id = collections.defaultdict(list) - for task_entity in task_entities: - parent_id = task_entity["parent_id"] - task_entities_by_parent_id[parent_id].append(task_entity) - - output = [] - for ftrack_id, item in asset_docs_task_names.items(): - asset_doc, task_names = item - valid_task_entities = [] - for task_entity in task_entities_by_parent_id[ftrack_id]: - if task_entity["name"] in task_names: - valid_task_entities.append(task_entity) - else: - path = self._get_entity_path(task_entity) - report[NOT_SYNCHRONIZED_TITLE].append(path) - - if valid_task_entities: - output.append((asset_doc, valid_task_entities)) - - return output - - def _get_tasks_for_selection( - self, session, other_entities, task_entities, asset_docs, report - ): - all_tasks = object() - asset_docs_by_ftrack_id = {} - asset_docs_by_parent_id = collections.defaultdict(list) - for asset_doc in asset_docs: - asset_data = asset_doc["data"] - ftrack_id = asset_data.get("ftrackId") - parent_id = asset_data.get("visualParent") - asset_docs_by_parent_id[parent_id].append(asset_doc) - if ftrack_id: - asset_docs_by_ftrack_id[ftrack_id] = asset_doc - - missing_doc_ftrack_ids = {} - all_tasks_ids = set() - task_names_by_ftrack_id = collections.defaultdict(list) - for other_entity in other_entities: - ftrack_id = other_entity["id"] - if ftrack_id not in asset_docs_by_ftrack_id: - missing_doc_ftrack_ids[ftrack_id] = None - continue - all_tasks_ids.add(ftrack_id) - task_names_by_ftrack_id[ftrack_id] = all_tasks - - for task_entity in task_entities: - parent_id = task_entity["parent_id"] - if parent_id not in asset_docs_by_ftrack_id: - missing_doc_ftrack_ids[parent_id] = None - continue - - if all_tasks_ids not in all_tasks_ids: - task_names_by_ftrack_id[ftrack_id].append(task_entity["name"]) - - ftrack_ids = set() - asset_doc_with_task_names_by_id = {} - for ftrack_id, task_names in task_names_by_ftrack_id.items(): - asset_doc = asset_docs_by_ftrack_id[ftrack_id] - asset_data = asset_doc["data"] - asset_tasks = asset_data.get("tasks") or {} - - if task_names is all_tasks: - task_names = list(asset_tasks.keys()) - else: - new_task_names = [] - for task_name in task_names: - if task_name in asset_tasks: - new_task_names.append(task_name) - continue - - if ftrack_id not in missing_doc_ftrack_ids: - missing_doc_ftrack_ids[ftrack_id] = [] - if missing_doc_ftrack_ids[ftrack_id] is not None: - missing_doc_ftrack_ids[ftrack_id].append(task_name) - - task_names = new_task_names - - if task_names: - ftrack_ids.add(ftrack_id) - asset_doc_with_task_names_by_id[ftrack_id] = ( - asset_doc, task_names - ) - - task_entities = 
session.query(( - "select id, name, parent_id from Task where parent_id in ({})" - ).format(self.join_query_keys(ftrack_ids))).all() - task_entitiy_by_parent_id = collections.defaultdict(list) - for task_entity in task_entities: - parent_id = task_entity["parent_id"] - task_entitiy_by_parent_id[parent_id].append(task_entity) - - output = [] - for ftrack_id, item in asset_doc_with_task_names_by_id.items(): - asset_doc, task_names = item - valid_task_entities = [] - for task_entity in task_entitiy_by_parent_id[ftrack_id]: - if task_entity["name"] in task_names: - valid_task_entities.append(task_entity) - else: - if ftrack_id not in missing_doc_ftrack_ids: - missing_doc_ftrack_ids[ftrack_id] = [] - if missing_doc_ftrack_ids[ftrack_id] is not None: - missing_doc_ftrack_ids[ftrack_id].append(task_name) - if valid_task_entities: - output.append((asset_doc, valid_task_entities)) - - # Store report information about not synchronized entities - if missing_doc_ftrack_ids: - missing_entities = session.query( - "select id, link from TypedContext where id in ({})".format( - self.join_query_keys(missing_doc_ftrack_ids.keys()) - ) - ).all() - for missing_entity in missing_entities: - path = self._get_entity_path(missing_entity) - task_names = missing_doc_ftrack_ids[missing_entity["id"]] - if task_names is None: - report[NOT_SYNCHRONIZED_TITLE].append(path) - else: - for task_name in task_names: - task_path = "/".join([path, task_name]) - report[NOT_SYNCHRONIZED_TITLE].append(task_path) - - return output - - -def register(session): - FillWorkfileAttributeAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py b/openpype/modules/ftrack/event_handlers_user/action_job_killer.py deleted file mode 100644 index 250670f016..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py +++ /dev/null @@ -1,134 +0,0 @@ -import json -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class JobKiller(BaseAction): - """Kill jobs that are marked as running.""" - - identifier = "job.killer" - label = "OpenPype Admin" - variant = "- Job Killer" - description = "Killing selected running jobs" - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - settings_key = "job_killer" - - def discover(self, session, entities, event): - """Check if action is available for user role.""" - return self.valid_roles(session, entities, event) - - def interface(self, session, entities, event): - if event["data"].get("values"): - return - - title = "Select jobs to kill" - - jobs = session.query( - "select id, user_id, status, created_at, data from Job" - " where status in (\"queued\", \"running\")" - ).all() - if not jobs: - return { - "success": True, - "message": "Didn't find any running jobs" - } - - # Collect user ids from jobs - user_ids = set() - for job in jobs: - user_id = job["user_id"] - if user_id: - user_ids.add(user_id) - - # Store usernames by their ids - usernames_by_id = {} - if user_ids: - users = session.query( - "select id, username from User where id in ({})".format( - self.join_query_keys(user_ids) - ) - ).all() - for user in users: - usernames_by_id[user["id"]] = user["username"] - - items = [] - for job in jobs: - try: - data = json.loads(job["data"]) - description = data["description"] - except Exception: - description = "*No description*" - user_id = job["user_id"] - username = usernames_by_id.get(user_id) or "Unknown user" - created = job["created_at"].strftime('%d.%m.%Y %H:%M:%S') - label = "{} - {} - {}".format( 
- username, description, created - ) - item_label = { - "type": "label", - "value": label - } - item = { - "name": job["id"], - "type": "boolean", - "value": False - } - if len(items) > 0: - items.append({"type": "label", "value": "---"}) - items.append(item_label) - items.append(item) - - return { - "items": items, - "title": title - } - - def launch(self, session, entities, event): - if "values" not in event["data"]: - return - - values = event["data"]["values"] - if len(values) < 1: - return { - "success": True, - "message": "No jobs to kill!" - } - - job_ids = set() - for job_id, kill_job in values.items(): - if kill_job: - job_ids.add(job_id) - - jobs = session.query( - "select id, status from Job where id in ({})".format( - self.join_query_keys(job_ids) - ) - ).all() - - # Update all the queried jobs, setting the status to failed. - for job in jobs: - try: - origin_status = job["status"] - self.log.debug(( - 'Changing Job ({}) status: {} -> failed' - ).format(job["id"], origin_status)) - - job["status"] = "failed" - session.commit() - - except Exception: - session.rollback() - self.log.warning(( - "Changing Job ({}) has failed" - ).format(job["id"])) - - self.log.info("All selected jobs were killed Successfully!") - return { - "success": True, - "message": "All selected jobs were killed Successfully!" - } - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - JobKiller(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py b/openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py deleted file mode 100644 index 825fd97b06..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_multiple_notes.py +++ /dev/null @@ -1,118 +0,0 @@ -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class MultipleNotes(BaseAction): - '''Edit meta data action.''' - - #: Action identifier. - identifier = 'multiple.notes' - #: Action label. - label = 'Multiple Notes' - #: Action description. - description = 'Add same note to multiple entities' - icon = statics_icon("ftrack", "action_icons", "MultipleNotes.svg") - - def discover(self, session, entities, event): - ''' Validation ''' - valid = True - - # Check for multiple selection. - if len(entities) < 2: - valid = False - - # Check for valid entities. 
- valid_entity_types = ['assetversion', 'task'] - for entity in entities: - if entity.entity_type.lower() not in valid_entity_types: - valid = False - break - - return valid - - def interface(self, session, entities, event): - if not event['data'].get('values', {}): - note_label = { - 'type': 'label', - 'value': '# Enter note: #' - } - - note_value = { - 'name': 'note', - 'type': 'textarea' - } - - category_label = { - 'type': 'label', - 'value': '## Category: ##' - } - - category_data = [] - category_data.append({ - 'label': '- None -', - 'value': 'none' - }) - all_categories = session.query('NoteCategory').all() - for cat in all_categories: - category_data.append({ - 'label': cat['name'], - 'value': cat['id'] - }) - category_value = { - 'type': 'enumerator', - 'name': 'category', - 'data': category_data, - 'value': 'none' - } - - splitter = { - 'type': 'label', - 'value': '{}'.format(200 * "-") - } - - items = [] - items.append(note_label) - items.append(note_value) - items.append(splitter) - items.append(category_label) - items.append(category_value) - return items - - def launch(self, session, entities, event): - if 'values' not in event['data']: - return - - values = event['data']['values'] - if len(values) <= 0 or 'note' not in values: - return False - # Get Note text - note_value = values['note'] - if note_value.lower().strip() == '': - return False - # Get User - user = session.query( - 'User where username is "{}"'.format(session.api_user) - ).one() - # Base note data - note_data = { - 'content': note_value, - 'author': user - } - # Get category - category_value = values['category'] - if category_value != 'none': - category = session.query( - 'NoteCategory where id is "{}"'.format(category_value) - ).one() - note_data['category'] = category - # Create notes for entities - for entity in entities: - new_note = session.create('Note', note_data) - entity['notes'].append(new_note) - session.commit() - return True - - -def register(session): - '''Register plugin. 
Called when used as an plugin.''' - - MultipleNotes(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py deleted file mode 100644 index 19d5701e08..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ /dev/null @@ -1,448 +0,0 @@ -import json -import copy - -from openpype.client import get_project, create_project -from openpype.settings import ProjectSettings, SaveWarningExc - -from openpype_modules.ftrack.lib import ( - BaseAction, - statics_icon, - get_openpype_attr, - CUST_ATTR_AUTO_SYNC -) - - -class PrepareProjectLocal(BaseAction): - """Prepare project attributes in Anatomy.""" - - identifier = "prepare.project.local" - label = "Prepare Project" - description = "Set basic attributes on the project" - icon = statics_icon("ftrack", "action_icons", "PrepareProject.svg") - - role_list = ["Pypeclub", "Administrator", "Project Manager"] - - settings_key = "prepare_project" - - # Key to store info about triggering create folder structure - create_project_structure_key = "create_folder_structure" - create_project_structure_identifier = "create.project.structure" - item_splitter = {"type": "label", "value": "---"} - _keys_order = ( - "fps", - "frameStart", - "frameEnd", - "handleStart", - "handleEnd", - "clipIn", - "clipOut", - "resolutionHeight", - "resolutionWidth", - "pixelAspect", - "applications", - "tools_env", - "library_project", - ) - - def discover(self, session, entities, event): - """Show only on project.""" - if ( - len(entities) != 1 - or entities[0].entity_type.lower() != "project" - ): - return False - - return self.valid_roles(session, entities, event) - - def interface(self, session, entities, event): - if event['data'].get('values', {}): - return - - # Inform user that this may take a while - self.show_message(event, "Preparing data... Please wait", True) - self.log.debug("Preparing data which will be shown") - - self.log.debug("Loading custom attributes") - - project_entity = entities[0] - project_name = project_entity["full_name"] - - project_settings = ProjectSettings(project_name) - - project_anatom_settings = project_settings["project_anatomy"] - root_items = self.prepare_root_items(project_anatom_settings) - - ca_items, multiselect_enumerators = ( - self.prepare_custom_attribute_items(project_anatom_settings) - ) - - self.log.debug("Heavy items are ready. Preparing last items group.") - - title = "Prepare Project" - items = [] - - # Add root items - items.extend(root_items) - - items.append(self.item_splitter) - items.append({ - "type": "label", - "value": "
Set basic Attributes:
" - }) - - items.extend(ca_items) - - # Set value of auto synchronization - auto_sync_value = project_entity["custom_attributes"].get( - CUST_ATTR_AUTO_SYNC, False - ) - auto_sync_item = { - "name": CUST_ATTR_AUTO_SYNC, - "type": "boolean", - "value": auto_sync_value, - "label": "AutoSync to Avalon" - } - # Add autosync attribute - items.append(auto_sync_item) - - # This item will be last before enumerators - # Ask if want to trigger Action Create Folder Structure - create_project_structure_checked = ( - project_settings - ["project_settings"] - ["ftrack"] - ["user_handlers"] - ["prepare_project"] - ["create_project_structure_checked"] - ).value - items.append({ - "type": "label", - "value": "
Want to create basic Folder Structure?
" - }) - items.append({ - "name": self.create_project_structure_key, - "type": "boolean", - "value": create_project_structure_checked, - "label": "Check if Yes" - }) - - # Add enumerator items at the end - for item in multiselect_enumerators: - items.append(item) - - return { - "items": items, - "title": title - } - - def prepare_root_items(self, project_anatom_settings): - self.log.debug("Root items preparation begins.") - - root_items = [] - root_items.append({ - "type": "label", - "value": "
Check your Project root settings
" - }) - root_items.append({ - "type": "label", - "value": ( - "
NOTE: Roots are crucial for path filling" - " (and creating folder structure).
" - ) - }) - root_items.append({ - "type": "label", - "value": ( - "
WARNING: Do not change roots on running project," - " that will cause workflow issues.
" - ) - }) - - empty_text = "Enter root path here..." - - roots_entity = project_anatom_settings["roots"] - for root_name, root_entity in roots_entity.items(): - root_items.append(self.item_splitter) - root_items.append({ - "type": "label", - "value": "Root: \"{}\"".format(root_name) - }) - for platform_name, value_entity in root_entity.items(): - root_items.append({ - "label": platform_name, - "name": "__root__{}__{}".format(root_name, platform_name), - "type": "text", - "value": value_entity.value, - "empty_text": empty_text - }) - - root_items.append({ - "type": "hidden", - "name": "__rootnames__", - "value": json.dumps(list(roots_entity.keys())) - }) - - self.log.debug("Root items preparation ended.") - return root_items - - def _attributes_to_set(self, project_anatom_settings): - attributes_to_set = {} - - attribute_values_by_key = {} - for key, entity in project_anatom_settings["attributes"].items(): - attribute_values_by_key[key] = entity.value - - cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True) - - for attr in hier_cust_attrs: - key = attr["key"] - if key.startswith("avalon_"): - continue - attributes_to_set[key] = { - "label": attr["label"], - "object": attr, - "default": attribute_values_by_key.get(key) - } - - for attr in cust_attrs: - if attr["entity_type"].lower() != "show": - continue - key = attr["key"] - if key.startswith("avalon_"): - continue - attributes_to_set[key] = { - "label": attr["label"], - "object": attr, - "default": attribute_values_by_key.get(key) - } - - # Sort by label - attributes_to_set = dict(sorted( - attributes_to_set.items(), - key=lambda x: x[1]["label"] - )) - return attributes_to_set - - def prepare_custom_attribute_items(self, project_anatom_settings): - items = [] - multiselect_enumerators = [] - attributes_to_set = self._attributes_to_set(project_anatom_settings) - - self.log.debug("Preparing interface for keys: \"{}\"".format( - str([key for key in attributes_to_set]) - )) - - attribute_keys = set(attributes_to_set.keys()) - keys_order = [] - for key in self._keys_order: - if key in attribute_keys: - keys_order.append(key) - - attribute_keys = attribute_keys - set(keys_order) - for key in sorted(attribute_keys): - keys_order.append(key) - - for key in keys_order: - in_data = attributes_to_set[key] - attr = in_data["object"] - - # initial item definition - item = { - "name": key, - "label": in_data["label"] - } - - # cust attr type - may have different visualization - type_name = attr["type"]["name"].lower() - easy_types = ["text", "boolean", "date", "number"] - - easy_type = False - if type_name in easy_types: - easy_type = True - - elif type_name == "enumerator": - - attr_config = json.loads(attr["config"]) - attr_config_data = json.loads(attr_config["data"]) - - if attr_config["multiSelect"] is True: - multiselect_enumerators.append(self.item_splitter) - multiselect_enumerators.append({ - "type": "label", - "value": "
{}
".format(in_data["label"]) - }) - - default = in_data["default"] - names = [] - for option in sorted( - attr_config_data, key=lambda x: x["menu"] - ): - name = option["value"] - new_name = "__{}__{}".format(key, name) - names.append(new_name) - item = { - "name": new_name, - "type": "boolean", - "label": "- {}".format(option["menu"]) - } - if default: - if isinstance(default, (list, tuple)): - if name in default: - item["value"] = True - else: - if name == default: - item["value"] = True - - multiselect_enumerators.append(item) - - multiselect_enumerators.append({ - "type": "hidden", - "name": "__hidden__{}".format(key), - "value": json.dumps(names) - }) - else: - easy_type = True - item["data"] = attr_config_data - - else: - self.log.warning(( - "Custom attribute \"{}\" has type \"{}\"." - " I don't know how to handle" - ).format(key, type_name)) - items.append({ - "type": "label", - "value": ( - "!!! Can't handle Custom attritubte type \"{}\"" - " (key: \"{}\")" - ).format(type_name, key) - }) - - if easy_type: - item["type"] = type_name - - # default value in interface - default = in_data["default"] - if default is not None: - item["value"] = default - - items.append(item) - - return items, multiselect_enumerators - - def launch(self, session, entities, event): - in_data = event["data"].get("values") - if not in_data: - return - - create_project_structure_checked = in_data.pop( - self.create_project_structure_key - ) - - root_values = {} - root_key = "__root__" - for key in tuple(in_data.keys()): - if key.startswith(root_key): - _key = key[len(root_key):] - root_values[_key] = in_data.pop(key) - - root_names = in_data.pop("__rootnames__", None) - root_data = {} - for root_name in json.loads(root_names): - root_data[root_name] = {} - for key, value in tuple(root_values.items()): - prefix = "{}__".format(root_name) - if not key.startswith(prefix): - continue - - _key = key[len(prefix):] - root_data[root_name][_key] = value - - # Find hidden items for multiselect enumerators - keys_to_process = [] - for key in in_data: - if key.startswith("__hidden__"): - keys_to_process.append(key) - - self.log.debug("Preparing data for Multiselect Enumerators") - enumerators = {} - for key in keys_to_process: - new_key = key.replace("__hidden__", "") - enumerator_items = in_data.pop(key) - enumerators[new_key] = json.loads(enumerator_items) - - # find values set for multiselect enumerator - for key, enumerator_items in enumerators.items(): - in_data[key] = [] - - name = "__{}__".format(key) - - for item in enumerator_items: - value = in_data.pop(item) - if value is True: - new_key = item.replace(name, "") - in_data[key].append(new_key) - - self.log.debug("Setting Custom Attribute values") - - project_entity = entities[0] - project_name = project_entity["full_name"] - - # Try to find project document - project_doc = get_project(project_name) - - # Create project if is not available - # - creation is required to be able set project anatomy and attributes - if not project_doc: - project_code = project_entity["name"] - self.log.info("Creating project \"{} [{}]\"".format( - project_name, project_code - )) - create_project(project_name, project_code) - self.trigger_event( - "openpype.project.created", - {"project_name": project_name} - ) - - project_settings = ProjectSettings(project_name) - project_anatomy_settings = project_settings["project_anatomy"] - project_anatomy_settings["roots"] = root_data - - custom_attribute_values = {} - attributes_entity = project_anatomy_settings["attributes"] - for key, value 
in in_data.items(): - if key not in attributes_entity: - custom_attribute_values[key] = value - else: - attributes_entity[key] = value - - try: - project_settings.save() - except SaveWarningExc as exc: - self.log.info("Few warnings happened during settings save:") - for warning in exc.warnings: - self.log.info(str(warning)) - - # Change custom attributes on project - if custom_attribute_values: - for key, value in custom_attribute_values.items(): - project_entity["custom_attributes"][key] = value - self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value)) - session.commit() - - # Trigger create project structure action - if create_project_structure_checked: - trigger_identifier = "{}.{}".format( - self.create_project_structure_identifier, - self.process_identifier() - ) - self.trigger_action(trigger_identifier, event) - - event_data = copy.deepcopy(in_data) - event_data["project_name"] = project_name - self.trigger_event("openpype.project.prepared", event_data) - return True - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - PrepareProjectLocal(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py deleted file mode 100644 index 39cf33d605..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ /dev/null @@ -1,331 +0,0 @@ -import os -import subprocess -import traceback -import json - -import ftrack_api - -from openpype.client import ( - get_asset_by_name, - get_subset_by_name, - get_version_by_name, - get_representation_by_name -) -from openpype.pipeline import ( - get_representation_path, - AvalonMongoDB, - Anatomy, -) -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class RVAction(BaseAction): - """ Launch RV action """ - identifier = "rv.launch.action" - label = "rv" - description = "rv Launcher" - icon = statics_icon("ftrack", "action_icons", "RV.png") - - type = 'Application' - - allowed_types = ["img", "mov", "exr", "mp4"] - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # QUESTION load RV application data from AppplicationManager? - rv_path = None - - # RV_HOME should be set if properly installed - if os.environ.get('RV_HOME'): - rv_path = os.path.join( - os.environ.get('RV_HOME'), - 'bin', - 'rv' - ) - if not os.path.exists(rv_path): - rv_path = None - - if not rv_path: - self.log.info("RV path was not found.") - self.ignore_me = True - - self.rv_path = rv_path - - def discover(self, session, entities, event): - """Return available actions based on *event*. """ - return True - - def preregister(self): - if self.rv_path is None: - return ( - 'RV is not installed or paths in presets are not set correctly' - ) - return True - - def get_components_from_entity(self, session, entity, components): - """Get components from various entity types. - - The components dictionary is modified in place, so nothing is returned. 
- - Args: - entity (Ftrack entity) - components (dict) - """ - - if entity.entity_type.lower() == "assetversion": - for component in entity["components"]: - if component["file_type"][1:] not in self.allowed_types: - continue - - try: - components[entity["asset"]["parent"]["name"]].append( - component - ) - except KeyError: - components[entity["asset"]["parent"]["name"]] = [component] - - return - - if entity.entity_type.lower() == "task": - query = "AssetVersion where task_id is '{0}'".format(entity["id"]) - for assetversion in session.query(query): - self.get_components_from_entity( - session, assetversion, components - ) - - return - - if entity.entity_type.lower() == "shot": - query = "AssetVersion where asset.parent.id is '{0}'".format( - entity["id"] - ) - for assetversion in session.query(query): - self.get_components_from_entity( - session, assetversion, components - ) - - return - - raise NotImplementedError( - "\"{}\" entity type is not implemented yet.".format( - entity.entity_type - ) - ) - - def interface(self, session, entities, event): - if event['data'].get('values', {}): - return - - user = session.query( - "User where username is '{0}'".format( - os.environ["FTRACK_API_USER"] - ) - ).one() - job = session.create( - "Job", - { - "user": user, - "status": "running", - "data": json.dumps({ - "description": "RV: Collecting components." - }) - } - ) - # Commit to feedback to user. - session.commit() - - items = [] - try: - items = self.get_interface_items(session, entities) - except Exception: - self.log.error(traceback.format_exc()) - job["status"] = "failed" - else: - job["status"] = "done" - - # Commit to end job. - session.commit() - - return {"items": items} - - def get_interface_items(self, session, entities): - - components = {} - for entity in entities: - self.get_components_from_entity(session, entity, components) - - # Sort by version - for parent_name, entities in components.items(): - version_mapping = {} - for entity in entities: - try: - version_mapping[entity["version"]["version"]].append( - entity - ) - except KeyError: - version_mapping[entity["version"]["version"]] = [entity] - - # Sort same versions by date. - for version, entities in version_mapping.items(): - version_mapping[version] = sorted( - entities, key=lambda x: x["version"]["date"], reverse=True - ) - - components[parent_name] = [] - for version in reversed(sorted(version_mapping.keys())): - components[parent_name].extend(version_mapping[version]) - - # Items to present to user. - items = [] - label = "{} - v{} - {}" - for parent_name, entities in components.items(): - data = [] - for entity in entities: - data.append( - { - "label": label.format( - entity["version"]["asset"]["name"], - str(entity["version"]["version"]).zfill(3), - entity["file_type"][1:] - ), - "value": entity["id"] - } - ) - - items.append( - { - "label": parent_name, - "type": "enumerator", - "name": parent_name, - "data": data, - "value": data[0]["value"] - } - ) - - return items - - def launch(self, session, entities, event): - """Callback method for RV action.""" - # Launching application - if "values" not in event["data"]: - return - - user = session.query( - "User where username is '{0}'".format( - os.environ["FTRACK_API_USER"] - ) - ).one() - job = session.create( - "Job", - { - "user": user, - "status": "running", - "data": json.dumps({ - "description": "RV: Collecting file paths." - }) - } - ) - # Commit to feedback to user. 
- session.commit() - - paths = [] - try: - paths = self.get_file_paths(session, event) - except Exception: - self.log.error(traceback.format_exc()) - job["status"] = "failed" - else: - job["status"] = "done" - - # Commit to end job. - session.commit() - - args = [os.path.normpath(self.rv_path)] - - fps = entities[0].get("custom_attributes", {}).get("fps", None) - if fps is not None: - args.extend(["-fps", str(fps)]) - - args.extend(paths) - - self.log.info("Running rv: {}".format(args)) - - subprocess.Popen(args) - - return True - - def get_file_paths(self, session, event): - """Get file paths from selected components.""" - - link = session.get( - "Component", list(event["data"]["values"].values())[0] - )["version"]["asset"]["parent"]["link"][0] - project = session.get(link["type"], link["id"]) - project_name = project["full_name"] - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name - anatomy = Anatomy(project_name) - - location = ftrack_api.Session().pick_location() - - paths = [] - for parent_name in sorted(event["data"]["values"].keys()): - component = session.get( - "Component", event["data"]["values"][parent_name] - ) - - # Newer publishes have the source referenced in Ftrack. - online_source = False - for neighbour_component in component["version"]["components"]: - if neighbour_component["name"] != "ftrackreview-mp4_src": - continue - - paths.append( - location.get_filesystem_path(neighbour_component) - ) - online_source = True - - if online_source: - continue - - subset_name = component["version"]["asset"]["name"] - version_name = component["version"]["version"] - representation_name = component["file_type"][1:] - - asset_doc = get_asset_by_name( - project_name, parent_name, fields=["_id"] - ) - subset_doc = get_subset_by_name( - project_name, - subset_name=subset_name, - asset_id=asset_doc["_id"] - ) - version_doc = get_version_by_name( - project_name, - version=version_name, - subset_id=subset_doc["_id"] - ) - repre_doc = get_representation_by_name( - project_name, - version_id=version_doc["_id"], - representation_name=representation_name - ) - if not repre_doc: - repre_doc = get_representation_by_name( - project_name, - version_id=version_doc["_id"], - representation_name="preview" - ) - - paths.append(get_representation_path( - repre_doc, root=anatomy.roots, dbcon=dbcon - )) - - return paths - - -def register(session): - """Register hooks.""" - - RVAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_seed.py b/openpype/modules/ftrack/event_handlers_user/action_seed.py deleted file mode 100644 index 657cd07a9f..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_seed.py +++ /dev/null @@ -1,436 +0,0 @@ -import os -from operator import itemgetter -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class SeedDebugProject(BaseAction): - '''Edit meta data action.''' - - #: Action identifier. - identifier = "seed.debug.project" - #: Action label. - label = "Seed Debug Project" - #: Action description. 
- description = "Description" - #: priority - priority = 100 - #: roles that are allowed to register this action - icon = statics_icon("ftrack", "action_icons", "SeedProject.svg") - - # Asset names which will be created in `Assets` entity - assets = [ - "Addax", "Alpaca", "Ant", "Antelope", "Aye", "Badger", "Bear", "Bee", - "Beetle", "Bluebird", "Bongo", "Bontebok", "Butterflie", "Caiman", - "Capuchin", "Capybara", "Cat", "Caterpillar", "Coyote", "Crocodile", - "Cuckoo", "Deer", "Dragonfly", "Duck", "Eagle", "Egret", "Elephant", - "Falcon", "Fossa", "Fox", "Gazelle", "Gecko", "Gerbil", - "GiantArmadillo", "Gibbon", "Giraffe", "Goose", "Gorilla", - "Grasshoper", "Hare", "Hawk", "Hedgehog", "Heron", "Hog", - "Hummingbird", "Hyena", "Chameleon", "Cheetah", "Iguana", "Jackal", - "Jaguar", "Kingfisher", "Kinglet", "Kite", "Komodo", "Lemur", - "Leopard", "Lion", "Lizard", "Macaw", "Malachite", "Mandrill", - "Mantis", "Marmoset", "Meadowlark", "Meerkat", "Mockingbird", - "Mongoose", "Monkey", "Nyal", "Ocelot", "Okapi", "Oribi", "Oriole", - "Otter", "Owl", "Panda", "Parrot", "Pelican", "Pig", "Porcupine", - "Reedbuck", "Rhinocero", "Sandpiper", "Servil", "Skink", "Sloth", - "Snake", "Spider", "Squirrel", "Sunbird", "Swallow", "Swift", "Tiger", - "Sylph", "Tanager", "Vulture", "Warthog", "Waterbuck", "Woodpecker", - "Zebra" - ] - - # Tasks which will be created for Assets - asset_tasks = [ - "Modeling", "Lookdev", "Rigging" - ] - # Tasks which will be created for Shots - shot_tasks = [ - "Animation", "Lighting", "Compositing", "FX" - ] - - # Define how much sequences will be created - default_seq_count = 5 - # Define how much shots will be created for each sequence - default_shots_count = 10 - - max_entities_created_at_one_commit = 50 - - existing_projects = None - new_project_item = "< New Project >" - current_project_item = "< Current Project >" - settings_key = "seed_project" - - def discover(self, session, entities, event): - ''' Validation ''' - if not self.valid_roles(session, entities, event): - return False - return True - - def interface(self, session, entities, event): - if event["data"].get("values", {}): - return - - title = "Select Project where you want to create seed data" - - items = [] - item_splitter = {"type": "label", "value": "---"} - - description_label = { - "type": "label", - "value": ( - "WARNING: Action does NOT check if entities already exist !!!" 
- ) - } - items.append(description_label) - - all_projects = session.query("select full_name from Project").all() - self.existing_projects = [proj["full_name"] for proj in all_projects] - projects_items = [ - {"label": proj, "value": proj} for proj in self.existing_projects - ] - - data_items = [] - - data_items.append({ - "label": self.new_project_item, - "value": self.new_project_item - }) - - data_items.append({ - "label": self.current_project_item, - "value": self.current_project_item - }) - - data_items.extend(sorted( - projects_items, - key=itemgetter("label"), - reverse=False - )) - projects_item = { - "label": "Choose Project", - "type": "enumerator", - "name": "project_name", - "data": data_items, - "value": self.current_project_item - } - items.append(projects_item) - items.append(item_splitter) - - items.append({ - "label": "Number of assets", - "type": "number", - "name": "asset_count", - "value": len(self.assets) - }) - items.append({ - "label": "Number of sequences", - "type": "number", - "name": "seq_count", - "value": self.default_seq_count - }) - items.append({ - "label": "Number of shots", - "type": "number", - "name": "shots_count", - "value": self.default_shots_count - }) - items.append(item_splitter) - - note_label = { - "type": "label", - "value": ( - "
NOTE: Enter project name and choose schema if you " - "chose `\"< New Project >\"`(code is optional)
" - ) - } - items.append(note_label) - items.append({ - "label": "Project name", - "name": "new_project_name", - "type": "text", - "value": "" - }) - - project_schemas = [ - sch["name"] for sch in self.session.query("ProjectSchema").all() - ] - schemas_item = { - "label": "Choose Schema", - "type": "enumerator", - "name": "new_schema_name", - "data": [ - {"label": sch, "value": sch} for sch in project_schemas - ], - "value": project_schemas[0] - } - items.append(schemas_item) - - items.append({ - "label": "*Project code", - "name": "new_project_code", - "type": "text", - "value": "", - "empty_text": "Optional..." - }) - - return { - "items": items, - "title": title - } - - def launch(self, session, in_entities, event): - if "values" not in event["data"]: - return - - # THIS IS THE PROJECT PART - values = event["data"]["values"] - selected_project = values["project_name"] - if selected_project == self.new_project_item: - project_name = values["new_project_name"] - if project_name in self.existing_projects: - msg = "Project \"{}\" already exist".format(project_name) - self.log.error(msg) - return {"success": False, "message": msg} - - project_code = values["new_project_code"] - project_schema_name = values["new_schema_name"] - if not project_code: - project_code = project_name - project_code = project_code.lower().replace(" ", "_").strip() - _project = session.query( - "Project where name is \"{}\"".format(project_code) - ).first() - if _project: - msg = "Project with code \"{}\" already exist".format( - project_code - ) - self.log.error(msg) - return {"success": False, "message": msg} - - project_schema = session.query( - "ProjectSchema where name is \"{}\"".format( - project_schema_name - ) - ).one() - # Create the project with the chosen schema. - self.log.debug(( - "*** Creating Project: name <{}>, code <{}>, schema <{}>" - ).format(project_name, project_code, project_schema_name)) - project = session.create("Project", { - "name": project_code, - "full_name": project_name, - "project_schema": project_schema - }) - session.commit() - - elif selected_project == self.current_project_item: - entity = in_entities[0] - if entity.entity_type.lower() == "project": - project = entity - else: - if "project" in entity: - project = entity["project"] - else: - project = entity["parent"]["project"] - project_schema = project["project_schema"] - self.log.debug(( - "*** Using Project: name <{}>, code <{}>, schema <{}>" - ).format( - project["full_name"], project["name"], project_schema["name"] - )) - else: - project = session.query("Project where full_name is \"{}\"".format( - selected_project - )).one() - project_schema = project["project_schema"] - self.log.debug(( - "*** Using Project: name <{}>, code <{}>, schema <{}>" - ).format( - project["full_name"], project["name"], project_schema["name"] - )) - - # THIS IS THE MAGIC PART - task_types = {} - for _type in project_schema["_task_type_schema"]["types"]: - if _type["name"] not in task_types: - task_types[_type["name"]] = _type - self.task_types = task_types - - asset_count = values.get("asset_count") or len(self.assets) - seq_count = values.get("seq_count") or self.default_seq_count - shots_count = values.get("shots_count") or self.default_shots_count - - self.create_assets(project, asset_count) - self.create_shots(project, seq_count, shots_count) - - return True - - def create_assets(self, project, asset_count): - self.log.debug("*** Creating assets:") - - try: - asset_count = int(asset_count) - except ValueError: - asset_count = 0 - - if 
asset_count <= 0: - self.log.debug("No assets to create") - return - - main_entity = self.session.create("Folder", { - "name": "Assets", - "parent": project - }) - self.log.debug("- Assets") - available_assets = len(self.assets) - repetitive_times = ( - int(asset_count / available_assets) + - (asset_count % available_assets > 0) - ) - - index = 0 - created_entities = 0 - to_create_length = asset_count + (asset_count * len(self.asset_tasks)) - for _asset_name in self.assets: - if created_entities >= to_create_length: - break - for asset_num in range(1, repetitive_times + 1): - if created_entities >= asset_count: - break - asset_name = "%s_%02d" % (_asset_name, asset_num) - asset = self.session.create("AssetBuild", { - "name": asset_name, - "parent": main_entity - }) - self.log.debug("- Assets/{}".format(asset_name)) - - created_entities += 1 - index += 1 - if self.temp_commit(index, created_entities, to_create_length): - index = 0 - - for task_name in self.asset_tasks: - self.session.create("Task", { - "name": task_name, - "parent": asset, - "type": self.task_types[task_name] - }) - self.log.debug("- Assets/{}/{}".format( - asset_name, task_name - )) - - created_entities += 1 - index += 1 - if self.temp_commit( - index, created_entities, to_create_length - ): - index = 0 - - self.log.debug("*** Committing Assets") - self.log.debug("Committing entities. {}/{}".format( - created_entities, to_create_length - )) - self.session.commit() - - def create_shots(self, project, seq_count, shots_count): - self.log.debug("*** Creating shots:") - - # Convert counts to integers - try: - seq_count = int(seq_count) - except ValueError: - seq_count = 0 - - try: - shots_count = int(shots_count) - except ValueError: - shots_count = 0 - - # Check if both are higher than 0 - missing = [] - if seq_count <= 0: - missing.append("sequences") - - if shots_count <= 0: - missing.append("shots") - - if missing: - self.log.debug("No {} to create".format(" and ".join(missing))) - return - - # Create Folder "Shots" - main_entity = self.session.create("Folder", { - "name": "Shots", - "parent": project - }) - self.log.debug("- Shots") - - index = 0 - created_entities = 0 - to_create_length = ( - seq_count - + (seq_count * shots_count) - + (seq_count * shots_count * len(self.shot_tasks)) - ) - for seq_num in range(1, seq_count + 1): - seq_name = "sq%03d" % seq_num - seq = self.session.create("Sequence", { - "name": seq_name, - "parent": main_entity - }) - self.log.debug("- Shots/{}".format(seq_name)) - - created_entities += 1 - index += 1 - if self.temp_commit(index, created_entities, to_create_length): - index = 0 - - for shot_num in range(1, shots_count + 1): - shot_name = "%ssh%04d" % (seq_name, (shot_num * 10)) - shot = self.session.create("Shot", { - "name": shot_name, - "parent": seq - }) - self.log.debug("- Shots/{}/{}".format(seq_name, shot_name)) - - created_entities += 1 - index += 1 - if self.temp_commit(index, created_entities, to_create_length): - index = 0 - - for task_name in self.shot_tasks: - self.session.create("Task", { - "name": task_name, - "parent": shot, - "type": self.task_types[task_name] - }) - self.log.debug("- Shots/{}/{}/{}".format( - seq_name, shot_name, task_name - )) - - created_entities += 1 - index += 1 - if self.temp_commit( - index, created_entities, to_create_length - ): - index = 0 - - self.log.debug("*** Committing Shots") - self.log.debug("Committing entities. 
{}/{}".format( - created_entities, to_create_length - )) - self.session.commit() - - def temp_commit(self, index, created_entities, to_create_length): - if index < self.max_entities_created_at_one_commit: - return False - self.log.debug("Committing {} entities. {}/{}".format( - index, created_entities, to_create_length - )) - self.session.commit() - return True - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - SeedDebugProject(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py deleted file mode 100644 index c9e0901623..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ /dev/null @@ -1,470 +0,0 @@ -import os -import errno -import json -import requests - -from bson.objectid import ObjectId - -from openpype.client import ( - get_project, - get_asset_by_id, - get_assets, - get_subset_by_name, - get_version_by_name, - get_representations -) -from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype.pipeline import AvalonMongoDB, Anatomy - -from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY - - -class StoreThumbnailsToAvalon(BaseAction): - # Action identifier - identifier = "store.thubmnail.to.avalon" - # Action label - label = "OpenPype Admin" - # Action variant - variant = "- Store Thumbnails to avalon" - # Action description - description = 'Test action' - # roles that are allowed to register this action - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - settings_key = "store_thubmnail_to_avalon" - - thumbnail_key = "AVALON_THUMBNAIL_ROOT" - - def __init__(self, *args, **kwargs): - self.db_con = AvalonMongoDB() - super(StoreThumbnailsToAvalon, self).__init__(*args, **kwargs) - - def discover(self, session, entities, event): - is_valid = False - for entity in entities: - if entity.entity_type.lower() == "assetversion": - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def launch(self, session, entities, event): - user = session.query( - "User where username is '{0}'".format(session.api_user) - ).one() - action_job = session.create("Job", { - "user": user, - "status": "running", - "data": json.dumps({ - "description": "Storing thumbnails to avalon." - }) - }) - session.commit() - - project = self.get_project_from_entity(entities[0]) - project_name = project["full_name"] - anatomy = Anatomy(project_name) - - if "publish" not in anatomy.templates: - msg = "Anatomy does not have set publish key!" 
- - action_job["status"] = "failed" - session.commit() - - self.log.warning(msg) - - return { - "success": False, - "message": msg - } - - if "thumbnail" not in anatomy.templates["publish"]: - msg = ( - "There is not set \"thumbnail\"" - " template in Antomy for project \"{}\"" - ).format(project_name) - - action_job["status"] = "failed" - session.commit() - - self.log.warning(msg) - - return { - "success": False, - "message": msg - } - - thumbnail_roots = os.environ.get(self.thumbnail_key) - if ( - "{thumbnail_root}" in anatomy.templates["publish"]["thumbnail"] - and not thumbnail_roots - ): - msg = "`{}` environment is not set".format(self.thumbnail_key) - - action_job["status"] = "failed" - session.commit() - - self.log.warning(msg) - - return { - "success": False, - "message": msg - } - - existing_thumbnail_root = None - for path in thumbnail_roots.split(os.pathsep): - if os.path.exists(path): - existing_thumbnail_root = path - break - - if existing_thumbnail_root is None: - msg = ( - "Can't access paths, set in `{}` ({})" - ).format(self.thumbnail_key, thumbnail_roots) - - action_job["status"] = "failed" - session.commit() - - self.log.warning(msg) - - return { - "success": False, - "message": msg - } - - example_template_data = { - "_id": "ID", - "thumbnail_root": "THUBMNAIL_ROOT", - "thumbnail_type": "THUMBNAIL_TYPE", - "ext": ".EXT", - "project": { - "name": "PROJECT_NAME", - "code": "PROJECT_CODE" - }, - "asset": "ASSET_NAME", - "subset": "SUBSET_NAME", - "version": "VERSION_NAME", - "hierarchy": "HIERARCHY" - } - tmp_filled = anatomy.format_all(example_template_data) - thumbnail_result = tmp_filled["publish"]["thumbnail"] - if not thumbnail_result.solved: - missing_keys = thumbnail_result.missing_keys - invalid_types = thumbnail_result.invalid_types - submsg = "" - if missing_keys: - submsg += "Missing keys: {}".format(", ".join( - ["\"{}\"".format(key) for key in missing_keys] - )) - - if invalid_types: - items = [] - for key, value in invalid_types.items(): - items.append("{}{}".format(str(key), str(value))) - submsg += "Invalid types: {}".format(", ".join(items)) - - msg = ( - "Thumbnail Anatomy template expects more keys than action" - " can offer. {}" - ).format(submsg) - - action_job["status"] = "failed" - session.commit() - - self.log.warning(msg) - - return { - "success": False, - "message": msg - } - - thumbnail_template = anatomy.templates["publish"]["thumbnail"] - - self.db_con.install() - - for entity in entities: - # Skip if entity is not AssetVersion (should never happen, but..) - if entity.entity_type.lower() != "assetversion": - continue - - # Skip if AssetVersion don't have thumbnail - thumbnail_ent = entity["thumbnail"] - if thumbnail_ent is None: - self.log.debug(( - "Skipping. AssetVersion don't " - "have set thumbnail. {}" - ).format(entity["id"])) - continue - - avalon_ents_result = self.get_avalon_entities_for_assetversion( - entity, self.db_con - ) - version_full_path = ( - "Asset: \"{project_name}/{asset_path}\"" - " | Subset: \"{subset_name}\"" - " | Version: \"{version_name}\"" - ).format(**avalon_ents_result) - - version = avalon_ents_result["version"] - if not version: - self.log.warning(( - "AssetVersion does not have version in avalon. {}" - ).format(version_full_path)) - continue - - thumbnail_id = version["data"].get("thumbnail_id") - if thumbnail_id: - self.log.info(( - "AssetVersion skipped, already has thubmanil set. 
{}" - ).format(version_full_path)) - continue - - # Get thumbnail extension - file_ext = thumbnail_ent["file_type"] - if not file_ext.startswith("."): - file_ext = ".{}".format(file_ext) - - avalon_project = avalon_ents_result["project"] - avalon_asset = avalon_ents_result["asset"] - hierarchy = "" - parents = avalon_asset["data"].get("parents") or [] - if parents: - hierarchy = "/".join(parents) - - # Prepare anatomy template fill data - # 1. Create new id for thumbnail entity - thumbnail_id = ObjectId() - - template_data = { - "_id": str(thumbnail_id), - "thumbnail_root": existing_thumbnail_root, - "thumbnail_type": "thumbnail", - "ext": file_ext, - "project": { - "name": avalon_project["name"], - "code": avalon_project["data"].get("code") - }, - "asset": avalon_ents_result["asset_name"], - "subset": avalon_ents_result["subset_name"], - "version": avalon_ents_result["version_name"], - "hierarchy": hierarchy - } - - anatomy_filled = anatomy.format(template_data) - thumbnail_path = anatomy_filled["publish"]["thumbnail"] - thumbnail_path = thumbnail_path.replace("..", ".") - thumbnail_path = os.path.normpath(thumbnail_path) - - downloaded = False - for loc in (thumbnail_ent.get("component_locations") or []): - res_id = loc.get("resource_identifier") - if not res_id: - continue - - thubmnail_url = self.get_thumbnail_url(res_id) - if self.download_file(thubmnail_url, thumbnail_path): - downloaded = True - break - - if not downloaded: - self.log.warning( - "Could not download thumbnail for {}".format( - version_full_path - ) - ) - continue - - # Clean template data from keys that are dynamic - template_data.pop("_id") - template_data.pop("thumbnail_root") - - thumbnail_entity = { - "_id": thumbnail_id, - "type": "thumbnail", - "schema": "openpype:thumbnail-1.0", - "data": { - "template": thumbnail_template, - "template_data": template_data - } - } - - # Create thumbnail entity - self.db_con.insert_one(thumbnail_entity) - self.log.debug( - "Creating entity in database {}".format(str(thumbnail_entity)) - ) - - # Set thumbnail id for version - self.db_con.update_one( - {"_id": version["_id"]}, - {"$set": {"data.thumbnail_id": thumbnail_id}} - ) - - self.db_con.update_one( - {"_id": avalon_asset["_id"]}, - {"$set": {"data.thumbnail_id": thumbnail_id}} - ) - - action_job["status"] = "done" - session.commit() - - return True - - def get_thumbnail_url(self, resource_identifier, size=None): - # TODO use ftrack_api method rather (find way how to use it) - url_string = ( - u'{url}/component/thumbnail?id={id}&username={username}' - u'&apiKey={apiKey}' - ) - url = url_string.format( - url=self.session.server_url, - id=resource_identifier, - username=self.session.api_user, - apiKey=self.session.api_key - ) - if size: - url += u'&size={0}'.format(size) - - return url - - def download_file(self, source_url, dst_file_path): - dir_path = os.path.dirname(dst_file_path) - try: - os.makedirs(dir_path) - except OSError as exc: - if exc.errno != errno.EEXIST: - self.log.warning( - "Could not create folder: \"{}\"".format(dir_path) - ) - return False - - self.log.debug( - "Downloading file \"{}\" -> \"{}\"".format( - source_url, dst_file_path - ) - ) - file_open = open(dst_file_path, "wb") - try: - file_open.write(requests.get(source_url).content) - except Exception: - self.log.warning( - "Download of image `{}` failed.".format(source_url) - ) - return False - finally: - file_open.close() - return True - - def get_avalon_entities_for_assetversion(self, asset_version, db_con): - output = { - "success": True, - 
"message": None, - "project": None, - "project_name": None, - "asset": None, - "asset_name": None, - "asset_path": None, - "subset": None, - "subset_name": None, - "version": None, - "version_name": None, - "representations": None - } - - db_con.install() - - ft_asset = asset_version["asset"] - subset_name = ft_asset["name"] - version = asset_version["version"] - parent = ft_asset["parent"] - ent_path = "/".join( - [ent["name"] for ent in parent["link"]] - ) - project = self.get_project_from_entity(asset_version) - project_name = project["full_name"] - - output["project_name"] = project_name - output["asset_name"] = parent["name"] - output["asset_path"] = ent_path - output["subset_name"] = subset_name - output["version_name"] = version - - db_con.Session["AVALON_PROJECT"] = project_name - - avalon_project = get_project(project_name) - output["project"] = avalon_project - - if not avalon_project: - output["success"] = False - output["message"] = ( - "Project not synchronized to avalon `{}`".format(project_name) - ) - return output - - asset_ent = None - asset_mongo_id = parent["custom_attributes"].get(CUST_ATTR_ID_KEY) - if asset_mongo_id: - try: - asset_ent = get_asset_by_id(project_name, asset_mongo_id) - except Exception: - pass - - if not asset_ent: - asset_docs = get_assets(project_name, asset_names=[parent["name"]]) - for asset_doc in asset_docs: - ftrack_id = asset_doc.get("data", {}).get("ftrackId") - if ftrack_id == parent["id"]: - asset_ent = asset_doc - break - - output["asset"] = asset_ent - - if not asset_ent: - output["success"] = False - output["message"] = ( - "Not synchronized entity to avalon `{}`".format(ent_path) - ) - return output - - subset_ent = get_subset_by_name( - project_name, - subset_name=subset_name, - asset_id=asset_ent["_id"] - ) - - output["subset"] = subset_ent - - if not subset_ent: - output["success"] = False - output["message"] = ( - "Subset `{}` does not exist under Asset `{}`" - ).format(subset_name, ent_path) - return output - - version_ent = get_version_by_name( - project_name, - version, - subset_ent["_id"] - ) - - output["version"] = version_ent - - if not version_ent: - output["success"] = False - output["message"] = ( - "Version `{}` does not exist under Subset `{}` | Asset `{}`" - ).format(version, subset_name, ent_path) - return output - - repre_ents = list(get_representations( - project_name, - version_ids=[version_ent["_id"]] - )) - - output["representations"] = repre_ents - return output - - -def register(session): - StoreThumbnailsToAvalon(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py deleted file mode 100644 index e52a061471..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py +++ /dev/null @@ -1,225 +0,0 @@ -import time -import sys -import json - -import ftrack_api - -from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory - - -class SyncToAvalonLocal(BaseAction): - """ - Synchronizing data action - from Ftrack to Avalon DB - - Stores all information about entity. 
- - Name(string) - Most important information = identifier of entity - - Parent(ObjectId) - Avalon Project Id, if entity is not project itself - - Data(dictionary): - - VisualParent(ObjectId) - Avalon Id of parent asset - - Parents(array of string) - All parent names except project - - Tasks(array of string) - Tasks on asset - - FtrackId(string) - - entityType(string) - entity's type on Ftrack - * All Custom attributes in group 'Avalon' - - custom attributes that start with 'avalon_' are skipped - - * These information are stored for entities in whole project. - - Avalon ID of asset is stored to Ftrack - - Custom attribute 'avalon_mongo_id'. - - action IS NOT creating this Custom attribute if doesn't exist - - run 'Create Custom Attributes' action - - or do it manually (Not recommended) - """ - - identifier = "sync.to.avalon.local" - label = "OpenPype Admin" - variant = "- Sync To Avalon (Local)" - priority = 200 - icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") - - settings_key = "sync_to_avalon_local" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.entities_factory = SyncEntitiesFactory(self.log, self.session) - - def discover(self, session, entities, event): - """ Validate selection. """ - is_valid = False - for ent in event["data"]["selection"]: - # Ignore entities that are not tasks or projects - if ent["entityType"].lower() in ["show", "task"]: - is_valid = True - break - - if is_valid: - is_valid = self.valid_roles(session, entities, event) - return is_valid - - def launch(self, session, in_entities, event): - self.log.debug("{}: Creating job".format(self.label)) - - user_entity = session.query( - "User where id is {}".format(event["source"]["user"]["id"]) - ).one() - job_entity = session.create("Job", { - "user": user_entity, - "status": "running", - "data": json.dumps({ - "description": "Sync to avalon is running..." - }) - }) - session.commit() - - project_entity = self.get_project_from_entity(in_entities[0]) - project_name = project_entity["full_name"] - - try: - result = self.synchronization(event, project_name) - - except Exception: - self.log.error( - "Synchronization failed due to code error", exc_info=True - ) - - description = "Sync to avalon Crashed (Download traceback)" - self.add_traceback_to_job( - job_entity, session, sys.exc_info(), description - ) - - msg = "An error has happened during synchronization" - title = "Synchronization report ({}):".format(project_name) - items = [] - items.append({ - "type": "label", - "value": "# {}".format(msg) - }) - items.append({ - "type": "label", - "value": ( - "
Download report from job for more information.
" - ) - }) - - report = {} - try: - report = self.entities_factory.report() - except Exception: - pass - - _items = report.get("items") or [] - if _items: - items.append(self.entities_factory.report_splitter) - items.extend(_items) - - self.show_interface(items, title, event, submit_btn_label="Ok") - - return {"success": True, "message": msg} - - job_entity["status"] = "done" - job_entity["data"] = json.dumps({ - "description": "Sync to avalon finished." - }) - session.commit() - - return result - - def synchronization(self, event, project_name): - time_start = time.time() - - self.show_message(event, "Synchronization - Preparing data", True) - - try: - output = self.entities_factory.launch_setup(project_name) - if output is not None: - return output - - time_1 = time.time() - - self.entities_factory.set_cutom_attributes() - time_2 = time.time() - - # This must happen before all filtering!!! - self.entities_factory.prepare_avalon_entities(project_name) - time_3 = time.time() - - self.entities_factory.filter_by_ignore_sync() - time_4 = time.time() - - self.entities_factory.duplicity_regex_check() - time_5 = time.time() - - self.entities_factory.prepare_ftrack_ent_data() - time_6 = time.time() - - self.entities_factory.synchronize() - time_7 = time.time() - - self.log.debug( - "*** Synchronization finished ***" - ) - self.log.debug( - "preparation <{}>".format(time_1 - time_start) - ) - self.log.debug( - "set_cutom_attributes <{}>".format(time_2 - time_1) - ) - self.log.debug( - "prepare_avalon_entities <{}>".format(time_3 - time_2) - ) - self.log.debug( - "filter_by_ignore_sync <{}>".format(time_4 - time_3) - ) - self.log.debug( - "duplicity_regex_check <{}>".format(time_5 - time_4) - ) - self.log.debug( - "prepare_ftrack_ent_data <{}>".format(time_6 - time_5) - ) - self.log.debug( - "synchronize <{}>".format(time_7 - time_6) - ) - self.log.debug( - "* Total time: {}".format(time_7 - time_start) - ) - - if self.entities_factory.project_created: - event = ftrack_api.event.base.Event( - topic="openpype.project.created", - data={"project_name": project_name} - ) - self.session.event_hub.publish(event) - - report = self.entities_factory.report() - if report and report.get("items"): - default_title = "Synchronization report ({}):".format( - project_name - ) - self.show_interface( - items=report["items"], - title=report.get("title", default_title), - event=event - ) - return { - "success": True, - "message": "Synchronization Finished" - } - - finally: - try: - self.entities_factory.dbcon.uninstall() - except Exception: - pass - - try: - self.entities_factory.session.close() - except Exception: - pass - - -def register(session): - '''Register plugin. 
Called when used as an plugin.''' - - SyncToAvalonLocal(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_test.py b/openpype/modules/ftrack/event_handlers_user/action_test.py deleted file mode 100644 index bd71ba5bf9..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_test.py +++ /dev/null @@ -1,26 +0,0 @@ -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class TestAction(BaseAction): - """Action for testing purpose or as base for new actions.""" - - ignore_me = True - - identifier = 'test.action' - label = 'Test action' - description = 'Test action' - priority = 10000 - role_list = ['Pypeclub'] - icon = statics_icon("ftrack", "action_icons", "TestAction.svg") - - def discover(self, session, entities, event): - return True - - def launch(self, session, entities, event): - self.log.info(event) - - return True - - -def register(session): - TestAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py b/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py deleted file mode 100644 index 3b90960160..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_childern.py +++ /dev/null @@ -1,63 +0,0 @@ -import json -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class ThumbToChildren(BaseAction): - '''Custom action.''' - - # Action identifier - identifier = 'thumb.to.children' - # Action label - label = 'Thumbnail' - # Action variant - variant = " to Children" - # Action icon - icon = statics_icon("ftrack", "action_icons", "Thumbnail.svg") - - def discover(self, session, entities, event): - """Show only on project.""" - if (len(entities) != 1 or entities[0].entity_type in ["Project"]): - return False - return True - - def launch(self, session, entities, event): - '''Callback method for action.''' - - userId = event['source']['user']['id'] - user = session.query('User where id is ' + userId).one() - - job = session.create('Job', { - 'user': user, - 'status': 'running', - 'data': json.dumps({ - 'description': 'Push thumbnails to Childrens' - }) - }) - session.commit() - try: - for entity in entities: - thumbid = entity['thumbnail_id'] - if thumbid: - for child in entity['children']: - child['thumbnail_id'] = thumbid - - # inform the user that the job is done - job['status'] = 'done' - except Exception as exc: - session.rollback() - # fail the job if something goes wrong - job['status'] = 'failed' - raise exc - finally: - session.commit() - - return { - 'success': True, - 'message': 'Created job for updating thumbnails!' - } - - -def register(session): - '''Register action. 
Called when used as an event plugin.''' - - ThumbToChildren(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py b/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py deleted file mode 100644 index 2f0110b7aa..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_thumbnail_to_parent.py +++ /dev/null @@ -1,91 +0,0 @@ -import json -from openpype_modules.ftrack.lib import BaseAction, statics_icon - - -class ThumbToParent(BaseAction): - '''Custom action.''' - - # Action identifier - identifier = 'thumb.to.parent' - # Action label - label = 'Thumbnail' - # Action variant - variant = " to Parent" - # Action icon - icon = statics_icon("ftrack", "action_icons", "Thumbnail.svg") - - def discover(self, session, entities, event): - '''Return action config if triggered on asset versions.''' - - if len(entities) <= 0 or entities[0].entity_type in ['Project']: - return False - - return True - - def launch(self, session, entities, event): - '''Callback method for action.''' - - userId = event['source']['user']['id'] - user = session.query('User where id is ' + userId).one() - - job = session.create('Job', { - 'user': user, - 'status': 'running', - 'data': json.dumps({ - 'description': 'Push thumbnails to parents' - }) - }) - session.commit() - try: - for entity in entities: - parent = None - thumbid = None - if entity.entity_type.lower() == 'assetversion': - parent = entity['task'] - - if parent is None: - par_ent = entity['link'][-2] - parent = session.get(par_ent['type'], par_ent['id']) - else: - try: - parent = entity['parent'] - except Exception as e: - msg = ( - "During Action 'Thumb to Parent'" - " went something wrong" - ) - self.log.error(msg) - raise e - thumbid = entity['thumbnail_id'] - - if parent and thumbid: - parent['thumbnail_id'] = thumbid - status = 'done' - else: - raise Exception( - "Parent or thumbnail id not found. Parent: {}. " - "Thumbnail id: {}".format(parent, thumbid) - ) - - # inform the user that the job is done - job['status'] = status or 'done' - - except Exception as exc: - session.rollback() - # fail the job if something goes wrong - job['status'] = 'failed' - raise exc - - finally: - session.commit() - - return { - 'success': True, - 'message': 'Created job for updating thumbnails!' - } - - -def register(session): - '''Register action. 
Called when used as an event plugin.''' - - ThumbToParent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py b/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py deleted file mode 100644 index 65d1b42d82..0000000000 --- a/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py +++ /dev/null @@ -1,98 +0,0 @@ -import platform -import socket -import getpass - -from openpype_modules.ftrack.lib import BaseAction -from openpype_modules.ftrack.ftrack_server.lib import get_host_ip - - -class ActionWhereIRun(BaseAction): - """Show where same user has running OpenPype instances.""" - - identifier = "ask.where.i.run" - show_identifier = "show.where.i.run" - label = "OpenPype Admin" - variant = "- Where I run" - description = "Show PC info where user have running OpenPype" - - def _discover(self, _event): - return { - "items": [{ - "label": self.label, - "variant": self.variant, - "description": self.description, - "actionIdentifier": self.discover_identifier, - "icon": self.icon, - }] - } - - def _launch(self, event): - self.trigger_action(self.show_identifier, event) - - def register(self): - # Register default action callbacks - super(ActionWhereIRun, self).register() - - # Add show identifier - show_subscription = ( - "topic=ftrack.action.launch" - " and data.actionIdentifier={}" - " and source.user.username={}" - ).format( - self.show_identifier, - self.session.api_user - ) - self.session.event_hub.subscribe( - show_subscription, - self._show_info - ) - - def _show_info(self, event): - title = "Where Do I Run?" - msgs = {} - all_keys = ["Hostname", "IP", "Username", "System name", "PC name"] - try: - host_name = socket.gethostname() - msgs["Hostname"] = host_name - msgs["IP"] = get_host_ip() or "N/A" - except Exception: - pass - - try: - system_name, pc_name, *_ = platform.uname() - msgs["System name"] = system_name - msgs["PC name"] = pc_name - except Exception: - pass - - try: - msgs["Username"] = getpass.getuser() - except Exception: - pass - - for key in all_keys: - if not msgs.get(key): - msgs[key] = "-Undefined-" - - items = [] - first = True - separator = {"type": "label", "value": "---"} - for key, value in msgs.items(): - if first: - first = False - else: - items.append(separator) - self.log.debug("{}: {}".format(key, value)) - - subtitle = {"type": "label", "value": "
{}
".format(key)} - items.append(subtitle) - message = {"type": "label", "value": "
{}
".format(value)} - items.append(message) - - self.show_interface(items, title, event=event) - - -def register(session): - '''Register plugin. Called when used as an plugin.''' - - ActionWhereIRun(session).register() diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py deleted file mode 100644 index 2042367a7e..0000000000 --- a/openpype/modules/ftrack/ftrack_module.py +++ /dev/null @@ -1,587 +0,0 @@ -import os -import json -import collections -import platform - -from openpype.modules import ( - click_wrap, - OpenPypeModule, - ITrayModule, - IPluginPaths, - ISettingsChangeListener -) -from openpype.settings import SaveWarningExc -from openpype.lib import Logger - -FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) -_URL_NOT_SET = object() - - -class FtrackModule( - OpenPypeModule, - ITrayModule, - IPluginPaths, - ISettingsChangeListener -): - name = "ftrack" - - def initialize(self, settings): - ftrack_settings = settings[self.name] - - self.enabled = ftrack_settings["enabled"] - self._settings_ftrack_url = ftrack_settings["ftrack_server"] - self._ftrack_url = _URL_NOT_SET - - current_dir = os.path.dirname(os.path.abspath(__file__)) - low_platform = platform.system().lower() - - # Server event handler paths - server_event_handlers_paths = [ - os.path.join(current_dir, "event_handlers_server") - ] - settings_server_paths = ftrack_settings["ftrack_events_path"] - if isinstance(settings_server_paths, dict): - settings_server_paths = settings_server_paths[low_platform] - server_event_handlers_paths.extend(settings_server_paths) - - # User event handler paths - user_event_handlers_paths = [ - os.path.join(current_dir, "event_handlers_user") - ] - settings_action_paths = ftrack_settings["ftrack_actions_path"] - if isinstance(settings_action_paths, dict): - settings_action_paths = settings_action_paths[low_platform] - user_event_handlers_paths.extend(settings_action_paths) - - # Prepare attribute - self.server_event_handlers_paths = server_event_handlers_paths - self.user_event_handlers_paths = user_event_handlers_paths - self.tray_module = None - - # TimersManager connection - self.timers_manager_connector = None - self._timers_manager_module = None - - def get_ftrack_url(self): - """Resolved ftrack url. - - Resolving is trying to fill missing information in url and tried to - connect to the server. - - Returns: - Union[str, None]: Final variant of url or None if url could not be - reached. - """ - - if self._ftrack_url is _URL_NOT_SET: - self._ftrack_url = resolve_ftrack_url( - self._settings_ftrack_url, - logger=self.log - ) - return self._ftrack_url - - ftrack_url = property(get_ftrack_url) - - @property - def settings_ftrack_url(self): - """Ftrack url from settings in a format as it is. - - Returns: - str: Ftrack url from settings. 
- """ - - return self._settings_ftrack_url - - def get_global_environments(self): - """Ftrack's global environments.""" - - return { - "FTRACK_SERVER": self.ftrack_url - } - - def get_plugin_paths(self): - """Ftrack plugin paths.""" - return { - "publish": [os.path.join(FTRACK_MODULE_DIR, "plugins", "publish")] - } - - def get_launch_hook_paths(self): - """Implementation for applications launch hooks.""" - - return os.path.join(FTRACK_MODULE_DIR, "launch_hooks") - - def modify_application_launch_arguments(self, application, env): - if not application.use_python_2: - return - - self.log.info("Adding Ftrack Python 2 packages to PYTHONPATH.") - - # Prepare vendor dir path - python_2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor") - - # Add Python 2 modules - python_paths = [ - # `python-ftrack-api` - os.path.join(python_2_vendor, "ftrack-python-api", "source") - ] - - # Load PYTHONPATH from current launch context - python_path = env.get("PYTHONPATH") - if python_path: - python_paths.append(python_path) - - # Set new PYTHONPATH to launch context environments - env["PYTHONPATH"] = os.pathsep.join(python_paths) - - def connect_with_modules(self, enabled_modules): - for module in enabled_modules: - if not hasattr(module, "get_ftrack_event_handler_paths"): - continue - - try: - paths_by_type = module.get_ftrack_event_handler_paths() - except Exception: - continue - - if not isinstance(paths_by_type, dict): - continue - - for key, value in paths_by_type.items(): - if not value: - continue - - if key not in ("server", "user"): - self.log.warning( - "Unknown event handlers key \"{}\" skipping.".format( - key - ) - ) - continue - - if not isinstance(value, (list, tuple, set)): - value = [value] - - if key == "server": - self.server_event_handlers_paths.extend(value) - elif key == "user": - self.user_event_handlers_paths.extend(value) - - def on_system_settings_save( - self, old_value, new_value, changes, new_value_metadata - ): - """Implementation of ISettingsChangeListener interface.""" - if not self.ftrack_url: - raise SaveWarningExc(( - "Ftrack URL is not set." - " Can't propagate changes to Ftrack server." - )) - - ftrack_changes = changes.get("modules", {}).get("ftrack", {}) - url_change_msg = None - if "ftrack_server" in ftrack_changes: - url_change_msg = ( - "Ftrack URL was changed." - " This change may need to restart OpenPype to take affect." - ) - - try: - session = self.create_ftrack_session() - except Exception: - self.log.warning("Couldn't create ftrack session.", exc_info=True) - - if url_change_msg: - raise SaveWarningExc(url_change_msg) - - raise SaveWarningExc(( - "Saving of attributes to ftrack wasn't successful," - " try running Create/Update Avalon Attributes in ftrack." 
- )) - - from .lib import ( - get_openpype_attr, - CUST_ATTR_APPLICATIONS, - CUST_ATTR_TOOLS, - app_definitions_from_app_manager, - tool_definitions_from_app_manager - ) - from openpype.lib import ApplicationManager - query_keys = [ - "id", - "key", - "config" - ] - custom_attributes = get_openpype_attr( - session, - split_hierarchical=False, - query_keys=query_keys - ) - app_attribute = None - tool_attribute = None - for custom_attribute in custom_attributes: - key = custom_attribute["key"] - if key == CUST_ATTR_APPLICATIONS: - app_attribute = custom_attribute - elif key == CUST_ATTR_TOOLS: - tool_attribute = custom_attribute - - app_manager = ApplicationManager(new_value_metadata) - missing_attributes = [] - if not app_attribute: - missing_attributes.append(CUST_ATTR_APPLICATIONS) - else: - config = json.loads(app_attribute["config"]) - new_data = app_definitions_from_app_manager(app_manager) - prepared_data = [] - for item in new_data: - for key, label in item.items(): - prepared_data.append({ - "menu": label, - "value": key - }) - - config["data"] = json.dumps(prepared_data) - app_attribute["config"] = json.dumps(config) - - if not tool_attribute: - missing_attributes.append(CUST_ATTR_TOOLS) - else: - config = json.loads(tool_attribute["config"]) - new_data = tool_definitions_from_app_manager(app_manager) - prepared_data = [] - for item in new_data: - for key, label in item.items(): - prepared_data.append({ - "menu": label, - "value": key - }) - config["data"] = json.dumps(prepared_data) - tool_attribute["config"] = json.dumps(config) - - session.commit() - - if missing_attributes: - raise SaveWarningExc(( - "Couldn't find custom attribute/s ({}) to update." - " Try running Create/Update Avalon Attributes in ftrack." - ).format(", ".join(missing_attributes))) - - if url_change_msg: - raise SaveWarningExc(url_change_msg) - - def on_project_settings_save(self, *_args, **_kwargs): - """Implementation of ISettingsChangeListener interface.""" - # Ignore - return - - def on_project_anatomy_save( - self, old_value, new_value, changes, project_name, new_value_metadata - ): - """Implementation of ISettingsChangeListener interface.""" - if not project_name: - return - - new_attr_values = new_value.get("attributes") - if not new_attr_values: - return - - import ftrack_api - from openpype_modules.ftrack.lib import ( - get_openpype_attr, - default_custom_attributes_definition, - CUST_ATTR_TOOLS, - CUST_ATTR_APPLICATIONS, - CUST_ATTR_INTENT - ) - - try: - session = self.create_ftrack_session() - except Exception: - self.log.warning("Couldn't create ftrack session.", exc_info=True) - raise SaveWarningExc(( - "Saving of attributes to ftrack wasn't successful," - " try running Create/Update Avalon Attributes in ftrack." - )) - - project_entity = session.query( - "Project where full_name is \"{}\"".format(project_name) - ).first() - - if not project_entity: - msg = ( - "Ftrack project with name \"{}\" was not found in Ftrack." - " Can't push attribute changes." 
- ).format(project_name) - self.log.warning(msg) - raise SaveWarningExc(msg) - - project_id = project_entity["id"] - - ca_defs = default_custom_attributes_definition() - hierarchical_attrs = ca_defs.get("is_hierarchical") or {} - project_attrs = ca_defs.get("show") or {} - ca_keys = ( - set(hierarchical_attrs.keys()) - | set(project_attrs.keys()) - | {CUST_ATTR_TOOLS, CUST_ATTR_APPLICATIONS, CUST_ATTR_INTENT} - ) - - cust_attr, hier_attr = get_openpype_attr(session) - cust_attr_by_key = {attr["key"]: attr for attr in cust_attr} - hier_attrs_by_key = {attr["key"]: attr for attr in hier_attr} - - failed = {} - missing = {} - for key, value in new_attr_values.items(): - if key not in ca_keys: - continue - - configuration = hier_attrs_by_key.get(key) - if not configuration: - configuration = cust_attr_by_key.get(key) - if not configuration: - self.log.warning( - "Custom attribute \"{}\" was not found.".format(key) - ) - missing[key] = value - continue - - # TODO add add permissions check - # TODO add value validations - # - value type and list items - entity_key = collections.OrderedDict([ - ("configuration_id", configuration["id"]), - ("entity_id", project_id) - ]) - - session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - entity_key, - "value", - ftrack_api.symbol.NOT_SET, - value - ) - ) - try: - session.commit() - self.log.debug( - "Changed project custom attribute \"{}\" to \"{}\"".format( - key, value - ) - ) - except Exception: - self.log.warning( - "Failed to set \"{}\" to \"{}\"".format(key, value), - exc_info=True - ) - session.rollback() - failed[key] = value - - if not failed and not missing: - return - - error_msg = ( - "Values were not updated on Ftrack which may cause issues." - " try running Create/Update Avalon Attributes in ftrack " - " and resave project settings." 
- ) - if missing: - error_msg += "\nMissing Custom attributes on Ftrack: {}.".format( - ", ".join([ - '"{}"'.format(key) - for key in missing.keys() - ]) - ) - if failed: - joined_failed = ", ".join([ - '"{}": "{}"'.format(key, value) - for key, value in failed.items() - ]) - error_msg += "\nFailed to set: {}".format(joined_failed) - raise SaveWarningExc(error_msg) - - def create_ftrack_session(self, **session_kwargs): - import ftrack_api - - if "server_url" not in session_kwargs: - session_kwargs["server_url"] = self.ftrack_url - - api_key = session_kwargs.get("api_key") - api_user = session_kwargs.get("api_user") - # First look into environments - # - both OpenPype tray and ftrack event server should have set them - # - ftrack event server may crash when credentials are tried to load - # from keyring - if not api_key or not api_user: - api_key = os.environ.get("FTRACK_API_KEY") - api_user = os.environ.get("FTRACK_API_USER") - - if not api_key or not api_user: - from .lib import credentials - cred = credentials.get_credentials() - api_user = cred.get("username") - api_key = cred.get("api_key") - - session_kwargs["api_user"] = api_user - session_kwargs["api_key"] = api_key - return ftrack_api.Session(**session_kwargs) - - def tray_init(self): - from .tray import FtrackTrayWrapper - - self.tray_module = FtrackTrayWrapper(self) - # Module is it's own connector to TimersManager - self.timers_manager_connector = self - - def tray_menu(self, parent_menu): - return self.tray_module.tray_menu(parent_menu) - - def tray_start(self): - return self.tray_module.validate() - - def tray_exit(self): - self.tray_module.tray_exit() - - def set_credentials_to_env(self, username, api_key): - os.environ["FTRACK_API_USER"] = username or "" - os.environ["FTRACK_API_KEY"] = api_key or "" - - # --- TimersManager connection methods --- - def start_timer(self, data): - if self.tray_module: - self.tray_module.start_timer_manager(data) - - def stop_timer(self): - if self.tray_module: - self.tray_module.stop_timer_manager() - - def register_timers_manager(self, timer_manager_module): - self._timers_manager_module = timer_manager_module - - def timer_started(self, data): - if self._timers_manager_module is not None: - self._timers_manager_module.timer_started(self.id, data) - - def timer_stopped(self): - if self._timers_manager_module is not None: - self._timers_manager_module.timer_stopped(self.id) - - def get_task_time(self, project_name, asset_name, task_name): - session = self.create_ftrack_session() - query = ( - 'Task where name is "{}"' - ' and parent.name is "{}"' - ' and project.full_name is "{}"' - ).format(task_name, asset_name, project_name) - task_entity = session.query(query).first() - if not task_entity: - return 0 - hours_logged = (task_entity["time_logged"] / 60) / 60 - return hours_logged - - def get_credentials(self): - # type: () -> tuple - """Get local Ftrack credentials.""" - from .lib import credentials - - cred = credentials.get_credentials(self.ftrack_url) - return cred.get("username"), cred.get("api_key") - - def cli(self, click_group): - click_group.add_command(cli_main.to_click_obj()) - - -def _check_ftrack_url(url): - import requests - - try: - result = requests.get(url, allow_redirects=False) - except requests.exceptions.RequestException: - return False - - if (result.status_code != 200 or "FTRACK_VERSION" not in result.headers): - return False - return True - - -def resolve_ftrack_url(url, logger=None): - """Checks if Ftrack server is responding.""" - - if logger is None: - logger = 
Logger.get_logger(__name__) - - url = url.strip("/ ") - if not url: - logger.error("Ftrack URL is not set!") - return None - - if not url.startswith("http"): - url = "https://" + url - - ftrack_url = None - if url and _check_ftrack_url(url): - ftrack_url = url - - if not ftrack_url and not url.endswith("ftrackapp.com"): - ftrackapp_url = url + ".ftrackapp.com" - if _check_ftrack_url(ftrackapp_url): - ftrack_url = ftrackapp_url - - if not ftrack_url and _check_ftrack_url(url): - ftrack_url = url - - if ftrack_url: - logger.debug("Ftrack server \"{}\" is accessible.".format(ftrack_url)) - - else: - logger.error("Ftrack server \"{}\" is not accessible!".format(url)) - - return ftrack_url - - -@click_wrap.group(FtrackModule.name, help="Ftrack module related commands.") -def cli_main(): - pass - - -@cli_main.command() -@click_wrap.option("-d", "--debug", is_flag=True, help="Print debug messages") -@click_wrap.option("--ftrack-url", envvar="FTRACK_SERVER", - help="Ftrack server url") -@click_wrap.option("--ftrack-user", envvar="FTRACK_API_USER", - help="Ftrack api user") -@click_wrap.option("--ftrack-api-key", envvar="FTRACK_API_KEY", - help="Ftrack api key") -@click_wrap.option("--legacy", is_flag=True, - help="run event server without mongo storing") -@click_wrap.option("--clockify-api-key", envvar="CLOCKIFY_API_KEY", - help="Clockify API key.") -@click_wrap.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE", - help="Clockify workspace") -def eventserver( - debug, - ftrack_url, - ftrack_user, - ftrack_api_key, - legacy, - clockify_api_key, - clockify_workspace -): - """Launch ftrack event server. - - This should be ideally used by system service (such us systemd or upstart - on linux and window service). - """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "3" - - from .ftrack_server.event_server_cli import run_event_server - - return run_event_server( - ftrack_url, - ftrack_user, - ftrack_api_key, - legacy, - clockify_api_key, - clockify_workspace - ) diff --git a/openpype/modules/ftrack/ftrack_server/__init__.py b/openpype/modules/ftrack/ftrack_server/__init__.py deleted file mode 100644 index 8e5f7c4c51..0000000000 --- a/openpype/modules/ftrack/ftrack_server/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .ftrack_server import FtrackServer - - -__all__ = ( - "FtrackServer", -) diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py deleted file mode 100644 index 77f479ee20..0000000000 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ /dev/null @@ -1,467 +0,0 @@ -import os -import signal -import datetime -import subprocess -import socket -import json -import getpass -import atexit -import time -import uuid - -import ftrack_api -import pymongo -from openpype.client.mongo import ( - OpenPypeMongoConnection, - validate_mongo_connection, -) -from openpype.lib import ( - get_openpype_execute_args, - get_openpype_version, - get_build_version, -) -from openpype_modules.ftrack import ( - FTRACK_MODULE_DIR, - resolve_ftrack_url, -) -from openpype_modules.ftrack.lib import credentials -from openpype_modules.ftrack.ftrack_server import socket_thread -from openpype_modules.ftrack.ftrack_server.lib import get_host_ip - - -class MongoPermissionsError(Exception): - """Is used when is created multiple objects of same RestApi class.""" - def __init__(self, message=None): - if not message: - message = "Exiting because have issue with access to MongoDB" - super().__init__(message) - - -def 
check_mongo_url(mongo_uri, log_error=False): - """Checks if mongo server is responding""" - try: - validate_mongo_connection(mongo_uri) - - except pymongo.errors.InvalidURI as err: - if log_error: - print("Can't connect to MongoDB at {} because: {}".format( - mongo_uri, err - )) - return False - - except pymongo.errors.ServerSelectionTimeoutError as err: - if log_error: - print("Can't connect to MongoDB at {} because: {}".format( - mongo_uri, err - )) - return False - - return True - - -def validate_credentials(url, user, api): - first_validation = True - if not user: - print('- Ftrack Username is not set') - first_validation = False - if not api: - print('- Ftrack API key is not set') - first_validation = False - if not first_validation: - return False - - try: - session = ftrack_api.Session( - server_url=url, - api_user=user, - api_key=api - ) - session.close() - except Exception as e: - print("Can't log into Ftrack with used credentials:") - ftrack_cred = { - "Ftrack server": str(url), - "Username": str(user), - "API key": str(api) - } - item_lens = [len(key) + 1 for key in ftrack_cred.keys()] - justify_len = max(*item_lens) - for key, value in ftrack_cred.items(): - print("{} {}".format( - (key + ":").ljust(justify_len, " "), - value - )) - return False - - print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format( - user, api - )) - return True - - -def legacy_server(ftrack_url): - # Current file - scripts_dir = os.path.join(FTRACK_MODULE_DIR, "scripts") - - min_fail_seconds = 5 - max_fail_count = 3 - wait_time_after_max_fail = 10 - - subproc = None - subproc_path = "{}/sub_legacy_server.py".format(scripts_dir) - subproc_last_failed = datetime.datetime.now() - subproc_failed_count = 0 - - ftrack_accessible = False - printed_ftrack_error = False - - while True: - if not ftrack_accessible: - ftrack_accessible = resolve_ftrack_url(ftrack_url) - - # Run threads only if Ftrack is accessible - if not ftrack_accessible and not printed_ftrack_error: - print("Can't access Ftrack {} <{}>".format( - ftrack_url, str(datetime.datetime.now()) - )) - if subproc is not None: - if subproc.poll() is None: - subproc.terminate() - - subproc = None - - printed_ftrack_error = True - - time.sleep(1) - continue - - printed_ftrack_error = False - - if subproc is None: - if subproc_failed_count < max_fail_count: - args = get_openpype_execute_args("run", subproc_path) - subproc = subprocess.Popen( - args, - stdout=subprocess.PIPE - ) - elif subproc_failed_count == max_fail_count: - print(( - "Storer failed {}times I'll try to run again {}s later" - ).format(str(max_fail_count), str(wait_time_after_max_fail))) - subproc_failed_count += 1 - elif (( - datetime.datetime.now() - subproc_last_failed - ).seconds > wait_time_after_max_fail): - subproc_failed_count = 0 - - # If thread failed test Ftrack and Mongo connection - elif subproc.poll() is not None: - subproc = None - ftrack_accessible = False - - _subproc_last_failed = datetime.datetime.now() - delta_time = (_subproc_last_failed - subproc_last_failed).seconds - if delta_time < min_fail_seconds: - subproc_failed_count += 1 - else: - subproc_failed_count = 0 - subproc_last_failed = _subproc_last_failed - - time.sleep(1) - - -def main_loop(ftrack_url): - """ This is main loop of event handling. - - Loop is handling threads which handles subprocesses of event storer and - processor. When one of threads is stopped it is tested to connect to - ftrack and mongo server. Threads are not started when ftrack or mongo - server is not accessible. 
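Both `legacy_server()` and `main_loop()` above apply the same supervision policy: respawn a dead worker immediately, but once it has failed `max_fail_count` times in quick succession (less than `min_fail_seconds` apart), pause `wait_time_after_max_fail` seconds before allowing another attempt. A condensed sketch of just that restart policy; `supervise` and `spawn` are illustrative names, and `spawn` is assumed to return a `Popen`-like object with `poll()`:

```python
import datetime
import time


def supervise(spawn, min_fail_seconds=5, max_fail_count=3,
              wait_after_max_fail=10):
    """Keep respawning ``spawn()``, backing off after repeated quick failures.

    Sketch of the restart loop used by the event server supervisors above.
    """
    proc = None
    last_failed = datetime.datetime.now()
    failed_count = 0

    while True:
        if proc is None:
            if failed_count < max_fail_count:
                proc = spawn()
            elif failed_count == max_fail_count:
                print("Failed {} times, retrying in {}s".format(
                    max_fail_count, wait_after_max_fail))
                failed_count += 1
            elif ((datetime.datetime.now() - last_failed).seconds
                    > wait_after_max_fail):
                failed_count = 0  # cool-down elapsed, allow a fresh start
        elif proc.poll() is not None:
            # Worker died; deaths within min_fail_seconds of each other
            # count toward the failure limit, otherwise the counter resets.
            now = datetime.datetime.now()
            if (now - last_failed).seconds < min_fail_seconds:
                failed_count += 1
            else:
                failed_count = 0
            last_failed = now
            proc = None
        time.sleep(1)
```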
When threads are started it is checked for socket - signals as heartbeat. Heartbeat must become at least once per 30sec - otherwise thread will be killed. - """ - - os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1()) - - mongo_uri = OpenPypeMongoConnection.get_default_mongo_url() - - # Current file - scripts_dir = os.path.join(FTRACK_MODULE_DIR, "scripts") - - min_fail_seconds = 5 - max_fail_count = 3 - wait_time_after_max_fail = 10 - - # Threads data - storer_name = "StorerThread" - storer_port = 10001 - storer_path = "{}/sub_event_storer.py".format(scripts_dir) - storer_thread = None - storer_last_failed = datetime.datetime.now() - storer_failed_count = 0 - - processor_name = "ProcessorThread" - processor_port = 10011 - processor_path = "{}/sub_event_processor.py".format(scripts_dir) - processor_thread = None - processor_last_failed = datetime.datetime.now() - processor_failed_count = 0 - - statuser_name = "StorerThread" - statuser_port = 10021 - statuser_path = "{}/sub_event_status.py".format(scripts_dir) - statuser_thread = None - statuser_last_failed = datetime.datetime.now() - statuser_failed_count = 0 - - ftrack_accessible = False - mongo_accessible = False - - printed_ftrack_error = False - printed_mongo_error = False - - # stop threads on exit - # TODO check if works and args have thread objects! - def on_exit(processor_thread, storer_thread, statuser_thread): - if processor_thread is not None: - processor_thread.stop() - processor_thread.join() - processor_thread = None - - if storer_thread is not None: - storer_thread.stop() - storer_thread.join() - storer_thread = None - - if statuser_thread is not None: - statuser_thread.stop() - statuser_thread.join() - statuser_thread = None - - atexit.register( - on_exit, - processor_thread=processor_thread, - storer_thread=storer_thread, - statuser_thread=statuser_thread - ) - - host_name = socket.gethostname() - host_ip = get_host_ip() - - main_info = [ - ["created_at", datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S")], - ["Username", getpass.getuser()], - ["Host Name", host_name], - ["Host IP", host_ip or "N/A"], - ["OpenPype executable", get_openpype_execute_args()[-1]], - ["OpenPype version", get_openpype_version() or "N/A"], - ["OpenPype build version", get_build_version() or "N/A"] - ] - main_info_str = json.dumps(main_info) - # Main loop - while True: - # Check if accessible Ftrack and Mongo url - if not ftrack_accessible: - ftrack_accessible = resolve_ftrack_url(ftrack_url) - - if not mongo_accessible: - mongo_accessible = check_mongo_url(mongo_uri) - - # Run threads only if Ftrack is accessible - if not ftrack_accessible or not mongo_accessible: - if not mongo_accessible and not printed_mongo_error: - print("Can't access Mongo {}".format(mongo_uri)) - - if not ftrack_accessible and not printed_ftrack_error: - print("Can't access Ftrack {}".format(ftrack_url)) - - if storer_thread is not None: - storer_thread.stop() - storer_thread.join() - storer_thread = None - - if processor_thread is not None: - processor_thread.stop() - processor_thread.join() - processor_thread = None - - printed_ftrack_error = True - printed_mongo_error = True - - time.sleep(1) - continue - - printed_ftrack_error = False - printed_mongo_error = False - - # ====== STATUSER ======= - if statuser_thread is None: - if statuser_failed_count < max_fail_count: - statuser_thread = socket_thread.StatusSocketThread( - statuser_name, statuser_port, statuser_path, - [main_info_str] - ) - statuser_thread.start() - - elif statuser_failed_count == max_fail_count: - 
print(( - "Statuser failed {}times in row" - " I'll try to run again {}s later" - ).format(str(max_fail_count), str(wait_time_after_max_fail))) - statuser_failed_count += 1 - - elif (( - datetime.datetime.now() - statuser_last_failed - ).seconds > wait_time_after_max_fail): - statuser_failed_count = 0 - - # If thread failed test Ftrack and Mongo connection - elif not statuser_thread.is_alive(): - statuser_thread.join() - statuser_thread = None - ftrack_accessible = False - mongo_accessible = False - - _processor_last_failed = datetime.datetime.now() - delta_time = ( - _processor_last_failed - statuser_last_failed - ).seconds - - if delta_time < min_fail_seconds: - statuser_failed_count += 1 - else: - statuser_failed_count = 0 - statuser_last_failed = _processor_last_failed - - elif statuser_thread.stop_subprocess: - print("Main process was stopped by action") - on_exit(processor_thread, storer_thread, statuser_thread) - os.kill(os.getpid(), signal.SIGTERM) - return 1 - - # ====== STORER ======= - # Run backup thread which does not require mongo to work - if storer_thread is None: - if storer_failed_count < max_fail_count: - storer_thread = socket_thread.SocketThread( - storer_name, storer_port, storer_path - ) - storer_thread.start() - - elif storer_failed_count == max_fail_count: - print(( - "Storer failed {}times I'll try to run again {}s later" - ).format(str(max_fail_count), str(wait_time_after_max_fail))) - storer_failed_count += 1 - elif (( - datetime.datetime.now() - storer_last_failed - ).seconds > wait_time_after_max_fail): - storer_failed_count = 0 - - # If thread failed test Ftrack and Mongo connection - elif not storer_thread.is_alive(): - if storer_thread.mongo_error: - raise MongoPermissionsError() - storer_thread.join() - storer_thread = None - ftrack_accessible = False - mongo_accessible = False - - _storer_last_failed = datetime.datetime.now() - delta_time = (_storer_last_failed - storer_last_failed).seconds - if delta_time < min_fail_seconds: - storer_failed_count += 1 - else: - storer_failed_count = 0 - storer_last_failed = _storer_last_failed - - # ====== PROCESSOR ======= - if processor_thread is None: - if processor_failed_count < max_fail_count: - processor_thread = socket_thread.SocketThread( - processor_name, processor_port, processor_path - ) - processor_thread.start() - - elif processor_failed_count == max_fail_count: - print(( - "Processor failed {}times in row" - " I'll try to run again {}s later" - ).format(str(max_fail_count), str(wait_time_after_max_fail))) - processor_failed_count += 1 - - elif (( - datetime.datetime.now() - processor_last_failed - ).seconds > wait_time_after_max_fail): - processor_failed_count = 0 - - # If thread failed test Ftrack and Mongo connection - elif not processor_thread.is_alive(): - if processor_thread.mongo_error: - raise Exception( - "Exiting because have issue with access to MongoDB" - ) - processor_thread.join() - processor_thread = None - ftrack_accessible = False - mongo_accessible = False - - _processor_last_failed = datetime.datetime.now() - delta_time = ( - _processor_last_failed - processor_last_failed - ).seconds - - if delta_time < min_fail_seconds: - processor_failed_count += 1 - else: - processor_failed_count = 0 - processor_last_failed = _processor_last_failed - - if statuser_thread is not None: - statuser_thread.set_process("storer", storer_thread) - statuser_thread.set_process("processor", processor_thread) - - time.sleep(1) - - -def run_event_server( - ftrack_url, - ftrack_user, - ftrack_api_key, - legacy, - 
clockify_api_key, - clockify_workspace -): - if not ftrack_user or not ftrack_api_key: - print(( - "Ftrack user/api key were not passed." - " Trying to use credentials from user keyring." - )) - cred = credentials.get_credentials(ftrack_url) - ftrack_user = cred.get("username") - ftrack_api_key = cred.get("api_key") - - if clockify_workspace and clockify_api_key: - os.environ["CLOCKIFY_WORKSPACE"] = clockify_workspace - os.environ["CLOCKIFY_API_KEY"] = clockify_api_key - - # Check url regex and accessibility - ftrack_url = resolve_ftrack_url(ftrack_url) - if not ftrack_url: - print('Exiting! < Please enter Ftrack server url >') - return 1 - - # Validate entered credentials - if not validate_credentials(ftrack_url, ftrack_user, ftrack_api_key): - print('Exiting! < Please enter valid credentials >') - return 1 - - # Set Ftrack environments - os.environ["FTRACK_SERVER"] = ftrack_url - os.environ["FTRACK_API_USER"] = ftrack_user - os.environ["FTRACK_API_KEY"] = ftrack_api_key - - if legacy: - return legacy_server(ftrack_url) - - return main_loop(ftrack_url) diff --git a/openpype/modules/ftrack/ftrack_server/ftrack_server.py b/openpype/modules/ftrack/ftrack_server/ftrack_server.py deleted file mode 100644 index c75b8f7172..0000000000 --- a/openpype/modules/ftrack/ftrack_server/ftrack_server.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import time -import types -import logging -import traceback - -import ftrack_api - -from openpype.lib import ( - Logger, - modules_from_path -) - -""" -# Required - Needed for connection to Ftrack -FTRACK_SERVER # Ftrack server e.g. "https://myFtrack.ftrackapp.com" -FTRACK_API_KEY # Ftrack user's API key "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" -FTRACK_API_USER # Ftrack username e.g. "user.name" - -# Required - Paths to folder with actions -FTRACK_ACTIONS_PATH # Paths to folders where are located actions - - EXAMPLE: "M:/FtrackApi/../actions/" -FTRACK_EVENTS_PATH # Paths to folders where are located actions - - EXAMPLE: "M:/FtrackApi/../events/" - -# Required - Needed for import included modules -PYTHONPATH # Path to ftrack_api and paths to all modules used in actions - - path to ftrack_action_handler, etc. -""" - - -class FtrackServer: - def __init__(self, handler_paths=None): - """ - - 'type' is by default set to 'action' - Runs Action server - - enter 'event' for Event server - - EXAMPLE FOR EVENT SERVER: - ... - server = FtrackServer() - server.run_server() - .. 
- """ - - # set Ftrack logging to Warning only - OPTIONAL - ftrack_log = logging.getLogger("ftrack_api") - ftrack_log.setLevel(logging.WARNING) - - self.log = Logger.get_logger(__name__) - - self.stopped = True - self.is_running = False - - self.handler_paths = handler_paths or [] - - def stop_session(self): - self.stopped = True - if self.session.event_hub.connected is True: - self.session.event_hub.disconnect() - self.session.close() - self.session = None - - def set_files(self, paths): - # Iterate all paths - register_functions = [] - for path in paths: - # Try to format path with environments - try: - path = path.format(**os.environ) - except BaseException: - pass - - # Get all modules with functions - modules, crashed = modules_from_path(path) - for filepath, exc_info in crashed: - self.log.warning("Filepath load crashed {}.\n{}".format( - filepath, traceback.format_exception(*exc_info) - )) - - for filepath, module in modules: - register_function = None - for name, attr in module.__dict__.items(): - if ( - name == "register" - and isinstance(attr, types.FunctionType) - ): - register_function = attr - break - - if not register_function: - self.log.warning( - "\"{}\" - Missing register method".format(filepath) - ) - continue - - register_functions.append( - (filepath, register_function) - ) - - if not register_functions: - self.log.warning(( - "There are no events with `register` function" - " in registered paths: \"{}\"" - ).format("| ".join(paths))) - - for filepath, register_func in register_functions: - try: - register_func(self.session) - except Exception: - self.log.warning( - "\"{}\" - register was not successful".format(filepath), - exc_info=True - ) - - def set_handler_paths(self, paths): - self.handler_paths = paths - if self.is_running: - self.stop_session() - self.run_server() - - elif not self.stopped: - self.run_server() - - def run_server(self, session=None, load_files=True): - self.stopped = False - self.is_running = True - if not session: - session = ftrack_api.Session(auto_connect_event_hub=True) - - # Wait until session has connected event hub - if session._auto_connect_event_hub_thread: - # Use timeout from session (since ftrack-api 2.1.0) - timeout = getattr(session, "request_timeout", 60) - started = time.time() - while not session.event_hub.connected: - if (time.time() - started) > timeout: - raise RuntimeError(( - "Connection to Ftrack was not created in {} seconds" - ).format(timeout)) - time.sleep(0.1) - - self.session = session - if load_files: - if not self.handler_paths: - self.log.warning(( - "Paths to event handlers are not set." - " Ftrack server won't launch." - )) - self.is_running = False - return - - self.set_files(self.handler_paths) - - msg = "Registration of event handlers has finished!" 
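The set_files() loader above accepts any module that exposes a top-level register function taking the session; everything else about a handler file is up to its author. A minimal sketch of such a handler follows (only the register name and signature are dictated by the loader; the topic string and callback body are illustrative, not from the original code):

    def register(session):
        """Entry point discovered by FtrackServer.set_files()."""
        def on_update(event):
            # React to entity updates pushed through the ftrack event hub.
            print(event["topic"], event["data"].get("entities"))

        session.event_hub.subscribe("topic=ftrack.update", on_update)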
- self.log.info(len(msg) * "*") - self.log.info(msg) - - # keep event_hub on session running - self.session.event_hub.wait() - self.is_running = False diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py deleted file mode 100644 index 2226c85ef9..0000000000 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ /dev/null @@ -1,408 +0,0 @@ -import os -import sys -import logging -import getpass -import atexit -import threading -import datetime -import time -import queue -import collections -import appdirs -import socket - -import pymongo -import requests -import ftrack_api -import ftrack_api.session -import ftrack_api.cache -import ftrack_api.operation -import ftrack_api._centralized_storage_scenario -import ftrack_api.event -from ftrack_api.logging import LazyLogMessage as L -try: - from weakref import WeakMethod -except ImportError: - from ftrack_api._weakref import WeakMethod -from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info - -from openpype.client import OpenPypeMongoConnection -from openpype.lib import Logger - -TOPIC_STATUS_SERVER = "openpype.event.server.status" -TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result" - - -def get_host_ip(): - host_name = socket.gethostname() - try: - return socket.gethostbyname(host_name) - except Exception: - pass - - return None - - -class SocketBaseEventHub(ftrack_api.event.hub.EventHub): - - hearbeat_msg = b"hearbeat" - heartbeat_callbacks = [] - - def __init__(self, *args, **kwargs): - self.sock = kwargs.pop("sock") - super(SocketBaseEventHub, self).__init__(*args, **kwargs) - - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which extend heartbeat""" - code_name = self._code_name_mapping[code] - if code_name == "heartbeat": - # Reply with heartbeat. 
- for callback in self.heartbeat_callbacks: - callback() - - self.sock.sendall(self.hearbeat_msg) - return self._send_packet(self._code_name_mapping["heartbeat"]) - - return super(SocketBaseEventHub, self)._handle_packet( - code, packet_identifier, path, data - ) - - -class StatusEventHub(SocketBaseEventHub): - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which extend heartbeat""" - code_name = self._code_name_mapping[code] - if code_name == "connect": - event = ftrack_api.event.base.Event( - topic="openpype.status.started", - data={}, - source={ - "id": self.id, - "user": {"username": self._api_user} - } - ) - self._event_queue.put(event) - - return super(StatusEventHub, self)._handle_packet( - code, packet_identifier, path, data - ) - - -class StorerEventHub(SocketBaseEventHub): - - hearbeat_msg = b"storer" - - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which extend heartbeat""" - code_name = self._code_name_mapping[code] - if code_name == "connect": - event = ftrack_api.event.base.Event( - topic="openpype.storer.started", - data={}, - source={ - "id": self.id, - "user": {"username": self._api_user} - } - ) - self._event_queue.put(event) - - return super(StorerEventHub, self)._handle_packet( - code, packet_identifier, path, data - ) - - -class ProcessEventHub(SocketBaseEventHub): - hearbeat_msg = b"processor" - - is_collection_created = False - pypelog = Logger.get_logger("Session Processor") - - def __init__(self, *args, **kwargs): - self.mongo_url = None - self.dbcon = None - - super(ProcessEventHub, self).__init__(*args, **kwargs) - - def prepare_dbcon(self): - try: - database_name, collection_name = get_ftrack_event_mongo_info() - mongo_client = OpenPypeMongoConnection.get_mongo_client() - self.dbcon = mongo_client[database_name][collection_name] - self.mongo_client = mongo_client - - except pymongo.errors.AutoReconnect: - self.pypelog.error(( - "Mongo server \"{}\" is not responding, exiting." - ).format(OpenPypeMongoConnection.get_default_mongo_url())) - sys.exit(0) - - except pymongo.errors.OperationFailure: - self.pypelog.error(( - "Error with Mongo access, probably permissions." - "Check if exist database with name \"{}\"" - " and collection \"{}\" inside." - ).format(self.database, self.collection_name)) - self.sock.sendall(b"MongoError") - sys.exit(0) - - def wait(self, duration=None): - """Overridden wait - Event are loaded from Mongo DB when queue is empty. Handled event is - set as processed in Mongo DB. - """ - started = time.time() - self.prepare_dbcon() - while True: - try: - event = self._event_queue.get(timeout=0.1) - except queue.Empty: - if not self.load_events(): - time.sleep(0.5) - else: - try: - self._handle(event) - - mongo_id = event["data"].get("_event_mongo_id") - if mongo_id is None: - continue - - self.dbcon.update_one( - {"_id": mongo_id}, - {"$set": {"pype_data.is_processed": True}} - ) - - except pymongo.errors.AutoReconnect: - self.pypelog.error(( - "Mongo server \"{}\" is not responding, exiting." - ).format(os.environ["OPENPYPE_MONGO"])) - sys.exit(0) - # Additional special processing of events. 
- if event['topic'] == 'ftrack.meta.disconnected': - break - - if duration is not None: - if (time.time() - started) > duration: - break - - def load_events(self): - """Load not processed events sorted by stored date""" - ago_date = datetime.datetime.now() - datetime.timedelta(days=3) - self.dbcon.delete_many({ - "pype_data.stored": {"$lte": ago_date}, - "pype_data.is_processed": True - }) - - not_processed_events = self.dbcon.find( - {"pype_data.is_processed": False} - ).sort( - [("pype_data.stored", pymongo.ASCENDING)] - ).limit(100) - - found = False - for event_data in not_processed_events: - new_event_data = { - k: v for k, v in event_data.items() - if k not in ["_id", "pype_data"] - } - try: - event = ftrack_api.event.base.Event(**new_event_data) - event["data"]["_event_mongo_id"] = event_data["_id"] - except Exception: - self.logger.exception(L( - 'Failed to convert payload into event: {0}', - event_data - )) - continue - found = True - self._event_queue.put(event) - - return found - - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which skip events and extend heartbeat""" - code_name = self._code_name_mapping[code] - if code_name == "event": - return - - return super()._handle_packet(code, packet_identifier, path, data) - - -class CustomEventHubSession(ftrack_api.session.Session): - '''An isolated session for interaction with an ftrack server.''' - def __init__( - self, server_url=None, api_key=None, api_user=None, auto_populate=True, - plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=False, schema_cache_path=None, - plugin_arguments=None, timeout=60, **kwargs - ): - self.kwargs = kwargs - - super(ftrack_api.session.Session, self).__init__() - self.logger = logging.getLogger( - __name__ + '.' + self.__class__.__name__ - ) - self._closed = False - - if server_url is None: - server_url = os.environ.get('FTRACK_SERVER') - - if not server_url: - raise TypeError( - 'Required "server_url" not specified. Pass as argument or set ' - 'in environment variable FTRACK_SERVER.' - ) - - self._server_url = server_url - - if api_key is None: - api_key = os.environ.get( - 'FTRACK_API_KEY', - # Backwards compatibility - os.environ.get('FTRACK_APIKEY') - ) - - if not api_key: - raise TypeError( - 'Required "api_key" not specified. Pass as argument or set in ' - 'environment variable FTRACK_API_KEY.' - ) - - self._api_key = api_key - - if api_user is None: - api_user = os.environ.get('FTRACK_API_USER') - if not api_user: - try: - api_user = getpass.getuser() - except Exception: - pass - - if not api_user: - raise TypeError( - 'Required "api_user" not specified. Pass as argument, set in ' - 'environment variable FTRACK_API_USER or one of the standard ' - 'environment variables used by Python\'s getpass module.' - ) - - self._api_user = api_user - - # Currently pending operations. 
- self.recorded_operations = ftrack_api.operation.Operations() - - # OpenPype change - In new API are operations properties - new_api = hasattr(self.__class__, "record_operations") - - if new_api: - self._record_operations = collections.defaultdict( - lambda: True - ) - self._auto_populate = collections.defaultdict( - lambda: auto_populate - ) - else: - self.record_operations = True - self.auto_populate = auto_populate - - self.cache_key_maker = cache_key_maker - if self.cache_key_maker is None: - self.cache_key_maker = ftrack_api.cache.StringKeyMaker() - - # Enforce always having a memory cache at top level so that the same - # in-memory instance is returned from session. - self.cache = ftrack_api.cache.LayeredCache([ - ftrack_api.cache.MemoryCache() - ]) - - if cache is not None: - if callable(cache): - cache = cache(self) - - if cache is not None: - self.cache.caches.append(cache) - - if new_api: - self.merge_lock = threading.RLock() - - self._managed_request = None - self._request = requests.Session() - self._request.auth = ftrack_api.session.SessionAuthentication( - self._api_key, self._api_user - ) - self.request_timeout = timeout - - # Fetch server information and in doing so also check credentials. - self._server_information = self._fetch_server_information() - - # Now check compatibility of server based on retrieved information. - self.check_server_compatibility() - - # Construct event hub and load plugins. - self._event_hub = self._create_event_hub() - - self._auto_connect_event_hub_thread = None - if auto_connect_event_hub: - # Connect to event hub in background thread so as not to block main - # session usage waiting for event hub connection. - self._auto_connect_event_hub_thread = threading.Thread( - target=self._event_hub.connect - ) - self._auto_connect_event_hub_thread.daemon = True - self._auto_connect_event_hub_thread.start() - - # Register to auto-close session on exit. - atexit.register(WeakMethod(self.close)) - - self._plugin_paths = plugin_paths - if self._plugin_paths is None: - self._plugin_paths = os.environ.get( - 'FTRACK_EVENT_PLUGIN_PATH', '' - ).split(os.pathsep) - - self._discover_plugins(plugin_arguments=plugin_arguments) - - # TODO: Make schemas read-only and non-mutable (or at least without - # rebuilding types)? 
- if schema_cache_path is not False: - if schema_cache_path is None: - schema_cache_path = appdirs.user_cache_dir() - schema_cache_path = os.environ.get( - 'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path - ) - - schema_cache_path = os.path.join( - schema_cache_path, 'ftrack_api_schema_cache.json' - ) - - self.schemas = self._load_schemas(schema_cache_path) - self.types = self._build_entity_type_classes(self.schemas) - - ftrack_api._centralized_storage_scenario.register(self) - - self._configure_locations() - self.event_hub.publish( - ftrack_api.event.base.Event( - topic='ftrack.api.session.ready', - data=dict( - session=self - ) - ), - synchronous=True - ) - - def _create_event_hub(self): - return ftrack_api.event.hub.EventHub( - self._server_url, - self._api_user, - self._api_key - ) - - -class SocketSession(CustomEventHubSession): - def _create_event_hub(self): - self.sock = self.kwargs["sock"] - return self.kwargs["Eventhub"]( - self._server_url, - self._api_user, - self._api_key, - sock=self.sock - ) diff --git a/openpype/modules/ftrack/ftrack_server/socket_thread.py b/openpype/modules/ftrack/ftrack_server/socket_thread.py deleted file mode 100644 index 3ef55f8daa..0000000000 --- a/openpype/modules/ftrack/ftrack_server/socket_thread.py +++ /dev/null @@ -1,199 +0,0 @@ -import os -import sys -import time -import socket -import threading -import traceback -import subprocess - -from openpype.lib import get_openpype_execute_args, Logger - - -class SocketThread(threading.Thread): - """Thread that watches the subprocess of the event storer or processor.""" - - MAX_TIMEOUT = int(os.environ.get("OPENPYPE_FTRACK_SOCKET_TIMEOUT", 45)) - - def __init__(self, name, port, filepath, additional_args=[]): - super(SocketThread, self).__init__() - self.log = Logger.get_logger(self.__class__.__name__) - self.setName(name) - self.name = name - self.port = port - self.filepath = filepath - self.additional_args = additional_args - - self.sock = None - self.subproc = None - self.connection = None - self._is_running = False - self.finished = False - - self.mongo_error = False - - self._temp_data = {} - - def stop(self): - self._is_running = False - - def run(self): - self._is_running = True - time_socket = time.time() - # Create a TCP/IP socket - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock = sock - - # Bind the socket to the port - skip already used ports - while True: - try: - server_address = ("localhost", self.port) - sock.bind(server_address) - break - except OSError: - self.port += 1 - - self.log.debug( - "Running socket thread on {}:{}".format(*server_address) - ) - - env = os.environ.copy() - env["OPENPYPE_PROCESS_MONGO_ID"] = str(Logger.mongo_process_id) - # OpenPype executable (with path to start script if not build) - args = get_openpype_execute_args( - # Add `run` command - "run", - self.filepath, - *self.additional_args, - str(self.port) - ) - kwargs = { - "env": env, - "stdin": subprocess.PIPE - } - if not sys.stdout: - # Redirect to devnull if stdout is None - kwargs["stdout"] = subprocess.DEVNULL - kwargs["stderr"] = subprocess.DEVNULL - - self.subproc = subprocess.Popen(args, **kwargs) - - # Listen for incoming connections - sock.listen(1) - sock.settimeout(1.0) - while True: - if not self._is_running: - break - try: - connection, client_address = sock.accept() - time_socket = time.time() - connection.settimeout(1.0) - self.connection = connection - - except socket.timeout: - if (time.time() - time_socket) > self.MAX_TIMEOUT: - self.log.error("Connection timeout passed. 
Terminating.") - self._is_running = False - self.subproc.terminate() - break - continue - - try: - time_con = time.time() - # Receive the data in small chunks and retransmit it - while True: - try: - if not self._is_running: - break - data = None - try: - data = self.get_data_from_con(connection) - time_con = time.time() - - except socket.timeout: - if (time.time() - time_con) > self.MAX_TIMEOUT: - self.log.error( - "Connection timeout passed. Terminating." - ) - self._is_running = False - self.subproc.terminate() - break - continue - - except ConnectionResetError: - self._is_running = False - break - - self._handle_data(connection, data) - - except Exception as exc: - self.log.error( - "Event server process failed", exc_info=True - ) - - finally: - # Clean up the connection - connection.close() - if self.subproc.poll() is None: - self.subproc.terminate() - - self.finished = True - - def get_data_from_con(self, connection): - return connection.recv(16) - - def _handle_data(self, connection, data): - if not data: - return - - if data == b"MongoError": - self.mongo_error = True - connection.sendall(data) - - -class StatusSocketThread(SocketThread): - process_name_mapping = { - b"RestartS": "storer", - b"RestartP": "processor", - b"RestartM": "main" - } - - def __init__(self, *args, **kwargs): - self.process_threads = {} - self.stop_subprocess = False - super(StatusSocketThread, self).__init__(*args, **kwargs) - - def set_process(self, process_name, thread): - try: - if not self.subproc: - self.process_threads[process_name] = None - return - - if ( - process_name in self.process_threads and - self.process_threads[process_name] == thread - ): - return - - self.process_threads[process_name] = thread - self.subproc.stdin.write( - str.encode("reset:{}\r\n".format(process_name)) - ) - self.subproc.stdin.flush() - - except Exception: - print("Could not set thread in StatusSocketThread") - traceback.print_exception(*sys.exc_info()) - - def _handle_data(self, connection, data): - if not data: - return - - process_name = self.process_name_mapping.get(data) - if process_name: - if process_name == "main": - self.stop_subprocess = True - else: - subp = self.process_threads.get(process_name) - if subp: - subp.stop() - connection.sendall(data) diff --git a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py deleted file mode 100644 index 5c780a51c4..0000000000 --- a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py +++ /dev/null @@ -1,165 +0,0 @@ -import os - -import ftrack_api -from openpype.settings import get_project_settings -from openpype.lib.applications import PostLaunchHook, LaunchTypes - - -class PostFtrackHook(PostLaunchHook): - order = None - launch_types = {LaunchTypes.local} - - def execute(self): - project_name = self.data.get("project_name") - asset_name = self.data.get("asset_name") - task_name = self.data.get("task_name") - - missing_context_keys = set() - if not project_name: - missing_context_keys.add("project_name") - if not asset_name: - missing_context_keys.add("asset_name") - if not task_name: - missing_context_keys.add("task_name") - - if missing_context_keys: - missing_keys_str = ", ".join([ - "\"{}\"".format(key) for key in missing_context_keys - ]) - self.log.debug("Hook {} skipped. 
Missing data keys: {}".format( - self.__class__.__name__, missing_keys_str - )) - return - - required_keys = ("FTRACK_SERVER", "FTRACK_API_USER", "FTRACK_API_KEY") - for key in required_keys: - if not os.environ.get(key): - self.log.debug(( - "Missing required environment \"{}\"" - " for Ftrack after launch procedure." - ).format(key)) - return - - try: - session = ftrack_api.Session(auto_connect_event_hub=True) - self.log.debug("Ftrack session created") - except Exception: - self.log.warning("Couldn't create Ftrack session") - return - - try: - entity = self.find_ftrack_task_entity( - session, project_name, asset_name, task_name - ) - if entity: - self.ftrack_status_change(session, entity, project_name) - - except Exception: - self.log.warning( - "Couldn't finish Ftrack procedure.", exc_info=True - ) - return - - finally: - session.close() - - def find_ftrack_task_entity( - self, session, project_name, asset_name, task_name - ): - project_entity = session.query( - "Project where full_name is \"{}\"".format(project_name) - ).first() - if not project_entity: - self.log.warning( - "Couldn't find project \"{}\" in Ftrack.".format(project_name) - ) - return - - potential_task_entities = session.query(( - "TypedContext where parent.name is \"{}\" and project_id is \"{}\"" - ).format(asset_name, project_entity["id"])).all() - filtered_entities = [] - for _entity in potential_task_entities: - if ( - _entity.entity_type.lower() == "task" - and _entity["name"] == task_name - ): - filtered_entities.append(_entity) - - if not filtered_entities: - self.log.warning(( - "Couldn't find task \"{}\" under parent \"{}\" in Ftrack." - ).format(task_name, asset_name)) - return - - if len(filtered_entities) > 1: - self.log.warning(( - "Found more than one task \"{}\"" - " under parent \"{}\" in Ftrack." 
- ).format(task_name, asset_name)) - return - - return filtered_entities[0] - - def ftrack_status_change(self, session, entity, project_name): - project_settings = get_project_settings(project_name) - status_update = project_settings["ftrack"]["events"]["status_update"] - if not status_update["enabled"]: - self.log.debug( - "Status changes are disabled for project \"{}\"".format( - project_name - ) - ) - return - - status_mapping = status_update["mapping"] - if not status_mapping: - self.log.warning( - "Project \"{}\" does not have set status changes.".format( - project_name - ) - ) - return - - actual_status = entity["status"]["name"].lower() - already_tested = set() - ent_path = "/".join( - [ent["name"] for ent in entity["link"]] - ) - while True: - next_status_name = None - for key, value in status_mapping.items(): - if key in already_tested: - continue - - value = value.lower() - if actual_status in value or "__any__" in value: - if key != "__ignore__": - next_status_name = key - already_tested.add(key) - break - already_tested.add(key) - - if next_status_name is None: - break - - try: - query = "Status where name is \"{}\"".format( - next_status_name - ) - status = session.query(query).one() - - entity["status"] = status - session.commit() - self.log.debug("Changing status to \"{}\" <{}>".format( - next_status_name, ent_path - )) - break - - except Exception: - session.rollback() - msg = ( - "Status \"{}\" in presets wasn't found" - " on Ftrack entity type \"{}\"" - ).format(next_status_name, entity.entity_type) - self.log.warning(msg) diff --git a/openpype/modules/ftrack/lib/__init__.py b/openpype/modules/ftrack/lib/__init__.py deleted file mode 100644 index 7fc2bc99eb..0000000000 --- a/openpype/modules/ftrack/lib/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -from .constants import ( - CUST_ATTR_ID_KEY, - CUST_ATTR_AUTO_SYNC, - CUST_ATTR_GROUP, - CUST_ATTR_TOOLS, - CUST_ATTR_APPLICATIONS, - CUST_ATTR_INTENT, - FPS_KEYS -) -from .settings import ( - get_ftrack_event_mongo_info -) -from .custom_attributes import ( - default_custom_attributes_definition, - app_definitions_from_app_manager, - tool_definitions_from_app_manager, - get_openpype_attr, - query_custom_attributes -) - -from . import avalon_sync -from . 
import credentials -from .ftrack_base_handler import BaseHandler -from .ftrack_event_handler import BaseEvent -from .ftrack_action_handler import BaseAction, ServerAction, statics_icon - - -__all__ = ( - "CUST_ATTR_ID_KEY", - "CUST_ATTR_AUTO_SYNC", - "CUST_ATTR_GROUP", - "CUST_ATTR_TOOLS", - "CUST_ATTR_APPLICATIONS", - "CUST_ATTR_INTENT", - "FPS_KEYS", - - "get_ftrack_event_mongo_info", - - "default_custom_attributes_definition", - "app_definitions_from_app_manager", - "tool_definitions_from_app_manager", - "get_openpype_attr", - "query_custom_attributes", - - "avalon_sync", - - "credentials", - - "BaseHandler", - - "BaseEvent", - - "BaseAction", - "ServerAction", - "statics_icon" -) diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py deleted file mode 100644 index 7c3ba1a30c..0000000000 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ /dev/null @@ -1,2809 +0,0 @@ -import re -import json -import collections -import copy -import numbers - -import six - -from openpype.client import ( - get_project, - get_assets, - get_archived_assets, - get_subsets, - get_versions, - get_representations -) -from openpype.client.operations import ( - CURRENT_ASSET_DOC_SCHEMA, - CURRENT_PROJECT_SCHEMA, - CURRENT_PROJECT_CONFIG_SCHEMA, -) -from openpype.settings import get_anatomy_settings -from openpype.lib import ApplicationManager, Logger -from openpype.pipeline import AvalonMongoDB, schema - -from .constants import CUST_ATTR_ID_KEY, FPS_KEYS -from .custom_attributes import get_openpype_attr, query_custom_attributes - -from bson.objectid import ObjectId -from bson.errors import InvalidId -from pymongo import UpdateOne, ReplaceOne -import ftrack_api - -log = Logger.get_logger(__name__) - - -class InvalidFpsValue(Exception): - pass - - -def is_string_number(value): - """Can string value be converted to number (float).""" - if not isinstance(value, six.string_types): - raise TypeError("Expected {} got {}".format( - ", ".join(str(t) for t in six.string_types), str(type(value)) - )) - if value == ".": - return False - - if value.startswith("."): - value = "0" + value - elif value.endswith("."): - value = value + "0" - - if re.match(r"^\d+(\.\d+)?$", value) is None: - return False - return True - - -def convert_to_fps(source_value): - """Convert value into fps value. - - Non string values are kept untouched. String is tried to convert. - Valid values: - "1000" - "1000.05" - "1000,05" - ",05" - ".05" - "1000," - "1000." - "1000/1000" - "1000.05/1000" - "1000/1000.05" - "1000.05/1000.05" - "1000,05/1000" - "1000/1000,05" - "1000,05/1000,05" - - Invalid values: - "/" - "/1000" - "1000/" - "," - "." - ...any other string - - Returns: - float: Converted value. - - Raises: - InvalidFpsValue: When value can't be converted to float. 
- """ - if not isinstance(source_value, six.string_types): - if isinstance(source_value, numbers.Number): - return float(source_value) - return source_value - - value = source_value.strip().replace(",", ".") - if not value: - raise InvalidFpsValue("Got empty value") - - subs = value.split("/") - if len(subs) == 1: - str_value = subs[0] - if not is_string_number(str_value): - raise InvalidFpsValue( - "Value \"{}\" can't be converted to number.".format(value) - ) - return float(str_value) - - elif len(subs) == 2: - divident, divisor = subs - if not divident or not is_string_number(divident): - raise InvalidFpsValue( - "Divident value \"{}\" can't be converted to number".format( - divident - ) - ) - - if not divisor or not is_string_number(divisor): - raise InvalidFpsValue( - "Divisor value \"{}\" can't be converted to number".format( - divident - ) - ) - divisor_float = float(divisor) - if divisor_float == 0.0: - raise InvalidFpsValue("Can't divide by zero") - return float(divident) / divisor_float - - raise InvalidFpsValue( - "Value can't be converted to number \"{}\"".format(source_value) - ) - - -def create_chunks(iterable, chunk_size=None): - """Separate iterable into multiple chunks by size. - - Args: - iterable(list|tuple|set): Object that will be separated into chunks. - chunk_size(int): Size of one chunk. Default value is 200. - - Returns: - list: Chunked items. - """ - chunks = [] - - tupled_iterable = tuple(iterable) - if not tupled_iterable: - return chunks - iterable_size = len(tupled_iterable) - if chunk_size is None: - chunk_size = 200 - - if chunk_size < 1: - chunk_size = 1 - - for idx in range(0, iterable_size, chunk_size): - chunks.append(tupled_iterable[idx:idx + chunk_size]) - return chunks - - -def check_regex(name, entity_type, in_schema=None, schema_patterns=None): - schema_name = "asset-3.0" - if in_schema: - schema_name = in_schema - elif entity_type == "project": - schema_name = "project-2.1" - elif entity_type == "task": - schema_name = "task" - - name_pattern = None - if schema_patterns is not None: - name_pattern = schema_patterns.get(schema_name) - - if not name_pattern: - default_pattern = "^[a-zA-Z0-9_.]*$" - schema_obj = schema._cache.get(schema_name + ".json") - if not schema_obj: - name_pattern = default_pattern - else: - name_pattern = ( - schema_obj - .get("properties", {}) - .get("name", {}) - .get("pattern", default_pattern) - ) - if schema_patterns is not None: - schema_patterns[schema_name] = name_pattern - - if re.match(name_pattern, name): - return True - return False - - -def join_query_keys(keys): - return ",".join(["\"{}\"".format(key) for key in keys]) - - -def get_python_type_for_custom_attribute(cust_attr, cust_attr_type_name=None): - """Python type that should value of custom attribute have. - - This function is mainly for number type which is always float from ftrack. - - Returns: - type: Python type which call be called on object to convert the object - to the type or None if can't figure out. 
- """ - if cust_attr_type_name is None: - cust_attr_type_name = cust_attr["type"]["name"] - - if cust_attr_type_name == "text": - return str - - if cust_attr_type_name == "boolean": - return bool - - if cust_attr_type_name in ("number", "enumerator"): - cust_attr_config = json.loads(cust_attr["config"]) - if cust_attr_type_name == "number": - if cust_attr_config["isdecimal"]: - return float - return int - - if cust_attr_type_name == "enumerator": - if cust_attr_config["multiSelect"]: - return list - return str - # "date", "expression", "notificationtype", "dynamic enumerator" - return None - - -def from_dict_to_set(data, is_project): - """ - Converts 'data' into $set part of MongoDB update command. - Sets new or modified keys. - Tasks are updated completely, not per task. (Eg. change in any of the - tasks results in full update of "tasks" from Ftrack. - Args: - data (dictionary): up-to-date data from Ftrack - is_project (boolean): true for project - - Returns: - (dictionary) - { "$set" : "{..}"} - """ - not_set = object() - task_changes = not_set - if ( - is_project - and "config" in data - and "tasks" in data["config"] - ): - task_changes = data["config"].pop("tasks") - task_changes_key = "config.tasks" - if not data["config"]: - data.pop("config") - elif ( - not is_project - and "data" in data - and "tasks" in data["data"] - ): - task_changes = data["data"].pop("tasks") - task_changes_key = "data.tasks" - if not data["data"]: - data.pop("data") - - result = {"$set": {}} - dict_queue = collections.deque() - dict_queue.append((None, data)) - - while dict_queue: - _key, _data = dict_queue.popleft() - for key, value in _data.items(): - new_key = key - if _key is not None: - new_key = "{}.{}".format(_key, key) - - if not isinstance(value, dict) or \ - (isinstance(value, dict) and not bool(value)): # empty dic - result["$set"][new_key] = value - continue - dict_queue.append((new_key, value)) - - if task_changes is not not_set and task_changes_key: - result["$set"][task_changes_key] = task_changes - return result - - -def get_project_apps(in_app_list): - """ Application definitions for app name. 
- - Args: - in_app_list: (list) - names of applications - - Returns: - tuple (list, dictionary) - list of dictionaries with apps definitions - dictionary of warnings - """ - apps = [] - warnings = collections.defaultdict(list) - - if not in_app_list: - return apps, warnings - - missing_app_msg = "Missing definition of application" - application_manager = ApplicationManager() - for app_name in in_app_list: - if application_manager.applications.get(app_name): - apps.append({"name": app_name}) - else: - warnings[missing_app_msg].append(app_name) - return apps, warnings - - -def get_hierarchical_attributes_values( - session, entity, hier_attrs, cust_attr_types=None -): - if not cust_attr_types: - cust_attr_types = session.query( - "select id, name from CustomAttributeType" - ).all() - - cust_attr_name_by_id = { - cust_attr_type["id"]: cust_attr_type["name"] - for cust_attr_type in cust_attr_types - } - # Hierarchical cust attrs - attr_key_by_id = {} - convert_types_by_attr_id = {} - defaults = {} - for attr in hier_attrs: - attr_id = attr["id"] - key = attr["key"] - type_id = attr["type_id"] - - attr_key_by_id[attr_id] = key - defaults[key] = attr["default"] - - cust_attr_type_name = cust_attr_name_by_id[type_id] - convert_type = get_python_type_for_custom_attribute( - attr, cust_attr_type_name - ) - convert_types_by_attr_id[attr_id] = convert_type - - entity_ids = [item["id"] for item in entity["link"]] - - values = query_custom_attributes( - session, list(attr_key_by_id.keys()), entity_ids, True - ) - - hier_values = {} - for key, val in defaults.items(): - hier_values[key] = val - - if not values: - return hier_values - - values_by_entity_id = collections.defaultdict(dict) - for item in values: - value = item["value"] - if value is None: - continue - - attr_id = item["configuration_id"] - - convert_type = convert_types_by_attr_id[attr_id] - if convert_type: - value = convert_type(value) - - key = attr_key_by_id[attr_id] - entity_id = item["entity_id"] - values_by_entity_id[entity_id][key] = value - - for entity_id in entity_ids: - for key in attr_key_by_id.values(): - value = values_by_entity_id[entity_id].get(key) - if value is not None: - hier_values[key] = value - - return hier_values - - -class SyncEntitiesFactory: - dbcon = AvalonMongoDB() - - cust_attr_query_keys = [ - "id", - "key", - "entity_type", - "object_type_id", - "is_hierarchical", - "config", - "default" - ] - - project_query = ( - "select full_name, name, custom_attributes" - ", project_schema._task_type_schema.types.name" - " from Project where full_name is \"{}\"" - ) - entities_query = ( - "select id, name, type_id, parent_id, link, description" - " from TypedContext where project_id is \"{}\"" - ) - ignore_custom_attr_key = "avalon_ignore_sync" - ignore_entity_types = ["milestone"] - - report_splitter = {"type": "label", "value": "---"} - - def __init__(self, log_obj, session): - self.log = log_obj - self._server_url = session.server_url - self._api_key = session.api_key - self._api_user = session.api_user - - def launch_setup(self, project_full_name): - try: - self.session.close() - except Exception: - pass - - self.session = ftrack_api.Session( - server_url=self._server_url, - api_key=self._api_key, - api_user=self._api_user, - auto_connect_event_hub=False - ) - - self.duplicates = {} - self.failed_regex = {} - self.tasks_failed_regex = collections.defaultdict(list) - self.report_items = { - "info": collections.defaultdict(list), - "warning": collections.defaultdict(list), - "error": collections.defaultdict(list) - } 
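The hierarchical-attribute resolution in get_hierarchical_attributes_values above starts from each attribute's default value and then overwrites it with values found along the entity's link, ordered from the project down to the entity itself, so deeper values win. A condensed sketch of that precedence rule (the function name, ids, and keys here are made up for illustration):

    def resolve_hier_values(defaults, values_by_entity_id, link_entity_ids):
        # link_entity_ids is ordered project -> parents -> entity, so later
        # (deeper) values overwrite earlier (shallower) ones.
        resolved = dict(defaults)
        for entity_id in link_entity_ids:
            for key, value in values_by_entity_id.get(entity_id, {}).items():
                if value is not None:
                    resolved[key] = value
        return resolved

    # fps defaults to 24.0 but is overridden directly on the shot:
    print(resolve_hier_values(
        {"fps": 24.0, "resolutionWidth": 1920},
        {"shot-id": {"fps": 25.0}},
        ["project-id", "sequence-id", "shot-id"],
    ))  # -> {'fps': 25.0, 'resolutionWidth': 1920}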
- - self.create_list = [] - self.project_created = False - self.unarchive_list = [] - self.updates = collections.defaultdict(dict) - - self.avalon_project = None - self.avalon_entities = None - - self._avalon_ents_by_id = None - self._avalon_ents_by_ftrack_id = None - self._avalon_ents_by_name = None - self._avalon_ents_by_parent_id = None - - self._avalon_archived_ents = None - self._avalon_archived_by_id = None - self._avalon_archived_by_parent_id = None - self._avalon_archived_by_name = None - - self._subsets_by_parent_id = None - self._changeability_by_mongo_id = None - - self._object_types_by_name = None - - self.all_filtered_entities = {} - self.filtered_ids = [] - self.not_selected_ids = [] - - self.hier_cust_attr_ids_by_key = {} - - self._ent_paths_by_ftrack_id = {} - - self.ftrack_avalon_mapper = None - self.avalon_ftrack_mapper = None - self.create_ftrack_ids = None - self.update_ftrack_ids = None - self.deleted_entities = None - - # Get Ftrack project - ft_project = self.session.query( - self.project_query.format(project_full_name) - ).one() - ft_project_id = ft_project["id"] - - # Skip if project is ignored - if ft_project["custom_attributes"].get( - self.ignore_custom_attr_key - ) is True: - msg = ( - "Project \"{}\" has set `Ignore Sync` custom attribute to True" - ).format(project_full_name) - self.log.warning(msg) - return {"success": False, "message": msg} - - self.log.debug(( - "*** Synchronization initialization started <{}>." - ).format(project_full_name)) - # Check if `avalon_mongo_id` custom attribute exist or is accessible - if CUST_ATTR_ID_KEY not in ft_project["custom_attributes"]: - items = [] - items.append({ - "type": "label", - "value": ( - "# Can't access Custom attribute: \"{}\"" - ).format(CUST_ATTR_ID_KEY) - }) - items.append({ - "type": "label", - "value": ( - "
<p>- Check if your User and API key has permissions" - " to access the Custom attribute." - "<br>Username:\"{}\"" - "<br>API key:\"{}\"</p>" - ).format(self._api_user, self._api_key) - }) - items.append({ - "type": "label", - "value": "<p>- Check if the Custom attribute exist</p>
" - }) - return { - "items": items, - "title": "Synchronization failed", - "success": False, - "message": "Synchronization failed" - } - - # Store entities by `id` and `parent_id` - entities_dict = collections.defaultdict(lambda: { - "children": list(), - "parent_id": None, - "entity": None, - "entity_type": None, - "name": None, - "custom_attributes": {}, - "hier_attrs": {}, - "avalon_attrs": {}, - "tasks": {} - }) - - # Find all entities in project - all_project_entities = self.session.query( - self.entities_query.format(ft_project_id) - ).all() - task_types = self.session.query("select id, name from Type").all() - task_type_names_by_id = { - task_type["id"]: task_type["name"] - for task_type in task_types - } - for entity in all_project_entities: - parent_id = entity["parent_id"] - entity_type = entity.entity_type - entity_type_low = entity_type.lower() - if entity_type_low in self.ignore_entity_types: - continue - - elif entity_type_low == "task": - # enrich task info with additional metadata - task_type_name = task_type_names_by_id[entity["type_id"]] - task = {"type": task_type_name} - entities_dict[parent_id]["tasks"][entity["name"]] = task - continue - - entity_id = entity["id"] - entities_dict[entity_id].update({ - "entity": entity, - "parent_id": parent_id, - "entity_type": entity_type_low, - "entity_type_orig": entity_type, - "name": entity["name"] - }) - entities_dict[parent_id]["children"].append(entity_id) - - entities_dict[ft_project_id]["entity"] = ft_project - entities_dict[ft_project_id]["entity_type"] = ( - ft_project.entity_type.lower() - ) - entities_dict[ft_project_id]["entity_type_orig"] = ( - ft_project.entity_type - ) - entities_dict[ft_project_id]["name"] = ft_project["full_name"] - - self.ft_project_id = ft_project_id - self.entities_dict = entities_dict - - @property - def project_name(self): - return self.entities_dict[self.ft_project_id]["name"] - - @property - def avalon_ents_by_id(self): - """ - Returns dictionary of avalon tracked entities (assets stored in - MongoDB) accessible by its '_id' - (mongo intenal ID - example ObjectId("5f48de5830a9467b34b69798")) - Returns: - (dictionary) - {"(_id)": whole entity asset} - """ - if self._avalon_ents_by_id is None: - self._avalon_ents_by_id = {} - for entity in self.avalon_entities: - self._avalon_ents_by_id[str(entity["_id"])] = entity - - return self._avalon_ents_by_id - - @property - def avalon_ents_by_ftrack_id(self): - """ - Returns dictionary of Mongo ids of avalon tracked entities - (assets stored in MongoDB) accessible by its 'ftrackId' - (id from ftrack) - (example '431ee3f2-e91a-11ea-bfa4-92591a5b5e3e') - Returns: - (dictionary) - {"(ftrackId)": "_id"} - """ - if self._avalon_ents_by_ftrack_id is None: - self._avalon_ents_by_ftrack_id = {} - for entity in self.avalon_entities: - key = entity.get("data", {}).get("ftrackId") - if not key: - continue - self._avalon_ents_by_ftrack_id[key] = str(entity["_id"]) - - return self._avalon_ents_by_ftrack_id - - @property - def avalon_ents_by_name(self): - """ - Returns dictionary of Mongo ids of avalon tracked entities - (assets stored in MongoDB) accessible by its 'name' - (example 'Hero') - Returns: - (dictionary) - {"(name)": "_id"} - """ - if self._avalon_ents_by_name is None: - self._avalon_ents_by_name = {} - for entity in self.avalon_entities: - self._avalon_ents_by_name[entity["name"]] = str(entity["_id"]) - - return self._avalon_ents_by_name - - @property - def avalon_ents_by_parent_id(self): - """ - Returns dictionary of avalon tracked entities - (assets 
stored in MongoDB) accessible by its 'visualParent' - (example ObjectId("5f48de5830a9467b34b69798")) - - Fills 'self._avalon_archived_ents' for performance - Returns: - (dictionary) - {"(_id)": whole entity} - """ - if self._avalon_ents_by_parent_id is None: - self._avalon_ents_by_parent_id = collections.defaultdict(list) - for entity in self.avalon_entities: - parent_id = entity["data"]["visualParent"] - if parent_id is not None: - parent_id = str(parent_id) - self._avalon_ents_by_parent_id[parent_id].append(entity) - - return self._avalon_ents_by_parent_id - - @property - def avalon_archived_ents(self): - """ - Returns list of archived assets from DB - (their "type" == 'archived_asset') - - Fills 'self._avalon_archived_ents' for performance - Returns: - (list) of assets - """ - if self._avalon_archived_ents is None: - self._avalon_archived_ents = list( - get_archived_assets(self.project_name) - ) - return self._avalon_archived_ents - - @property - def avalon_archived_by_name(self): - """ - Returns list of archived assets from DB - (their "type" == 'archived_asset') - - Fills 'self._avalon_archived_by_name' for performance - Returns: - (dictionary of lists) of assets accessible by asset name - """ - if self._avalon_archived_by_name is None: - self._avalon_archived_by_name = collections.defaultdict(list) - for ent in self.avalon_archived_ents: - self._avalon_archived_by_name[ent["name"]].append(ent) - return self._avalon_archived_by_name - - @property - def avalon_archived_by_id(self): - """ - Returns dictionary of archived assets from DB - (their "type" == 'archived_asset') - - Fills 'self._avalon_archived_by_id' for performance - Returns: - (dictionary) of assets accessible by asset mongo _id - """ - if self._avalon_archived_by_id is None: - self._avalon_archived_by_id = { - str(ent["_id"]): ent for ent in self.avalon_archived_ents - } - return self._avalon_archived_by_id - - @property - def avalon_archived_by_parent_id(self): - """ - Returns dictionary of archived assets from DB per their's parent - (their "type" == 'archived_asset') - - Fills 'self._avalon_archived_by_parent_id' for performance - Returns: - (dictionary of lists) of assets accessible by asset parent - mongo _id - """ - if self._avalon_archived_by_parent_id is None: - self._avalon_archived_by_parent_id = collections.defaultdict(list) - for entity in self.avalon_archived_ents: - parent_id = entity["data"]["visualParent"] - if parent_id is not None: - parent_id = str(parent_id) - self._avalon_archived_by_parent_id[parent_id].append(entity) - - return self._avalon_archived_by_parent_id - - @property - def subsets_by_parent_id(self): - """ - Returns dictionary of subsets from Mongo ("type": "subset") - grouped by their parent. 
- - Fills 'self._subsets_by_parent_id' for performance - Returns: - (dictionary of lists) - """ - if self._subsets_by_parent_id is None: - self._subsets_by_parent_id = collections.defaultdict(list) - for subset in get_subsets(self.project_name): - self._subsets_by_parent_id[str(subset["parent"])].append( - subset - ) - - return self._subsets_by_parent_id - - @property - def changeability_by_mongo_id(self): - if self._changeability_by_mongo_id is None: - self._changeability_by_mongo_id = collections.defaultdict( - lambda: True - ) - self._changeability_by_mongo_id[self.avalon_project_id] = False - self._bubble_changeability(list(self.subsets_by_parent_id.keys())) - return self._changeability_by_mongo_id - - @property - def object_types_by_name(self): - if self._object_types_by_name is None: - object_types_by_name = self.session.query( - "select id, name from ObjectType" - ).all() - self._object_types_by_name = { - object_type["name"]: object_type - for object_type in object_types_by_name - } - return self._object_types_by_name - - @property - def all_ftrack_names(self): - """ - Returns lists of names of all entities in Ftrack - Returns: - (list) - """ - return [ - ent_dict["name"] for ent_dict in self.entities_dict.values() if ( - ent_dict.get("name") - ) - ] - - def duplicity_regex_check(self): - self.log.debug("* Checking duplicities and invalid symbols") - # Duplicity and regex check - entity_ids_by_name = {} - duplicates = [] - failed_regex = [] - task_names = {} - _schema_patterns = {} - for ftrack_id, entity_dict in self.entities_dict.items(): - regex_check = True - name = entity_dict["name"] - entity_type = entity_dict["entity_type"] - # Tasks must be checked too - for task in entity_dict["tasks"].items(): - task_name, task = task - passed = task_names.get(task_name) - if passed is None: - passed = check_regex( - task_name, "task", schema_patterns=_schema_patterns - ) - task_names[task_name] = passed - - if not passed: - self.tasks_failed_regex[task_name].append(ftrack_id) - - if name in entity_ids_by_name: - duplicates.append(name) - else: - entity_ids_by_name[name] = [] - regex_check = check_regex( - name, entity_type, schema_patterns=_schema_patterns - ) - - entity_ids_by_name[name].append(ftrack_id) - if not regex_check: - failed_regex.append(name) - - for name in failed_regex: - self.failed_regex[name] = entity_ids_by_name[name] - - for name in duplicates: - self.duplicates[name] = entity_ids_by_name[name] - - self.filter_by_duplicate_regex() - - def filter_by_duplicate_regex(self): - filter_queue = collections.deque() - failed_regex_msg = "{} - Entity has invalid symbols in the name" - duplicate_msg = "There are multiple entities with the name: \"{}\":" - - for ids in self.failed_regex.values(): - for id in ids: - ent_path = self.get_ent_path(id) - self.log.warning(failed_regex_msg.format(ent_path)) - filter_queue.append(id) - - for name, ids in self.duplicates.items(): - self.log.warning(duplicate_msg.format(name)) - for id in ids: - ent_path = self.get_ent_path(id) - self.log.warning(ent_path) - filter_queue.append(id) - - filtered_ids = [] - while filter_queue: - ftrack_id = filter_queue.popleft() - if ftrack_id in filtered_ids: - continue - - entity_dict = self.entities_dict.pop(ftrack_id, {}) - if not entity_dict: - continue - - self.all_filtered_entities[ftrack_id] = entity_dict - parent_id = entity_dict.get("parent_id") - if parent_id and parent_id in self.entities_dict: - if ftrack_id in self.entities_dict[parent_id]["children"]: - 
self.entities_dict[parent_id]["children"].remove(ftrack_id) - - filtered_ids.append(ftrack_id) - for child_id in entity_dict.get("children", []): - filter_queue.append(child_id) - - for name, ids in self.tasks_failed_regex.items(): - for id in ids: - if id not in self.entities_dict: - continue - self.entities_dict[id]["tasks"].pop(name) - ent_path = self.get_ent_path(id) - self.log.warning(failed_regex_msg.format( - "/".join([ent_path, name]) - )) - - def filter_by_ignore_sync(self): - # skip filtering if `ignore_sync` attribute do not exist - if self.entities_dict[self.ft_project_id]["avalon_attrs"].get( - self.ignore_custom_attr_key, "_notset_" - ) == "_notset_": - return - - filter_queue = collections.deque() - filter_queue.append((self.ft_project_id, False)) - while filter_queue: - parent_id, remove = filter_queue.popleft() - if remove: - parent_dict = self.entities_dict.pop(parent_id, {}) - self.all_filtered_entities[parent_id] = parent_dict - self.filtered_ids.append(parent_id) - else: - parent_dict = self.entities_dict.get(parent_id, {}) - - for child_id in list(parent_dict.get("children", [])): - # keep original `remove` value for all children - _remove = (remove is True) - if not _remove: - if self.entities_dict[child_id]["avalon_attrs"].get( - self.ignore_custom_attr_key - ): - self.entities_dict[parent_id]["children"].remove( - child_id - ) - _remove = True - filter_queue.append((child_id, _remove)) - - def filter_by_selection(self, event): - # BUGGY!!!! cause that entities are in deleted list - # TODO may be working when filtering happen after preparations - # - But this part probably does not have any functional reason - # - Time of synchronization probably won't be changed much - selected_ids = [] - for entity in event["data"]["selection"]: - # Skip if project is in selection - if entity["entityType"] == "show": - return - selected_ids.append(entity["entityId"]) - - sync_ids = [self.ft_project_id] - parents_queue = collections.deque() - children_queue = collections.deque() - for selected_id in selected_ids: - # skip if already filtered with ignore sync custom attribute - if selected_id in self.filtered_ids: - continue - - parents_queue.append(selected_id) - children_queue.append(selected_id) - - while parents_queue: - ftrack_id = parents_queue.popleft() - while True: - # Stops when parent is in sync_ids - if ( - ftrack_id in self.filtered_ids - or ftrack_id in sync_ids - or ftrack_id is None - ): - break - sync_ids.append(ftrack_id) - ftrack_id = self.entities_dict[ftrack_id]["parent_id"] - - while children_queue: - parent_id = children_queue.popleft() - for child_id in self.entities_dict[parent_id]["children"]: - if child_id in sync_ids or child_id in self.filtered_ids: - continue - sync_ids.append(child_id) - children_queue.append(child_id) - - # separate not selected and to process entities - for key, value in self.entities_dict.items(): - if key not in sync_ids: - self.not_selected_ids.append(key) - - for ftrack_id in self.not_selected_ids: - # pop from entities - value = self.entities_dict.pop(ftrack_id) - # remove entity from parent's children - parent_id = value["parent_id"] - if parent_id not in sync_ids: - continue - - self.entities_dict[parent_id]["children"].remove(ftrack_id) - - def set_cutom_attributes(self): - self.log.debug("* Preparing custom attributes") - # Get custom attributes and values - custom_attrs, hier_attrs = get_openpype_attr( - self.session, query_keys=self.cust_attr_query_keys - ) - ent_types_by_name = self.object_types_by_name - # Custom 
attribute types - cust_attr_types = self.session.query( - "select id, name from CustomAttributeType" - ).all() - cust_attr_type_name_by_id = { - cust_attr_type["id"]: cust_attr_type["name"] - for cust_attr_type in cust_attr_types - } - - # store default values per entity type - attrs_per_entity_type = collections.defaultdict(dict) - avalon_attrs = collections.defaultdict(dict) - # store also custom attribute configuration id for future use (create) - attrs_per_entity_type_ca_id = collections.defaultdict(dict) - avalon_attrs_ca_id = collections.defaultdict(dict) - - attribute_key_by_id = {} - convert_types_by_attr_id = {} - for cust_attr in custom_attrs: - key = cust_attr["key"] - attr_id = cust_attr["id"] - type_id = cust_attr["type_id"] - - attribute_key_by_id[attr_id] = key - cust_attr_type_name = cust_attr_type_name_by_id[type_id] - - convert_type = get_python_type_for_custom_attribute( - cust_attr, cust_attr_type_name - ) - convert_types_by_attr_id[attr_id] = convert_type - - ca_ent_type = cust_attr["entity_type"] - if key.startswith("avalon_"): - if ca_ent_type == "show": - avalon_attrs[ca_ent_type][key] = cust_attr["default"] - avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"] - elif ca_ent_type == "task": - obj_id = cust_attr["object_type_id"] - avalon_attrs[obj_id][key] = cust_attr["default"] - avalon_attrs_ca_id[obj_id][key] = cust_attr["id"] - continue - - if ca_ent_type == "show": - attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"] - attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"] - elif ca_ent_type == "task": - obj_id = cust_attr["object_type_id"] - attrs_per_entity_type[obj_id][key] = cust_attr["default"] - attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"] - - obj_id_ent_type_map = {} - sync_ids = [] - for entity_id, entity_dict in self.entities_dict.items(): - sync_ids.append(entity_id) - entity_type = entity_dict["entity_type"] - entity_type_orig = entity_dict["entity_type_orig"] - - if entity_type == "project": - attr_key = "show" - else: - map_key = obj_id_ent_type_map.get(entity_type_orig) - if not map_key: - # Put space between capitals - # (e.g. 
'AssetBuild' -> 'Asset Build') - map_key = re.sub( - r"(\w)([A-Z])", r"\1 \2", entity_type_orig - ) - obj_id_ent_type_map[entity_type_orig] = map_key - - # Get object id of entity type - attr_key = ent_types_by_name.get(map_key) - - # Backup soluction when id is not found by prequeried objects - if not attr_key: - query = "ObjectType where name is \"{}\"".format(map_key) - attr_key = self.session.query(query).one()["id"] - ent_types_by_name[map_key] = attr_key - - prepared_attrs = attrs_per_entity_type.get(attr_key) - prepared_avalon_attr = avalon_attrs.get(attr_key) - prepared_attrs_ca_id = attrs_per_entity_type_ca_id.get(attr_key) - prepared_avalon_attr_ca_id = avalon_attrs_ca_id.get(attr_key) - if prepared_attrs: - self.entities_dict[entity_id]["custom_attributes"] = ( - copy.deepcopy(prepared_attrs) - ) - if prepared_attrs_ca_id: - self.entities_dict[entity_id]["custom_attributes_id"] = ( - copy.deepcopy(prepared_attrs_ca_id) - ) - if prepared_avalon_attr: - self.entities_dict[entity_id]["avalon_attrs"] = ( - copy.deepcopy(prepared_avalon_attr) - ) - if prepared_avalon_attr_ca_id: - self.entities_dict[entity_id]["avalon_attrs_id"] = ( - copy.deepcopy(prepared_avalon_attr_ca_id) - ) - - items = query_custom_attributes( - self.session, - list(attribute_key_by_id.keys()), - sync_ids - ) - - invalid_fps_items = [] - for item in items: - entity_id = item["entity_id"] - attr_id = item["configuration_id"] - key = attribute_key_by_id[attr_id] - store_key = "custom_attributes" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - - convert_type = convert_types_by_attr_id[attr_id] - value = item["value"] - if convert_type: - value = convert_type(value) - - if key in FPS_KEYS: - try: - value = convert_to_fps(value) - except InvalidFpsValue: - invalid_fps_items.append((entity_id, value)) - self.entities_dict[entity_id][store_key][key] = value - - if invalid_fps_items: - fps_msg = ( - "These entities have invalid fps value in custom attributes" - ) - items = [] - for entity_id, value in invalid_fps_items: - ent_path = self.get_ent_path(entity_id) - items.append("{} - \"{}\"".format(ent_path, value)) - self.report_items["error"][fps_msg] = items - - # process hierarchical attributes - self.set_hierarchical_attribute( - hier_attrs, sync_ids, cust_attr_type_name_by_id - ) - - def set_hierarchical_attribute( - self, hier_attrs, sync_ids, cust_attr_type_name_by_id - ): - # collect all hierarchical attribute keys - # and prepare default values to project - attributes_by_key = {} - attribute_key_by_id = {} - convert_types_by_attr_id = {} - for attr in hier_attrs: - key = attr["key"] - attr_id = attr["id"] - type_id = attr["type_id"] - attribute_key_by_id[attr_id] = key - attributes_by_key[key] = attr - - cust_attr_type_name = cust_attr_type_name_by_id[type_id] - convert_type = get_python_type_for_custom_attribute( - attr, cust_attr_type_name - ) - convert_types_by_attr_id[attr_id] = convert_type - - self.hier_cust_attr_ids_by_key[key] = attr["id"] - - store_key = "hier_attrs" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - - default_value = attr["default"] - if key in FPS_KEYS: - try: - default_value = convert_to_fps(default_value) - except InvalidFpsValue: - pass - - self.entities_dict[self.ft_project_id][store_key][key] = ( - default_value - ) - - # Add attribute ids to entities dictionary - avalon_attribute_id_by_key = { - attr_key: attr_id - for attr_id, attr_key in attribute_key_by_id.items() - if attr_key.startswith("avalon_") - } - for entity_id in self.entities_dict.keys(): 
- if "avalon_attrs_id" not in self.entities_dict[entity_id]: - self.entities_dict[entity_id]["avalon_attrs_id"] = {} - - for attr_key, attr_id in avalon_attribute_id_by_key.items(): - self.entities_dict[entity_id]["avalon_attrs_id"][attr_key] = ( - attr_id - ) - - # Prepare dict with all hier keys and None values - prepare_dict = {} - prepare_dict_avalon = {} - for key in attributes_by_key.keys(): - if key.startswith("avalon_"): - prepare_dict_avalon[key] = None - else: - prepare_dict[key] = None - - for entity_dict in self.entities_dict.values(): - # Skip project because has stored defaults at the moment - if entity_dict["entity_type"] == "project": - continue - entity_dict["hier_attrs"] = copy.deepcopy(prepare_dict) - for key, val in prepare_dict_avalon.items(): - entity_dict["avalon_attrs"][key] = val - - items = query_custom_attributes( - self.session, - list(attribute_key_by_id.keys()), - sync_ids, - True - ) - - invalid_fps_items = [] - avalon_hier = [] - for item in items: - value = item["value"] - # WARNING It is not possible to propagate enumerate hierarchical - # attributes with multiselection 100% right. Unsetting all values - # will cause inheritance from parent. - if ( - value is None - or (isinstance(value, (tuple, list)) and not value) - ): - continue - - attr_id = item["configuration_id"] - convert_type = convert_types_by_attr_id[attr_id] - if convert_type: - value = convert_type(value) - - entity_id = item["entity_id"] - key = attribute_key_by_id[attr_id] - if key in FPS_KEYS: - try: - value = convert_to_fps(value) - except InvalidFpsValue: - invalid_fps_items.append((entity_id, value)) - continue - - if key.startswith("avalon_"): - store_key = "avalon_attrs" - avalon_hier.append(key) - else: - store_key = "hier_attrs" - self.entities_dict[entity_id][store_key][key] = value - - if invalid_fps_items: - fps_msg = ( - "These entities have invalid fps value in custom attributes" - ) - items = [] - for entity_id, value in invalid_fps_items: - ent_path = self.get_ent_path(entity_id) - items.append("{} - \"{}\"".format(ent_path, value)) - self.report_items["error"][fps_msg] = items - - # Get dictionary with not None hierarchical values to pull to children - top_id = self.ft_project_id - project_values = {} - for key, value in self.entities_dict[top_id]["hier_attrs"].items(): - if value is not None: - project_values[key] = value - - for key in avalon_hier: - if key == CUST_ATTR_ID_KEY: - continue - value = self.entities_dict[top_id]["avalon_attrs"][key] - if value is not None: - project_values[key] = value - - hier_down_queue = collections.deque() - hier_down_queue.append((project_values, top_id)) - - while hier_down_queue: - hier_values, parent_id = hier_down_queue.popleft() - for child_id in self.entities_dict[parent_id]["children"]: - _hier_values = copy.deepcopy(hier_values) - for key in attributes_by_key.keys(): - if key.startswith("avalon_"): - store_key = "avalon_attrs" - else: - store_key = "hier_attrs" - value = self.entities_dict[child_id][store_key][key] - if value is not None: - _hier_values[key] = value - - self.entities_dict[child_id]["hier_attrs"].update(_hier_values) - hier_down_queue.append((_hier_values, child_id)) - - def remove_from_archived(self, mongo_id): - entity = self.avalon_archived_by_id.pop(mongo_id, None) - if not entity: - return - - if self._avalon_archived_ents is not None: - if entity in self._avalon_archived_ents: - self._avalon_archived_ents.remove(entity) - - if self._avalon_archived_by_name is not None: - name = entity["name"] - if name in 
self._avalon_archived_by_name: - name_ents = self._avalon_archived_by_name[name] - if entity in name_ents: - if len(name_ents) == 1: - self._avalon_archived_by_name.pop(name) - else: - self._avalon_archived_by_name[name].remove(entity) - - # TODO use custom None instead of __NOTSET__ - if self._avalon_archived_by_parent_id is not None: - parent_id = entity.get("data", {}).get( - "visualParent", "__NOTSET__" - ) - if parent_id is not None: - parent_id = str(parent_id) - - if parent_id in self._avalon_archived_by_parent_id: - parent_list = self._avalon_archived_by_parent_id[parent_id] - if entity not in parent_list: - self._avalon_archived_by_parent_id[parent_id].remove( - entity - ) - - def _get_input_links(self, ftrack_ids): - tupled_ids = tuple(ftrack_ids) - mapping_by_to_id = { - ftrack_id: set() - for ftrack_id in tupled_ids - } - ids_len = len(tupled_ids) - chunk_size = int(5000 / ids_len) - all_links = [] - for chunk in create_chunks(ftrack_ids, chunk_size): - entity_ids_joined = join_query_keys(chunk) - - all_links.extend(self.session.query(( - "select from_id, to_id from" - " TypedContextLink where to_id in ({})" - ).format(entity_ids_joined)).all()) - - for context_link in all_links: - to_id = context_link["to_id"] - from_id = context_link["from_id"] - if from_id == to_id: - continue - mapping_by_to_id[to_id].add(from_id) - return mapping_by_to_id - - def prepare_ftrack_ent_data(self): - not_set_ids = [] - for ftrack_id, entity_dict in self.entities_dict.items(): - entity = entity_dict["entity"] - if entity is None: - not_set_ids.append(ftrack_id) - continue - - self.entities_dict[ftrack_id]["final_entity"] = {} - self.entities_dict[ftrack_id]["final_entity"]["name"] = ( - entity_dict["name"] - ) - data = {} - data["ftrackId"] = entity["id"] - data["entityType"] = entity_dict["entity_type_orig"] - - for key, val in entity_dict.get("custom_attributes", []).items(): - data[key] = val - - for key, val in entity_dict.get("hier_attrs", []).items(): - data[key] = val - - if ftrack_id != self.ft_project_id: - data["description"] = entity["description"] - - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items) - 1:] - - data["parents"] = parents - data["tasks"] = self.entities_dict[ftrack_id].pop("tasks", {}) - self.entities_dict[ftrack_id]["final_entity"]["data"] = data - self.entities_dict[ftrack_id]["final_entity"]["type"] = "asset" - continue - project_name = entity["full_name"] - data["code"] = entity["name"] - self.entities_dict[ftrack_id]["final_entity"]["data"] = data - self.entities_dict[ftrack_id]["final_entity"]["type"] = ( - "project" - ) - - proj_schema = entity["project_schema"] - task_types = proj_schema["_task_type_schema"]["types"] - proj_apps, warnings = get_project_apps( - data.pop("applications", []) - ) - for msg, items in warnings.items(): - if not msg or not items: - continue - self.report_items["warning"][msg] = items - - current_project_anatomy_data = get_anatomy_settings( - project_name, exclude_locals=True - ) - anatomy_tasks = current_project_anatomy_data["tasks"] - tasks = {} - default_type_data = { - "short_name": "" - } - for task_type in task_types: - task_type_name = task_type["name"] - tasks[task_type_name] = copy.deepcopy( - anatomy_tasks.get(task_type_name) - or default_type_data - ) - - project_config = { - "tasks": tasks, - "apps": proj_apps - } - for key, value in current_project_anatomy_data.items(): - if key in project_config or key == "attributes": - continue - project_config[key] = value - - 
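Editor's note: `_get_input_links` above splits the id set into chunks so each `in (...)` query stays within a practical size budget. The same arithmetic, reduced to a runnable sketch (the helpers mirror the names used above but are re-implemented here so the snippet stands alone, with no ftrack session):

    def create_chunks(items, chunk_size):
        items = list(items)
        for idx in range(0, len(items), chunk_size):
            yield items[idx:idx + chunk_size]

    def join_query_keys(keys):
        return ",".join('"{}"'.format(key) for key in keys)

    entity_ids = ["id-{}".format(idx) for idx in range(10)]
    # Same budget logic as above: roughly 5000 total ids per query.
    chunk_size = max(1, int(5000 / len(entity_ids)))
    for chunk in create_chunks(entity_ids, chunk_size):
        print("select from_id, to_id from TypedContextLink"
              " where to_id in ({})".format(join_query_keys(chunk)))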
self.entities_dict[ftrack_id]["final_entity"]["config"] = ( - project_config - ) - - if not_set_ids: - self.log.debug(( - "- Debug information: Filtering bug, there are empty dicts" - "in entities dict (functionality should not be affected) <{}>" - ).format("| ".join(not_set_ids))) - for id in not_set_ids: - self.entities_dict.pop(id) - - def get_ent_path(self, ftrack_id): - ent_path = self._ent_paths_by_ftrack_id.get(ftrack_id) - if not ent_path: - entity = self.entities_dict[ftrack_id]["entity"] - ent_path = "/".join( - [ent["name"] for ent in entity["link"]] - ) - self._ent_paths_by_ftrack_id[ftrack_id] = ent_path - - return ent_path - - def prepare_avalon_entities(self, ft_project_name): - self.log.debug(( - "* Preparing avalon entities " - "(separate to Create, Update and Deleted groups)" - )) - # Avalon entities - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ft_project_name - avalon_project = get_project(ft_project_name) - avalon_entities = get_assets(ft_project_name) - self.avalon_project = avalon_project - self.avalon_entities = avalon_entities - - ftrack_avalon_mapper = {} - avalon_ftrack_mapper = {} - create_ftrack_ids = [] - update_ftrack_ids = [] - - same_mongo_id = [] - all_mongo_ids = {} - for ftrack_id, entity_dict in self.entities_dict.items(): - mongo_id = entity_dict["avalon_attrs"].get(CUST_ATTR_ID_KEY) - if not mongo_id: - continue - if mongo_id in all_mongo_ids: - same_mongo_id.append(mongo_id) - else: - all_mongo_ids[mongo_id] = [] - all_mongo_ids[mongo_id].append(ftrack_id) - - if avalon_project: - mongo_id = str(avalon_project["_id"]) - ftrack_avalon_mapper[self.ft_project_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = self.ft_project_id - update_ftrack_ids.append(self.ft_project_id) - else: - create_ftrack_ids.append(self.ft_project_id) - - # make it go hierarchically - prepare_queue = collections.deque() - - for child_id in self.entities_dict[self.ft_project_id]["children"]: - prepare_queue.append(child_id) - - while prepare_queue: - ftrack_id = prepare_queue.popleft() - for child_id in self.entities_dict[ftrack_id]["children"]: - prepare_queue.append(child_id) - - entity_dict = self.entities_dict[ftrack_id] - ent_path = self.get_ent_path(ftrack_id) - - mongo_id = entity_dict["avalon_attrs"].get(CUST_ATTR_ID_KEY) - av_ent_by_mongo_id = self.avalon_ents_by_id.get(mongo_id) - if av_ent_by_mongo_id: - av_ent_ftrack_id = av_ent_by_mongo_id.get("data", {}).get( - "ftrackId" - ) - is_right = False - else_match_better = False - if av_ent_ftrack_id and av_ent_ftrack_id == ftrack_id: - is_right = True - - elif mongo_id not in same_mongo_id: - is_right = True - - else: - ftrack_ids_with_same_mongo = all_mongo_ids[mongo_id] - for _ftrack_id in ftrack_ids_with_same_mongo: - if _ftrack_id == av_ent_ftrack_id: - continue - - _entity_dict = self.entities_dict[_ftrack_id] - _mongo_id = ( - _entity_dict["avalon_attrs"][CUST_ATTR_ID_KEY] - ) - _av_ent_by_mongo_id = self.avalon_ents_by_id.get( - _mongo_id - ) - _av_ent_ftrack_id = _av_ent_by_mongo_id.get( - "data", {} - ).get("ftrackId") - if _av_ent_ftrack_id == ftrack_id: - else_match_better = True - break - - if not is_right and not else_match_better: - entity = entity_dict["entity"] - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items) - 1:] - av_parents = av_ent_by_mongo_id["data"]["parents"] - if av_parents == parents: - is_right = True - else: - name = entity_dict["name"] - av_name = av_ent_by_mongo_id["name"] - if name == av_name: - is_right = True - - 
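Editor's note: `prepare_ftrack_ent_data` above derives an asset's `parents` by dropping the first item (the project) and the last item (the entity itself) of ftrack's `link` chain. Reduced to a standalone example with made-up names:

    # Hypothetical link chain as ftrack returns it: project first, entity last.
    link = [
        {"name": "MyProject"},
        {"name": "sequences"},
        {"name": "sq01"},
        {"name": "sh010"},
    ]
    ent_path_items = [item["name"] for item in link]
    parents = ent_path_items[1:len(ent_path_items) - 1]
    print(parents)                   # ['sequences', 'sq01']
    print("/".join(ent_path_items))  # MyProject/sequences/sq01/sh010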
if is_right: - self.log.debug( - "Existing (by MongoID) <{}>".format(ent_path) - ) - ftrack_avalon_mapper[ftrack_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = ftrack_id - update_ftrack_ids.append(ftrack_id) - continue - - mongo_id = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not mongo_id: - mongo_id = self.avalon_ents_by_name.get(entity_dict["name"]) - if mongo_id: - self.log.debug( - "Existing (by matching name) <{}>".format(ent_path) - ) - else: - self.log.debug( - "Existing (by FtrackID in mongo) <{}>".format(ent_path) - ) - - if mongo_id: - ftrack_avalon_mapper[ftrack_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = ftrack_id - update_ftrack_ids.append(ftrack_id) - continue - - self.log.debug("New <{}>".format(ent_path)) - create_ftrack_ids.append(ftrack_id) - - deleted_entities = [] - for mongo_id in self.avalon_ents_by_id: - if mongo_id in avalon_ftrack_mapper: - continue - deleted_entities.append(mongo_id) - - av_ent = self.avalon_ents_by_id[mongo_id] - av_ent_path_items = list(av_ent["data"]["parents"]) - av_ent_path_items.append(av_ent["name"]) - self.log.debug("Deleted <{}>".format("/".join(av_ent_path_items))) - - self.ftrack_avalon_mapper = ftrack_avalon_mapper - self.avalon_ftrack_mapper = avalon_ftrack_mapper - self.create_ftrack_ids = create_ftrack_ids - self.update_ftrack_ids = update_ftrack_ids - self.deleted_entities = deleted_entities - - self.log.debug(( - "Ftrack -> Avalon comparison: New <{}> " - "| Existing <{}> | Deleted <{}>" - ).format( - len(create_ftrack_ids), - len(update_ftrack_ids), - len(deleted_entities) - )) - - def filter_with_children(self, ftrack_id): - if ftrack_id not in self.entities_dict: - return - ent_dict = self.entities_dict[ftrack_id] - parent_id = ent_dict["parent_id"] - self.entities_dict[parent_id]["children"].remove(ftrack_id) - - children_queue = collections.deque() - children_queue.append(ftrack_id) - while children_queue: - _ftrack_id = children_queue.popleft() - entity_dict = self.entities_dict.pop(_ftrack_id, {"children": []}) - for child_id in entity_dict["children"]: - children_queue.append(child_id) - - def set_input_links(self): - ftrack_ids = set(self.create_ftrack_ids) | set(self.update_ftrack_ids) - - input_links_by_ftrack_id = self._get_input_links(ftrack_ids) - - for ftrack_id in ftrack_ids: - input_links = [] - final_entity = self.entities_dict[ftrack_id]["final_entity"] - final_entity["data"]["inputLinks"] = input_links - link_ids = input_links_by_ftrack_id[ftrack_id] - if not link_ids: - continue - - for ftrack_link_id in link_ids: - mongo_id = self.ftrack_avalon_mapper.get(ftrack_link_id) - if mongo_id is not None: - input_links.append({ - "id": ObjectId(mongo_id), - "linkedBy": "ftrack", - "type": "breakdown" - }) - - def prepare_changes(self): - self.log.debug("* Preparing changes for avalon/ftrack") - hierarchy_changing_ids = [] - ignore_keys = collections.defaultdict(list) - - update_queue = collections.deque() - for ftrack_id in self.update_ftrack_ids: - update_queue.append(ftrack_id) - - while update_queue: - ftrack_id = update_queue.popleft() - if ftrack_id == self.ft_project_id: - changes = self.prepare_project_changes() - if changes: - self.updates[self.avalon_project_id] = changes - continue - - ftrack_ent_dict = self.entities_dict[ftrack_id] - - # *** check parents - parent_check = False - - ftrack_parent_id = ftrack_ent_dict["parent_id"] - avalon_id = self.ftrack_avalon_mapper[ftrack_id] - avalon_entity = self.avalon_ents_by_id[avalon_id] - avalon_parent_id = avalon_entity["data"]["visualParent"] - 
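Editor's note: the matching pass above resolves each ftrack entity in a fixed priority order: a mongo id custom attribute that points at an existing document wins, then a document that already stores this ftrackId, then a plain name match, and only then is the entity treated as new. A condensed sketch of that ordering (it deliberately omits the duplicate-mongo-id disambiguation handled above; container names are illustrative):

    def classify_entity(ftrack_id, name, mongo_id_attr,
                        ents_by_id, ents_by_ftrack_id, ents_by_name):
        # 1. A mongo id stored on the ftrack entity wins if it points at a doc.
        if mongo_id_attr and mongo_id_attr in ents_by_id:
            return "update", mongo_id_attr
        # 2. Fall back to a doc that remembers this ftrack id.
        mongo_id = ents_by_ftrack_id.get(ftrack_id)
        if mongo_id:
            return "update", mongo_id
        # 3. Last resort: match purely by name.
        mongo_id = ents_by_name.get(name)
        if mongo_id:
            return "update", mongo_id
        return "create", None

    print(classify_entity("ft-1", "sh010", None, {}, {}, {"sh010": "5f48..."}))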
if avalon_parent_id is not None: - avalon_parent_id = str(avalon_parent_id) - - ftrack_parent_mongo_id = self.ftrack_avalon_mapper[ - ftrack_parent_id - ] - - # if parent is project - if (ftrack_parent_mongo_id == avalon_parent_id) or ( - ftrack_parent_id == self.ft_project_id and - avalon_parent_id is None - ): - parent_check = True - - # check name - ftrack_name = ftrack_ent_dict["name"] - avalon_name = avalon_entity["name"] - name_check = ftrack_name == avalon_name - - # IDEAL STATE: both parent and name check passed - if parent_check and name_check: - continue - - # If entity is changeable then change values of parent or name - if self.changeability_by_mongo_id[avalon_id]: - # TODO logging - if not parent_check: - if ftrack_parent_mongo_id == str(self.avalon_project_id): - new_parent_name = self.entities_dict[ - self.ft_project_id]["name"] - new_parent_id = None - else: - new_parent_name = self.avalon_ents_by_id[ - ftrack_parent_mongo_id]["name"] - new_parent_id = ObjectId(ftrack_parent_mongo_id) - - if avalon_parent_id == str(self.avalon_project_id): - old_parent_name = self.entities_dict[ - self.ft_project_id]["name"] - else: - old_parent_name = "N/A" - if ftrack_parent_mongo_id in self.avalon_ents_by_id: - old_parent_name = ( - self.avalon_ents_by_id - [ftrack_parent_mongo_id] - ["name"] - ) - - self.updates[avalon_id]["data"] = { - "visualParent": new_parent_id - } - ignore_keys[ftrack_id].append("data.visualParent") - self.log.debug(( - "Avalon entity \"{}\" changed parent \"{}\" -> \"{}\"" - ).format(avalon_name, old_parent_name, new_parent_name)) - - if not name_check: - self.updates[avalon_id]["name"] = ftrack_name - ignore_keys[ftrack_id].append("name") - self.log.debug( - "Avalon entity \"{}\" was renamed to \"{}\"".format( - avalon_name, ftrack_name - ) - ) - continue - - # parents and hierarchy must be recalculated - hierarchy_changing_ids.append(ftrack_id) - - # Parent is project if avalon_parent_id is set to None - if avalon_parent_id is None: - avalon_parent_id = str(self.avalon_project_id) - - if not name_check: - ent_path = self.get_ent_path(ftrack_id) - # TODO report - # TODO logging - self.entities_dict[ftrack_id]["name"] = avalon_name - self.entities_dict[ftrack_id]["entity"]["name"] = ( - avalon_name - ) - self.entities_dict[ftrack_id]["final_entity"]["name"] = ( - avalon_name - ) - self.log.warning("Name was changed back to {} <{}>".format( - avalon_name, ent_path - )) - self._ent_paths_by_ftrack_id.pop(ftrack_id, None) - msg = ( - " It is not possible to change" - " the name of an entity or it's parents, " - " if it already contained published data." - ) - self.report_items["warning"][msg].append(ent_path) - - # skip parent oricessing if hierarchy didn't change - if parent_check: - continue - - # Logic when parenting(hierarchy) has changed and should not - old_ftrack_parent_id = self.avalon_ftrack_mapper.get( - avalon_parent_id - ) - - # If last ftrack parent id from mongo entity exist then just - # remap paren_id on entity - if old_ftrack_parent_id: - # TODO report - # TODO logging - ent_path = self.get_ent_path(ftrack_id) - msg = ( - " It is not possible" - " to change the hierarchy of an entity or it's parents," - " if it already contained published data." 
- ) - self.report_items["warning"][msg].append(ent_path) - self.log.warning(( - " Entity contains published data so it was moved" - " back to it's original hierarchy <{}>" - ).format(ent_path)) - self.entities_dict[ftrack_id]["entity"]["parent_id"] = ( - old_ftrack_parent_id - ) - self.entities_dict[ftrack_id]["parent_id"] = ( - old_ftrack_parent_id - ) - self.entities_dict[old_ftrack_parent_id][ - "children" - ].append(ftrack_id) - - continue - - old_parent_ent = self.avalon_ents_by_id.get(avalon_parent_id) - if not old_parent_ent: - old_parent_ent = self.avalon_archived_by_id.get( - avalon_parent_id - ) - - # TODO report - # TODO logging - if not old_parent_ent: - self.log.warning(( - "Parent entity was not found by id" - " - Trying to find by parent name" - )) - ent_path = self.get_ent_path(ftrack_id) - - parents = avalon_entity["data"]["parents"] - parent_name = parents[-1] - matching_entity_id = None - for id, entity_dict in self.entities_dict.items(): - if entity_dict["name"] == parent_name: - matching_entity_id = id - break - - if matching_entity_id is None: - # TODO logging - # TODO report (turn off auto-sync?) - self.log.error(( - "The entity contains published data but it was moved" - " to a different place in the hierarchy and it's" - " previous parent cannot be found." - " It's impossible to solve this programmatically <{}>" - ).format(ent_path)) - msg = ( - " Hierarchy of an entity" - " can't be changed due to published data and missing" - " previous parent" - ) - self.report_items["error"][msg].append(ent_path) - self.filter_with_children(ftrack_id) - continue - - matching_ent_dict = self.entities_dict.get(matching_entity_id) - match_ent_parents = matching_ent_dict.get( - "final_entity", {}).get( - "data", {}).get( - "parents", ["__NOTSET__"] - ) - # TODO logging - # TODO report - if ( - len(match_ent_parents) >= len(parents) or - match_ent_parents[:-1] != parents - ): - ent_path = self.get_ent_path(ftrack_id) - self.log.error(( - "The entity contains published data but it was moved" - " to a different place in the hierarchy and it's" - " previous parents were moved too." - " It's impossible to solve this programmatically <{}>" - ).format(ent_path)) - msg = ( - " Hierarchy of an entity" - " can't be changed due to published data and scrambled" - "hierarchy" - ) - continue - - old_parent_ent = matching_ent_dict["final_entity"] - - parent_id = self.ft_project_id - entities_to_create = [] - # TODO logging - self.log.warning( - "Ftrack entities must be recreated because they were deleted," - " but they contain published data." - ) - - _avalon_ent = old_parent_ent - - self.updates[avalon_parent_id] = {"type": "asset"} - success = True - while True: - _vis_par = _avalon_ent["data"]["visualParent"] - _name = _avalon_ent["name"] - if _name in self.all_ftrack_names: - av_ent_path_items = list(_avalon_ent["data"]["parents"]) - av_ent_path_items.append(_name) - av_ent_path = "/".join(av_ent_path_items) - # TODO report - # TODO logging - self.log.error(( - "Can't recreate the entity in Ftrack because an entity" - " with the same name already exists in a different" - " place in the hierarchy <{}>" - ).format(av_ent_path)) - msg = ( - " Hierarchy of an entity" - " can't be changed. 
I contains published data and it's" - " previous parent had a name, that is duplicated at a " - " different hierarchy level" - ) - self.report_items["error"][msg].append(av_ent_path) - self.filter_with_children(ftrack_id) - success = False - break - - entities_to_create.append(_avalon_ent) - if _vis_par is None: - break - - _vis_par = str(_vis_par) - _mapped = self.avalon_ftrack_mapper.get(_vis_par) - if _mapped: - parent_id = _mapped - break - - _avalon_ent = self.avalon_ents_by_id.get(_vis_par) - if not _avalon_ent: - _avalon_ent = self.avalon_archived_by_id.get(_vis_par) - - if success is False: - continue - - new_entity_id = None - for av_entity in reversed(entities_to_create): - new_entity_id = self.create_ftrack_ent_from_avalon_ent( - av_entity, parent_id - ) - update_queue.append(new_entity_id) - - if new_entity_id: - ftrack_ent_dict["entity"]["parent_id"] = new_entity_id - - if hierarchy_changing_ids: - self.reload_parents(hierarchy_changing_ids) - - for ftrack_id in self.update_ftrack_ids: - if ftrack_id == self.ft_project_id: - continue - - avalon_id = self.ftrack_avalon_mapper[ftrack_id] - avalon_entity = self.avalon_ents_by_id[avalon_id] - - avalon_attrs = self.entities_dict[ftrack_id]["avalon_attrs"] - if ( - CUST_ATTR_ID_KEY not in avalon_attrs or - avalon_attrs[CUST_ATTR_ID_KEY] != avalon_id - ): - configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id"][CUST_ATTR_ID_KEY] - - _entity_key = collections.OrderedDict([ - ("configuration_id", configuration_id), - ("entity_id", ftrack_id) - ]) - - self.session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - _entity_key, - "value", - ftrack_api.symbol.NOT_SET, - avalon_id - ) - ) - # Prepare task changes as they have to be stored as one key - final_doc = self.entities_dict[ftrack_id]["final_entity"] - final_doc_tasks = final_doc["data"].pop("tasks", None) or {} - current_doc_tasks = avalon_entity["data"].get("tasks") or {} - if not final_doc_tasks: - update_tasks = True - else: - update_tasks = final_doc_tasks != current_doc_tasks - - # check rest of data - data_changes = self.compare_dict( - final_doc, - avalon_entity, - ignore_keys[ftrack_id] - ) - if data_changes: - self.updates[avalon_id] = self.merge_dicts( - data_changes, - self.updates[avalon_id] - ) - - # Add tasks back to final doc object - final_doc["data"]["tasks"] = final_doc_tasks - # Add tasks to updates if there are different - if update_tasks: - if "data" not in self.updates[avalon_id]: - self.updates[avalon_id]["data"] = {} - self.updates[avalon_id]["data"]["tasks"] = final_doc_tasks - - def synchronize(self): - self.log.debug("* Synchronization begins") - avalon_project_id = self.ftrack_avalon_mapper.get(self.ft_project_id) - if avalon_project_id: - self.avalon_project_id = ObjectId(avalon_project_id) - - # remove filtered ftrack ids from create/update list - for ftrack_id in self.all_filtered_entities: - if ftrack_id in self.create_ftrack_ids: - self.create_ftrack_ids.remove(ftrack_id) - elif ftrack_id in self.update_ftrack_ids: - self.update_ftrack_ids.remove(ftrack_id) - - self.log.debug("* Processing entities for archivation") - self.delete_entities() - - self.log.debug("* Processing new entities") - # Create not created entities - for ftrack_id in self.create_ftrack_ids: - # CHECK it is possible that entity was already created - # because is parent of another entity which was processed first - if ftrack_id not in self.ftrack_avalon_mapper: - self.create_avalon_entity(ftrack_id) - - 
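Editor's note: the flush in `synchronize` just below writes the prepared documents in batches: unarchived docs replace their archived counterparts through bulk `ReplaceOne` operations, while brand new docs go through a single `insert_many`. A minimal pymongo sketch of that split, assuming a local test database (connection string, database, and documents are made up):

    from pymongo import MongoClient, ReplaceOne

    # Assumption: a local mongod instance is available for the demo.
    col = MongoClient("mongodb://localhost:27017")["avalon_test"]["project_x"]

    unarchive_list = [{"_id": 1, "type": "asset", "name": "sh010"}]
    create_list = [{"_id": 2, "type": "asset", "name": "sh020"}]

    if unarchive_list:
        # One round trip replaces every unarchived document.
        col.bulk_write([ReplaceOne({"_id": d["_id"]}, d) for d in unarchive_list])
    if create_list:
        col.insert_many(create_list)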
self.set_input_links() - - unarchive_writes = [] - for item in self.unarchive_list: - mongo_id = item["_id"] - unarchive_writes.append(ReplaceOne( - {"_id": mongo_id}, - item - )) - av_ent_path_items = list(item["data"]["parents"]) - av_ent_path_items.append(item["name"]) - av_ent_path = "/".join(av_ent_path_items) - self.log.debug( - "Entity was unarchived <{}>".format(av_ent_path) - ) - self.remove_from_archived(mongo_id) - - if unarchive_writes: - self.dbcon.bulk_write(unarchive_writes) - - if len(self.create_list) > 0: - self.dbcon.insert_many(self.create_list) - - self.session.commit() - - self.log.debug("* Processing entities for update") - self.prepare_changes() - self.update_entities() - self.session.commit() - - def create_avalon_entity(self, ftrack_id): - if ftrack_id == self.ft_project_id: - self.create_avalon_project() - return - - entity_dict = self.entities_dict[ftrack_id] - parent_ftrack_id = entity_dict["parent_id"] - avalon_parent = None - if parent_ftrack_id != self.ft_project_id: - avalon_parent = self.ftrack_avalon_mapper.get(parent_ftrack_id) - # if not avalon_parent: - # self.create_avalon_entity(parent_ftrack_id) - # avalon_parent = self.ftrack_avalon_mapper[parent_ftrack_id] - avalon_parent = ObjectId(avalon_parent) - - # avalon_archived_by_id avalon_archived_by_name - current_id = ( - entity_dict["avalon_attrs"].get(CUST_ATTR_ID_KEY) or "" - ).strip() - mongo_id = current_id - name = entity_dict["name"] - - # Check if exist archived asset in mongo - by ID - unarchive = False - unarchive_id = self.check_unarchivation(ftrack_id, mongo_id, name) - if unarchive_id is not None: - unarchive = True - mongo_id = unarchive_id - - item = entity_dict["final_entity"] - try: - new_id = ObjectId(mongo_id) - if mongo_id in self.avalon_ftrack_mapper: - new_id = ObjectId() - except InvalidId: - new_id = ObjectId() - - item["_id"] = new_id - item["parent"] = self.avalon_project_id - item["schema"] = CURRENT_ASSET_DOC_SCHEMA - item["data"]["visualParent"] = avalon_parent - - new_id_str = str(new_id) - self.ftrack_avalon_mapper[ftrack_id] = new_id_str - self.avalon_ftrack_mapper[new_id_str] = ftrack_id - - self._avalon_ents_by_id[new_id_str] = item - self._avalon_ents_by_ftrack_id[ftrack_id] = new_id_str - self._avalon_ents_by_name[item["name"]] = new_id_str - - if current_id != new_id_str: - # store mongo id to ftrack entity - configuration_id = self.hier_cust_attr_ids_by_key.get( - CUST_ATTR_ID_KEY - ) - if not configuration_id: - # NOTE this is for cases when CUST_ATTR_ID_KEY key is not - # hierarchical custom attribute but per entity type - configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id" - ][CUST_ATTR_ID_KEY] - - _entity_key = collections.OrderedDict({ - "configuration_id": configuration_id, - "entity_id": ftrack_id - }) - - self.session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - _entity_key, - "value", - ftrack_api.symbol.NOT_SET, - new_id_str - ) - ) - - if unarchive is False: - self.create_list.append(item) - else: - self.unarchive_list.append(item) - - def check_unarchivation(self, ftrack_id, mongo_id, name): - archived_by_id = self.avalon_archived_by_id.get(mongo_id) - archived_by_name = self.avalon_archived_by_name.get(name) - - # if not found in archived then skip - if not archived_by_id and not archived_by_name: - return None - - entity_dict = self.entities_dict[ftrack_id] - - final_parents = entity_dict["final_entity"]["data"]["parents"] - if archived_by_id: - # if is changeable then 
unarchive (nothing to check here) - if self.changeability_by_mongo_id[mongo_id]: - return mongo_id - - # TODO replace `__NOTSET__` with custom None constant - archived_parent_id = archived_by_id["data"].get( - "visualParent", "__NOTSET__" - ) - archived_parents = archived_by_id["data"].get("parents") - archived_name = archived_by_id["name"] - - if ( - archived_name != entity_dict["name"] - or archived_parents != final_parents - ): - return None - - return mongo_id - - # First check if there is any that have same parents - for archived in archived_by_name: - mongo_id = str(archived["_id"]) - archived_parents = archived.get("data", {}).get("parents") - if archived_parents == final_parents: - return mongo_id - - # Secondly try to find more close to current ftrack entity - first_changeable = None - for archived in archived_by_name: - mongo_id = str(archived["_id"]) - if not self.changeability_by_mongo_id[mongo_id]: - continue - - if first_changeable is None: - first_changeable = mongo_id - - ftrack_parent_id = entity_dict["parent_id"] - map_ftrack_parent_id = self.ftrack_avalon_mapper.get( - ftrack_parent_id - ) - - # TODO replace `__NOTSET__` with custom None constant - archived_parent_id = archived.get("data", {}).get( - "visualParent", "__NOTSET__" - ) - if archived_parent_id is not None: - archived_parent_id = str(archived_parent_id) - - # skip if parent is archived - How this should be possible? - parent_entity = self.avalon_ents_by_id.get(archived_parent_id) - if ( - parent_entity and ( - map_ftrack_parent_id is not None and - map_ftrack_parent_id == str(parent_entity["_id"]) - ) - ): - return mongo_id - # Last return first changeable with same name (or None) - return first_changeable - - def create_avalon_project(self): - project_item = self.entities_dict[self.ft_project_id]["final_entity"] - mongo_id = ( - self.entities_dict[self.ft_project_id]["avalon_attrs"].get( - CUST_ATTR_ID_KEY - ) or "" - ).strip() - - try: - new_id = ObjectId(mongo_id) - except InvalidId: - new_id = ObjectId() - - project_item["_id"] = new_id - project_item["parent"] = None - project_item["schema"] = CURRENT_PROJECT_SCHEMA - project_item["config"]["schema"] = CURRENT_PROJECT_CONFIG_SCHEMA - - self.ftrack_avalon_mapper[self.ft_project_id] = new_id - self.avalon_ftrack_mapper[new_id] = self.ft_project_id - - self.avalon_project_id = new_id - - self._avalon_ents_by_id[str(new_id)] = project_item - if self._avalon_ents_by_ftrack_id is None: - self._avalon_ents_by_ftrack_id = {} - self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id) - if self._avalon_ents_by_name is None: - self._avalon_ents_by_name = {} - self._avalon_ents_by_name[project_item["name"]] = str(new_id) - - self.create_list.append(project_item) - self.project_created = True - - # store mongo id to ftrack entity - entity = self.entities_dict[self.ft_project_id]["entity"] - entity["custom_attributes"][CUST_ATTR_ID_KEY] = str(new_id) - - def _bubble_changeability(self, unchangeable_ids): - unchangeable_queue = collections.deque() - for entity_id in unchangeable_ids: - unchangeable_queue.append((entity_id, False)) - - processed_parents_ids = [] - subsets_to_remove = [] - while unchangeable_queue: - entity_id, child_is_archived = unchangeable_queue.popleft() - # skip if already processed - if entity_id in processed_parents_ids: - continue - - entity = self.avalon_ents_by_id.get(entity_id) - # if entity is not archived but unchageable child was then skip - # - archived entities should not affect not archived? 
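Editor's note: the `_bubble_changeability` walk around this point starts from every asset that owns subsets and marks its whole `visualParent` ancestry as unchangeable. The traversal, reduced to a flat parent map (names are illustrative):

    import collections

    def bubble_unchangeable(start_ids, parent_by_id):
        # Mark each starting id and all of its ancestors as unchangeable.
        changeable = collections.defaultdict(lambda: True)
        queue = collections.deque(start_ids)
        while queue:
            entity_id = queue.popleft()
            if changeable[entity_id] is False:
                continue  # this chain was already processed
            changeable[entity_id] = False
            parent_id = parent_by_id.get(entity_id)
            if parent_id is not None:
                queue.append(parent_id)
        return changeable

    parents = {"shot": "seq", "seq": "project", "project": None}
    flags = bubble_unchangeable(["shot"], parents)
    print(flags["seq"], flags["project"], flags["other"])  # False False True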
- if entity and child_is_archived: - continue - - # set changeability of current entity to False - self._changeability_by_mongo_id[entity_id] = False - processed_parents_ids.append(entity_id) - # if not entity then is probably archived - if not entity: - entity = self.avalon_archived_by_id.get(entity_id) - child_is_archived = True - - if not entity: - # if entity is not found then it is subset without parent - if entity_id in unchangeable_ids: - subsets_to_remove.append(entity_id) - else: - # TODO logging - What is happening here? - self.log.warning(( - "Avalon contains entities without valid parents that" - " lead to Project (should not cause errors)" - " - MongoId <{}>" - ).format(str(entity_id))) - continue - - # skip if parent is project - parent_id = entity["data"]["visualParent"] - if parent_id is None: - continue - unchangeable_queue.append( - (str(parent_id), child_is_archived) - ) - - self._delete_subsets_without_asset(subsets_to_remove) - - def _delete_subsets_without_asset(self, not_existing_parents): - repre_ids = [] - to_delete = [] - - subset_ids = [] - for parent_id in not_existing_parents: - subsets = self.subsets_by_parent_id.get(parent_id) - if not subsets: - continue - for subset in subsets: - if subset.get("type") == "subset": - subset_ids.append(subset["_id"]) - - db_versions = get_versions( - self.project_name, - subset_ids=subset_ids, - fields=["_id"] - ) - version_ids = [ver["_id"] for ver in db_versions] - db_repres = get_representations( - self.project_name, - version_ids=version_ids, - fields=["_id"] - ) - repre_ids = [repre["_id"] for repre in db_repres] - - to_delete.extend(subset_ids) - to_delete.extend(version_ids) - to_delete.extend(repre_ids) - - if to_delete: - self.dbcon.delete_many({"_id": {"$in": to_delete}}) - - # Probably deprecated - def _check_changeability(self, parent_id=None): - for entity in self.avalon_ents_by_parent_id[parent_id]: - mongo_id = str(entity["_id"]) - is_changeable = self._changeability_by_mongo_id.get(mongo_id) - if is_changeable is not None: - continue - - self._check_changeability(mongo_id) - is_changeable = True - for child in self.avalon_ents_by_parent_id[parent_id]: - if not self._changeability_by_mongo_id[str(child["_id"])]: - is_changeable = False - break - - if is_changeable is True: - is_changeable = (mongo_id in self.subsets_by_parent_id) - self._changeability_by_mongo_id[mongo_id] = is_changeable - - def update_entities(self): - """ - Runs changes converted to "$set" queries in bulk. 
- """ - mongo_changes_bulk = [] - for mongo_id, changes in self.updates.items(): - mongo_id = ObjectId(mongo_id) - is_project = mongo_id == self.avalon_project_id - change_data = from_dict_to_set(changes, is_project) - - filter = {"_id": mongo_id} - mongo_changes_bulk.append(UpdateOne(filter, change_data)) - if not mongo_changes_bulk: - # TODO LOG - return - self.dbcon.bulk_write(mongo_changes_bulk) - - def reload_parents(self, hierarchy_changing_ids): - parents_queue = collections.deque() - parents_queue.append((self.ft_project_id, [], False)) - while parents_queue: - ftrack_id, parent_parents, changed = parents_queue.popleft() - _parents = copy.deepcopy(parent_parents) - if ftrack_id not in hierarchy_changing_ids and not changed: - if ftrack_id != self.ft_project_id: - _parents.append(self.entities_dict[ftrack_id]["name"]) - for child_id in self.entities_dict[ftrack_id]["children"]: - parents_queue.append( - (child_id, _parents, changed) - ) - continue - - changed = True - parents = list(_parents) - self.entities_dict[ftrack_id][ - "final_entity"]["data"]["parents"] = parents - - _parents.append(self.entities_dict[ftrack_id]["name"]) - for child_id in self.entities_dict[ftrack_id]["children"]: - parents_queue.append( - (child_id, _parents, changed) - ) - - if ftrack_id in self.create_ftrack_ids: - mongo_id = self.ftrack_avalon_mapper[ftrack_id] - if "data" not in self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - self.updates[mongo_id]["data"]["parents"] = parents - - def prepare_project_changes(self): - ftrack_ent_dict = self.entities_dict[self.ft_project_id] - ftrack_entity = ftrack_ent_dict["entity"] - avalon_code = self.avalon_project["data"]["code"] - # TODO Is possible to sync if full name was changed? - # if ftrack_ent_dict["name"] != self.avalon_project["name"]: - # ftrack_entity["full_name"] = avalon_name - # self.entities_dict[self.ft_project_id]["name"] = avalon_name - # self.entities_dict[self.ft_project_id]["final_entity"][ - # "name" - # ] = avalon_name - - # TODO logging - # TODO report - # TODO May this happen? Is possible to change project code? 
- if ftrack_entity["name"] != avalon_code: - ftrack_entity["name"] = avalon_code - self.entities_dict[self.ft_project_id]["final_entity"]["data"][ - "code" - ] = avalon_code - self.session.commit() - sub_msg = ( - "Project code was changed back to \"{}\"".format(avalon_code) - ) - msg = ( - "It is not possible to change" - " project code after synchronization" - ) - self.report_items["warning"][msg] = sub_msg - self.log.warning(sub_msg) - - # Compare tasks from current project schema and previous project schema - final_doc_data = self.entities_dict[self.ft_project_id]["final_entity"] - final_doc_tasks = final_doc_data["config"].pop("tasks") - current_doc_tasks = self.avalon_project.get("config", {}).get("tasks") - # Update project's task types - if not current_doc_tasks: - update_tasks = True - else: - # Check if task types are same - update_tasks = False - for task_type in final_doc_tasks: - if task_type not in current_doc_tasks: - update_tasks = True - break - - # Update new task types - # - but keep data about existing types and only add new one - if update_tasks: - for task_type, type_data in current_doc_tasks.items(): - final_doc_tasks[task_type] = type_data - - changes = self.compare_dict(final_doc_data, self.avalon_project) - - # Put back tasks data to final entity object - final_doc_data["config"]["tasks"] = final_doc_tasks - - # Add tasks updates if tasks changed - if update_tasks: - if "config" not in changes: - changes["config"] = {} - changes["config"]["tasks"] = final_doc_tasks - return changes - - def compare_dict(self, dict_new, dict_old, _ignore_keys=[]): - """ - Recursively compares and list changes between dictionaries - 'dict_new' and 'dict_old'. - Keys in '_ignore_keys' are skipped and not compared. - Args: - dict_new (dictionary): - dict_old (dictionary): - _ignore_keys (list): - - Returns: - (dictionary) of new or updated keys and theirs values - """ - # _ignore_keys may be used for keys nested dict like"data.visualParent" - changes = {} - ignore_keys = [] - for key_val in _ignore_keys: - key_items = key_val.split(".") - if len(key_items) == 1: - ignore_keys.append(key_items[0]) - - for key, value in dict_new.items(): - if key in ignore_keys: - continue - - if key not in dict_old: - changes[key] = value - continue - - if isinstance(value, dict): - if not isinstance(dict_old[key], dict): - changes[key] = value - continue - - _new_ignore_keys = [] - for key_val in _ignore_keys: - key_items = key_val.split(".") - if len(key_items) <= 1: - continue - _new_ignore_keys.append(".".join(key_items[1:])) - - _changes = self.compare_dict( - value, dict_old[key], _new_ignore_keys - ) - if _changes: - changes[key] = _changes - continue - - if value != dict_old[key]: - changes[key] = value - - return changes - - def merge_dicts(self, dict_new, dict_old): - """ - Apply all new or updated keys from 'dict_new' on 'dict_old'. - Recursively. - Doesn't recognise that 'dict_new' doesn't contain some keys - anymore. 
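Editor's note: a self-contained restatement of the `compare_dict` semantics above, without the dotted ignore-key handling, plus a usage example with made-up values:

    def compare_dict(dict_new, dict_old, ignore_keys=()):
        changes = {}
        for key, value in dict_new.items():
            if key in ignore_keys:
                continue
            if key not in dict_old:
                changes[key] = value
            elif isinstance(value, dict) and isinstance(dict_old[key], dict):
                sub_changes = compare_dict(value, dict_old[key])
                if sub_changes:
                    changes[key] = sub_changes
            elif value != dict_old[key]:
                changes[key] = value
        return changes

    old_doc = {"name": "sh010", "data": {"fps": 24, "frameStart": 1001}}
    new_doc = {"name": "sh010", "data": {"fps": 25, "frameStart": 1001}}
    print(compare_dict(new_doc, old_doc))  # {'data': {'fps': 25}}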
- Args: - dict_new (dictionary): from Ftrack most likely - dict_old (dictionary): current in DB - - Returns: - (dictionary) of applied changes to original dictionary - """ - for key, value in dict_new.items(): - if key not in dict_old: - dict_old[key] = value - continue - - if isinstance(value, dict): - dict_old[key] = self.merge_dicts(value, dict_old[key]) - continue - - dict_old[key] = value - - return dict_old - - def delete_entities(self): - if not self.deleted_entities: - return - # Try to order so child is not processed before parent - deleted_entities = [] - _deleted_entities = [id for id in self.deleted_entities] - - while True: - if not _deleted_entities: - break - _ready = [] - for mongo_id in _deleted_entities: - ent = self.avalon_ents_by_id[mongo_id] - vis_par = ent["data"]["visualParent"] - if ( - vis_par is not None and - str(vis_par) in _deleted_entities - ): - continue - _ready.append(mongo_id) - - for id in _ready: - deleted_entities.append(id) - _deleted_entities.remove(id) - - delete_ids = [] - for mongo_id in deleted_entities: - # delete if they are deletable - if self.changeability_by_mongo_id[mongo_id]: - delete_ids.append(ObjectId(mongo_id)) - continue - - # check if any new created entity match same entity - # - name and parents must match - deleted_entity = self.avalon_ents_by_id[mongo_id] - name = deleted_entity["name"] - parents = deleted_entity["data"]["parents"] - similar_ent_id = None - for ftrack_id in self.create_ftrack_ids: - _ent_final = self.entities_dict[ftrack_id]["final_entity"] - if _ent_final["name"] != name: - continue - if _ent_final["data"]["parents"] != parents: - continue - - # If in create is "same" then we can "archive" current - # since will be unarchived in create method - similar_ent_id = ftrack_id - break - - # If similar entity(same name and parents) is in create - # entities list then just change from create to update - if similar_ent_id is not None: - self.create_ftrack_ids.remove(similar_ent_id) - self.update_ftrack_ids.append(similar_ent_id) - self.avalon_ftrack_mapper[mongo_id] = similar_ent_id - self.ftrack_avalon_mapper[similar_ent_id] = mongo_id - continue - - found_by_name_id = None - for ftrack_id, ent_dict in self.entities_dict.items(): - if not ent_dict.get("name"): - continue - - if name == ent_dict["name"]: - found_by_name_id = ftrack_id - break - - if found_by_name_id is not None: - # * THESE conditins are too complex to implement in first stage - # - probably not possible to solve if this happen - # if found_by_name_id in self.create_ftrack_ids: - # # reparent entity of the new one create? 
- # pass - # - # elif found_by_name_id in self.update_ftrack_ids: - # found_mongo_id = self.ftrack_avalon_mapper[found_by_name_id] - # - # ent_dict = self.entities_dict[found_by_name_id] - - # TODO report - CRITICAL entity with same name already exists - # in different hierarchy - can't recreate entity - continue - - _vis_parent = deleted_entity["data"]["visualParent"] - if _vis_parent is None: - _vis_parent = self.avalon_project_id - _vis_parent = str(_vis_parent) - ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent] - self.create_ftrack_ent_from_avalon_ent( - deleted_entity, ftrack_parent_id - ) - - filter = {"_id": {"$in": delete_ids}, "type": "asset"} - self.dbcon.update_many(filter, {"$set": {"type": "archived_asset"}}) - - def create_ftrack_ent_from_avalon_ent(self, av_entity, parent_id): - new_entity = None - parent_entity = self.entities_dict[parent_id]["entity"] - - _name = av_entity["name"] - _type = av_entity["data"].get("entityType") - # Check existence of object type - if _type and _type not in self.object_types_by_name: - _type = None - - if not _type: - _type = "Folder" - - self.log.debug(( - "Re-ceating deleted entity {} <{}>" - ).format(_name, _type)) - - new_entity = self.session.create(_type, { - "name": _name, - "parent": parent_entity - }) - self.session.commit() - - final_entity = {} - for k, v in av_entity.items(): - final_entity[k] = v - - if final_entity.get("type") != "asset": - final_entity["type"] = "asset" - - new_entity_id = new_entity["id"] - new_entity_data = { - "entity": new_entity, - "parent_id": parent_id, - "entity_type": _type.lower(), - "entity_type_orig": _type, - "name": _name, - "final_entity": final_entity - } - for k, v in new_entity_data.items(): - self.entities_dict[new_entity_id][k] = v - - p_chilren = self.entities_dict[parent_id]["children"] - if new_entity_id not in p_chilren: - self.entities_dict[parent_id]["children"].append(new_entity_id) - - cust_attr, _ = get_openpype_attr(self.session) - for _attr in cust_attr: - key = _attr["key"] - if key not in av_entity["data"]: - continue - - if key not in new_entity["custom_attributes"]: - continue - - value = av_entity["data"][key] - if not value: - continue - - new_entity["custom_attributes"][key] = value - - av_entity_id = str(av_entity["_id"]) - new_entity["custom_attributes"][CUST_ATTR_ID_KEY] = av_entity_id - - self.ftrack_avalon_mapper[new_entity_id] = av_entity_id - self.avalon_ftrack_mapper[av_entity_id] = new_entity_id - - self.session.commit() - - ent_path = self.get_ent_path(new_entity_id) - msg = ( - "Deleted entity was recreated because it or its children" - " contain published data" - ) - - self.report_items["info"][msg].append(ent_path) - - return new_entity_id - - def regex_duplicate_interface(self): - items = [] - if self.failed_regex or self.tasks_failed_regex: - subtitle = "Entity names contain prohibited symbols:" - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<p><i>NOTE: You can use Letters( a-Z )," - " Numbers( 0-9 ) and Underscore( _ )</i></p>
" - ) - }) - log_msgs = [] - for name, ids in self.failed_regex.items(): - error_title = { - "type": "label", - "value": "## {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<p>{}</p>'.format("<br>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ",".join(paths))) - - for name, ids in self.tasks_failed_regex.items(): - error_title = { - "type": "label", - "value": "## Task: {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - ent_path = "/".join([ent_path, name]) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<p>{}</p>'.format("<br>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ",".join(paths))) - - self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) - - if self.duplicates: - subtitle = "Duplicated entity names:" - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<p><i>NOTE: It is not allowed to use the same name" - " for multiple entities in the same project</i></p>
" - ) - }) - log_msgs = [] - for name, ids in self.duplicates.items(): - error_title = { - "type": "label", - "value": "## {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<p>{}</p>'.format("<br>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ", ".join(paths))) - - self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) - - return items - - def report(self): - items = [] - title = "Synchronization report ({}):".format(self.project_name) - - keys = ["error", "warning", "info"] - for key in keys: - subitems = [] - if key == "warning": - for _item in self.regex_duplicate_interface(): - subitems.append(_item) - - for msg, _items in self.report_items[key].items(): - if not _items: - continue - - subitems.append({ - "type": "label", - "value": "# {}".format(msg) - }) - if isinstance(_items, str): - _items = [_items] - subitems.append({ - "type": "label", - "value": '
<p>{}</p>'.format("<br>
".join(_items)) - }) - - if items and subitems: - items.append(self.report_splitter) - - items.extend(subitems) - - return { - "items": items, - "title": title, - "success": False, - "message": "Synchronization Finished" - } diff --git a/openpype/modules/ftrack/lib/constants.py b/openpype/modules/ftrack/lib/constants.py deleted file mode 100644 index 636dcfbc3d..0000000000 --- a/openpype/modules/ftrack/lib/constants.py +++ /dev/null @@ -1,20 +0,0 @@ -# Group name of custom attributes -CUST_ATTR_GROUP = "openpype" - -# name of Custom attribute that stores mongo_id from avalon db -CUST_ATTR_ID_KEY = "avalon_mongo_id" -# Auto sync of project -CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" - -# Applications custom attribute name -CUST_ATTR_APPLICATIONS = "applications" -# Environment tools custom attribute -CUST_ATTR_TOOLS = "tools_env" -# Intent custom attribute name -CUST_ATTR_INTENT = "intent" - -FPS_KEYS = { - "fps", - # For development purposes - "fps_string" -} diff --git a/openpype/modules/ftrack/lib/credentials.py b/openpype/modules/ftrack/lib/credentials.py deleted file mode 100644 index 2eb64254d1..0000000000 --- a/openpype/modules/ftrack/lib/credentials.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -import ftrack_api - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse - - -from openpype.lib import OpenPypeSecureRegistry - -USERNAME_KEY = "username" -API_KEY_KEY = "api_key" - - -def get_ftrack_hostname(ftrack_server=None): - if not ftrack_server: - ftrack_server = os.environ.get("FTRACK_SERVER") - - if not ftrack_server: - return None - - if "//" not in ftrack_server: - ftrack_server = "//" + ftrack_server - - return urlparse(ftrack_server).hostname - - -def _get_ftrack_secure_key(hostname, key): - """Secure item key for entered hostname.""" - return "/".join(("ftrack", hostname, key)) - - -def get_credentials(ftrack_server=None): - output = { - USERNAME_KEY: None, - API_KEY_KEY: None - } - hostname = get_ftrack_hostname(ftrack_server) - if not hostname: - return output - - username_name = _get_ftrack_secure_key(hostname, USERNAME_KEY) - api_key_name = _get_ftrack_secure_key(hostname, API_KEY_KEY) - - username_registry = OpenPypeSecureRegistry(username_name) - api_key_registry = OpenPypeSecureRegistry(api_key_name) - - output[USERNAME_KEY] = username_registry.get_item(USERNAME_KEY, None) - output[API_KEY_KEY] = api_key_registry.get_item(API_KEY_KEY, None) - - return output - - -def save_credentials(username, api_key, ftrack_server=None): - hostname = get_ftrack_hostname(ftrack_server) - username_name = _get_ftrack_secure_key(hostname, USERNAME_KEY) - api_key_name = _get_ftrack_secure_key(hostname, API_KEY_KEY) - - # Clear credentials - clear_credentials(ftrack_server) - - username_registry = OpenPypeSecureRegistry(username_name) - api_key_registry = OpenPypeSecureRegistry(api_key_name) - - username_registry.set_item(USERNAME_KEY, username) - api_key_registry.set_item(API_KEY_KEY, api_key) - - -def clear_credentials(ftrack_server=None): - hostname = get_ftrack_hostname(ftrack_server) - username_name = _get_ftrack_secure_key(hostname, USERNAME_KEY) - api_key_name = _get_ftrack_secure_key(hostname, API_KEY_KEY) - - username_registry = OpenPypeSecureRegistry(username_name) - api_key_registry = OpenPypeSecureRegistry(api_key_name) - - current_username = username_registry.get_item(USERNAME_KEY, None) - current_api_key = api_key_registry.get_item(API_KEY_KEY, None) - - if current_username is not None: - username_registry.delete_item(USERNAME_KEY) - - 
-    if current_api_key is not None:
-        api_key_registry.delete_item(API_KEY_KEY)
-
-
-def check_credentials(username, api_key, ftrack_server=None):
-    if not ftrack_server:
-        ftrack_server = os.environ.get("FTRACK_SERVER")
-
-    if not ftrack_server or not username or not api_key:
-        return False
-
-    user_exists = False
-    try:
-        session = ftrack_api.Session(
-            server_url=ftrack_server,
-            api_key=api_key,
-            api_user=username
-        )
-        # Validate that the username actually exists
-        # ('.first()' is needed: a bare query result is never None)
-        user = session.query(
-            "User where username is \"{}\"".format(username)
-        ).first()
-        user_exists = user is not None
-        session.close()
-
-    except Exception:
-        pass
-    return user_exists
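For context, these helpers chain together: validate a username/API key pair against the server, persist it in the secure registry, and read it back later. A minimal sketch, assuming the module is importable as shown and using placeholder server and credential values:

# Minimal sketch: the server URL, username and API key are placeholders,
# and the import path is assumed from the file location.
from openpype.modules.ftrack.lib import credentials

SERVER = "https://studio.ftrackapp.com"

if credentials.check_credentials("jane.doe", "api-key-token", SERVER):
    # Persist the validated pair, keyed by the server hostname.
    credentials.save_credentials("jane.doe", "api-key-token", SERVER)

stored = credentials.get_credentials(SERVER)
print(stored["username"], stored["api_key"] is not None)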
"entity_type", - "object_type_id", - "is_hierarchical", - "default" - ] - # TODO remove deprecated "pype" group from query - cust_attrs_query = ( - "select {}" - " from CustomAttributeConfiguration" - # Kept `pype` for Backwards Compatibility - " where group.name in (\"pype\", \"ayon\", \"{}\")" - ).format(", ".join(query_keys), CUST_ATTR_GROUP) - all_avalon_attr = session.query(cust_attrs_query).all() - for cust_attr in all_avalon_attr: - if split_hierarchical and cust_attr["is_hierarchical"]: - hier_custom_attributes.append(cust_attr) - continue - - custom_attributes.append(cust_attr) - - if split_hierarchical: - # return tuple - return custom_attributes, hier_custom_attributes - - return custom_attributes - - -def join_query_keys(keys): - """Helper to join keys to query.""" - return ",".join(["\"{}\"".format(key) for key in keys]) - - -def query_custom_attributes( - session, conf_ids, entity_ids, only_set_values=False -): - """Query custom attribute values from ftrack database. - - Using ftrack call method result may differ based on used table name and - version of ftrack server. - - For hierarchical attributes you shou always use `only_set_values=True` - otherwise result will be default value of custom attribute and it would not - be possible to differentiate if value is set on entity or default value is - used. - - Args: - session(ftrack_api.Session): Connected ftrack session. - conf_id(list, set, tuple): Configuration(attribute) ids which are - queried. - entity_ids(list, set, tuple): Entity ids for which are values queried. - only_set_values(bool): Entities that don't have explicitly set - value won't return a value. If is set to False then default custom - attribute value is returned if value is not set. - """ - output = [] - # Just skip - if not conf_ids or not entity_ids: - return output - - if only_set_values: - table_name = "CustomAttributeValue" - else: - table_name = "ContextCustomAttributeValue" - - # Prepare values to query - attributes_joined = join_query_keys(conf_ids) - attributes_len = len(conf_ids) - - # Query values in chunks - chunk_size = int(5000 / attributes_len) - # Make sure entity_ids is `list` for chunk selection - entity_ids = list(entity_ids) - for idx in range(0, len(entity_ids), chunk_size): - entity_ids_joined = join_query_keys( - entity_ids[idx:idx + chunk_size] - ) - output.extend( - session.query( - ( - "select value, entity_id, configuration_id from {}" - " where entity_id in ({}) and configuration_id in ({})" - ).format( - table_name, - entity_ids_joined, - attributes_joined - ) - ).all() - ) - return output diff --git a/openpype/modules/ftrack/lib/ftrack_action_handler.py b/openpype/modules/ftrack/lib/ftrack_action_handler.py deleted file mode 100644 index 1be4353b26..0000000000 --- a/openpype/modules/ftrack/lib/ftrack_action_handler.py +++ /dev/null @@ -1,446 +0,0 @@ -import os -from .ftrack_base_handler import BaseHandler - - -def statics_icon(*icon_statics_file_parts): - statics_server = os.environ.get("OPENPYPE_STATICS_SERVER") - if not statics_server: - return None - return "/".join((statics_server, *icon_statics_file_parts)) - - -class BaseAction(BaseHandler): - '''Custom Action base class - - `label` a descriptive string identifying your action. - - `varaint` To group actions together, give them the same - label and specify a unique variant per action. - - `identifier` a unique identifier for your action. 
diff --git a/openpype/modules/ftrack/lib/ftrack_action_handler.py b/openpype/modules/ftrack/lib/ftrack_action_handler.py
deleted file mode 100644
index 1be4353b26..0000000000
--- a/openpype/modules/ftrack/lib/ftrack_action_handler.py
+++ /dev/null
@@ -1,446 +0,0 @@
-import os
-from .ftrack_base_handler import BaseHandler
-
-
-def statics_icon(*icon_statics_file_parts):
-    statics_server = os.environ.get("OPENPYPE_STATICS_SERVER")
-    if not statics_server:
-        return None
-    return "/".join((statics_server, *icon_statics_file_parts))
-
-
-class BaseAction(BaseHandler):
-    '''Custom Action base class
-
-    `label` a descriptive string identifying your action.
-
-    `variant` To group actions together, give them the same
-    label and specify a unique variant per action.
-
-    `identifier` a unique identifier for your action.
-
-    `description` a verbose descriptive text for your action
-
-    '''
-    label = None
-    variant = None
-    identifier = None
-    description = None
-    icon = None
-    type = 'Action'
-
-    _discover_identifier = None
-    _launch_identifier = None
-
-    settings_frack_subkey = "user_handlers"
-    settings_enabled_key = "enabled"
-
-    def __init__(self, session):
-        '''Expects a ftrack_api.Session instance'''
-        if self.label is None:
-            raise ValueError('Action missing label.')
-
-        if self.identifier is None:
-            raise ValueError('Action missing identifier.')
-
-        super().__init__(session)
-
-    @property
-    def discover_identifier(self):
-        if self._discover_identifier is None:
-            self._discover_identifier = "{}.{}".format(
-                self.identifier, self.process_identifier()
-            )
-        return self._discover_identifier
-
-    @property
-    def launch_identifier(self):
-        if self._launch_identifier is None:
-            self._launch_identifier = "{}.{}".format(
-                self.identifier, self.process_identifier()
-            )
-        return self._launch_identifier
-
-    def register(self):
-        '''
-        Registers the action, subscribing to the discover and launch topics.
-
-        The highest priority event will show last.
-        '''
-        self.session.event_hub.subscribe(
-            'topic=ftrack.action.discover and source.user.username={0}'.format(
-                self.session.api_user
-            ),
-            self._discover,
-            priority=self.priority
-        )
-
-        launch_subscription = (
-            'topic=ftrack.action.launch'
-            ' and data.actionIdentifier={0}'
-            ' and source.user.username={1}'
-        ).format(
-            self.launch_identifier,
-            self.session.api_user
-        )
-        self.session.event_hub.subscribe(
-            launch_subscription,
-            self._launch
-        )
-
-    def _discover(self, event):
-        entities = self._translate_event(event)
-        if not entities:
-            return
-
-        accepts = self.discover(self.session, entities, event)
-        if not accepts:
-            return
-
-        self.log.debug(u'Discovering action with selection: {0}'.format(
-            event['data'].get('selection', [])
-        ))
-
-        return {
-            'items': [{
-                'label': self.label,
-                'variant': self.variant,
-                'description': self.description,
-                'actionIdentifier': self.discover_identifier,
-                'icon': self.icon,
-            }]
-        }
-
-    def discover(self, session, entities, event):
-        '''Return true if we can handle the selected entities.
-
-        *session* is a `ftrack_api.Session` instance
-
-        *entities* is a list of tuples each containing the entity type and
-        the entity id. If the entity is hierarchical you will always get
-        the entity type TypedContext; once retrieved through a get
-        operation you will have the "real" entity type, e.g. Shot,
-        Sequence or Asset Build.
-
-        *event* the unmodified original event
-
-        '''
-
-        return False
-
-    def _interface(self, session, entities, event):
-        interface = self.interface(session, entities, event)
-        if not interface:
-            return
-
-        if isinstance(interface, (tuple, list)):
-            return {"items": interface}
-
-        if isinstance(interface, dict):
-            if (
-                "items" in interface
-                or ("success" in interface and "message" in interface)
-            ):
-                return interface
-
-            raise ValueError((
-                "Invalid interface output expected key: \"items\" or keys:"
-                " \"success\" and \"message\". Got: \"{}\""
-            ).format(str(interface)))
-
-        raise ValueError(
-            "Invalid interface output type \"{}\"".format(
-                str(type(interface))
-            )
-        )
-
-    def interface(self, session, entities, event):
-        '''Return an interface if applicable, otherwise None.
-
-        *session* is a `ftrack_api.Session` instance
-
-        *entities* is a list of tuples each containing the entity type and
-        the entity id.
-        If the entity is hierarchical you will always get the entity type
-        TypedContext; once retrieved through a get operation you will have
-        the "real" entity type, e.g. Shot, Sequence or Asset Build.
-
-        *event* the unmodified original event
-        '''
-        return None
-
-    def _launch(self, event):
-        entities = self._translate_event(event)
-        if not entities:
-            return
-
-        preactions_launched = self._handle_preactions(self.session, event)
-        if preactions_launched is False:
-            return
-
-        interface = self._interface(self.session, entities, event)
-        if interface:
-            return interface
-
-        response = self.launch(self.session, entities, event)
-
-        return self._handle_result(response)
-
-    def _handle_result(self, result):
-        '''Validate the returned result from the action callback'''
-        if isinstance(result, bool):
-            if result is True:
-                msg = 'Action {0} finished.'
-            else:
-                msg = 'Action {0} failed.'
-
-            return {
-                'success': result,
-                'message': msg.format(self.label)
-            }
-
-        if isinstance(result, dict):
-            if 'items' in result:
-                if not isinstance(result['items'], list):
-                    raise ValueError('Invalid items format, must be list!')
-
-            else:
-                for key in ('success', 'message'):
-                    if key not in result:
-                        raise KeyError(
-                            "Missing required key: {0}.".format(key)
-                        )
-            return result
-
-        self.log.warning((
-            'Invalid result type \"{}\", must be bool or dictionary!'
-        ).format(str(type(result))))
-
-        return result
-
-    @staticmethod
-    def roles_check(settings_roles, user_roles, default=True):
-        """Compare role names from settings with the user's roles.
-
-        Args:
-            settings_roles(list): List of role names from settings.
-            user_roles(list): User's lowered role names.
-            default(bool): Value to return when `settings_roles` is an
-                empty list.
-
-        Returns:
-            bool: `True` if the user has at least one role from settings,
-                or `default` if `settings_roles` is empty.
-        """
-        if not settings_roles:
-            return default
-
-        user_roles = {
-            role_name.lower()
-            for role_name in user_roles
-        }
-        for role_name in settings_roles:
-            if role_name.lower() in user_roles:
-                return True
-        return False
-
-    @classmethod
-    def get_user_entity_from_event(cls, session, event):
-        """Query user entity from event."""
-        not_set = object()
-
-        # Check if user is already stored in event data
-        user_entity = event["data"].get("user_entity", not_set)
-        if user_entity is not_set:
-            # Query user entity from event
-            user_info = event.get("source", {}).get("user", {})
-            user_id = user_info.get("id")
-            username = user_info.get("username")
-            if user_id:
-                user_entity = session.query(
-                    "User where id is {}".format(user_id)
-                ).first()
-            if not user_entity and username:
-                user_entity = session.query(
-                    "User where username is {}".format(username)
-                ).first()
-            event["data"]["user_entity"] = user_entity
-
-        return user_entity
-
-    @classmethod
-    def get_user_roles_from_event(cls, session, event, lower=True):
-        """Get user roles based on data in event.
-
-        Args:
-            session (ftrack_api.Session): Prepared ftrack session.
-            event (ftrack_api.event.Event): Event which is processed.
-            lower (Optional[bool]): Lowercase the role names. Default 'True'.
- """ - - not_set = object() - - user_roles = event["data"].get("user_roles", not_set) - if user_roles is not_set: - user_roles = [] - user_entity = cls.get_user_entity_from_event(session, event) - for role in user_entity["user_security_roles"]: - role_name = role["security_role"]["name"] - if lower: - role_name = role_name.lower() - user_roles.append(role_name) - event["data"]["user_roles"] = user_roles - return user_roles - - def get_project_name_from_event(self, session, event, entities): - """Load or query and fill project entity from/to event data. - - Project data are stored by ftrack id because in most cases it is - easier to access project id than project name. - - Args: - session (ftrack_api.Session): Current session. - event (ftrack_api.Event): Processed event by session. - entities (list): Ftrack entities of selection. - """ - - # Try to get project entity from event - project_name = event["data"].get("project_name") - if not project_name: - project_entity = self.get_project_from_entity( - entities[0], session - ) - project_name = project_entity["full_name"] - - event["data"]["project_name"] = project_name - return project_name - - def get_ftrack_settings(self, session, event, entities): - project_name = self.get_project_name_from_event( - session, event, entities - ) - project_settings = self.get_project_settings_from_event( - event, project_name - ) - return project_settings["ftrack"] - - def valid_roles(self, session, entities, event): - """Validate user roles by settings. - - Method requires to have set `settings_key` attribute. - """ - ftrack_settings = self.get_ftrack_settings(session, event, entities) - settings = ( - ftrack_settings[self.settings_frack_subkey][self.settings_key] - ) - if self.settings_enabled_key: - if not settings.get(self.settings_enabled_key, True): - return False - - user_role_list = self.get_user_roles_from_event( - session, event, lower=False) - if not self.roles_check(settings.get("role_list"), user_role_list): - return False - return True - - -class LocalAction(BaseAction): - """Action that warn user when more Processes with same action are running. - - Action is launched all the time but if id does not match id of current - instanace then message is shown to user. - - Handy for actions where matters if is executed on specific machine. 
- """ - _full_launch_identifier = None - - @property - def discover_identifier(self): - if self._discover_identifier is None: - self._discover_identifier = "{}.{}".format( - self.identifier, self.process_identifier() - ) - return self._discover_identifier - - @property - def launch_identifier(self): - """Catch all topics with same identifier.""" - if self._launch_identifier is None: - self._launch_identifier = "{}.*".format(self.identifier) - return self._launch_identifier - - @property - def full_launch_identifier(self): - """Catch all topics with same identifier.""" - if self._full_launch_identifier is None: - self._full_launch_identifier = "{}.{}".format( - self.identifier, self.process_identifier() - ) - return self._full_launch_identifier - - def _discover(self, event): - entities = self._translate_event(event) - if not entities: - return - - accepts = self.discover(self.session, entities, event) - if not accepts: - return - - self.log.debug("Discovering action with selection: {0}".format( - event["data"].get("selection", []) - )) - - return { - "items": [{ - "label": self.label, - "variant": self.variant, - "description": self.description, - "actionIdentifier": self.discover_identifier, - "icon": self.icon, - }] - } - - def _launch(self, event): - event_identifier = event["data"]["actionIdentifier"] - # Check if identifier is same - # - show message that acion may not be triggered on this machine - if event_identifier != self.full_launch_identifier: - return { - "success": False, - "message": ( - "There are running more OpenPype processes" - " where this action could be launched." - ) - } - return super(LocalAction, self)._launch(event) - - -class ServerAction(BaseAction): - """Action class meant to be used on event server. - - Unlike the `BaseAction` roles are not checked on register but on discover. - For the same reason register is modified to not filter topics by username. - """ - - settings_frack_subkey = "events" - - @property - def discover_identifier(self): - return self.identifier - - @property - def launch_identifier(self): - return self.identifier - - def register(self): - """Register subcription to Ftrack event hub.""" - self.session.event_hub.subscribe( - "topic=ftrack.action.discover", - self._discover, - priority=self.priority - ) - - launch_subscription = ( - "topic=ftrack.action.launch and data.actionIdentifier={0}" - ).format(self.launch_identifier) - self.session.event_hub.subscribe(launch_subscription, self._launch) diff --git a/openpype/modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/ftrack/lib/ftrack_base_handler.py deleted file mode 100644 index 55400c22ab..0000000000 --- a/openpype/modules/ftrack/lib/ftrack_base_handler.py +++ /dev/null @@ -1,723 +0,0 @@ -import os -import tempfile -import json -import functools -import uuid -import datetime -import traceback -import time -from openpype.lib import Logger -from openpype.settings import get_project_settings - -import ftrack_api -from openpype_modules.ftrack import ftrack_server - - -class MissingPermision(Exception): - def __init__(self, message=None): - if message is None: - message = 'Ftrack' - super().__init__(message) - - -class PreregisterException(Exception): - def __init__(self, message=None): - if not message: - message = "Pre-registration conditions were not met" - super().__init__(message) - - -class BaseHandler(object): - '''Custom Action base class - -